file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes; values below) |
---|---|---|---|---|
graphics.rs | use crate::mthelper::SharedRef;
use bedrock as br;
use br::{
CommandBuffer, CommandPool, Device, Instance, InstanceChild, PhysicalDevice, Queue,
SubmissionBatch,
};
use log::{debug, info, warn};
use std::ops::Deref;
pub type InstanceObject = SharedRef<br::InstanceObject>;
pub type DeviceObject = SharedRef<br::DeviceObject<InstanceObject>>;
/// Queue object with family index
pub struct QueueSet<Device: br::Device> {
pub(crate) q: parking_lot::Mutex<br::QueueObject<Device>>,
pub(crate) family: u32,
}
mod command_bundle;
pub use self::command_bundle::*;
#[cfg(feature = "mt")]
mod async_fence_driver;
#[cfg(feature = "mt")]
pub use self::async_fence_driver::*;
#[derive(Debug)]
pub enum GraphicsInitializationError {
LayerEnumerationFailed(br::VkResultBox),
VulkanError(br::VkResultBox),
NoPhysicalDevices,
NoSuitableGraphicsQueue,
}
impl From<br::VkResultBox> for GraphicsInitializationError {
fn from(value: br::VkResultBox) -> Self {
Self::VulkanError(value)
}
}
impl std::fmt::Display for GraphicsInitializationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::LayerEnumerationFailed(r) => write!(f, "vk layer enumeration failed: {r}"),
Self::VulkanError(r) => std::fmt::Display::fmt(r, f),
Self::NoPhysicalDevices => write!(f, "no physical devices available on this machine"),
Self::NoSuitableGraphicsQueue => {
write!(f, "no suitable graphics queue found on device")
}
}
}
}
impl std::error::Error for GraphicsInitializationError {}
/// Graphics manager
pub struct Graphics {
pub(crate) adapter: br::PhysicalDeviceObject<InstanceObject>,
pub(crate) device: DeviceObject,
pub(crate) graphics_queue: QueueSet<DeviceObject>,
cp_onetime_submit: br::CommandPoolObject<DeviceObject>,
pub memory_type_manager: MemoryTypeManager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread<DeviceObject>,
#[cfg(feature = "debug")]
_debug_instance: br::DebugUtilsMessengerObject<InstanceObject>,
}
impl Graphics {
pub(crate) fn new(
app_name: &str,
app_version: (u32, u32, u32),
instance_extensions: Vec<&str>,
device_extensions: Vec<&str>,
features: br::vk::VkPhysicalDeviceFeatures,
) -> Result<Self, GraphicsInitializationError> {
info!("Supported Layers: ");
let mut validation_layer_available = false;
#[cfg(debug_assertions)]
for l in br::enumerate_layer_properties()
.map_err(GraphicsInitializationError::LayerEnumerationFailed)?
{
let name_str = l
.layerName
.as_cstr()
.expect("Failed to decode")
.to_str()
.expect("invalid sequence in layer name");
info!(
"* {name_str} :: {}/{}",
l.specVersion, l.implementationVersion
);
if name_str == "VK_LAYER_KHRONOS_validation" {
validation_layer_available = true;
}
}
let mut ib =
br::InstanceBuilder::new(app_name, app_version, "Interlude2:Peridot", (0, 1, 0));
ib.add_extensions(instance_extensions);
#[cfg(debug_assertions)]
ib.add_extension("VK_EXT_debug_report");
if validation_layer_available {
ib.add_layer("VK_LAYER_KHRONOS_validation");
} else {
warn!("Validation Layer is not found!");
}
#[cfg(feature = "debug")]
{
ib.add_extension("VK_EXT_debug_utils");
debug!("Debug reporting activated");
}
let instance = SharedRef::new(ib.create()?);
#[cfg(feature = "debug")]
let _debug_instance = br::DebugUtilsMessengerCreateInfo::new(crate::debug::debug_utils_out)
.filter_severity(br::DebugUtilsMessageSeverityFlags::ERROR.and_warning())
.create(instance.clone())?;
let adapter = instance
.iter_physical_devices()?
.next()
.ok_or(GraphicsInitializationError::NoPhysicalDevices)?;
let memory_type_manager = MemoryTypeManager::new(&adapter);
MemoryTypeManager::diagnose_heaps(&adapter);
memory_type_manager.diagnose_types();
let gqf_index = adapter
.queue_family_properties()
.find_matching_index(br::QueueFlags::GRAPHICS)
.ok_or(GraphicsInitializationError::NoSuitableGraphicsQueue)?;
let qci = br::DeviceQueueCreateInfo(gqf_index, vec![0.0]);
let device = {
let mut db = br::DeviceBuilder::new(&adapter);
db.add_extensions(device_extensions).add_queue(qci);
if validation_layer_available {
db.add_layer("VK_LAYER_KHRONOS_validation");
}
*db.mod_features() = features;
SharedRef::new(db.create()?.clone_parent())
};
Ok(Self {
cp_onetime_submit: device.clone().new_command_pool(gqf_index, true, false)?,
graphics_queue: QueueSet {
q: parking_lot::Mutex::new(device.clone().queue(gqf_index, 0)),
family: gqf_index,
},
adapter: adapter.clone_parent(),
device,
memory_type_manager,
#[cfg(feature = "mt")]
fence_reactor: FenceReactorThread::new(),
#[cfg(feature = "debug")]
_debug_instance,
})
}
/// Submits any commands as transient commands.
pub fn submit_commands(
&mut self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<()> {
let mut cb = LocalCommandBundle(
self.cp_onetime_submit.alloc(1, true)?,
&mut self.cp_onetime_submit,
);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.get_mut().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
None::<&mut br::FenceObject<DeviceObject>>,
)?;
self.graphics_queue.q.get_mut().wait()
}
pub fn submit_buffered_commands(
&mut self,
batches: &[impl br::SubmissionBatch],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue.q.get_mut().submit(batches, Some(fence))
}
pub fn submit_buffered_commands_raw(
&mut self,
batches: &[br::vk::VkSubmitInfo],
fence: &mut (impl br::Fence + br::VkHandleMut),
) -> br::Result<()> {
self.graphics_queue
.q
.get_mut()
.submit_raw(batches, Some(fence))
}
/// Submits any commands as transient commands.
/// ## Note
/// Unlike other futures, commands are submitted **immediately**(even if not awaiting the returned future).
#[cfg(feature = "mt")]
pub fn submit_commands_async<'s>(
&'s self,
generator: impl FnOnce(
br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::CmdRecord<br::CommandBufferObject<DeviceObject>>,
) -> br::Result<impl std::future::Future<Output = br::Result<()>> + 's> {
let mut fence = std::sync::Arc::new(self.device.clone().new_fence(false)?);
let mut pool = self.device.clone().new_command_pool(
self.graphics_queue_family_index(),
true,
false,
)?;
let mut cb = CommandBundle(pool.alloc(1, true)?, pool);
generator(unsafe { cb[0].begin_once()? }).end()?;
self.graphics_queue.q.lock().submit(
&[br::EmptySubmissionBatch.with_command_buffers(&cb[..])],
Some(unsafe { std::sync::Arc::get_mut(&mut fence).unwrap_unchecked() }),
)?;
Ok(async move {
self.await_fence(fence).await?;
// keep alive command buffers while execution
drop(cb);
Ok(())
})
}
/// Awaits fence on background thread
#[cfg(feature = "mt")]
pub const fn await_fence<'s>(
&'s self,
fence: std::sync::Arc<
impl br::Fence<ConcreteDevice = DeviceObject> + Send + Sync + 'static,
>,
) -> impl std::future::Future<Output = br::Result<()>> + 's {
FenceWaitFuture {
reactor: &self.fence_reactor,
object: fence,
registered: false,
}
}
pub fn instance(&self) -> &InstanceObject {
self.device.instance()
}
pub const fn adapter(&self) -> &br::PhysicalDeviceObject<InstanceObject> {
&self.adapter
}
pub const fn device(&self) -> &DeviceObject {
&self.device
}
pub const fn graphics_queue_family_index(&self) -> u32 {
self.graphics_queue.family
}
}
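// Usage sketch (added for illustration, not part of the original file): the
// identity closure is the smallest valid generator for `submit_commands`; real
// callers would record transient work (e.g. staging-buffer copies) on `rec`
// before returning it. Guarded with cfg(any()) so it is never compiled.
#[cfg(any())]
fn submit_commands_example(g: &mut Graphics) -> br::Result<()> {
    // `submit_commands` allocates a one-time command buffer, runs the closure,
    // submits the result on the graphics queue, and waits for it to finish.
    g.submit_commands(|rec| rec)
}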
impl Deref for Graphics {
type Target = DeviceObject;
fn deref(&self) -> &DeviceObject {
&self.device
}
}
#[derive(Clone)]
pub struct MemoryType(u32, br::vk::VkMemoryType);
impl MemoryType {
pub const fn index(&self) -> u32 {
self.0
}
pub const fn corresponding_mask(&self) -> u32 {
0x01 << self.0
}
pub const fn has_covered_by_mask(&self, mask: u32) -> bool {
(mask & self.corresponding_mask()) != 0
}
pub const fn has_property_flags(&self, other: br::MemoryPropertyFlags) -> bool {
(self.1.propertyFlags & other.bits()) != 0
}
pub const fn is_device_local(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::DEVICE_LOCAL)
}
pub const fn visible_from_host(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_VISIBLE)
}
pub const fn is_host_coherent(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_COHERENT)
}
pub const fn is_host_cached(&self) -> bool {
self.has_property_flags(br::MemoryPropertyFlags::HOST_CACHED)
}
}
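// Worked example (illustrative sketch, added): `corresponding_mask` maps a
// type index to the single-bit mask Vulkan reports in
// `VkMemoryRequirements::memoryTypeBits`, and `has_covered_by_mask` tests
// membership in such a mask. Assumes bedrock's raw `VkMemoryType` fields are
// publicly constructible.
#[cfg(test)]
mod memory_type_mask_example {
    use super::*;

    #[test]
    fn mask_membership() {
        let mt = MemoryType(3, br::vk::VkMemoryType { propertyFlags: 0, heapIndex: 0 });
        assert_eq!(mt.corresponding_mask(), 0b1000);
        assert!(mt.has_covered_by_mask(0b1010)); // bit 3 set
        assert!(!mt.has_covered_by_mask(0b0010)); // bit 3 clear
    }
}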
impl std::fmt::Debug for MemoryType {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut flags = Vec::with_capacity(6);
if self.is_device_local() |
if self.visible_from_host() {
flags.push("HOST VISIBLE");
}
if self.is_host_cached() {
flags.push("CACHED");
}
if self.is_host_coherent() {
flags.push("COHERENT");
}
if (self.1.propertyFlags & br::vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0 {
flags.push("PROTECTED");
}
if self.has_property_flags(br::MemoryPropertyFlags::LAZILY_ALLOCATED) {
flags.push("LAZILY ALLOCATED");
}
write!(
fmt,
"{}: [{}] in heap #{}",
self.index(),
flags.join("/"),
self.1.heapIndex
)
}
}
pub struct MemoryTypeManager {
device_memory_types: Vec<MemoryType>,
host_memory_types: Vec<MemoryType>,
}
impl MemoryTypeManager {
fn new(pd: &impl br::PhysicalDevice) -> Self {
let mem = pd.memory_properties();
let (mut device_memory_types, mut host_memory_types) = (Vec::new(), Vec::new());
for mt in mem
.types()
.enumerate()
.map(|(n, mt)| MemoryType(n as _, mt.clone()))
{
if mt.is_device_local() {
device_memory_types.push(mt.clone());
}
if mt.visible_from_host() {
host_memory_types.push(mt.clone());
}
}
Self {
device_memory_types,
host_memory_types,
}
}
pub fn exact_host_visible_index(
&self,
mask: u32,
required: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask) && mt.has_property_flags(required))
}
pub fn host_visible_index(
&self,
mask: u32,
preference: br::MemoryPropertyFlags,
) -> Option<&MemoryType> {
self.exact_host_visible_index(mask, preference).or_else(|| {
self.host_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
})
}
pub fn device_local_index(&self, mask: u32) -> Option<&MemoryType> {
self.device_memory_types
.iter()
.find(|mt| mt.has_covered_by_mask(mask))
}
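    // Selection sketch (illustrative, added): given `VkMemoryRequirements`
    // `reqs` for a staging buffer, a caller might prefer a coherent
    // host-visible type and fall back to any host-visible one covered by the
    // mask:
    //   let mt = memory_type_manager
    //       .host_visible_index(reqs.memoryTypeBits, br::MemoryPropertyFlags::HOST_COHERENT)
    //       .expect("no host-visible memory type");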
fn diagnose_heaps(p: &impl br::PhysicalDevice) {
info!("Memory Heaps: ");
for (n, h) in p.memory_properties().heaps().enumerate() {
let (mut nb, mut unit) = (h.size as f32, "bytes");
if nb >= 10000.0 {
nb /= 1024.0;
unit = "KB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "MB";
}
if nb >= 10000.0 {
nb /= 1024.0;
unit = "GB";
}
let is_device_local = (h.flags & br::vk::VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
info!(
" #{n}: {nb} {unit} {}",
if is_device_local {
"[DEVICE_LOCAL]"
} else {
""
}
);
}
}
fn diagnose_types(&self) {
info!("Device Memory Types: ");
for mt in &self.device_memory_types {
info!(" {:?}", mt);
}
info!("Host Visible Memory Types: ");
for mt in &self.host_memory_types {
info!(" {:?}", mt);
}
}
}
| {
flags.push("DEVICE LOCAL");
} | conditional_block |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
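// Usage sketch (added for illustration, not part of the original file):
// renderers typically walk chunks center-out so early results appear where the
// viewer is looking; the per-pixel body here is a hypothetical stand-in.
// Guarded with cfg(any()) so it is never compiled.
#[cfg(any())]
fn render_center_out(block: ScreenBlock) {
    for chunk in block.spiral_chunks(32) {
        for _point in chunk.internal_points() {
            // shade the pixel at `_point` (hypothetical per-pixel work)
        }
    }
}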
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment length).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (we discard new_cursor) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (we skipped the part
// outside of the screen), but we will come back through this branch of the
// iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
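// Worked example (illustrative, added): `divide_round_up` is a per-axis
// ceiling division. A 100x64 block with chunk size 32 yields 4x2 chunks:
// 100/32 = 3 remainder 4, so it rounds up to 4, while 64/32 = 2 exactly and
// stays 2. This is what lets `spiral_chunks` clip the bottom/right chunks
// instead of dropping the remainder.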
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) {
check_pixel_iterator_covers_block(block.internal_points(), *block);
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust; it only checks that we are going through the picture in
/// squares of increasing size. The order, however, is just a visual feature, and if it looks
/// good enough, then it's good enough.
#[proptest]
fn spiral_iterator_is_spiral(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() |
}
/// Tests that the spiral iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest]
#[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
}
| {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
} | conditional_block |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment length).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (we discard new_cursor) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (we skipped the part
// outside of the screen), but we will come back through this branch of the
// iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) {
check_pixel_iterator_covers_block(block.internal_points(), *block);
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust; it only checks that we are going through the picture in
/// squares of increasing size. The order, however, is just a visual feature, and if it looks
/// good enough, then it's good enough.
#[proptest]
fn | (block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
}
}
/// Tests that the spiral iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest]
#[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
}
| spiral_iterator_is_spiral | identifier_name |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment length).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (we discard new_cursor) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (we skipped the part
// outside of the screen), but we will come back through this branch of the
// iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) |
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust; it only checks that we are going through the picture in
/// squares of increasing size. The order, however, is just a visual feature, and if it looks
/// good enough, then it's good enough.
#[proptest]
fn spiral_iterator_is_spiral(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
}
}
/// Tests that the spiral iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest]
#[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
}
| {
check_pixel_iterator_covers_block(block.internal_points(), *block);
} | identifier_body |
screen_block.rs | use euclid::*;
use std::cmp;
use std::iter::FusedIterator;
use crate::geometry::*;
/// Coordinates of chunks in the image. The scaling factor is potentially different for every chunk
/// iterator.
struct ChunkSpace;
pub trait ScreenBlockExt {
fn internal_points(&self) -> InternalPoints;
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks;
}
impl ScreenBlockExt for ScreenBlock {
/// Create an iterator over coordinates (x, y) pairs inside the block,
/// in C order (x changes first, then y)
fn internal_points(&self) -> InternalPoints {
if self.is_empty_or_negative() {
InternalPoints::empty()
} else {
InternalPoints {
min_x: self.min.x,
max: self.max,
cursor: self.min,
}
}
}
/// Create an iterator over sub blocks in (roughly) spiral order, starting in the middle of the block.
/// Chunks are chunk_size * chunk_size large, except on the bottom and right side of the
/// block, where they may be clipped if chunk size doesn't evenly divide block size.
/// Chunk size must be larger than zero. May panic if chunk size is small (1 or 2) and block
/// size is very large.
fn spiral_chunks(&self, chunk_size: u32) -> SpiralChunks {
assert!(chunk_size > 0);
if self.is_empty_or_negative() {
return SpiralChunks::empty();
}
let chunk_scale = Scale::new(chunk_size);
let size = divide_round_up(self.size(), chunk_scale).cast::<i32>();
let cursor = Box2D::from(size).center();
let dx = 2 * cursor.y - size.height;
debug_assert!(dx == 0 || dx == -1);
let direction = Vector2D::new(dx, -1 - dx);
SpiralChunks {
block: *self,
chunk_scale,
size,
cursor,
direction,
segment: 2,
segment_remaining: 1,
remaining: size.area() as u32,
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct InternalPoints {
min_x: u32, // Unfortunately this can't easily be Length :-( TODO: Fix this in euclid?
max: ScreenPoint,
cursor: ScreenPoint,
}
impl InternalPoints {
// Construct an iterator over internal points that returns no points
fn empty() -> Self {
InternalPoints {
min_x: 1,
max: Point2D::zero(),
cursor: Point2D::zero(),
}
}
}
impl Iterator for InternalPoints {
type Item = ScreenPoint;
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
fn next(&mut self) -> Option<Self::Item> {
if self.cursor.y >= self.max.y {
return None;
}
let ret = self.cursor;
debug_assert!(self.cursor.x < self.max.x);
self.cursor.x += 1;
if self.cursor.x >= self.max.x {
self.cursor.x = self.min_x;
self.cursor.y += 1;
}
Some(ret)
}
}
impl ExactSizeIterator for InternalPoints {
fn len(&self) -> usize {
if self.cursor.y >= self.max.y {
0
} else {
let whole_rows = Box2D::new(point2(self.min_x, self.cursor.y + 1), self.max);
let current_row = Box2D::new(self.cursor, point2(self.max.x, self.cursor.y + 1));
(whole_rows.area() + current_row.area()) as usize
}
}
}
impl FusedIterator for InternalPoints {}
/// Iterator over (mostly) square blocks within a rectangular box in spiral order.
#[derive(Copy, Clone, Debug)]
pub struct SpiralChunks {
block: ScreenBlock,
chunk_scale: Scale<u32, ChunkSpace, ScreenSpace>,
size: Size2D<i32, ChunkSpace>,
cursor: Point2D<i32, ChunkSpace>,
direction: Vector2D<i32, ChunkSpace>,
segment: u32,
segment_remaining: i32,
remaining: u32,
}
impl SpiralChunks {
/// Constructs an iterator that returns no blocks.
fn empty() -> SpiralChunks {
SpiralChunks {
block: Box2D::zero(),
chunk_scale: Scale::new(0),
size: Size2D::zero(),
cursor: Point2D::zero(),
direction: vec2(1, 0),
segment: 0,
segment_remaining: 0,
remaining: 0,
}
}
/// Moves to next segment of the spiral (turns 90 degrees and calculates new segment length).
fn next_segment(&mut self) {
self.direction = vec2(self.direction.y, -self.direction.x);
self.segment += 1;
self.segment_remaining = (self.segment / 2) as i32;
}
/// Returns a new screen block that corresponds to the current iterator position.
fn current_block(&self) -> ScreenBlock {
let min = self.block.min + self.cursor.to_vector().cast::<u32>() * self.chunk_scale;
let max = min + vec2(1, 1) * self.chunk_scale;
let ret = ScreenBlock {
min,
max: point2(
cmp::min(self.block.max.x, max.x),
cmp::min(self.block.max.y, max.y),
),
};
debug_assert!(self.block.contains_box(&ret));
debug_assert!(!ret.is_empty_or_negative());
ret
}
}
impl Iterator for SpiralChunks {
type Item = ScreenBlock;
fn size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.remaining as usize;
(remaining, Some(remaining))
}
fn next(&mut self) -> Option<Self::Item> {
if self.remaining == 0 {
return None;
}
let ret = self.current_block();
if self.segment_remaining == 0 {
self.next_segment();
}
let new_cursor = self.cursor + self.direction;
self.segment_remaining -= 1;
if Box2D::from(self.size).contains(new_cursor) {
// We're inside boundaries and can continue
self.cursor = new_cursor;
} else {
// Got outside of the area.
// In this case we don't move the cursor (we discard new_cursor) and instead
// turn to new segment immediately.
self.next_segment();
// Then we skip the whole next segment (it would be outside the area anyway)
self.cursor += self.direction * self.segment_remaining;
// And finally we turn to the next segment which is inside the area
// Note that segment_remaining for this one is wrong (we skipped the part
// outside of the screen), but we will come back through this branch of the
// iterator again, so it's not a problem and we don't need to fix it.
self.next_segment();
}
self.remaining -= 1;
Some(ret)
}
}
impl ExactSizeIterator for SpiralChunks {
fn len(&self) -> usize {
self.remaining as usize
}
}
impl FusedIterator for SpiralChunks {}
fn divide_round_up(
a: ScreenSize,
b: Scale<u32, ChunkSpace, ScreenSpace>,
) -> Size2D<u32, ChunkSpace> {
let div: Size2D<u32, ChunkSpace> = a / b;
let need_round_up = a.not_equal(div * b);
div + need_round_up.select_size(Size2D::new(1, 1), Size2D::zero())
}
#[cfg(test)]
mod test {
use super::*;
use crate::geometry::test::*;
use assert2::assert;
use proptest_attr_macro::proptest;
fn abs_difference(x: u32, y: u32) -> u32 {
if x < y {
y - x
} else {
x - y
}
}
fn safe_area(block: ScreenBlock) -> u32 {
if block.is_empty_or_negative() {
0
} else {
block.area()
}
}
fn check_exact_length_internal<T: Iterator + ExactSizeIterator>(
iterator: &T,
expected_length: usize,
) {
assert!(iterator.len() == expected_length);
let (min, max) = iterator.size_hint();
assert!(min == expected_length);
assert!(max.unwrap() == expected_length);
}
/// Goes through the whole iterator and checks that at every step iterator's size hint is equal
/// to its reported length and equal to the expected number of elements.
fn check_exact_length<T: Iterator + ExactSizeIterator>(
mut iterator: T,
expected_length: usize,
) {
check_exact_length_internal(&iterator, expected_length);
let mut count = 0usize;
while let Some(_) = iterator.next() {
count += 1;
check_exact_length_internal(&iterator, expected_length - count);
}
}
/// Check that all pixels in the block are covered by a pixel iterator
fn check_pixel_iterator_covers_block<T: Iterator<Item = ScreenPoint>>(
mut pixel_iterator: T,
block: ScreenBlock,
) {
let area = safe_area(block);
let mut vec = vec![false; area as usize];
while let Some(p) = pixel_iterator.next() {
assert!(block.contains(p));
let index = (p.x - block.min.x) + (p.y - block.min.y) * block.width();
assert!(!vec[index as usize]);
vec[index as usize] = true;
}
assert!(vec.into_iter().all(|v| v));
}
/// Tests that pixel iterator covers all pixels in a block
#[proptest]
fn pixel_iterator_covers_all(block: ScreenBlockWrapper) {
check_pixel_iterator_covers_block(block.internal_points(), *block);
}
/// Tests that pixel iterator is a well behaved exact length iterator
#[proptest]
fn pixel_iterator_exact_length(block: ScreenBlockWrapper) {
check_exact_length(block.internal_points(), safe_area(*block) as usize);
}
/// Tests that sub blocks of a spiral chunk iterator when iterated over cover all pixels in
/// a block
#[proptest]
fn spiral_iterator_covers_all(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
check_pixel_iterator_covers_block(
block
.spiral_chunks(chunk_size_minus_one as u32 + 1)
.flat_map(|chunk| chunk.internal_points()),
*block,
);
}
/// Tests that the spiral iterator actually goes in a spiral.
/// This test is not 100% robust; it only checks that we are going through the picture in
/// squares of increasing size. The order, however, is just a visual feature, and if it looks
/// good enough, then it's good enough.
#[proptest]
fn spiral_iterator_is_spiral(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let mut it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
if let Some(first) = it.next() {
let mut prev_distance = 0;
for subblock in it {
let distance = cmp::max(
abs_difference(first.min.x, subblock.min.x),
abs_difference(first.min.y, subblock.min.y),
);
assert!(distance >= prev_distance);
prev_distance = distance;
}
}
}
/// Tests that the spiral iterator is a well behaved exact length iterator
#[proptest]
fn spiral_iterator_exact_length(block: ScreenBlockWrapper, chunk_size_minus_one: u8) {
let it = block.spiral_chunks(chunk_size_minus_one as u32 + 1);
check_exact_length(it, it.len()); // Using first reported length as a baseline, because it's easy
}
#[proptest] | #[should_panic]
fn zero_sized_chunks(block: ScreenBlockWrapper) {
block.spiral_chunks(0);
}
} | random_line_split |
|
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn main() {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done; we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, log it and
// exit.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like it happened, so
// toggle drawing off.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Apply the caller-supplied spin about the y axis; the extra -pi/2 about x re-orients the z-up model.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
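// `get_mip_levels` is called above but its definition lies outside this
// snippet; a plausible sketch (an assumption, not the file's actual code) is
// the standard full-mip-chain count, floor(log2(max(w, h))) + 1. Guarded with
// cfg(any()) so it is never compiled.
#[cfg(any())]
fn get_mip_levels_sketch((width, height): (u32, u32)) -> u32 {
    // For n >= 1, floor(log2(n)) + 1 == 32 - n.leading_zeros().
    (32 - width.max(height).leading_zeros()).max(1)
}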
/// For now only use the first object in the obj file.
/// Returns (vertices, optional vertex indices)
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array I'm building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices; | for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and its
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().next().unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
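// A full mip chain has floor(log2(largest dimension)) + 1 levels; e.g. a
// 1024x1024 texture yields 11 levels (1024, 512, ..., 2, 1).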
(w.max(h) as f32).log2().floor() as u32 + 1
} | random_line_split |
|
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn main() {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
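// Net effect: the modulo above makes a sawtooth over [min, min + 2*range), and the
// fold turns it into a triangle wave peaking at min_camera_height + camera_range,
// with period 2 * camera_range / camera_rate (16 seconds here).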
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Tilt the model upright (-pi/2 around x), then apply the y-axis spin given by `rotation`.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array I'm building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
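// OBJ faces index positions and texture coordinates independently, while the
// renderer wants one flat vertex per unique (position, texcoord) pair; this map
// lets already-emitted pairs be reused instead of duplicated.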
let model_vertices = &obj_set.objects[0].vertices;
for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => | let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().next().unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
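// glTF primitives may be non-indexed, so `indices` stays None unless some
// primitive provides an index accessor. Note the indices are appended without a
// base-vertex offset, which assumes the mesh has a single primitive.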
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
}
| {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and its
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
]; | conditional_block |
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn | () {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
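// Pick the MSAA sample count from the CLI flags below; a count of 1 effectively
// disables multisampling, and with no flag we default to 2x.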
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Tilt the model upright (-pi/2 around x), then apply the y-axis spin given by `rotation`.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
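// `perspective_vk` targets Vulkan clip-space conventions (flipped y, depth in
// [0, 1]), which is why the TODO above notes that other backends would need a
// different projection.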
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array I'm building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices;
for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and its
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().next().unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
}
| main | identifier_name |
10_msaa.rs | use itertools::izip;
use log::{info, warn, Level};
use sarekt::{
self,
error::{SarektError, SarektResult},
image_data::ImageData,
renderer::{
buffers_and_images::{
BufferType, IndexBufferElemSize, MagnificationMinificationFilter, TextureAddressMode,
},
config::{Config, MsaaConfig},
drawable_object::DrawableObject,
vertex_bindings::{DefaultForwardShaderLayout, DefaultForwardShaderVertex},
Drawer, Renderer, VulkanRenderer,
},
};
use std::{
collections::HashMap, convert::TryInto, f32, fs::File, io::Read, sync::Arc, time::Instant,
};
use ultraviolet as uv;
use wavefront_obj as obj;
use winit::{
dpi::{LogicalSize, PhysicalSize},
event::{ElementState, Event, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
platform::desktop::EventLoopExtDesktop,
window::{WindowBuilder, WindowId},
};
const WIDTH: u32 = 1600;
const HEIGHT: u32 = 1200;
const GLB_MODEL_FILE_NAME: &str = "models/chalet.glb";
const OBJ_MODEL_FILE_NAME: &str = "models/viking_room.obj";
const MODEL_TEXTURE_FILE_NAME_GLB: &str = "textures/chalet.jpg";
const MODEL_TEXTURE_FILE_NAME_OBJ: &str = "textures/viking_room.png";
fn main() {
simple_logger::init_with_level(Level::Info).unwrap();
main_loop();
}
/// Takes full control of the executing thread and runs the event loop for it.
fn main_loop() | let mut ar = WIDTH as f32 / HEIGHT as f32;
// Build Window.
let mut event_loop = EventLoop::new();
let window = Arc::new(
WindowBuilder::new()
.with_inner_size(LogicalSize::new(WIDTH, HEIGHT))
.build(&event_loop)
.unwrap(),
);
// Build Renderer.
let config = Config::builder()
.requested_width(WIDTH)
.requested_height(HEIGHT)
.msaa_config(MsaaConfig::new(
msaa_level.try_into().unwrap(),
Some(0.2f32),
))
.build()
.unwrap();
let mut renderer = VulkanRenderer::new(window.clone(), config).unwrap();
// Create Vertex Resources.
let (model_vertices, model_indices) = if use_glb {
load_glb_model(GLB_MODEL_FILE_NAME)
} else {
load_obj_models(OBJ_MODEL_FILE_NAME)
};
info!("Model file loaded");
let model_index_buffer = model_indices.map(|mi| {
renderer
.load_buffer(BufferType::Index(IndexBufferElemSize::UInt32), &mi)
.unwrap()
});
let model_buffer = renderer
.load_buffer(BufferType::Vertex, &model_vertices)
.unwrap();
// Create MVP uniform.
let uniform_handle = renderer
.load_uniform_buffer(DefaultForwardShaderLayout::default())
.unwrap();
// Load textures and create image.
let model_texture_file = if use_glb {
image::open(MODEL_TEXTURE_FILE_NAME_GLB).unwrap()
} else {
image::open(MODEL_TEXTURE_FILE_NAME_OBJ).unwrap()
};
let mip_levels = get_mip_levels(model_texture_file.dimensions());
let model_texture = renderer
.load_image_with_staging_initialization(
model_texture_file,
MagnificationMinificationFilter::Linear,
MagnificationMinificationFilter::Linear,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
TextureAddressMode::ClampToEdge,
mip_levels,
)
.unwrap();
let mut drawable_object_builder = DrawableObject::builder(&renderer)
.uniform_buffer(&uniform_handle)
.vertex_buffer(&model_buffer)
.texture_image(&model_texture);
if model_index_buffer.is_some() {
drawable_object_builder =
drawable_object_builder.index_buffer(model_index_buffer.as_ref().unwrap());
}
let drawable_object = drawable_object_builder.build().unwrap();
let start_time = Instant::now();
let mut last_frame_time = start_time;
let mut frame_number = 0;
let mut fps_average = 0f32;
let mut camera_height = -0.5f32;
// Run the loop.
event_loop.run_return(move |event, _, control_flow| {
// By default continuously run this event loop, even if the OS hasn't
// distributed an event, that way we will draw as fast as possible.
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
// All the main events to process are done we can do "work" now (game
// engine state update etc.)
let now = Instant::now();
let time_since_start_secs = ((now - start_time).as_millis() as f32) / 1000f32;
if show_fps {
let time_since_last_frame_secs = ((now - last_frame_time).as_nanos() as f32) / 1e9f32;
let fps = 1f32 / time_since_last_frame_secs;
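// Fold the new sample into a running mean, avg_n = (n * avg_{n-1} + x) / (n + 1),
// so no per-frame history has to be stored.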
if frame_number == 0 {
fps_average = 0f32;
} else {
fps_average =
((frame_number as f32 * fps_average) + fps) / (frame_number as f32 + 1f32);
}
frame_number += 1;
info!("Frame Period: {}", time_since_last_frame_secs);
info!("FPS: {}", fps);
info!("FPS averaged: {}", fps_average);
last_frame_time = now;
}
// Rise to max height then gently go back down.
let camera_rate = 0.25f32;
let min_camera_height = -0.5f32;
let camera_range = 2f32;
camera_height =
(camera_rate * time_since_start_secs) % (2.0f32 * camera_range) + min_camera_height;
if camera_height >= (camera_range + min_camera_height) {
camera_height = (2.0f32 * (camera_range + min_camera_height)) - camera_height;
}
let rotation = (std::f32::consts::PI + std::f32::consts::PI * time_since_start_secs / 8f32)
% (2f32 * std::f32::consts::PI);
update_uniforms(
&renderer,
&drawable_object,
uv::Vec3::new(0f32, -1f32, -1.5f32),
rotation,
camera_height,
false,
ar,
)
.unwrap();
renderer.draw(&drawable_object).unwrap();
// At the end of work request redraw.
window.request_redraw();
}
Event::RedrawRequested(_) => {
// Redraw requested, this is called after MainEventsCleared.
renderer.frame().unwrap_or_else(|err| {
match err {
SarektError::SwapchainOutOfDate | SarektError::SuboptimalSwapchain => {
// Handle window resize etc.
warn!("Tried to render without processing window resize event!");
let PhysicalSize { width, height } = window.inner_size();
renderer
.recreate_swapchain(width, height)
.expect("Error recreating swapchain");
}
e => panic!("Frame had an unrecoverable error! {}", e),
}
});
}
Event::WindowEvent { window_id, event } => {
main_loop_window_event(&event, &window_id, control_flow, &mut renderer, &mut ar)
.expect("Error processing window event.");
}
Event::LoopDestroyed => {
// Explicitly call exit so resources are cleaned up.
std::process::exit(0);
}
_ => (),
}
});
}
/// Handles all winit window specific events.
fn main_loop_window_event(
event: &WindowEvent, _id: &WindowId, control_flow: &mut winit::event_loop::ControlFlow,
renderer: &mut VulkanRenderer, ar: &mut f32,
) -> SarektResult<()> {
match event {
WindowEvent::CloseRequested => {
// When the window system requests a close, signal to winit that we'd like to
// close the window.
info!("Exiting due to close request event from window system...");
*control_flow = ControlFlow::Exit;
}
WindowEvent::KeyboardInput { input, .. } => {
// When the keyboard input is a press on the escape key, exit and print the
// line.
if let (Some(VirtualKeyCode::Escape), ElementState::Pressed) =
(input.virtual_keycode, input.state)
{
info!("Exiting due to escape press...");
*control_flow = ControlFlow::Exit
}
}
WindowEvent::Resized(size) => {
// If the size is 0, minimization or something like that happened so I
// toggle drawing.
info!("Window resized, recreating renderer swapchain...");
let enabled = !(size.height == 0 && size.width == 0);
if enabled {
*ar = size.width as f32 / size.height as f32;
}
renderer.set_rendering_enabled(enabled);
return renderer.recreate_swapchain(size.width, size.height);
}
_ => (),
}
Ok(())
}
fn update_uniforms(
renderer: &VulkanRenderer, object: &DrawableObject<VulkanRenderer, DefaultForwardShaderLayout>,
position: uv::Vec3, rotation: f32, camera_height: f32, enable_colors: bool, ar: f32,
) -> SarektResult<()> {
// Tilt the model upright (-pi/2 around x), then apply the y-axis spin given by `rotation`.
let total_rotation =
uv::Mat4::from_rotation_y(rotation) * uv::Mat4::from_rotation_x(-std::f32::consts::PI / 2f32);
let model_matrix = uv::Mat4::from_translation(position) * total_rotation;
let view_matrix = uv::Mat4::look_at(
/* eye= */ uv::Vec3::new(0.0f32, camera_height, 0.0f32),
/* at= */ position,
/* up= */ uv::Vec3::unit_y(),
);
// TODO BACKENDS this proj should be conditional on backend.
let perspective_matrix =
uv::projection::rh_yup::perspective_vk(std::f32::consts::PI / 2f32, ar, 0.1f32, 10f32);
let uniform = DefaultForwardShaderLayout::new(
perspective_matrix * view_matrix * model_matrix,
enable_colors,
/* enable_texture_mixing= */ true,
);
object.set_uniform(renderer, &uniform)
}
/// For now only use the first object in the obj file.
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_obj_models(obj_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let mut model_file = File::open(obj_file_path).unwrap();
let mut model_file_text = String::new();
model_file.read_to_string(&mut model_file_text).unwrap();
let obj_set = obj::obj::parse(&model_file_text).unwrap();
if obj_set.objects.len() != 1 {
panic!(
"The model you attempted to load has more than one object in it, implying it is a scene, if \
you wish to use it as a single model, modify the application code to ignore that or join \
your meshes into a single model"
);
}
info!("Loaded model {}", OBJ_MODEL_FILE_NAME);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Vec<u32> = Vec::new();
// Map of inserted (obj_vertex_index, obj_texture_index) to index in the
// vertices array I'm building.
let mut inserted_indices: HashMap<(usize, usize), usize> = HashMap::new();
let model_vertices = &obj_set.objects[0].vertices;
for geo in obj_set.objects[0].geometry.iter() {
// For every set of geometry (regardless of material for now).
for shape in geo.shapes.iter() {
// For every face/shape in the set of geometry.
match shape.primitive {
obj::obj::Primitive::Triangle(x, y, z) => {
for &vert in [x, y, z].iter() {
// We're only building a buffer of indices and vertices which contain position
// and tex coord.
let index_key = (vert.0, vert.1.unwrap());
if let Some(&vtx_index) = inserted_indices.get(&index_key) {
// Already loaded this (vertex index, texture index) combo, just add it to the
// index buffer.
indices.push(vtx_index as _);
continue;
}
// This is a new unique vertex (where a vertex is both a position and its
// texture coordinate) so add it to the vertex buffer and the index buffer.
let current_vertex = model_vertices[vert.0];
let vertex_as_float = [
current_vertex.x as f32,
current_vertex.y as f32,
current_vertex.z as f32,
];
let texture_vertices = &obj_set.objects[0].tex_vertices;
let tex_vertex = texture_vertices[vert.1.unwrap()];
// TODO BACKENDS only flip on coordinate systems that should.
let texture_vertex_as_float = [tex_vertex.u as f32, 1f32 - tex_vertex.v as f32];
// Ignoring normals, there is no shading in this example.
// Keep track of which keys were inserted and add this vertex to the index
// buffer.
inserted_indices.insert(index_key, vertices.len());
indices.push(vertices.len() as _);
// Add to the vertex buffer.
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&vertex_as_float,
&texture_vertex_as_float,
));
}
}
_ => warn!("Unsupported primitive!"),
}
}
}
info!(
"Vertices/indices in model: {}, {}",
vertices.len(),
indices.len()
);
(vertices, Some(indices))
}
/// Returns (vertices, vertex_indices); texture coordinates are baked into the vertices.
fn load_glb_model(gltf_file_path: &str) -> (Vec<DefaultForwardShaderVertex>, Option<Vec<u32>>) {
let (document, buffers, _) = gltf::import(gltf_file_path).unwrap();
if document.scenes().len() != 1 || document.scenes().next().unwrap().nodes().len() != 1 {
panic!(
"The model you attempted to load has more than one scene or node in it, if you wish to use \
it as a single model, modify the application code to ignore that or join your meshes into \
a single model"
);
}
let mesh = document.meshes().next().unwrap();
info!("Loaded model {}", gltf_file_path);
let mut vertices: Vec<DefaultForwardShaderVertex> = Vec::new();
let mut indices: Option<Vec<u32>> = None;
for primitive in mesh.primitives() {
let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
let positions = reader.read_positions().unwrap();
let tex_coords = reader.read_tex_coords(0).unwrap().into_f32();
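// glTF texcoords may be stored as u8, u16, or f32; `into_f32()` converts (and
// normalizes the integer variants) so we always feed floats to the vertex type.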
for (position, tex_coord) in izip!(positions, tex_coords) {
vertices.push(DefaultForwardShaderVertex::new_with_texture(
&position, &tex_coord,
));
}
reader
.read_indices()
.map(|it| indices.get_or_insert(Vec::new()).extend(&mut it.into_u32()));
}
info!(
"Vertices/indices in model: {}, {:?}",
vertices.len(),
indices.as_ref().map(|i| i.len())
);
(vertices, indices)
}
fn get_mip_levels(dimensions: (u32, u32)) -> u32 {
let w = dimensions.0;
let h = dimensions.1;
(w.max(h) as f32).log2().floor() as u32 + 1
}
| {
let args: Vec<String> = std::env::args().collect();
let show_fps = args.contains(&"fps".to_owned());
let use_glb = args.contains(&"glb".to_owned());
let msaa_level = if args.contains(&"4x".to_owned()) {
4u8
} else if args.contains(&"8x".to_owned()) {
8u8
} else if args.contains(&"noaa".to_owned()) {
1u8
} else {
2u8
};
info!("MSAA {}x", msaa_level);
info!("Show FPS: {}", show_fps);
info!("Use GLTF Model Type: {}", use_glb);
info!("Running main loop...");
| identifier_body |
op.rs | //! # Implementing differentiable operations
//!
//! Many well-known ops are pre-defined in [crate::tensor_ops], but you can also
//! implement custom ops by hand.
//! See also [crate::tensor::TensorBuilder].
//!
//! ```
//! use ndarray;
//! use autograd as ag;
//! use autograd::op::OpError;
//! use autograd::tensor_ops::*;
//!
//! type NdArray<T: ag::Float> = ndarray::Array<T, ndarray::IxDyn>;
//!
//! // Implements `Op` trait for `Sigmoid`.
//! struct Sigmoid;
//!
//! impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
//! fn compute(
//! &self,
//! ctx: &mut ag::op::ComputeContext<T>,
//! ) -> Result<(), OpError> {
//! let x: &ag::NdArrayView<_> = &ctx.input(0);
//! // Use `ndarray::Array::mapv` for element-wise computation.
//! let half = T::from(0.5).unwrap();
//! let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
//! ctx.append_output(y);
//! Ok(())
//! }
//!
//! fn grad(&self, ctx: &mut ag::op::GradientContext<T>) {
//! // gradient of the output of Sigmoid
//! let gy = ctx.output_grad();
//! let y = ctx.output();
//! // gradient of the input of Sigmoid
//! let gx = gy * (y - square(y));
//! ctx.append_input_grad(Some(gx));
//! }
//! }
//!
//! // `sigmoid` function for end-user.
//! fn sigmoid<'graph, F: ag::Float>(x: &ag::Tensor<'graph, F>, g: &'graph ag::Context<F>)
//! -> ag::Tensor<'graph, F> {
//! ag::Tensor::builder(g)
//! .append_input(x, false)
//! .build(Sigmoid)
//! }
//! ```
//!
use std::any::type_name;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use crate::ndarray_ext::{NdArrayView, NdArrayViewMut, RawNdArrayView};
use crate::smallvec::SmallVec as RawSmallVec;
use crate::tensor::Tensor;
use crate::{Float, NdArray};
use crate::op::OpInput::NonVariable;
pub(crate) const DEFAULT_NUM_EDGES: usize = 2;
pub(crate) type SmallVec<T> = RawSmallVec<[T; DEFAULT_NUM_EDGES]>;
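// Two inline slots cover the common unary/binary-op case without a heap
// allocation; larger edge lists spill to the heap automatically.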
/// Error in `Op`'s computation.
#[derive(Clone, Debug, PartialEq)]
pub enum OpError {
NdArrayError(String, ndarray::ShapeError),
IncompatibleShape(String),
TypeUnsupported(String),
InvalidDims(String),
OutOfBounds(String),
}
impl std::error::Error for OpError {}
impl fmt::Display for OpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OpError::NdArrayError(pref, e) => write!(f, "{}: ", pref).and_then(|()| e.fmt(f)),
OpError::IncompatibleShape(s) => write!(f, "{}", s),
OpError::TypeUnsupported(s) => write!(f, "{}", s),
OpError::InvalidDims(s) => write!(f, "{}", s),
OpError::OutOfBounds(s) => write!(f, "{}", s),
}
}
}
/// Trait for tensor operations. `Tensor` structs wrap this.
pub trait Op<F: Float> {
/// Name of this op
fn name(&self) -> &'static str {
type_name::<Self>()
}
/// Runs this op with `ComputeContext`.
fn compute(&self, ctx: &mut ComputeContext<F>) -> Result<(), OpError>;
/// Returns gradients for input nodes, computed using the output's gradients etc.
fn grad(&self, ctx: &mut GradientContext<F>);
}
pub(crate) struct DummyOp<F: Float> {
pub phantom: PhantomData<F>,
}
impl<F: Float> DummyOp<F> {
#[allow(dead_code)]
pub(crate) fn new() -> Self {
DummyOp {
phantom: PhantomData,
}
}
}
impl<F: Float> Op<F> for DummyOp<F> {
fn compute(&self, _: &mut ComputeContext<F>) -> Result<(), OpError> {
Ok(())
}
fn grad(&self, _: &mut GradientContext<F>) {}
}
/// Wrapper for NdArrayView/NdArrayViewMut which is fed to `Op::compute`
///
/// Used in `op::ComputeContext`.
pub(crate) enum OpInput<'v, T: Float> {
NonVariable(Option<NdArrayView<'v, T>>),
RdOnlyVariable(Option<NdArrayView<'v, T>>),
RdWrVariable(Option<NdArrayViewMut<'v, T>>),
}
/// `Op::compute`'s output
#[derive(Clone)]
pub(crate) enum OpOutput<T: Float> {
Owned(NdArray<T>),
View(RawNdArrayView<T>),
}
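// `Owned` carries a freshly computed array, while `View` lets an op return a
// view into one of its inputs (e.g. a reshape or slice) without copying.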
impl<'view, T: Float> OpInput<'view, T> {
#[inline]
/// Make a read-only, non-variable input array
pub fn new_non_variable(x: NdArrayView<'view, T>) -> Self {
NonVariable(Some(x))
}
#[inline]
/// Make a read-only input array
pub fn new_rdonly_variable(x: NdArrayView<'view, T>) -> Self {
OpInput::RdOnlyVariable(Some(x))
}
#[inline]
/// Make a read/write input array
pub fn new_rdwr_variable(x: NdArrayViewMut<'view, T>) -> Self {
OpInput::RdWrVariable(Some(x))
}
}
/// Context of an `Op`'s computation phase.
///
/// # Example
///
/// ```
/// use autograd as ag;
///
/// // Implementing `Op` trait for `Sigmoid`.
/// struct Sigmoid;
///
/// impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
/// fn compute(
/// &self,
/// ctx: &mut ag::op::ComputeContext<T>,
/// ) -> Result<(), ag::op::OpError> {
/// // Getting the first input array.
/// let x: &ag::NdArrayView<_> = &ctx.input(0);
/// let half = T::from(0.5).unwrap();
/// let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
/// // Put the computed result.
/// ctx.append_output(y);
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<T>) { /* ... */ }
/// }
/// ```
pub struct ComputeContext<'v, T: Float> {
// Input arrays
xs: SmallVec<OpInput<'v, T>>,
// Output arrays
pub(crate) ys: SmallVec<OpOutput<T>>,
}
impl<'view, T: Float> ComputeContext<'view, T> {
#[inline]
pub(crate) fn new(xs: SmallVec<OpInput<'view, T>>) -> Self {
ComputeContext {
xs,
ys: SmallVec::new(),
}
}
/// Grabs the `i` th input array as a *read-only* array view.
///
/// Calling `input(i)` more than once causes panic.
#[inline]
pub fn input(&mut self, i: usize) -> NdArrayView<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: input index out of range."),
};
match x {
NonVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdOnlyVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdWrVariable(_) => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({}). Use input_mut() instead.",
i
);
}
}
}
/// Grabs the `i` th input array as a *read-write* array view.
///
/// Calling `input_mut(i)` more than once causes panic.
#[inline]
pub fn input_mut(&mut self, i: usize) -> NdArrayViewMut<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: {}'s input doesn't exist.", i),
};
match x {
OpInput::RdWrVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
_ => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({})",
i
);
}
}
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: An implementor of `Op::compute` must call `append_*` once per output of the op, otherwise a panic occurs.
#[inline]
pub fn append_output_view(&mut self, y: NdArrayView<'view, T>) {
self.append_output_view_raw(y.raw_view());
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: An implementor of `Op::compute` must call `append_*` once per output of the op, otherwise a panic occurs.
#[inline]
pub(crate) fn append_output_view_raw(&mut self, y: RawNdArrayView<T>) {
let mut contains_variable_input = false;
for x in &self.xs {
match x {
NonVariable(_) => {},
_ => contains_variable_input = true
}
}
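// Rationale: once `compute` returns, a variable input's borrow is released, so
// a view aliasing that storage could dangle; materializing an owned copy here
// avoids the use-after-free noted below.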
if contains_variable_input {
// copy it beforehand to avoid use-after-free
self.ys.push(OpOutput::Owned(unsafe { y.deref_into_view().to_owned() }));
} else {
self.ys.push(OpOutput::View(y));
}
}
#[inline]
pub fn append_empty_output(&mut self) {
self.ys.push(OpOutput::Owned(NdArray::zeros(
crate::ndarray::IxDyn(&[]),
)));
}
/// Appends an ndarray to the back of the output list of the current op.
///
/// NOTE: An implementor of `Op::compute` must call `append_*` once per output of the op, otherwise a panic occurs.
#[inline]
pub fn append_output(&mut self, y: NdArray<T>) {
self.ys.push(OpOutput::Owned(y));
}
/// Returns the number of input arrays.
#[inline]
pub fn num_inputs(&self) -> usize {
self.xs.len()
}
}
/// Context of an `Op`'s gradient propagation phase.
///
/// This is passed to an `Op` through `Op::grad`.
/// `Op::grad` should provide the gradients of its inputs by calling `GradientContext::append_input_grad`.
///
/// Use `graph()` to access `Graph` object for tensor computations.
///
/// ```
/// use autograd as ag;
/// use ag::tensor_ops as T;
///
/// struct Sigmoid;
///
/// impl<F: ag::Float> ag::op::Op<F> for Sigmoid {
/// fn compute(&self, ctx: &mut ag::op::ComputeContext<F>) -> Result<(), ag::op::OpError> {
/// /* ... */
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<F>) {
/// // gradient of the output of Sigmoid
/// let gy = ctx.output_grad();
/// // output tensor
/// let y = ctx.output();
/// // `Tensor` computations
/// let gx = gy * (y - T::square(y));
/// // Propagates input's gradient.
/// ctx.append_input_grad(Some(gx));
/// }
/// }
/// ```
pub struct GradientContext<'graph, T: Float> {
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
gxs: SmallVec<Option<Tensor<'graph, T>>>,
}
impl<'graph, T: Float> GradientContext<'graph, T> {
#[inline]
pub(crate) fn new(
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
) -> Self |
// Call Op::grad and return `gxs`
pub(crate) fn compute_input_grads(mut self) -> SmallVec<Option<Tensor<'graph, T>>> {
let id = self.y.id;
// steal op
let stolen = self.graph().access_inner_mut(id).op.take().unwrap();
// call Op::grad
stolen.grad(&mut self);
// restore
mem::swap(&mut self.graph().access_inner_mut(id).op, &mut Some(stolen));
debug_assert!(
!self.gxs.is_empty(),
"Bad Op impl: GradientContext::append_input_grad was not called"
);
self.gxs
}
/// Returns the gradient of the op's output.
#[inline]
pub fn output_grad(&self) -> Tensor<'graph, T> {
self.gy
}
/// Grabs the output of the op.
#[inline]
pub fn output(&self) -> Tensor<'graph, T> {
self.y
}
/// Returns input tensors.
#[inline]
pub fn inputs(&self) -> SmallVec<Tensor<'graph, T>> {
let mut ret = SmallVec::new();
for input in self.y.get_incoming_tensors().iter() {
ret.push(self.graph.tensor(input.id));
}
ret
}
/// Grabs the `i` th input tensor.
#[inline]
pub fn input(&self, i: usize) -> Tensor<'graph, T> {
self
.y
.get_incoming_tensor(i, self.graph)
.expect("bad Op::grad impl")
}
/// Returns the number of inputs.
#[inline]
pub fn num_inputs(&self) -> usize {
self.y.inner().incoming_nodes.len()
}
/// Returns a graph object that is usable for tensor computations in the context.
#[inline]
pub fn graph(&self) -> &'graph crate::graph::Graph<T> {
self.graph
}
/// Back-propagates the input's gradient.
///
/// Appends the given tensor to the back of the input-gradient-list.
/// `None` argument indicates that the `Op`'s input doesn't have gradient.
/// Note that `Op::grad` must call this function as many times as `num_inputs()`.
#[inline]
pub fn append_input_grad(&mut self, gx: Option<Tensor<'graph, T>>) {
self.gxs.push(gx);
}
}
| {
GradientContext {
gy,
y,
graph,
gxs: SmallVec::new(),
}
} | identifier_body |
op.rs | //! # Implementing differentiable operations
//!
//! Many well-known ops are pre-defined in [crate::tensor_ops], but you can also
//! implement custom ops by hand.
//! See also [crate::tensor::TensorBuilder].
//!
//! ```
//! use ndarray;
//! use autograd as ag;
//! use autograd::op::OpError;
//! use autograd::tensor_ops::*;
//!
//! type NdArray<T: ag::Float> = ndarray::Array<T, ndarray::IxDyn>;
//!
//! // Implements `Op` trait for `Sigmoid`.
//! struct Sigmoid;
//!
//! impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
//! fn compute(
//! &self,
//! ctx: &mut ag::op::ComputeContext<T>,
//! ) -> Result<(), OpError> {
//! let x: &ag::NdArrayView<_> = &ctx.input(0);
//! // Use `ndarray::Array::mapv` for element-wise computation.
//! let half = T::from(0.5).unwrap();
//! let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
//! ctx.append_output(y);
//! Ok(())
//! }
//!
//! fn grad(&self, ctx: &mut ag::op::GradientContext<T>) {
//! // gradient of the output of Sigmoid
//! let gy = ctx.output_grad();
//! let y = ctx.output();
//! // gradient of the input of Sigmoid
//! let gx = gy * (y - square(y));
//! ctx.append_input_grad(Some(gx));
//! }
//! }
//!
//! // `sigmoid` function for end-user.
//! fn sigmoid<'graph, F: ag::Float>(x: &ag::Tensor<'graph, F>, g: &'graph ag::Context<F>)
//! -> ag::Tensor<'graph, F> {
//! ag::Tensor::builder(g)
//! .append_input(x, false)
//! .build(Sigmoid)
//! }
//! ```
//!
use std::any::type_name;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use crate::ndarray_ext::{NdArrayView, NdArrayViewMut, RawNdArrayView};
use crate::smallvec::SmallVec as RawSmallVec;
use crate::tensor::Tensor;
use crate::{Float, NdArray};
use crate::op::OpInput::NonVariable;
pub(crate) const DEFAULT_NUM_EDGES: usize = 2;
pub(crate) type SmallVec<T> = RawSmallVec<[T; DEFAULT_NUM_EDGES]>;
/// Error in `Op`'s computation.
#[derive(Clone, Debug, PartialEq)]
pub enum | {
NdArrayError(String, ndarray::ShapeError),
IncompatibleShape(String),
TypeUnsupported(String),
InvalidDims(String),
OutOfBounds(String),
}
impl std::error::Error for OpError {}
impl fmt::Display for OpError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
OpError::NdArrayError(pref, e) => write!(f, "{}: ", pref).and_then(|()| e.fmt(f)),
OpError::IncompatibleShape(s) => write!(f, "{}", s),
OpError::TypeUnsupported(s) => write!(f, "{}", s),
OpError::InvalidDims(s) => write!(f, "{}", s),
OpError::OutOfBounds(s) => write!(f, "{}", s),
}
}
}
/// Trait for tensor operations. `Tensor` structs wrap this.
pub trait Op<F: Float> {
/// Name of this op
fn name(&self) -> &'static str {
type_name::<Self>()
}
/// Runs this op with `ComputeContext`.
fn compute(&self, ctx: &mut ComputeContext<F>) -> Result<(), OpError>;
/// Returns gradients for input nodes, computed using the output's gradients etc.
fn grad(&self, ctx: &mut GradientContext<F>);
}
pub(crate) struct DummyOp<F: Float> {
pub phantom: PhantomData<F>,
}
impl<F: Float> DummyOp<F> {
#[allow(dead_code)]
pub(crate) fn new() -> Self {
DummyOp {
phantom: PhantomData,
}
}
}
impl<F: Float> Op<F> for DummyOp<F> {
fn compute(&self, _: &mut ComputeContext<F>) -> Result<(), OpError> {
Ok(())
}
fn grad(&self, _: &mut GradientContext<F>) {}
}
/// Wrapper for NdArrayView/NdArrayViewMut which is fed to `Op::compute`
///
/// Used in `ComputeContext`.
pub(crate) enum OpInput<'v, T: Float> {
NonVariable(Option<NdArrayView<'v, T>>),
RdOnlyVariable(Option<NdArrayView<'v, T>>),
RdWrVariable(Option<NdArrayViewMut<'v, T>>),
}
/// `Op::compute`'s output
#[derive(Clone)]
pub(crate) enum OpOutput<T: Float> {
Owned(NdArray<T>),
View(RawNdArrayView<T>),
}
impl<'view, T: Float> OpInput<'view, T> {
#[inline]
/// Make a read-only, non-variable input array
pub fn new_non_variable(x: NdArrayView<'view, T>) -> Self {
NonVariable(Some(x))
}
#[inline]
/// Make a read-only input array backed by a variable
pub fn new_rdonly_variable(x: NdArrayView<'view, T>) -> Self {
OpInput::RdOnlyVariable(Some(x))
}
#[inline]
/// Make a read/write input array
pub fn new_rdwr_variable(x: NdArrayViewMut<'view, T>) -> Self {
OpInput::RdWrVariable(Some(x))
}
}
/// Context of an `Op`'s computation phase.
///
/// # Example
///
/// ```
/// use autograd as ag;
///
/// // Implementing `Op` trait for `Sigmoid`.
/// struct Sigmoid;
///
/// impl<T: ag::Float> ag::op::Op<T> for Sigmoid {
/// fn compute(
/// &self,
/// ctx: &mut ag::op::ComputeContext<T>,
/// ) -> Result<(), ag::op::OpError> {
/// // Getting the first input array.
/// let x: &ag::NdArrayView<_> = &ctx.input(0);
/// let half = T::from(0.5).unwrap();
/// let y = x.mapv(move |a| ((a * half).tanh() * half) + half);
/// // Put the computed result.
/// ctx.append_output(y);
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<T>) { /*... */ }
/// }
/// ```
pub struct ComputeContext<'v, T: Float> {
// Input arrays
xs: SmallVec<OpInput<'v, T>>,
// Output arrays
pub(crate) ys: SmallVec<OpOutput<T>>,
}
impl<'graph, 'view, T: Float> ComputeContext<'view, T> {
#[inline]
pub(crate) fn new(xs: SmallVec<OpInput<'view, T>>) -> Self {
ComputeContext {
xs,
ys: SmallVec::new(),
}
}
/// Grabs the `i` th input array as a *read-only* array view.
///
/// Calling `input(i)` more than once for the same `i` causes a panic.
#[inline]
pub fn input(&mut self, i: usize) -> NdArrayView<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: input index out of range."),
};
match x {
NonVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdOnlyVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
OpInput::RdWrVariable(_) => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({}). Use input_mut() instead.",
i
);
}
}
}
/// Grabs the `i` th input array as a *read-write* array view.
///
/// Calling `input_mut(i)` more than once for the same `i` causes a panic.
#[inline]
pub fn input_mut(&mut self, i: usize) -> NdArrayViewMut<'view, T> {
let x = match self.xs.get_mut(i) {
Some(x) => x,
None => panic!("Bad op impl: {}'s input doesn't exist.", i),
};
match x {
OpInput::RdWrVariable(ref mut a) => match a.take() {
Some(ret) => ret,
None => panic!(
"Bad op impl: input({})/input_mut({}) cannot be called twice",
i, i
),
},
_ => {
panic!(
"Bad op impl: cannot perform mutable borrowing for input({})",
i
);
}
}
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: The implementor of `Op::compute` must call `append_*` exactly as many times as the op has outputs, otherwise a panic occurs.
#[inline]
pub fn append_output_view(&mut self, y: NdArrayView<'view, T>) {
self.append_output_view_raw(y.raw_view());
}
/// Appends an `ndarray::ArrayView` to the back of the output list of the current op.
///
/// NOTE: The implementor of `Op::compute` must call `append_*` exactly as many times as the op has outputs, otherwise a panic occurs.
#[inline]
pub(crate) fn append_output_view_raw(&mut self, y: RawNdArrayView<T>) {
let mut contains_variable_input = false;
for x in &self.xs {
match x {
NonVariable(_) => {},
_ => contains_variable_input = true
}
}
if contains_variable_input {
// copy it beforehand to avoid use-after-free
self.ys.push(OpOutput::Owned(unsafe { y.deref_into_view().to_owned() }));
} else {
self.ys.push(OpOutput::View(y));
}
}
#[inline]
pub fn append_empty_output(&mut self) {
self.ys.push(OpOutput::Owned(NdArray::zeros(
crate::ndarray::IxDyn(&[]),
)));
}
/// Appends an ndarray to the back of the output list of the current op.
///
/// NOTE: The implementor of `Op::compute` must call `append_*` exactly as many times as the op has outputs, otherwise a panic occurs.
#[inline]
pub fn append_output(&mut self, y: NdArray<T>) {
self.ys.push(OpOutput::Owned(y));
}
/// Returns the number of input arrays.
#[inline]
pub fn num_inputs(&self) -> usize {
self.xs.len()
}
}
/// Context of an `Op`'s gradient propagation phase.
///
/// This is passed to an `Op` through `Op::grad`.
/// `Op::grad` should provide the gradients of its inputs by calling `GradientContext::append_input_grad`.
///
/// Use `graph()` to access `Graph` object for tensor computations.
///
/// ```
/// use autograd as ag;
/// use ag::tensor_ops as T;
///
/// struct Sigmoid;
///
/// impl<F: ag::Float> ag::op::Op<F> for Sigmoid {
/// fn compute(&self, ctx: &mut ag::op::ComputeContext<F>) -> Result<(), ag::op::OpError> {
/// /*... */
/// Ok(())
/// }
///
/// fn grad(&self, ctx: &mut ag::op::GradientContext<F>) {
/// // gradient of the input of Sigmoid
/// let gy = ctx.output_grad();
/// // output tensor
/// let y = ctx.output();
/// // `Tensor` computations
/// let gx = gy * (y - T::square(y));
/// // Propagates input's gradient.
/// ctx.append_input_grad(Some(gx));
/// }
/// }
/// ```
pub struct GradientContext<'graph, T: Float> {
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
gxs: SmallVec<Option<Tensor<'graph, T>>>,
}
impl<'graph, T: Float> GradientContext<'graph, T> {
#[inline]
pub(crate) fn new(
gy: Tensor<'graph, T>,
y: Tensor<'graph, T>,
graph: &'graph crate::graph::Graph<T>,
) -> Self {
GradientContext {
gy,
y,
graph,
gxs: SmallVec::new(),
}
}
// Call Op::grad and return `gxs`
pub(crate) fn compute_input_grads(mut self) -> SmallVec<Option<Tensor<'graph, T>>> {
let id = self.y.id;
// steal op
let stolen = self.graph().access_inner_mut(id).op.take().unwrap();
// call Op::grad
stolen.grad(&mut self);
// restore
mem::swap(&mut self.graph().access_inner_mut(id).op, &mut Some(stolen));
debug_assert!(
!self.gxs.is_empty(),
"Bad Op impl: GradientContext::append_input_grad was not called"
);
self.gxs
}
/// Returns the gradient of the op's output.
#[inline]
pub fn output_grad(&self) -> Tensor<'graph, T> {
self.gy
}
/// Grabs the output of the op.
#[inline]
pub fn output(&self) -> Tensor<'graph, T> {
self.y
}
/// Returns input tensors.
#[inline]
pub fn inputs(&self) -> SmallVec<Tensor<'graph, T>> {
let mut ret = SmallVec::new();
for input in self.y.get_incoming_tensors().iter() {
ret.push(self.graph.tensor(input.id));
}
ret
}
/// Grabs the `i` th input tensor.
#[inline]
pub fn input(&self, i: usize) -> Tensor<'graph, T> {
self.y
.get_incoming_tensor(i, self.graph)
.expect("bad Op::grad impl")
}
/// Returns the number of inputs.
#[inline]
pub fn num_inputs(&self) -> usize {
self.y.inner().incoming_nodes.len()
}
/// Returns a graph object that is usable for tensor computations in the context.
#[inline]
pub fn graph(&self) -> &'graph crate::graph::Graph<T> {
self.graph
}
/// Back-propagates the input's gradient.
///
/// Appends the given tensor to the back of the input-gradient-list.
/// `None` argument indicates that the `Op`'s input doesn't have gradient.
/// Note that `Op::grad` must call this function exactly `num_inputs()` times.
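///
/// A minimal sketch for a hypothetical two-input op where only the first input
/// is differentiable:
///
/// ```ignore
/// fn grad(&self, ctx: &mut GradientContext<T>) {
///     let gy = ctx.output_grad();
///     ctx.append_input_grad(Some(gy)); // gradient w.r.t. the first input
///     ctx.append_input_grad(None);     // the second input has no gradient
/// }
/// ```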
#[inline]
pub fn append_input_grad(&mut self, gx: Option<Tensor<'graph, T>>) {
self.gxs.push(gx);
}
}
// mapfile.rs
use crate::cache::TERRA_DIRECTORY;
use crate::terrain::quadtree::node::VNode;
use crate::terrain::tile_cache::{LayerParams, LayerType, TextureFormat};
use anyhow::Error;
use serde::{Deserialize, Serialize};
use std::fs::{self, File};
use std::io::{BufReader, BufWriter, Read, Write};
use std::path::PathBuf;
use vec_map::VecMap;
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileState {
Missing,
Base,
Generated,
GpuOnly,
MissingBase,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
pub(crate) enum TileKind {
Base,
Generate,
GpuOnly,
}
#[derive(PartialEq, Eq, Serialize, Deserialize)]
struct TileMeta {
crc32: u32,
state: TileState,
}
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub(crate) struct TextureDescriptor {
pub width: u32,
pub height: u32,
pub depth: u32,
pub format: TextureFormat,
pub bytes: usize,
}
pub struct MapFile {
layers: VecMap<LayerParams>,
_db: sled::Db,
tiles: sled::Tree,
textures: sled::Tree,
}
impl MapFile {
pub(crate) fn new(layers: VecMap<LayerParams>) -> Self {
let directory = TERRA_DIRECTORY.join("tiles/meta");
let db = sled::open(&directory).expect(&format!(
"Failed to open/create sled database. Deleting the '{}' directory may fix this",
directory.display()
));
db.insert("version", "1").unwrap();
Self {
layers,
tiles: db.open_tree("tiles").unwrap(),
textures: db.open_tree("textures").unwrap(),
_db: db,
}
}
pub(crate) fn tile_state(&self, layer: LayerType, node: VNode) -> Result<TileState, Error> {
Ok(match self.lookup_tile_meta(layer, node)? {
Some(meta) => meta.state,
None => TileState::GpuOnly,
})
}
pub(crate) fn read_tile(&self, layer: LayerType, node: VNode) -> Option<Vec<u8>> {
let filename = Self::tile_name(layer, node);
if !filename.exists() {
return None;
}
match layer {
LayerType::Albedo => Some(image::open(filename).ok()?.to_rgba().into_vec()),
LayerType::Heightmaps => {
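// Reverse of the heightmap encoding in `write_tile`: snappy-decompress,
// reinterpret the bytes as i16 deltas, then undo the delta encoding and
// widen the heights back to f32.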
let mut data = Vec::new();
snap::read::FrameDecoder::new(BufReader::new(File::open(filename).ok()?))
.read_to_end(&mut data)
.ok()?;
let mut qdata = vec![0i16; data.len() / 2];
bytemuck::cast_slice_mut(&mut qdata).copy_from_slice(&data);
let mut prev = 0;
let mut fdata = vec![0f32; qdata.len()];
for (f, q) in fdata.iter_mut().zip(qdata.iter()) {
let x = (*q).wrapping_add(prev);
*f = x as f32;
prev = x;
}
data.clear();
data.extend_from_slice(bytemuck::cast_slice(&fdata));
Some(data)
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::read(filename).ok()
}
}
}
pub(crate) fn write_tile(
&mut self,
layer: LayerType,
node: VNode,
data: &[u8],
base: bool,
) -> Result<(), Error> {
let filename = Self::tile_name(layer, node);
match layer {
LayerType::Albedo => image::save_buffer_with_format(
&filename,
data,
self.layers[layer].texture_resolution as u32,
self.layers[layer].texture_resolution as u32,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?,
LayerType::Heightmaps => {
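// Heights are quantized to multiples of 4, delta-encoded as i16, and
// snappy-compressed; `read_tile` reverses these steps.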
let data: &[f32] = bytemuck::cast_slice(data);
let mut qdata = vec![0i16; data.len()];
let mut prev = 0;
for (q, d) in qdata.iter_mut().zip(data.iter()) {
let x = ((*d as i16) / 4) * 4;
*q = x.wrapping_sub(prev);
prev = x;
}
snap::write::FrameEncoder::new(File::create(filename)?)
.write_all(bytemuck::cast_slice(&qdata))?;
}
LayerType::Normals | LayerType::Displacements | LayerType::Roughness => {
fs::write(filename, data)?;
}
}
self.update_tile_meta(
layer,
node,
TileMeta { crc32: 0, state: if base { TileState::Base } else { TileState::Generated } },
)
}
pub(crate) fn read_texture(
&self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
name: &str,
) -> Result<wgpu::Texture, Error> {
let desc = self.lookup_texture(name)?.unwrap();
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d { width: desc.width, height: desc.height, depth: desc.depth },
format: desc.format.to_wgpu(),
mip_level_count: 1,
sample_count: 1,
dimension: if desc.depth == 1 {
wgpu::TextureDimension::D2
} else {
wgpu::TextureDimension::D3
},
usage: wgpu::TextureUsage::COPY_SRC
| wgpu::TextureUsage::COPY_DST
| wgpu::TextureUsage::SAMPLED
| wgpu::TextureUsage::STORAGE,
label: None,
});
let (width, height) = (desc.width as usize, (desc.height * desc.depth) as usize);
assert_eq!(width % desc.format.block_size() as usize, 0);
assert_eq!(height % desc.format.block_size() as usize, 0);
let (width, height) =
(width / desc.format.block_size() as usize, height / desc.format.block_size() as usize);
let row_bytes = width * desc.format.bytes_per_block();
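// Round the row size up to a 256-byte multiple: wgpu requires `bytes_per_row`
// in buffer-to-texture copies to be aligned to COPY_BYTES_PER_ROW_ALIGNMENT.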
let row_pitch = (row_bytes + 255) & !255;
let data = if desc.format == TextureFormat::RGBA8 {
image::open(TERRA_DIRECTORY.join(format!("{}.bmp", name)))?.to_rgba().into_vec()
} else {
fs::read(TERRA_DIRECTORY.join(format!("{}.raw", name)))?
};
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
size: (row_pitch * height) as u64,
usage: wgpu::BufferUsage::MAP_WRITE | wgpu::BufferUsage::COPY_SRC,
label: None,
mapped_at_creation: true,
});
let mut buffer_view = buffer.slice(..).get_mapped_range_mut();
for row in 0..height {
buffer_view[row * row_pitch..][..row_bytes]
.copy_from_slice(&data[row * row_bytes..][..row_bytes]);
}
drop(buffer_view);
buffer.unmap();
encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &buffer,
layout: wgpu::TextureDataLayout {
offset: 0,
bytes_per_row: row_pitch as u32,
rows_per_image: height as u32 / desc.depth,
},
},
wgpu::TextureCopyView {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d { x: 0, y: 0, z: 0 },
},
wgpu::Extent3d {
width: width as u32,
height: height as u32 / desc.depth,
depth: desc.depth,
},
);
Ok(texture)
}
pub(crate) fn write_texture(
&self,
name: &str,
desc: TextureDescriptor,
data: &[u8],
) -> Result<(), Error> {
self.update_texture(name, desc)?;
if desc.format == TextureFormat::RGBA8 {
let filename = TERRA_DIRECTORY.join(format!("{}.bmp", name));
Ok(image::save_buffer_with_format(
&filename,
data,
desc.width,
desc.height * desc.depth,
image::ColorType::Rgba8,
image::ImageFormat::Bmp,
)?)
} else {
let filename = TERRA_DIRECTORY.join(format!("{}.raw", name));
Ok(fs::write(&filename, data)?)
}
}
pub(crate) fn reload_texture(&self, name: &str) -> bool {
let desc = self.lookup_texture(name);
if let Ok(Some(desc)) = desc {
if desc.format == TextureFormat::RGBA8 {
TERRA_DIRECTORY.join(format!("{}.bmp", name)).exists()
} else {
TERRA_DIRECTORY.join(format!("{}.raw", name)).exists()
}
} else {
false
}
}
pub(crate) fn layers(&self) -> &VecMap<LayerParams> {
&self.layers
}
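/// Computes the on-disk path of a tile. For example (assuming the default
/// `TERRA_DIRECTORY`), a level-2 albedo tile at (1, 3) on the north face maps
/// to something like `tiles/albedo_2_N_1x3.bmp`.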
pub(crate) fn tile_name(layer: LayerType, node: VNode) -> PathBuf {
let face = match node.face() {
0 => "0E",
1 => "180E",
2 => "90E",
3 => "90W",
4 => "N",
5 => "S",
_ => unreachable!(),
};
let (layer, ext) = match layer {
LayerType::Displacements => ("displacements", "raw"),
LayerType::Albedo => ("albedo", "bmp"),
LayerType::Roughness => ("roughness", "raw"),
LayerType::Normals => ("normals", "raw"),
LayerType::Heightmaps => ("heightmaps", "raw.sz"),
};
TERRA_DIRECTORY.join(&format!(
"tiles/{}_{}_{}_{}x{}.{}",
layer,
node.level(),
face,
node.x(),
node.y(),
ext
))
}
pub(crate) fn reload_tile_state(
&self,
layer: LayerType,
node: VNode,
base: bool,
) -> Result<TileState, Error> {
let filename = Self::tile_name(layer, node);
let meta = self.lookup_tile_meta(layer, node);
let exists = filename.exists();
let target_state = if base && exists {
TileState::Base
} else if base {
TileState::MissingBase
} else if exists {
TileState::Generated
} else {
TileState::Missing
};
if let Ok(Some(TileMeta { state, .. })) = meta {
if state == target_state {
return Ok(state);
}
}
let new_meta = TileMeta { state: target_state, crc32: 0 };
self.update_tile_meta(layer, node, new_meta)?;
Ok(target_state)
}
// pub(crate) fn set_missing(
// &self,
// layer: LayerType,
// node: VNode,
// base: bool,
// ) -> Result<(), Error> {
// let state = if base { TileState::MissingBase } else { TileState::Missing };
// self.update_tile_meta(layer, node, TileMeta { crc32: 0, state })
// }
pub(crate) fn clear_generated(&mut self, layer: LayerType) -> Result<(), Error> {
self.scan_tile_meta(layer, |node, meta| {
if let TileState::Generated = meta.state {
self.remove_tile_meta(layer, node)?;
}
Ok(())
})
}
pub(crate) fn get_missing_base(&self, layer: LayerType) -> Result<Vec<VNode>, Error> {
let mut missing = Vec::new();
self.scan_tile_meta(layer, |node, meta| {
if let TileState::MissingBase = meta.state {
missing.push(node);
}
Ok(())
})?;
Ok(missing)
}
//
// These functions use the database.
//
fn lookup_tile_meta(&self, layer: LayerType, node: VNode) -> Result<Option<TileMeta>, Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
Ok(self.tiles.get(key)?.map(|value| bincode::deserialize(&value).unwrap()))
}
fn update_tile_meta(&self, layer: LayerType, node: VNode, meta: TileMeta) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
let value = bincode::serialize(&meta).unwrap();
self.tiles.insert(key, value)?;
Ok(())
}
fn remove_tile_meta(&self, layer: LayerType, node: VNode) -> Result<(), Error> {
let key = bincode::serialize(&(layer, node)).unwrap();
self.tiles.remove(key)?;
Ok(())
}
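// Iterates all tile metadata for one layer; relies on bincode serializing the
// layer tag as a stable prefix of the (layer, node) key, so `scan_prefix`
// visits exactly that layer's entries.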
fn scan_tile_meta<F: FnMut(VNode, TileMeta) -> Result<(), Error>>(
&self,
layer: LayerType,
mut f: F,
) -> Result<(), Error> {
let prefix = bincode::serialize(&layer).unwrap();
for i in self.tiles.scan_prefix(&prefix) {
let (k, v) = i?;
let meta = bincode::deserialize::<TileMeta>(&v)?;
let node = bincode::deserialize::<(LayerType, VNode)>(&k)?.1;
f(node, meta)?;
}
Ok(())
}
fn lookup_texture(&self, name: &str) -> Result<Option<TextureDescriptor>, Error> {
Ok(self.textures.get(name)?.map(|value| serde_json::from_slice(&value).unwrap()))
}
fn update_texture(&self, name: &str, desc: TextureDescriptor) -> Result<(), Error> {
let value = serde_json::to_vec(&desc).unwrap();
self.textures.insert(name, value)?;
Ok(())
}
}
// activity_heartbeat_manager.rs
use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains a sender channel for each activity that has an active
/// heartbeat processor, keyed by task token.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [Self::next_pending_cancel]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager; it allows sending new
/// heartbeats or requesting and awaiting shutdown. When shutdown is requested, a signal is
/// sent to all processors, allowing them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat. Note that the first call results in an immediate call to the
/// server, while rapid successive calls accumulate for up to `delay`, after which the
/// latest heartbeat details are sent to the server. If there is no activity for `delay`,
/// the heartbeat processor is reset and the process starts over, meaning the next
/// heartbeat is sent immediately by a newly created processor.
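///
/// A rough usage sketch (the handle, task token and payload here are assumed to
/// come from the surrounding activity machinery; the delay would typically be
/// half of the activity's heartbeat timeout):
///
/// ```ignore
/// handle.record(
///     ActivityHeartbeat { task_token: token_bytes, details: vec![progress_payload] },
///     heartbeat_timeout / 2,
/// )?;
/// ```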
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates the shutdown procedure by stopping the lifecycle loop and awaiting graceful
/// termination of all heartbeat processors.
pub async fn shutdown(&self) {
if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
impl<SG: ServerGatewayApis + Send + Sync +'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows to send new heartbeats and initiate the shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records a heartbeat by sending it to the processor.
/// New processor is created if one doesn't exist, otherwise new event is dispatched to the
/// existing processor's receiver channel.
fn record(&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync +'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads; the first thing we need to do is
// send them out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
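// Editor's note — illustrative helper, not in the original source. The timing
// comments above assume `delay` is half of the activity's heartbeat timeout,
// so two quiet `delay` periods add up to exactly one missed timeout. A caller
// could derive the processor delay like this:
#[allow(dead_code)]
fn delay_from_heartbeat_timeout(heartbeat_timeout: std::time::Duration) -> std::time::Duration {
    heartbeat_timeout / 2
}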
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that heartbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests for 400ms; the first heartbeat goes out right away, the rest are
// aggregated, and the last one is sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one 500ms later, once the delay elapses.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests in a tight loop with no delay; the first heartbeat goes out right away,
// and the rest are aggregated, with the last one sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat, waits until the processor times out and exits, then sends another one.
/// The expectation is that a new processor is spawned and the heartbeat isn't lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in an error.
#[tokio::test]
async fn record_after_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyway.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
} | /// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, the shutdown signal has already been sent and the
// consumer might have been dropped, meaning that sending to the channel may fail.
// All we need to do is simply await the handle's completion. | random_line_split
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [fetch_cancellations]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager; it allows sending new
/// heartbeats or requesting and awaiting shutdown. When shutdown is requested, a signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat. Note that the first call results in an immediate call to the
/// server, while rapid successive calls accumulate for up to `delay`,
/// after which the latest heartbeat details are sent to the server. If there is no activity for
/// `delay`, the heartbeat processor is reset and the process starts
/// over again, meaning that the next heartbeat is sent immediately, creating a new processor.
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates the shutdown procedure by stopping the lifecycle loop and waiting for all heartbeat
/// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, the shutdown signal has already been sent and the
// consumer might have been dropped, meaning that sending to the channel may fail.
// All we need to do is simply await the handle's completion.
if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
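// ---------------------------------------------------------------------------
// Editor's illustrative sketch — not part of the original source. Draining
// pending cancels with the handle above: `next_pending_cancel` resolves once
// per queued cancellation and yields `None` after the sender side is dropped.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
async fn drain_cancels_sketch(handle: &ActivityHeartbeatManagerHandle) {
    while let Some(task_token) = handle.next_pending_cancel().await {
        // A real worker would dispatch the cancellation to the lang side here.
        drop(task_token);
    }
}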
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows sending new heartbeats and initiating shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records a heartbeat by sending it to the processor.
/// A new processor is created if one doesn't exist; otherwise the event is dispatched to the
/// existing processor's receiver channel.
fn record(&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
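// Editor's illustrative sketch — not part of the original source. The
// lifecycle loop above is driven entirely by `LifecycleEvent`s; constructing
// the heartbeat variant by hand looks like this (the values are arbitrary):
#[allow(dead_code)]
fn example_heartbeat_event() -> LifecycleEvent {
    LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
        task_token: TaskToken(vec![1, 2, 3]),
        details: Vec::new(),
        delay: Duration::from_millis(500),
    })
}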
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads; the first thing we need to do is send
// them out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
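                // Editor's note (added comment, not in the original source):
                // `biased` makes the polling order deterministic — fresh
                // heartbeat details are checked first, then shutdown, and
                // only then the idle timeout below.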
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that the
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that heartbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests for 400ms; the first heartbeat goes out right away, the rest are
// aggregated, and the last one is sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one 500ms later, once the delay elapses.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests in a tight loop with no delay; the first heartbeat goes out right away,
// and the rest are aggregated, with the last one sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat, waits until the processor times out and exits, then sends another one.
/// The expectation is that a new processor is spawned and the heartbeat isn't lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in an error.
#[tokio::test]
async fn record_after_shutdown() | }
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
}
| {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyway.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown") | identifier_body |
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [fetch_cancellations]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager; it allows sending new
/// heartbeats or requesting and awaiting shutdown. When shutdown is requested, a signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat. Note that the first call results in an immediate call to the
/// server, while rapid successive calls accumulate for up to `delay`,
/// after which the latest heartbeat details are sent to the server. If there is no activity for
/// `delay`, the heartbeat processor is reset and the process starts
/// over again, meaning that the next heartbeat is sent immediately, creating a new processor.
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates the shutdown procedure by stopping the lifecycle loop and waiting for all heartbeat
/// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, the shutdown signal has already been sent and the
// consumer might have been dropped, meaning that sending to the channel may fail.
// All we need to do is simply await the handle's completion.
if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
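// ---------------------------------------------------------------------------
// Editor's illustrative sketch — not part of the original source. Heartbeats
// race with worker shutdown, so callers are expected to tolerate the
// `ShuttingDown` error rather than treat it as fatal.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn record_or_drop(
    handle: &ActivityHeartbeatManagerHandle,
    heartbeat: ActivityHeartbeat,
    delay: Duration,
) {
    match handle.record(heartbeat, delay) {
        Ok(()) => { /* heartbeat queued for aggregation */ }
        Err(_shutting_down) => {
            // The manager is tearing down; dropping the heartbeat is fine.
        }
    }
}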
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows sending new heartbeats and initiating shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records a heartbeat by sending it to the processor.
/// A new processor is created if one doesn't exist; otherwise the event is dispatched to the
/// existing processor's receiver channel.
fn record(&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads; the first thing we need to do is send
// them out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that the
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => |
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
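// Editor's illustrative sketch — not part of the original source. The
// processor's `heartbeat_rx` is a tokio watch channel, so only the *latest*
// details survive between ticks; intermediate heartbeats are meant to be lost.
#[allow(dead_code)]
fn watch_keeps_latest_sketch() {
    let (tx, rx) = channel(vec![0u8]);
    tx.send(vec![1u8]).unwrap();
    tx.send(vec![2u8]).unwrap();
    // An observer waking up now sees only the most recent value.
    assert_eq!(*rx.borrow(), vec![2u8]);
}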
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that heartbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests for 400ms; the first heartbeat goes out right away, the rest are
// aggregated, and the last one is sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one 500ms later, once the delay elapses.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests in a tight loop with no delay; the first heartbeat goes out right away,
// and the rest are aggregated, with the last one sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat, waits until the processor times out and exits, then sends another one.
/// The expectation is that a new processor is spawned and the heartbeat isn't lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in an error.
#[tokio::test]
async fn record_after_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyway.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
}
| {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
} | conditional_block |
activity_heartbeat_manager.rs | use crate::task_token::TaskToken;
use crate::{
errors::ActivityHeartbeatError,
protos::{
coresdk::{common, ActivityHeartbeat, PayloadsExt},
temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse,
},
ServerGatewayApis,
};
use std::{
collections::HashMap,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{self, Duration},
};
use tokio::{
select,
sync::{
mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
watch::{channel, Receiver, Sender},
Mutex,
},
task::{JoinError, JoinHandle},
time::sleep,
};
pub(crate) struct ActivityHeartbeatManager<SG> {
/// Core will aggregate activity heartbeats for each activity and send them to the server
/// periodically. This map contains sender channel for each activity, identified by the task
/// token, that has an active heartbeat processor.
heartbeat_processors: HashMap<TaskToken, ActivityHeartbeatProcessorHandle>,
events_tx: UnboundedSender<LifecycleEvent>,
events_rx: UnboundedReceiver<LifecycleEvent>,
shutdown_tx: Sender<bool>,
shutdown_rx: Receiver<bool>,
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
/// Used to supply new heartbeat events to the activity heartbeat manager, or to send a shutdown
/// request.
pub(crate) struct ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool,
events: UnboundedSender<LifecycleEvent>,
/// Cancellations that have been received when heartbeating are queued here and can be consumed
/// by [fetch_cancellations]
incoming_cancels: Mutex<UnboundedReceiver<TaskToken>>,
/// Used during `shutdown` to await until all inflight requests are sent.
join_handle: Mutex<Option<JoinHandle<()>>>,
}
/// Used to supply heartbeat details to the heartbeat processor, which periodically sends them to
/// the server.
struct ActivityHeartbeatProcessorHandle {
heartbeat_tx: Sender<Vec<common::Payload>>,
join_handle: JoinHandle<()>,
}
/// Heartbeat processor that aggregates and periodically sends heartbeat requests for a single
/// activity to the server.
struct ActivityHeartbeatProcessor<SG> {
task_token: TaskToken,
delay: time::Duration,
/// Used to receive heartbeat events.
heartbeat_rx: Receiver<Vec<common::Payload>>,
/// Used to receive shutdown notifications.
shutdown_rx: Receiver<bool>,
/// Used to send CleanupProcessor event at the end of the processor loop.
events_tx: UnboundedSender<LifecycleEvent>,
/// Used to send cancellation notices that we learned about when heartbeating back up to core
cancels_tx: UnboundedSender<TaskToken>,
server_gateway: Arc<SG>,
}
#[derive(Debug)]
pub enum LifecycleEvent {
Heartbeat(ValidActivityHeartbeat),
CleanupProcessor(TaskToken),
Shutdown,
}
#[derive(Debug)]
pub struct ValidActivityHeartbeat {
pub task_token: TaskToken,
pub details: Vec<common::Payload>,
pub delay: time::Duration,
}
/// Handle that is used by the core for all interactions with the manager; it allows sending new
/// heartbeats or requesting and awaiting shutdown. When shutdown is requested, a signal gets
/// sent to all processors, which allows them to complete gracefully.
impl ActivityHeartbeatManagerHandle {
/// Records a new heartbeat. Note that the first call results in an immediate call to the
/// server, while rapid successive calls accumulate for up to `delay`,
/// after which the latest heartbeat details are sent to the server. If there is no activity for
/// `delay`, the heartbeat processor is reset and the process starts
/// over again, meaning that the next heartbeat is sent immediately, creating a new processor.
pub fn record(
&self,
details: ActivityHeartbeat,
delay: Duration,
) -> Result<(), ActivityHeartbeatError> {
if self.shutting_down.load(Ordering::Relaxed) {
return Err(ActivityHeartbeatError::ShuttingDown);
}
self.events
.send(LifecycleEvent::Heartbeat(ValidActivityHeartbeat {
task_token: TaskToken(details.task_token),
details: details.details,
delay,
}))
.expect("Receive half of the heartbeats event channel must not be dropped");
Ok(())
}
/// Returns a future that resolves any time there is a new activity cancel that must be
/// dispatched to lang
pub async fn next_pending_cancel(&self) -> Option<TaskToken> {
self.incoming_cancels.lock().await.recv().await
}
/// Initiates the shutdown procedure by stopping the lifecycle loop and waiting for all heartbeat
/// processors to terminate gracefully.
pub async fn shutdown(&self) {
// If shutdown was called multiple times, the shutdown signal has already been sent and the
// consumer might have been dropped, meaning that sending to the channel may fail.
// All we need to do is simply await the handle's completion.
if !self.shutting_down.load(Ordering::Relaxed) {
self.events
.send(LifecycleEvent::Shutdown)
.expect("should be able to send shutdown event");
self.shutting_down.store(true, Ordering::Relaxed);
}
let mut handle = self.join_handle.lock().await;
if let Some(h) = handle.take() {
h.await.expect("shutdown should exit cleanly");
}
}
}
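// Editor's illustrative sketch — not part of the original source. `shutdown`
// is written to be idempotent: the first call sends the event and awaits the
// join handle; later calls find the flag set and the handle already taken.
#[allow(dead_code)]
async fn double_shutdown_sketch(handle: &ActivityHeartbeatManagerHandle) {
    handle.shutdown().await;
    handle.shutdown().await; // no-op: nothing is sent and no handle is left to await
}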
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatManager<SG> {
#![allow(clippy::new_ret_no_self)]
/// Creates a new instance of an activity heartbeat manager and returns a handle to the user,
/// which allows sending new heartbeats and initiating shutdown.
pub fn new(sg: Arc<SG>) -> ActivityHeartbeatManagerHandle {
let (shutdown_tx, shutdown_rx) = channel(false);
let (events_tx, events_rx) = unbounded_channel();
let (cancels_tx, cancels_rx) = unbounded_channel();
let s = Self {
heartbeat_processors: Default::default(),
events_tx: events_tx.clone(),
events_rx,
shutdown_tx,
shutdown_rx,
cancels_tx,
server_gateway: sg,
};
let join_handle = tokio::spawn(s.lifecycle());
ActivityHeartbeatManagerHandle {
shutting_down: AtomicBool::new(false),
events: events_tx,
incoming_cancels: Mutex::new(cancels_rx),
join_handle: Mutex::new(Some(join_handle)),
}
}
/// Main loop that handles all heartbeat requests and dispatches them to processors.
async fn lifecycle(mut self) {
while let Some(event) = self.events_rx.recv().await {
match event {
LifecycleEvent::Heartbeat(heartbeat) => self.record(heartbeat),
LifecycleEvent::Shutdown => break,
LifecycleEvent::CleanupProcessor(task_token) => {
self.heartbeat_processors.remove(&task_token);
}
}
}
self.shutdown().await.expect("shutdown should exit cleanly")
}
/// Records a heartbeat by sending it to the processor.
/// A new processor is created if one doesn't exist; otherwise the event is dispatched to the
/// existing processor's receiver channel.
fn | (&mut self, heartbeat: ValidActivityHeartbeat) {
match self.heartbeat_processors.get(&heartbeat.task_token) {
Some(handle) => {
handle
.heartbeat_tx
.send(heartbeat.details)
.expect("heartbeat channel can't be dropped if we are inside this method");
}
None => {
let (heartbeat_tx, heartbeat_rx) = channel(heartbeat.details);
let processor = ActivityHeartbeatProcessor {
task_token: heartbeat.task_token.clone(),
delay: heartbeat.delay,
heartbeat_rx,
shutdown_rx: self.shutdown_rx.clone(),
events_tx: self.events_tx.clone(),
cancels_tx: self.cancels_tx.clone(),
server_gateway: self.server_gateway.clone(),
};
let join_handle = tokio::spawn(processor.run());
let handle = ActivityHeartbeatProcessorHandle {
heartbeat_tx,
join_handle,
};
self.heartbeat_processors
.insert(heartbeat.task_token, handle);
}
}
}
/// Initiates termination of all heartbeat processors by sending a signal and awaits termination
pub async fn shutdown(mut self) -> Result<(), JoinError> {
self.shutdown_tx
.send(true)
.expect("shutdown channel can't be dropped before shutdown is complete");
for v in self.heartbeat_processors.drain() {
v.1.join_handle.await?;
}
Ok(())
}
}
impl<SG: ServerGatewayApis + Send + Sync + 'static> ActivityHeartbeatProcessor<SG> {
async fn run(mut self) {
// Each processor is initialized with heartbeat payloads; the first thing we need to do is send
// them out.
self.record_heartbeat().await;
loop {
sleep(self.delay).await;
select! {
biased;
_ = self.heartbeat_rx.changed() => {
self.record_heartbeat().await;
}
_ = self.shutdown_rx.changed() => {
break;
}
_ = sleep(self.delay) => {
// Timed out while waiting for the next heartbeat. We waited 2 * delay in total,
// where delay is 1/2 of the activity heartbeat timeout. This means that the
// activity has either timed out or completed by now.
break;
}
};
}
self.events_tx
.send(LifecycleEvent::CleanupProcessor(self.task_token))
.expect("cleanup requests should not be dropped");
}
async fn record_heartbeat(&mut self) {
let details = self.heartbeat_rx.borrow().clone();
match self
.server_gateway
.record_activity_heartbeat(self.task_token.clone(), details.into_payloads())
.await
{
Ok(RecordActivityTaskHeartbeatResponse { cancel_requested }) => {
if cancel_requested {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
}
// Send cancels for any activity that learns its workflow already finished (which is
// one thing not found implies - other reasons would seem equally valid).
Err(s) if s.code() == tonic::Code::NotFound => {
self.cancels_tx
.send(self.task_token.clone())
.expect("Receive half of heartbeat cancels not blocked");
}
Err(e) => {
warn!("Error when recording heartbeat: {:?}", e)
}
}
}
}
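// Editor's note — illustrative helper, not in the original source. Besides the
// explicit `cancel_requested` flag, a NOT_FOUND status from the server is
// treated as an implicit cancel, mirroring the match arm in `record_heartbeat`.
#[allow(dead_code)]
fn is_implicit_cancel(status: &tonic::Status) -> bool {
    status.code() == tonic::Code::NotFound
}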
#[cfg(test)]
mod test {
use super::*;
use crate::pollers::MockServerGatewayApis;
use crate::protos::coresdk::common::Payload;
use crate::protos::temporal::api::workflowservice::v1::RecordActivityTaskHeartbeatResponse;
use std::time::Duration;
/// Ensure that heartbeats that are sent with a small delay are aggregated and sent roughly once
/// every 1/2 of the heartbeat timeout.
#[tokio::test]
async fn process_heartbeats_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests for 400ms; the first heartbeat goes out right away, the rest are
// aggregated, and the last one is sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..40 {
sleep(Duration::from_millis(10)).await;
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// Ensure that heartbeat can be called from a tight loop without any delays, resulting in two
/// interactions with the server - one immediately and one 500ms later, once the delay elapses.
#[tokio::test]
async fn process_tight_loop_and_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
// Send heartbeat requests in a tight loop with no delay; the first heartbeat goes out right away,
// and the rest are aggregated, with the last one sent to the server after 500ms (1/2 of the heartbeat timeout).
for i in 0u8..u8::MAX {
record_heartbeat(&hm, fake_task_token.clone(), i, Duration::from_millis(1000));
}
hm.shutdown().await;
}
/// This test reports one heartbeat, waits until the processor times out and exits, then sends another one.
/// The expectation is that a new processor is spawned and the heartbeat isn't lost.
#[tokio::test]
async fn report_heartbeat_after_timeout() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(2);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
let fake_task_token = vec![1, 2, 3];
record_heartbeat(&hm, fake_task_token.clone(), 0, Duration::from_millis(100));
sleep(Duration::from_millis(500)).await;
record_heartbeat(&hm, fake_task_token.clone(), 1, Duration::from_millis(100));
hm.shutdown().await;
}
/// Recording new heartbeats after shutdown is not allowed, and will result in an error.
#[tokio::test]
async fn record_after_shutdown() {
let mut mock_gateway = MockServerGatewayApis::new();
mock_gateway
.expect_record_activity_heartbeat()
.returning(|_, _| Ok(RecordActivityTaskHeartbeatResponse::default()))
.times(0);
let hm = ActivityHeartbeatManager::new(Arc::new(mock_gateway));
hm.shutdown().await;
match hm.record(
ActivityHeartbeat {
task_token: vec![1, 2, 3],
details: vec![Payload {
// payload doesn't matter in this case, as it shouldn't get sent anyways.
..Default::default()
}],
},
Duration::from_millis(1000),
) {
Ok(_) => {
unreachable!("heartbeat should not be recorded after the shutdown")
}
Err(e) => {
matches!(e, ActivityHeartbeatError::ShuttingDown);
}
}
}
fn record_heartbeat(
hm: &ActivityHeartbeatManagerHandle,
task_token: Vec<u8>,
i: u8,
delay: Duration,
) {
hm.record(
ActivityHeartbeat {
task_token,
details: vec![Payload {
metadata: Default::default(),
data: vec![i],
}],
},
delay,
)
.expect("hearbeat recording should not fail");
}
}
| record | identifier_name |
process_vm.rs | MLayout::new(stack_size, 16)?,
];
let process_layout = elf_layouts.iter().chain(other_layouts.iter()).fold(
VMLayout::new_empty(),
|mut process_layout, sub_layout| {
process_layout.add(&sub_layout);
process_layout
},
);
// Now that we have the memory layout required by the process,
// let's allocate the memory for the process
let mut chunks = HashSet::new();
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let vm_option = VMMapOptionsBuilder::default()
.size(elf_layout.size())
.align(elf_layout.align())
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::ElfSpecific {
elf_file: elf_file.file_ref().clone(),
})
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (elf_range, chunk_ref) =
USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(elf_range.start() % elf_layout.align() == 0);
chunks.insert(chunk_ref);
Self::init_elf_memory(&elf_range, elf_file).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
trace!("elf range = {:?}", elf_range);
elf_ranges.push(elf_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let vm_option = VMMapOptionsBuilder::default()
.size(heap_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (heap_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(heap_range.start() % heap_layout.align() == 0);
trace!("heap range = {:?}", heap_range);
let brk = RwLock::new(heap_range.start());
chunks.insert(chunk_ref);
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let vm_option = VMMapOptionsBuilder::default()
.size(stack_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (stack_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(stack_range.start() % stack_layout.align() == 0);
chunks.insert(chunk_ref);
trace!("stack range = {:?}", stack_range);
let mem_chunks = Arc::new(RwLock::new(chunks));
Ok(ProcessVM {
elf_ranges,
heap_range,
stack_range,
brk,
mem_chunks,
})
}
fn validate(&self) -> Result<()> {
let validate_size = |size_opt| -> Result<()> {
if let Some(size) = size_opt {
if size == 0 || size % PAGE_SIZE != 0 {
return_errno!(EINVAL, "invalid size");
}
}
Ok(())
};
validate_size(self.heap_size)?;
validate_size(self.stack_size)?;
validate_size(self.mmap_size)?;
Ok(())
}
fn handle_error_when_init(&self, chunks: &HashSet<Arc<Chunk>>) {
chunks.iter().for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(chunk, None, false);
});
}
fn init_elf_memory(elf_range: &VMRange, elf_file: &ElfFile) -> Result<()> {
// Destination buffer: ELF appeared in the process
let elf_proc_buf = unsafe { elf_range.as_slice_mut() };
// Source buffer: ELF stored in the ELF file
let elf_file_buf = elf_file.as_slice();
let base_load_address_offset = elf_file.base_load_address_offset() as usize;
// Offsets to track the zeroed range
let mut empty_start_offset = 0;
let mut empty_end_offset = 0;
// Init all loadable segments
elf_file
.program_headers()
.filter(|segment| segment.loadable())
.for_each(|segment| {
let file_size = segment.p_filesz as usize;
let file_offset = segment.p_offset as usize;
let mem_addr = segment.p_vaddr as usize;
let mem_size = segment.p_memsz as usize;
let alignment = segment.p_align as usize;
debug_assert!(file_size <= mem_size);
let mem_start_offset = mem_addr - base_load_address_offset;
// Initialize empty part to zero based on alignment
empty_start_offset = align_down(mem_start_offset, alignment);
for b in &mut elf_proc_buf[empty_start_offset..mem_start_offset] {
*b = 0;
}
// Bytes of file_size length are loaded from the ELF file
elf_file.file_ref().read_at(
file_offset,
&mut elf_proc_buf[mem_start_offset..mem_start_offset + file_size],
);
// Set the remaining part to zero based on alignment
debug_assert!(file_size <= mem_size);
empty_end_offset = align_up(mem_start_offset + mem_size, alignment);
for b in &mut elf_proc_buf[mem_start_offset + file_size..empty_end_offset] {
*b = 0;
}
});
Ok(())
}
}
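// Editor's illustrative sketch — not part of the original source. The zeroing
// in `init_elf_memory` pads each loadable segment out to its alignment. With
// an assumed base load offset of 0x1000 and a segment at p_vaddr = 0x1234,
// p_memsz = 0x100, p_align = 0x1000, the touched window is one whole page:
#[allow(dead_code)]
fn segment_window_example() -> (usize, usize) {
    let base_load_address_offset = 0x1000usize;
    let (mem_addr, mem_size, alignment) = (0x1234usize, 0x100usize, 0x1000usize);
    let mem_start_offset = mem_addr - base_load_address_offset; // 0x234
    let empty_start = align_down(mem_start_offset, alignment); // 0x0
    let empty_end = align_up(mem_start_offset + mem_size, alignment); // 0x1000
    (empty_start, empty_end)
}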
// MemChunks is the structure to track all the chunks which are used by this process.
type MemChunks = Arc<RwLock<HashSet<ChunkRef>>>;
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
elf_ranges: Vec<VMRange>,
heap_range: VMRange,
stack_range: VMRange,
brk: RwLock<usize>,
// Memory safety notes: the mem_chunks field must be the last one.
//
// Rust drops fields in the same order as they are declared. So by making
// mem_chunks the last field, we ensure that when all other fields are
// dropped, their drop methods (if provided) can still access the memory
// region represented by the mem_chunks field.
mem_chunks: MemChunks,
}
impl Default for ProcessVM {
fn default() -> ProcessVM {
ProcessVM {
elf_ranges: Default::default(),
heap_range: Default::default(),
stack_range: Default::default(),
brk: Default::default(),
mem_chunks: Arc::new(RwLock::new(HashSet::new())),
}
}
}
impl Drop for ProcessVM {
fn drop(&mut self) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
// There are two cases in which this drop is called:
// (1) The process exits normally and, at the end, drops the process VM.
// (2) During the process-creation stage, the process VM is ready but some other error occurs while creating the process, e.g. spawn_attribute is set
// to a wrong value.
//
// In the first case, the process VM is cleaned up in the exit procedure and nothing is needed here. In the second case, mem_chunks is not empty and should
// be cleaned here.
mem_chunks
.drain_filter(|chunk| chunk.is_single_vma())
.for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(&chunk, None, false);
});
assert!(mem_chunks.len() == 0);
info!("Process VM dropped");
}
}
impl ProcessVM {
pub fn mem_chunks(&self) -> &MemChunks {
&self.mem_chunks
}
pub fn stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn add_mem_chunk(&self, chunk: ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.insert(chunk);
}
pub fn remove_mem_chunk(&self, chunk: &ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.remove(chunk);
}
pub fn replace_mem_chunk(&self, old_chunk: &ChunkRef, new_chunk: ChunkRef) {
self.remove_mem_chunk(old_chunk);
self.add_mem_chunk(new_chunk)
}
// Try merging all connecting single VMAs of the process.
// This is a very expensive operation.
pub fn merge_all_single_vma_chunks(
mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
) -> Result<Vec<VMArea>> {
// Get all single VMA chunks
// Shared chunks shouldn't be merged since they are managed by the shm manager and shared by multiple processes
let mut single_vma_chunks = mem_chunks
.drain_filter(|chunk| chunk.is_single_vma() && !chunk.is_shared())
.collect::<Vec<ChunkRef>>();
single_vma_chunks.sort_unstable_by(|chunk_a, chunk_b| {
chunk_a
.range()
.start()
.partial_cmp(&chunk_b.range().start())
.unwrap()
});
// Try merging connecting single VMA chunks
for chunks in single_vma_chunks.windows(2) {
let chunk_a = &chunks[0];
let chunk_b = &chunks[1];
let mut vma_a = match chunk_a.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
let mut vma_b = match chunk_b.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
if VMArea::can_merge_vmas(&vma_a, &vma_b) {
let new_start = vma_a.start();
vma_b.set_start(new_start);
// set vma_a to zero
vma_a.set_end(new_start);
}
}
// Collect merged vmas which will be the output of this function
let mut merged_vmas = Vec::new();
// Insert unchanged chunks back to mem_chunks list and collect merged vmas for output
for chunk in single_vma_chunks.into_iter().filter_map(|chunk| {
if !chunk.is_single_dummy_vma() {
if chunk.is_single_vma_with_conflict_size() {
let new_vma = chunk.get_vma_for_single_vma_chunk().clone();
merged_vmas.push(new_vma);
// Don't insert the merged chunks to mem_chunk list here. It should be updated later.
None
} else {
Some(chunk)
}
} else {
None
}
}) {
mem_chunks.insert(chunk);
}
Ok(merged_vmas)
}
pub fn get_process_range(&self) -> &VMRange {
USER_SPACE_VM_MANAGER.range()
}
pub fn get_elf_ranges(&self) -> &[VMRange] {
&self.elf_ranges
}
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn get_base_addr(&self) -> usize {
self.get_process_range().start()
}
pub fn get_stack_base(&self) -> usize {
self.get_stack_range().end()
}
pub fn get_stack_limit(&self) -> usize {
self.get_stack_range().start()
}
pub fn get_brk(&self) -> usize {
*self.brk.read().unwrap()
}
pub fn brk(&self, brk: usize) -> Result<usize> {
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
// Acquire lock first to avoid data-race.
let mut brk_guard = self.brk.write().unwrap();
if brk >= heap_start && brk <= heap_end {
// Get page-aligned brk address.
let new_brk = align_up(brk, PAGE_SIZE);
// Get page-aligned old brk address.
let old_brk = align_up(*brk_guard, PAGE_SIZE);
// Reset the memory when brk shrinks.
if new_brk < old_brk {
let shrink_brk_range =
VMRange::new(new_brk, old_brk).expect("shrink brk range must be valid");
USER_SPACE_VM_MANAGER.reset_memory(shrink_brk_range)?;
}
// Return the user-specified brk address without page alignment. This is the same as Linux.
*brk_guard = brk;
Ok(brk)
} else {
if brk < heap_start {
error!("New brk address is too low");
} else if brk > heap_end {
error!("New brk address is too high");
}
Ok(*brk_guard)
}
}
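// Editor's illustrative sketch — not part of the original source; it assumes
// the configured heap is at least two pages. It walks through the brk
// semantics implemented above: grow, shrink (which zeroes the freed page),
// and an out-of-range request that leaves the break unchanged.
#[allow(dead_code)]
fn brk_usage_sketch(&self) {
    let heap_start = self.heap_range.start();
    let _grown = self.brk(heap_start + 2 * PAGE_SIZE).unwrap_or(heap_start);
    let shrunk = self.brk(heap_start + PAGE_SIZE).unwrap_or(heap_start);
    // Out of range: brk stays put and the current value is reported back.
    let unchanged = self.brk(usize::MAX).unwrap_or(heap_start);
    debug_assert_eq!(unchanged, shrunk);
}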
// Get an approximate (non-accurate) free size for the current process
pub fn get_free_size(&self) -> usize {
let chunk_free_size = {
let process_chunks = self.mem_chunks.read().unwrap();
process_chunks
.iter()
.fold(0, |acc, chunks| acc + chunks.free_size())
};
let free_size = chunk_free_size + USER_SPACE_VM_MANAGER.free_size();
free_size
}
pub fn mmap(
&self,
addr: usize,
size: usize,
perms: VMPerms,
flags: MMapFlags,
fd: FileDesc,
offset: usize,
) -> Result<usize> {
let addr_option = {
if flags.contains(MMapFlags::MAP_FIXED) {
VMMapAddr::Force(addr)
} else {
if addr == 0 {
VMMapAddr::Any
} else {
VMMapAddr::Hint(addr)
}
}
};
let initializer = {
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
// There is no need to fill zeros in mmap. Cleaning is done after munmap.
VMInitializer::DoNothing()
} else {
let file_ref = current!().file(fd)?;
// Only shared, file-backed memory mappings have write-back files
let need_write_back = flags.contains(MMapFlags::MAP_SHARED);
VMInitializer::FileBacked {
file: FileBacked::new(file_ref, offset, need_write_back),
}
}
};
let mmap_options = VMMapOptionsBuilder::default()
.size(size)
.addr(addr_option)
.perms(perms)
.initializer(initializer)
.build()?;
let mmap_addr = USER_SPACE_VM_MANAGER.mmap(&mmap_options)?;
Ok(mmap_addr)
}
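// Editor's illustrative sketch — not part of the original source. A minimal
// anonymous mapping request: with MAP_ANONYMOUS the initializer is
// `DoNothing`, and both `fd` and `offset` are ignored. (A real caller would
// normally also pass MAP_PRIVATE or MAP_SHARED; they are omitted here to
// stick to flags that appear elsewhere in this file.)
#[allow(dead_code)]
fn anon_mmap_sketch(&self, unused_fd: FileDesc) -> Result<usize> {
    self.mmap(
        0,                              // addr == 0: let the allocator pick
        16 * PAGE_SIZE,                 // length
        VMPerms::READ | VMPerms::WRITE, // PROT_READ | PROT_WRITE
        MMapFlags::MAP_ANONYMOUS,       // anonymous memory
        unused_fd,                      // ignored for anonymous mappings
        0,                              // offset, ignored as well
    )
}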
pub fn mremap(
&self,
old_addr: usize,
old_size: usize,
new_size: usize,
flags: MRemapFlags,
) -> Result<usize> {
let mremap_option = VMRemapOptions::new(old_addr, old_size, new_size, flags)?;
USER_SPACE_VM_MANAGER.mremap(&mremap_option)
}
pub fn munmap(&self, addr: usize, size: usize) -> Result<()> {
USER_SPACE_VM_MANAGER.munmap(addr, size)
}
pub fn mprotect(&self, addr: usize, size: usize, perms: VMPerms) -> Result<()> {
let size = {
if size == 0 {
return Ok(());
}
align_up(size, PAGE_SIZE)
};
let protect_range = VMRange::new_with_size(addr, size)?;
return USER_SPACE_VM_MANAGER.mprotect(addr, size, perms);
}
pub fn msync(&self, addr: usize, size: usize) -> Result<()> | {
return USER_SPACE_VM_MANAGER.msync(addr, size);
} | identifier_body |
|
process_vm.rs | .heap_size
.unwrap_or(config::LIBOS_CONFIG.process.default_heap_size);
let stack_size = self
.stack_size
.unwrap_or(config::LIBOS_CONFIG.process.default_stack_size);
// Before allocating memory, let's first calculate how much memory
// we need in total by iterating the memory layouts required by
// all the memory regions
let elf_layouts: Vec<VMLayout> = self
.elfs
.iter()
.map(|elf| {
elf.program_headers()
.filter(|segment| segment.loadable())
.fold(VMLayout::new_empty(), |mut elf_layout, segment| { | elf_layout.extend(&segment_layout);
elf_layout
})
})
.collect();
// Make heap and stack 16-byte aligned
let other_layouts = vec![
VMLayout::new(heap_size, 16)?,
VMLayout::new(stack_size, 16)?,
];
let process_layout = elf_layouts.iter().chain(other_layouts.iter()).fold(
VMLayout::new_empty(),
|mut process_layout, sub_layout| {
process_layout.add(&sub_layout);
process_layout
},
);
// Now that we have the memory layout required by the process,
// let's allocate the memory for the process
let mut chunks = HashSet::new();
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let vm_option = VMMapOptionsBuilder::default()
.size(elf_layout.size())
.align(elf_layout.align())
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::ElfSpecific {
elf_file: elf_file.file_ref().clone(),
})
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (elf_range, chunk_ref) =
USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(elf_range.start() % elf_layout.align() == 0);
chunks.insert(chunk_ref);
Self::init_elf_memory(&elf_range, elf_file).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
trace!("elf range = {:?}", elf_range);
elf_ranges.push(elf_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let vm_option = VMMapOptionsBuilder::default()
.size(heap_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (heap_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(heap_range.start() % heap_layout.align() == 0);
trace!("heap range = {:?}", heap_range);
let brk = RwLock::new(heap_range.start());
chunks.insert(chunk_ref);
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let vm_option = VMMapOptionsBuilder::default()
.size(stack_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (stack_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(stack_range.start() % stack_layout.align() == 0);
chunks.insert(chunk_ref);
trace!("stack range = {:?}", stack_range);
let mem_chunks = Arc::new(RwLock::new(chunks));
Ok(ProcessVM {
elf_ranges,
heap_range,
stack_range,
brk,
mem_chunks,
})
}
fn validate(&self) -> Result<()> {
let validate_size = |size_opt| -> Result<()> {
if let Some(size) = size_opt {
if size == 0 || size % PAGE_SIZE!= 0 {
return_errno!(EINVAL, "invalid size");
}
}
Ok(())
};
validate_size(self.heap_size)?;
validate_size(self.stack_size)?;
validate_size(self.mmap_size)?;
Ok(())
}
fn handle_error_when_init(&self, chunks: &HashSet<Arc<Chunk>>) {
chunks.iter().for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(chunk, None, false);
});
}
fn init_elf_memory(elf_range: &VMRange, elf_file: &ElfFile) -> Result<()> {
// Destination buffer: ELF appeared in the process
let elf_proc_buf = unsafe { elf_range.as_slice_mut() };
// Source buffer: ELF stored in the ELF file
let elf_file_buf = elf_file.as_slice();
let base_load_address_offset = elf_file.base_load_address_offset() as usize;
// Offsets to track zerolized range
let mut empty_start_offset = 0;
let mut empty_end_offset = 0;
// Init all loadable segments
elf_file
.program_headers()
.filter(|segment| segment.loadable())
.for_each(|segment| {
let file_size = segment.p_filesz as usize;
let file_offset = segment.p_offset as usize;
let mem_addr = segment.p_vaddr as usize;
let mem_size = segment.p_memsz as usize;
let alignment = segment.p_align as usize;
debug_assert!(file_size <= mem_size);
let mem_start_offset = mem_addr - base_load_address_offset;
// Initialize empty part to zero based on alignment
empty_start_offset = align_down(mem_start_offset, alignment);
for b in &mut elf_proc_buf[empty_start_offset..mem_start_offset] {
*b = 0;
}
// Bytes of file_size length are loaded from the ELF file
elf_file.file_ref().read_at(
file_offset,
&mut elf_proc_buf[mem_start_offset..mem_start_offset + file_size],
);
// Set the remaining part to zero based on alignment
debug_assert!(file_size <= mem_size);
empty_end_offset = align_up(mem_start_offset + mem_size, alignment);
for b in &mut elf_proc_buf[mem_start_offset + file_size..empty_end_offset] {
*b = 0;
}
});
Ok(())
}
}
// MemChunks is the structure to track all the chunks which are used by this process.
type MemChunks = Arc<RwLock<HashSet<ChunkRef>>>;
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
elf_ranges: Vec<VMRange>,
heap_range: VMRange,
stack_range: VMRange,
brk: RwLock<usize>,
// Memory safety notes: the mem_chunks field must be the last one.
//
// Rust drops fields in the same order as they are declared. So by making
// mem_chunks the last field, we ensure that when all other fields are
// dropped, their drop methods (if provided) can still access the memory
// region represented by the mem_chunks field.
mem_chunks: MemChunks,
}
impl Default for ProcessVM {
fn default() -> ProcessVM {
ProcessVM {
elf_ranges: Default::default(),
heap_range: Default::default(),
stack_range: Default::default(),
brk: Default::default(),
mem_chunks: Arc::new(RwLock::new(HashSet::new())),
}
}
}
impl Drop for ProcessVM {
fn drop(&mut self) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
// There are two cases when this drop is called:
// (1) Process exits normally and in the end, drop process VM
// (2) During creating process stage, process VM is ready but there are some other errors when creating the process, e.g. spawn_attribute is set
// to a wrong value
//
// For the first case, the process VM is cleaned in the exit procedure and nothing is needed. For the second cases, mem_chunks is not empty and should
// be cleaned here.
mem_chunks
.drain_filter(|chunk| chunk.is_single_vma())
.for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(&chunk, None, false);
});
assert!(mem_chunks.len() == 0);
info!("Process VM dropped");
}
}
impl ProcessVM {
pub fn mem_chunks(&self) -> &MemChunks {
&self.mem_chunks
}
pub fn stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn add_mem_chunk(&self, chunk: ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.insert(chunk);
}
pub fn remove_mem_chunk(&self, chunk: &ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.remove(chunk);
}
pub fn replace_mem_chunk(&self, old_chunk: &ChunkRef, new_chunk: ChunkRef) {
self.remove_mem_chunk(old_chunk);
self.add_mem_chunk(new_chunk)
}
// Try merging all connecting single VMAs of the process.
// This is a very expensive operation.
pub fn merge_all_single_vma_chunks(
mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
) -> Result<Vec<VMArea>> {
// Get all single VMA chunks
// Shared chunks shouldn't be merged since they are managed by shm manager and shared by multi processes
let mut single_vma_chunks = mem_chunks
.drain_filter(|chunk| chunk.is_single_vma() &&!chunk.is_shared())
.collect::<Vec<ChunkRef>>();
single_vma_chunks.sort_unstable_by(|chunk_a, chunk_b| {
chunk_a
.range()
.start()
.partial_cmp(&chunk_b.range().start())
.unwrap()
});
// Try merging connecting single VMA chunks
for chunks in single_vma_chunks.windows(2) {
let chunk_a = &chunks[0];
let chunk_b = &chunks[1];
let mut vma_a = match chunk_a.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
let mut vma_b = match chunk_b.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
if VMArea::can_merge_vmas(&vma_a, &vma_b) {
let new_start = vma_a.start();
vma_b.set_start(new_start);
// set vma_a to zero
vma_a.set_end(new_start);
}
}
// Collect merged vmas which will be the output of this function
let mut merged_vmas = Vec::new();
// Insert unchanged chunks back to mem_chunks list and collect merged vmas for output
for chunk in single_vma_chunks.into_iter().filter_map(|chunk| {
if!chunk.is_single_dummy_vma() {
if chunk.is_single_vma_with_conflict_size() {
let new_vma = chunk.get_vma_for_single_vma_chunk().clone();
merged_vmas.push(new_vma);
// Don't insert the merged chunks to mem_chunk list here. It should be updated later.
None
} else {
Some(chunk)
}
} else {
None
}
}) {
mem_chunks.insert(chunk);
}
Ok(merged_vmas)
}
pub fn get_process_range(&self) -> &VMRange {
USER_SPACE_VM_MANAGER.range()
}
pub fn get_elf_ranges(&self) -> &[VMRange] {
&self.elf_ranges
}
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn get_base_addr(&self) -> usize {
self.get_process_range().start()
}
pub fn get_stack_base(&self) -> usize {
self.get_stack_range().end()
}
pub fn get_stack_limit(&self) -> usize {
self.get_stack_range().start()
}
pub fn get_brk(&self) -> usize {
*self.brk.read().unwrap()
}
pub fn brk(&self, brk: usize) -> Result<usize> {
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
// Acquire lock first to avoid data-race.
let mut brk_guard = self.brk.write().unwrap();
if brk >= heap_start && brk <= heap_end {
// Get page-aligned brk address.
let new_brk = align_up(brk, PAGE_SIZE);
// Get page-aligned old brk address.
let old_brk = align_up(*brk_guard, PAGE_SIZE);
// Reset the memory when brk shrinks.
if new_brk < old_brk {
let shrink_brk_range =
VMRange::new(new_brk, old_brk).expect("shrink brk range must be valid");
USER_SPACE_VM_MANAGER.reset_memory(shrink_brk_range)?;
}
// Return the user-specified brk address without page aligned. This is same as Linux.
*brk_guard = brk;
Ok(brk)
} else {
if brk < heap_start {
error!("New brk address is too low");
} else if brk > heap_end {
error!("New brk address is too high");
}
Ok(*brk_guard)
}
}
// Get a NON-accurate free size for current process
pub fn get_free_size(&self) -> usize {
let chunk_free_size = {
let process_chunks = self.mem_chunks.read().unwrap();
process_chunks
.iter()
.fold(0, |acc, chunks| acc + chunks.free_size())
};
let free_size = chunk_free_size + USER_SPACE_VM_MANAGER.free_size();
free_size
}
pub fn mmap(
&self,
addr: usize,
size: usize,
perms: VMPerms,
flags: MMapFlags,
fd: FileDesc,
offset: usize,
) -> Result<usize> {
let addr_option = {
if flags.contains(MMapFlags::MAP_FIXED) {
VMMapAddr::Force(addr)
} else {
if addr == 0 {
VMMapAddr::Any
} else {
VMMapAddr::Hint(addr)
}
}
};
let initializer = {
if flags.contains(MMapFlags::MAP_ANONYMOUS) {
// There is no need to fill zeros in mmap. Cleaning is done after munmap.
VMInitializer::DoNothing()
} else {
let file_ref = current!().file(fd)?;
// Only shared, file-backed memory mappings have write-back files
let need_write_back = if flags.contains(MMapFlags::MAP_SHARED) {
true
} else {
false
};
VMInitializer::FileBacked {
file: FileBacked::new(file_ref, offset, need_write_back),
| let segment_size = (segment.p_vaddr + segment.p_memsz) as usize;
let segment_align = segment.p_align as usize;
let segment_layout = VMLayout::new(segment_size, segment_align).unwrap(); | random_line_split |
process_vm.rs
.heap_size
.unwrap_or(config::LIBOS_CONFIG.process.default_heap_size);
let stack_size = self
.stack_size
.unwrap_or(config::LIBOS_CONFIG.process.default_stack_size);
// Before allocating memory, let's first calculate how much memory
// we need in total by iterating the memory layouts required by
// all the memory regions
let elf_layouts: Vec<VMLayout> = self
.elfs
.iter()
.map(|elf| {
elf.program_headers()
.filter(|segment| segment.loadable())
.fold(VMLayout::new_empty(), |mut elf_layout, segment| {
let segment_size = (segment.p_vaddr + segment.p_memsz) as usize;
let segment_align = segment.p_align as usize;
let segment_layout = VMLayout::new(segment_size, segment_align).unwrap();
elf_layout.extend(&segment_layout);
elf_layout
})
})
.collect();
// Make heap and stack 16-byte aligned
let other_layouts = vec![
VMLayout::new(heap_size, 16)?,
VMLayout::new(stack_size, 16)?,
];
let process_layout = elf_layouts.iter().chain(other_layouts.iter()).fold(
VMLayout::new_empty(),
|mut process_layout, sub_layout| {
process_layout.add(&sub_layout);
process_layout
},
);
// Now that we end up with the memory layout required by the process,
// let's allocate the memory for the process
let mut chunks = HashSet::new();
// Init the memory for ELFs in the process
let mut elf_ranges = Vec::with_capacity(2);
elf_layouts
.iter()
.zip(self.elfs.iter())
.map(|(elf_layout, elf_file)| {
let vm_option = VMMapOptionsBuilder::default()
.size(elf_layout.size())
.align(elf_layout.align())
.perms(VMPerms::ALL) // set it to read | write | exec for simplicity
.initializer(VMInitializer::ElfSpecific {
elf_file: elf_file.file_ref().clone(),
})
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (elf_range, chunk_ref) =
USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(elf_range.start() % elf_layout.align() == 0);
chunks.insert(chunk_ref);
Self::init_elf_memory(&elf_range, elf_file).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
trace!("elf range = {:?}", elf_range);
elf_ranges.push(elf_range);
Ok(())
})
.collect::<Result<()>>()?;
// Init the heap memory in the process
let heap_layout = &other_layouts[0];
let vm_option = VMMapOptionsBuilder::default()
.size(heap_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (heap_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(heap_range.start() % heap_layout.align() == 0);
trace!("heap range = {:?}", heap_range);
let brk = RwLock::new(heap_range.start());
chunks.insert(chunk_ref);
// Init the stack memory in the process
let stack_layout = &other_layouts[1];
let vm_option = VMMapOptionsBuilder::default()
.size(stack_layout.size())
.align(heap_layout.align())
.perms(VMPerms::READ | VMPerms::WRITE)
.build()
.map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
let (stack_range, chunk_ref) = USER_SPACE_VM_MANAGER.alloc(&vm_option).map_err(|e| {
&self.handle_error_when_init(&chunks);
e
})?;
debug_assert!(stack_range.start() % stack_layout.align() == 0);
chunks.insert(chunk_ref);
trace!("stack range = {:?}", stack_range);
let mem_chunks = Arc::new(RwLock::new(chunks));
Ok(ProcessVM {
elf_ranges,
heap_range,
stack_range,
brk,
mem_chunks,
})
}
fn validate(&self) -> Result<()> {
let validate_size = |size_opt| -> Result<()> {
if let Some(size) = size_opt {
if size == 0 || size % PAGE_SIZE != 0 {
return_errno!(EINVAL, "invalid size");
}
}
Ok(())
};
validate_size(self.heap_size)?;
validate_size(self.stack_size)?;
validate_size(self.mmap_size)?;
Ok(())
}
fn handle_error_when_init(&self, chunks: &HashSet<Arc<Chunk>>) {
chunks.iter().for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(chunk, None, false);
});
}
fn init_elf_memory(elf_range: &VMRange, elf_file: &ElfFile) -> Result<()> {
// Destination buffer: ELF appeared in the process
let elf_proc_buf = unsafe { elf_range.as_slice_mut() };
// Source buffer: ELF stored in the ELF file
let elf_file_buf = elf_file.as_slice();
let base_load_address_offset = elf_file.base_load_address_offset() as usize;
// Offsets to track the zeroized range
let mut empty_start_offset = 0;
let mut empty_end_offset = 0;
// Init all loadable segments
elf_file
.program_headers()
.filter(|segment| segment.loadable())
.for_each(|segment| {
let file_size = segment.p_filesz as usize;
let file_offset = segment.p_offset as usize;
let mem_addr = segment.p_vaddr as usize;
let mem_size = segment.p_memsz as usize;
let alignment = segment.p_align as usize;
debug_assert!(file_size <= mem_size);
let mem_start_offset = mem_addr - base_load_address_offset;
// Initialize empty part to zero based on alignment
empty_start_offset = align_down(mem_start_offset, alignment);
for b in &mut elf_proc_buf[empty_start_offset..mem_start_offset] {
*b = 0;
}
// Bytes of file_size length are loaded from the ELF file
elf_file.file_ref().read_at(
file_offset,
&mut elf_proc_buf[mem_start_offset..mem_start_offset + file_size],
);
// Set the remaining part to zero based on alignment
debug_assert!(file_size <= mem_size);
empty_end_offset = align_up(mem_start_offset + mem_size, alignment);
for b in &mut elf_proc_buf[mem_start_offset + file_size..empty_end_offset] {
*b = 0;
}
});
Ok(())
}
}
// MemChunks is the structure to track all the chunks which are used by this process.
type MemChunks = Arc<RwLock<HashSet<ChunkRef>>>;
/// The per-process virtual memory
#[derive(Debug)]
pub struct ProcessVM {
elf_ranges: Vec<VMRange>,
heap_range: VMRange,
stack_range: VMRange,
brk: RwLock<usize>,
// Memory safety notes: the mem_chunks field must be the last one.
//
// Rust drops fields in the same order as they are declared. So by making
// mem_chunks the last field, we ensure that when all other fields are
// dropped, their drop methods (if provided) can still access the memory
// region represented by the mem_chunks field.
mem_chunks: MemChunks,
}
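// A minimal sketch (not part of this crate) of the drop-order guarantee the
// note above relies on: Rust drops struct fields in declaration order, so the
// last-declared field is dropped last.
//
//     struct Noisy(&'static str);
//     impl Drop for Noisy {
//         fn drop(&mut self) { println!("{} dropped", self.0); }
//     }
//     struct S { first: Noisy, last: Noisy }
//     // Dropping an `S { first: Noisy("first"), last: Noisy("last") }` prints
//     // "first dropped" then "last dropped", which is why mem_chunks is
//     // declared last above: it outlives the other fields' destructors.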
impl Default for ProcessVM {
fn default() -> ProcessVM {
ProcessVM {
elf_ranges: Default::default(),
heap_range: Default::default(),
stack_range: Default::default(),
brk: Default::default(),
mem_chunks: Arc::new(RwLock::new(HashSet::new())),
}
}
}
impl Drop for ProcessVM {
fn drop(&mut self) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
// There are two cases when this drop is called:
// (1) Process exits normally and in the end, drop process VM
// (2) During creating process stage, process VM is ready but there are some other errors when creating the process, e.g. spawn_attribute is set
// to a wrong value
//
// For the first case, the process VM is cleaned in the exit procedure and nothing is needed. For the second case, mem_chunks is not empty and should
// be cleaned here.
mem_chunks
.drain_filter(|chunk| chunk.is_single_vma())
.for_each(|chunk| {
USER_SPACE_VM_MANAGER
.internal()
.munmap_chunk(&chunk, None, false);
});
assert!(mem_chunks.len() == 0);
info!("Process VM dropped");
}
}
impl ProcessVM {
pub fn mem_chunks(&self) -> &MemChunks {
&self.mem_chunks
}
pub fn stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn add_mem_chunk(&self, chunk: ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.insert(chunk);
}
pub fn remove_mem_chunk(&self, chunk: &ChunkRef) {
let mut mem_chunks = self.mem_chunks.write().unwrap();
mem_chunks.remove(chunk);
}
pub fn replace_mem_chunk(&self, old_chunk: &ChunkRef, new_chunk: ChunkRef) {
self.remove_mem_chunk(old_chunk);
self.add_mem_chunk(new_chunk)
}
// Try merging all connecting single VMAs of the process.
// This is a very expensive operation.
pub fn merge_all_single_vma_chunks(
mem_chunks: &mut RwLockWriteGuard<HashSet<ChunkRef>>,
) -> Result<Vec<VMArea>> {
// Get all single VMA chunks
// Shared chunks shouldn't be merged since they are managed by the shm manager and shared by multiple processes
let mut single_vma_chunks = mem_chunks
.drain_filter(|chunk| chunk.is_single_vma() && !chunk.is_shared())
.collect::<Vec<ChunkRef>>();
single_vma_chunks.sort_unstable_by(|chunk_a, chunk_b| {
chunk_a
.range()
.start()
.partial_cmp(&chunk_b.range().start())
.unwrap()
});
// Try merging connecting single VMA chunks
for chunks in single_vma_chunks.windows(2) {
let chunk_a = &chunks[0];
let chunk_b = &chunks[1];
let mut vma_a = match chunk_a.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
let mut vma_b = match chunk_b.internal() {
ChunkType::MultiVMA(_) => {
unreachable!();
}
ChunkType::SingleVMA(vma) => vma.lock().unwrap(),
};
if VMArea::can_merge_vmas(&vma_a, &vma_b) {
let new_start = vma_a.start();
vma_b.set_start(new_start);
// set vma_a to zero
vma_a.set_end(new_start);
}
}
// Collect merged vmas which will be the output of this function
let mut merged_vmas = Vec::new();
// Insert unchanged chunks back to mem_chunks list and collect merged vmas for output
for chunk in single_vma_chunks.into_iter().filter_map(|chunk| {
if !chunk.is_single_dummy_vma() {
if chunk.is_single_vma_with_conflict_size() {
let new_vma = chunk.get_vma_for_single_vma_chunk().clone();
merged_vmas.push(new_vma);
// Don't insert the merged chunks to mem_chunk list here. It should be updated later.
None
} else {
Some(chunk)
}
} else {
None
}
}) {
mem_chunks.insert(chunk);
}
Ok(merged_vmas)
}
pub fn get_process_range(&self) -> &VMRange {
USER_SPACE_VM_MANAGER.range()
}
pub fn get_elf_ranges(&self) -> &[VMRange] {
&self.elf_ranges
}
pub fn get_heap_range(&self) -> &VMRange {
&self.heap_range
}
pub fn get_stack_range(&self) -> &VMRange {
&self.stack_range
}
pub fn get_base_addr(&self) -> usize {
self.get_process_range().start()
}
pub fn get_stack_base(&self) -> usize {
self.get_stack_range().end()
}
pub fn get_stack_limit(&self) -> usize {
self.get_stack_range().start()
}
pub fn get_brk(&self) -> usize {
*self.brk.read().unwrap()
}
pub fn brk(&self, brk: usize) -> Result<usize> {
let heap_start = self.heap_range.start();
let heap_end = self.heap_range.end();
// Acquire lock first to avoid data-race.
let mut brk_guard = self.brk.write().unwrap();
if brk >= heap_start && brk <= heap_end {
// Get page-aligned brk address.
let new_brk = align_up(brk, PAGE_SIZE);
// Get page-aligned old brk address.
let old_brk = align_up(*brk_guard, PAGE_SIZE);
// Reset the memory when brk shrinks.
if new_brk < old_brk {
let shrink_brk_range =
VMRange::new(new_brk, old_brk).expect("shrink brk range must be valid");
USER_SPACE_VM_MANAGER.reset_memory(shrink_brk_range)?;
}
// Return the user-specified brk address without page alignment. This is the same as Linux.
*brk_guard = brk;
Ok(brk)
} else {
if brk < heap_start {
error!("New brk address is too low");
} else if brk > heap_end {
error!("New brk address is too high");
}
Ok(*brk_guard)
}
}
ledger_cleanup_service.rs
//! The `ledger_cleanup_service` drops older ledger data to limit disk space usage
use solana_ledger::blockstore::Blockstore;
use solana_ledger::blockstore_db::Result as BlockstoreResult;
use solana_measure::measure::Measure;
use solana_metrics::datapoint_debug;
use solana_sdk::clock::Slot;
use std::string::ToString;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError};
use std::sync::Arc;
use std::thread;
use std::thread::{Builder, JoinHandle};
use std::time::Duration;
// - To try and keep the RocksDB size under 400GB:
// Seeing about 1600b/shred, using 2000b/shred for margin, so 200m shreds can be stored in 400gb.
// at 5k shreds/slot at 50k tps, this is 40k slots (~4.4 hours).
// At idle, 60 shreds/slot this is about 4m slots (18 days)
// This is chosen to allow enough time for
// - A validator to download a snapshot from a peer and boot from it
// - To make sure that if a validator needs to reboot from its own snapshot, it has enough slots locally
// to catch back up to where it was when it stopped
pub const DEFAULT_MAX_LEDGER_SHREDS: u64 = 200_000_000;
// Allow down to 50m, or 3.5 days at idle, 1hr at 50k load, around ~100GB
pub const DEFAULT_MIN_MAX_LEDGER_SHREDS: u64 = 50_000_000;
// Check for removing slots at this interval so we don't purge too often
// and starve other blockstore users.
pub const DEFAULT_PURGE_SLOT_INTERVAL: u64 = 512;
// Remove a limited number of slots at a time, so the operation
// does not take too long and block other blockstore users.
pub const DEFAULT_PURGE_BATCH_SIZE: u64 = 256;
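// A back-of-envelope check of the sizing comment above; illustrative only —
// these names are not part of this module, and the ~400ms slot time is an
// assumption:
//
//     const BYTES_PER_SHRED: u64 = 2_000; // margin over the ~1600B observed
//     fn slots_retained(max_shreds: u64, shreds_per_slot: u64) -> u64 {
//         max_shreds / shreds_per_slot
//     }
//     // DEFAULT_MAX_LEDGER_SHREDS * BYTES_PER_SHRED = 400GB of ledger.
//     // slots_retained(200_000_000, 5_000) = 40_000 slots, ~4.4h at 400ms/slot.
//     // slots_retained(200_000_000, 60) = ~3.3M slots, roughly the "4m slots /
//     // 18 days" figure above, depending on rounding.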
pub struct LedgerCleanupService {
t_cleanup: JoinHandle<()>,
}
impl LedgerCleanupService {
pub fn new(
new_root_receiver: Receiver<Slot>,
blockstore: Arc<Blockstore>,
max_ledger_shreds: u64,
exit: &Arc<AtomicBool>,
) -> Self {
info!(
"LedgerCleanupService active. Max Ledger Slots {}",
max_ledger_shreds
);
let exit = exit.clone();
let mut last_purge_slot = 0;
let t_cleanup = Builder::new()
.name("solana-ledger-cleanup".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
break;
}
if let Err(e) = Self::cleanup_ledger(
&new_root_receiver,
&blockstore,
max_ledger_shreds,
&mut last_purge_slot,
DEFAULT_PURGE_SLOT_INTERVAL,
) {
match e {
RecvTimeoutError::Disconnected => break,
RecvTimeoutError::Timeout => (),
}
}
})
.unwrap();
Self { t_cleanup }
}
fn find_slots_to_clean(
blockstore: &Arc<Blockstore>,
root: Slot,
max_ledger_shreds: u64,
) -> (u64, Slot, Slot) {
let mut shreds = Vec::new();
let mut iterate_time = Measure::start("iterate_time");
let mut total_shreds = 0;
let mut first_slot = 0;
for (i, (slot, meta)) in blockstore.slot_meta_iterator(0).unwrap().enumerate() {
if i == 0 {
first_slot = slot;
debug!("purge: searching from slot: {}", slot);
}
// Not exact since non-full slots will have holes
total_shreds += meta.received;
shreds.push((slot, meta.received));
if slot > root {
break;
}
}
iterate_time.stop();
info!(
"checking for ledger purge: max_shreds: {} slots: {} total_shreds: {} {}",
max_ledger_shreds,
shreds.len(),
total_shreds,
iterate_time
);
if (total_shreds as u64) < max_ledger_shreds {
return (0, 0, 0);
}
let mut cur_shreds = 0;
let mut lowest_slot_to_clean = shreds[0].0;
for (slot, num_shreds) in shreds.iter().rev() {
cur_shreds += *num_shreds as u64;
if cur_shreds > max_ledger_shreds {
lowest_slot_to_clean = *slot;
break;
}
}
(cur_shreds, lowest_slot_to_clean, first_slot)
}
pub fn cleanup_ledger(
new_root_receiver: &Receiver<Slot>,
blockstore: &Arc<Blockstore>,
max_ledger_shreds: u64,
last_purge_slot: &mut u64,
purge_interval: u64,
) -> Result<(), RecvTimeoutError> {
let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?;
// Get the newest root
while let Ok(new_root) = new_root_receiver.try_recv() {
root = new_root;
}
if root - *last_purge_slot > purge_interval {
let disk_utilization_pre = blockstore.storage_size();
info!(
"purge: new root: {} last_purge: {} purge_interval: {} disk: {:?}",
root, last_purge_slot, purge_interval, disk_utilization_pre
);
*last_purge_slot = root;
let (num_shreds_to_clean, lowest_slot_to_clean, mut first_slot) =
Self::find_slots_to_clean(blockstore, root, max_ledger_shreds);
if num_shreds_to_clean > 0 {
debug!(
"cleaning up to: {} shreds: {} first: {}",
lowest_slot_to_clean, num_shreds_to_clean, first_slot
);
loop {
let current_lowest =
std::cmp::min(lowest_slot_to_clean, first_slot + DEFAULT_PURGE_BATCH_SIZE);
let mut slot_update_time = Measure::start("slot_update");
*blockstore.lowest_cleanup_slot.write().unwrap() = current_lowest;
slot_update_time.stop();
let mut clean_time = Measure::start("ledger_clean");
blockstore.purge_slots(first_slot, Some(current_lowest));
clean_time.stop();
debug!(
"ledger purge {} -> {}: {} {}",
first_slot, current_lowest, slot_update_time, clean_time
);
first_slot += DEFAULT_PURGE_BATCH_SIZE;
if current_lowest == lowest_slot_to_clean {
break;
}
thread::sleep(Duration::from_millis(500));
}
}
let disk_utilization_post = blockstore.storage_size();
Self::report_disk_metrics(disk_utilization_pre, disk_utilization_post);
}
Ok(())
}
fn report_disk_metrics(pre: BlockstoreResult<u64>, post: BlockstoreResult<u64>) {
if let (Ok(pre), Ok(post)) = (pre, post) {
datapoint_debug!(
"ledger_disk_utilization",
("disk_utilization_pre", pre as i64, i64),
("disk_utilization_post", post as i64, i64),
("disk_utilization_delta", (pre as i64 - post as i64), i64)
);
}
}
pub fn join(self) -> thread::Result<()> {
self.t_cleanup.join()
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_ledger::blockstore::make_many_slot_entries;
use solana_ledger::get_tmp_ledger_path;
use std::sync::mpsc::channel;
#[test]
fn test_cleanup() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let blockstore = Blockstore::open(&blockstore_path).unwrap();
let (shreds, _) = make_many_slot_entries(0, 50, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
//send a signal to kill all but 5 shreds, which will be in the newest slots
let mut last_purge_slot = 0;
sender.send(50).unwrap();
LedgerCleanupService::cleanup_ledger(&receiver, &blockstore, 5, &mut last_purge_slot, 10)
.unwrap();
//check that 0-40 don't exist
blockstore
.slot_meta_iterator(0)
.unwrap()
.for_each(|(slot, _)| assert!(slot > 40));
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
#[test]
fn test_cleanup_speed() {
solana_logger::setup();
let blockstore_path = get_tmp_ledger_path!();
let mut blockstore = Blockstore::open(&blockstore_path).unwrap();
blockstore.set_no_compaction(true);
let blockstore = Arc::new(blockstore);
let (sender, receiver) = channel();
let mut first_insert = Measure::start("first_insert");
let initial_slots = 50;
let initial_entries = 5;
let (shreds, _) = make_many_slot_entries(0, initial_slots, initial_entries);
blockstore.insert_shreds(shreds, None, false).unwrap();
first_insert.stop();
info!("{}", first_insert);
let mut last_purge_slot = 0;
let mut slot = initial_slots;
let mut num_slots = 6;
for _ in 0..5 {
let mut insert_time = Measure::start("insert time");
let batch_size = 2;
let batches = num_slots / batch_size;
for i in 0..batches {
let (shreds, _) = make_many_slot_entries(slot + i * batch_size, batch_size, 5);
blockstore.insert_shreds(shreds, None, false).unwrap();
if i % 100 == 0 {
info!("inserting..{} of {}", i, batches);
}
}
insert_time.stop();
let mut time = Measure::start("purge time");
sender.send(slot + num_slots).unwrap();
LedgerCleanupService::cleanup_ledger(
&receiver,
&blockstore,
initial_slots,
&mut last_purge_slot,
10,
)
.unwrap();
time.stop();
info!(
"slot: {} size: {} {} {}",
slot, num_slots, insert_time, time
);
slot += num_slots;
num_slots *= 2;
}
drop(blockstore);
Blockstore::destroy(&blockstore_path).expect("Expected successful database destruction");
}
}
tcp.rs
::Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len > pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return ;
}
// TODO: Validate checksum.
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8 } else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Accepted connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
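// Illustrative note on the checksum composition used in `rx_handler` above.
// It assumes `::ipv4::calculate_checksum` is the standard Internet checksum:
// the ones'-complement of the ones'-complement sum of 16-bit words. Because
// that sum is associative, partial checksums can be combined by feeding the
// *complemented* partials back in, which is what the
// `[!sum_pseudo, !sum_header, !sum_options_and_data]` step relies on:
//
//     fn ones_complement_sum(words: impl Iterator<Item = u16>) -> u16 {
//         let mut acc: u32 = 0;
//         for w in words {
//             acc += w as u32;
//             acc = (acc & 0xFFFF) + (acc >> 16); // fold carries back in
//         }
//         acc as u16
//     }
//     // calculate_checksum(words) == !ones_complement_sum(words)
//     // A received segment verifies when the combined checksum over the
//     // pseudo-header, header (checksum field included), and payload is 0.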
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
{
Quad {
local_addr, local_port, remote_addr, remote_port
}
}
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// Calculate checksum
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and recieved)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
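// For reference (descriptive only; this is the standard TCP state machine):
// an active close runs Established -> FinWait1 (FIN sent) -> FinWait2 (our
// FIN ACKed) -> TimeWait, or through Closing on a simultaneous close; a
// passive close runs Established -> CloseWait (peer FIN seen) -> LastAck
// (our FIN sent) -> Finished once the final ACK arrives. ForceClose is the
// RST-induced abort path, waiting only for the local user to close the handle.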
impl Connection
{
/// Create a new connection from the final ACK of a SYN / SYN-ACK / ACK handshake
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last received byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0.. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected sequence number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACK point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACK (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've received a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "FIN");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state, | ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
{
// TODO: Is it valid to send before the connection is fully established?
self.state_to_error()?;
// 1. Determine how much data we can send (based on the TX window)
let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
let rv = ::core::cmp::min(buf.len(), max_len);
// Add the data to the TX buffer
for &b in &buf[..rv] {
self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
}
// If the buffer is full enough, do a send
if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
{
// Trigger a TX
self.flush_send(quad);
}
else
{
// Kick a short timer, which will send data after it expires
// - Keep kicking the timer as data flows through
// - Have a maximum elapsed time with no packet sent.
//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
//{
// self.first_tx_time = Some(now());
//}
}
todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
}
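// Hedged example (editor's addition): with an advertised tx_window_size of
// 8192 and 1000 bytes already queued in tx_buffer, max_len = 7192, so a
// 10_000-byte write would be truncated to 7192 bytes; the caller must retry
// the remainder once the todo!() above is replaced with `Ok(rv)`.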
fn flush_send(&mut self, quad: &Quad)
{
loop
{
let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
todo!("{:?} tx {}", quad, nbytes);
}
//self.first_tx_time = None;
}
fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
{
self.state_to_error()?;
//let valid_len = self.rx_buffer.valid_len();
//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
//let len = usize::min(valid_len, buf.len());
Ok( self.rx_buffer.take(buf) )
}
fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
{
log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
}
fn send_ack(&mut self, quad: &Quad, msg: &str)
{
log_debug!("{:?} send_ack({:?})", quad, msg);
// - TODO: Cancel any pending ACK
// - Send a new ACK
self.send_packet(quad, FLAG_ACK, &[]);
}
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
}
struct ProtoConnection
{
seen_seq: u32,
sent_seq: u32,
}
impl ProtoConnection
{
fn new(seen_seq: u32) -> ProtoConnection
{
ProtoConnection {
seen_seq: seen_seq,
sent_seq: 1, // TODO: Random
}
}
}
struct Server
{
// Amount of connections that can still be accepted
accept_space: AtomicUsize,
// Established connections waiting for the user to accept
accept_queue: AtomicRingBuf<Quad>,
}
pub struct ConnectionHandle(Quad);
#[derive(Debug)]
pub enum ConnError
{
NoRoute,
LocalClosed,
RemoteRefused,
RemoteClosed,
RemoteReset,
NoPortAvailable,
}
impl ConnectionHandle
{
pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
{
log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
// 1. Determine the local address for this remote address
let local_addr = match get_outbound_ip_for(&addr)
{
Some(a) => a,
None => return Err(ConnError::NoRoute),
};
// 2. Pick a local port
let local_port = match allocate_port(&local_addr)
{
Some(p) => p,
None => return Err(ConnError::NoPortAvailable),
};
// 3. Create the quad and allocate the connection structure
let quad = Quad::new(local_addr, local_port, addr, port);
log_trace!("ConnectionHandle::connect: quad={:?}", quad);
// 4. Send the opening SYN (by creating the outbound connection structure)
let conn = Connection::new_outbound(&quad, 0x10000u32);
CONNECTIONS.insert(quad, Mutex::new(conn));
Ok( ConnectionHandle(quad) )
}
pub fn send_data(& | ConnectionState::TimeWait => self.state,
| random_line_split |
tcp.rs | Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len > pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return;
}
// Validate the checksum (failures are currently only logged, not dropped).
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
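// Hedged worked example (editor's addition): for 192.168.0.1 -> 192.168.0.2
// and a 40-byte segment, the pseudo-header words summed above would be
// [0xC0A8, 0x0001, 0xC0A8, 0x0002, 0x0006, 0x0028] (protocol 6 = TCP).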
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Accepted connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
{
Quad {
local_addr, local_port, remote_addr, remote_port
}
}
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// TODO: Calculate and fill in the checksum (currently left as zero)
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
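// Hedged usage sketch (editor's addition, mirrors the RST replies in
// rx_handler above): answer an unexpected segment with a bare RST; the peer's
// ACK number becomes our sequence number and the window is zeroed. The
// function name is hypothetical.
#[allow(dead_code)]
fn example_send_rst(quad: &Quad, hdr: &PktHeader) {
    quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}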
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
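// Hedged example (editor's addition): data_offset = 0x50 encodes five 32-bit
// words, so get_header_size() == 20 bytes (a header with no options).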
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // our FIN ACKed, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last received byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0.. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 |
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected sequence number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACK point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACK (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've received a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "FIN");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state,
ConnectionState::TimeWait => self.state,
ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
{
// TODO: Is it valid to send before the connection is fully established?
self.state_to_error()?;
// 1. Determine how much data we can send (based on the TX window)
let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
let rv = ::core::cmp::min(buf.len(), max_len);
// Add the data to the TX buffer
for &b in &buf[..rv] {
self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
}
// If the buffer is full enough, do a send
if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
{
// Trigger a TX
self.flush_send(quad);
}
else
{
// Kick a short timer, which will send data after it expires
// - Keep kicking the timer as data flows through
// - Have a maximum elapsed time with no packet sent.
//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
//{
// self.first_tx_time = Some(now());
//}
}
todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
}
fn flush_send(&mut self, quad: &Quad)
{
loop
{
let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
todo!("{:?} tx {}", quad, nbytes);
}
//self.first_tx_time = None;
}
fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
{
self.state_to_error()?;
//let valid_len = self.rx_buffer.valid_len();
//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
//let len = usize::min(valid_len, buf.len());
Ok( self.rx_buffer.take(buf) )
}
fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
{
log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
}
fn send_ack(&mut self, quad: &Quad, msg: &str)
{
log_debug!("{:?} send_ack({:?})", quad, msg);
// - TODO: Cancel any pending ACK
// - Send a new ACK
self.send_packet(quad, FLAG_ACK, &[]);
}
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
}
struct ProtoConnection
{
seen_seq: u32,
sent_seq: u32,
}
impl ProtoConnection
{
fn new(seen_seq: u32) -> ProtoConnection
{
ProtoConnection {
seen_seq: seen_seq,
sent_seq: 1, // TODO: Random
}
}
}
struct Server
{
// Amount of connections that can still be accepted
accept_space: AtomicUsize,
// Established connections waiting for the user to accept
accept_queue: AtomicRingBuf<Quad>,
}
pub struct ConnectionHandle(Quad);
#[derive(Debug)]
pub enum ConnError
{
NoRoute,
LocalClosed,
RemoteRefused,
RemoteClosed,
RemoteReset,
NoPortAvailable,
}
impl ConnectionHandle
{
pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
{
log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
// 1. Determine the local address for this remote address
let local_addr = match get_outbound_ip_for(&addr)
{
Some(a) => a,
None => return Err(ConnError::NoRoute),
};
// 2. Pick a local port
let local_port = match allocate_port(&local_addr)
{
Some(p) => p,
None => return Err(ConnError::NoPortAvailable),
};
// 3. Create the quad and allocate the connection structure
let quad = Quad::new(local_addr, local_port, addr, port);
log_trace!("ConnectionHandle::connect: quad={:?}", quad);
// 4. Send the opening SYN (by creating the outbound connection structure)
let conn = Connection::new_outbound(&quad, 0x10000u32);
CONNECTIONS.insert(quad, Mutex::new(conn));
Ok( ConnectionHandle(quad) )
}
pub fn send_ | {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
} | conditional_block |
tcp.rs | Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len > pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return;
}
// Validate the checksum (failures are currently only logged, not dropped).
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Accepted connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
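// Hedged summary (editor's addition): the dispatch order in rx_handler above
// is (1) an established connection matching the quad, (2) a proto-connection
// completed by the final ACK of the handshake, (3) a listening server for a
// SYN; anything else is dropped or answered with a RST.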
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn | (local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
{
Quad {
local_addr, local_port, remote_addr, remote_port
}
}
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// TODO: Calculate and fill in the checksum (currently left as zero)
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // our FIN ACKed, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
impl Connection
{
/// Create a new connection from the ACK in a SYN-SYN,ACK-ACK
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last received byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACQ {} bytes", quad, n_bytes);
for _ in 0.. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected sequence number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACK point, and a window point. Buffer is double the window
// Once the window point reaches 25% of the window from the ACK point
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACK (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've received a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for ACK in FIN,FIN/ACK,ACK
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "FIN");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state,
ConnectionState::TimeWait => self.state,
ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
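// Hedged example (editor's addition): after a peer RST the state is
// ForceClose, so state_to_error() yields Err(ConnError::RemoteReset) and both
// send_data and recv_data fail until close() moves the state to Finished.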
fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
{
// TODO: Is it valid to send before the connection is fully established?
self.state_to_error()?;
// 1. Determine how much data we can send (based on the TX window)
let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
let rv = ::core::cmp::min(buf.len(), max_len);
// Add the data to the TX buffer
for &b in &buf[..rv] {
self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
}
// If the buffer is full enough, do a send
if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
{
// Trigger a TX
self.flush_send(quad);
}
else
{
// Kick a short timer, which will send data after it expires
// - Keep kicking the timer as data flows through
// - Have a maximum elapsed time with no packet sent.
//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
//{
// self.first_tx_time = Some(now());
//}
}
todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
}
fn flush_send(&mut self, quad: &Quad)
{
loop
{
let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
todo!("{:?} tx {}", quad, nbytes);
}
//self.first_tx_time = None;
}
fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
{
self.state_to_error()?;
//let valid_len = self.rx_buffer.valid_len();
//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
//let len = usize::min(valid_len, buf.len());
Ok( self.rx_buffer.take(buf) )
}
fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
{
log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
}
fn send_ack(&mut self, quad: &Quad, msg: &str)
{
log_debug!("{:?} send_ack({:?})", quad, msg);
// - TODO: Cancel any pending ACK
// - Send a new ACK
self.send_packet(quad, FLAG_ACK, &[]);
}
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
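// Hedged note (editor's addition): close() implements both shutdown orders:
// Established sends a bare FIN and waits in FinWait1, while CloseWait (peer
// closed first) replies FIN|ACK and waits in LastAck for the final ACK.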
}
struct ProtoConnection
{
seen_seq: u32,
sent_seq: u32,
}
impl ProtoConnection
{
fn new(seen_seq: u32) -> ProtoConnection
{
ProtoConnection {
seen_seq: seen_seq,
sent_seq: 1, // TODO: Random
}
}
}
struct Server
{
// Amount of connections that can still be accepted
accept_space: AtomicUsize,
// Established connections waiting for the user to accept
accept_queue: AtomicRingBuf<Quad>,
}
pub struct ConnectionHandle(Quad);
#[derive(Debug)]
pub enum ConnError
{
NoRoute,
LocalClosed,
RemoteRefused,
RemoteClosed,
RemoteReset,
NoPortAvailable,
}
impl ConnectionHandle
{
pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
{
log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
// 1. Determine the local address for this remote address
let local_addr = match get_outbound_ip_for(&addr)
{
Some(a) => a,
None => return Err(ConnError::NoRoute),
};
// 2. Pick a local port
let local_port = match allocate_port(&local_addr)
{
Some(p) => p,
None => return Err(ConnError::NoPortAvailable),
};
// 3. Create the quad and allocate the connection structure
let quad = Quad::new(local_addr, local_port, addr, port);
log_trace!("ConnectionHandle::connect: quad={:?}", quad);
// 4. Send the opening SYN (by creating the outbound connection structure)
let conn = Connection::new_outbound(&quad, 0x10000u32);
CONNECTIONS.insert(quad, Mutex::new(conn));
Ok( ConnectionHandle(quad) )
}
pub fn send_ | new | identifier_name |
tcp.rs | Address::zero(), *addr).map(|(laddr, _, _)| Address::Ipv4(laddr)),
}
}
/// Allocate a port for the given local address
fn allocate_port(_addr: &Address) -> Option<u16>
{
// TODO: Could store bitmap against the interface (having a separate bitmap for each interface)
S_PORTS.lock().allocate()
}
fn release_port(_addr: &Address, idx: u16)
{
S_PORTS.lock().release(idx)
}
fn rx_handler_v4(int: &::ipv4::Interface, src_addr: ::ipv4::Address, pkt: ::nic::PacketReader)
{
rx_handler(Address::Ipv4(src_addr), Address::Ipv4(int.addr()), pkt)
}
fn rx_handler(src_addr: Address, dest_addr: Address, mut pkt: ::nic::PacketReader)
{
let pre_header_reader = pkt.clone();
let hdr = match PktHeader::read(&mut pkt)
{
Ok(v) => v,
Err(_) => {
log_error!("Undersized packet: Ran out of data reading header");
return ;
},
};
log_debug!("hdr = {:?}", hdr);
let hdr_len = hdr.get_header_size();
if hdr_len > pre_header_reader.remain() {
log_error!("Undersized or invalid packet: Header length is {} but packet length is {}", hdr_len, pre_header_reader.remain());
return;
}
// Validate the checksum (failures are currently only logged, not dropped).
{
let packet_len = pre_header_reader.remain();
// Pseudo header for checksum
let sum_pseudo = match (src_addr,dest_addr)
{
(Address::Ipv4(s), Address::Ipv4(d)) =>
::ipv4::calculate_checksum([
// Big endian stores MSB first, so write the high word first
(s.as_u32() >> 16) as u16, (s.as_u32() >> 0) as u16,
(d.as_u32() >> 16) as u16, (d.as_u32() >> 0) as u16,
IPV4_PROTO_TCP as u16, packet_len as u16,
].iter().copied()),
};
let sum_header = hdr.checksum();
let sum_options_and_data = {
let mut pkt = pkt.clone();
let psum_whole = !::ipv4::calculate_checksum( (0 .. (pre_header_reader.remain() - hdr_len) / 2).map(|_| pkt.read_u16n().unwrap()) );
// Final byte is decoded as if there was a zero after it (so as 0x??00)
let psum_partial = if pkt.remain() > 0 { (pkt.read_u8().unwrap() as u16) << 8} else { 0 };
::ipv4::calculate_checksum([psum_whole, psum_partial].iter().copied())
};
let sum_total = ::ipv4::calculate_checksum([
!sum_pseudo, !sum_header, !sum_options_and_data
].iter().copied());
if sum_total != 0 {
log_error!("Incorrect checksum: 0x{:04x} != 0", sum_total);
}
}
// Options
while pkt.remain() > pre_header_reader.remain() - hdr_len
{
match pkt.read_u8().unwrap()
{
_ => {},
}
}
let quad = Quad::new(dest_addr, hdr.dest_port, src_addr, hdr.source_port);
// Search for active connections with this quad
if let Some(c) = CONNECTIONS.get(&quad)
{
c.lock().handle(&quad, &hdr, pkt);
}
// Search for proto-connections
// - Proto-connections are lighter weight than full-blown connections, reducing the impact of a SYN flood
else if hdr.flags == FLAG_ACK
{
if let Some(c) = PROTO_CONNECTIONS.take(&quad)
{
// Check the SEQ/ACK numbers, and create the actual connection
if hdr.sequence_number == c.seen_seq + 1 && hdr.acknowledgement_number == c.sent_seq
{
// Make the full connection struct
CONNECTIONS.insert(quad, Mutex::new(Connection::new_inbound(&hdr)));
// Add the connection onto the server's accept queue
let server = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) ).expect("Can't find server");
server.accept_queue.push(quad).expect("Accepted connection with full accept queue");
}
else
{
// - Bad ACK, put the proto connection back into the list
PROTO_CONNECTIONS.insert(quad, c);
}
}
}
// If none found, look for servers on the destination (if SYN)
else if hdr.flags & !FLAG_ACK == FLAG_SYN
{
if let Some(s) = Option::or( SERVERS.get( &(Some(dest_addr), hdr.dest_port) ), SERVERS.get( &(None, hdr.dest_port) ) )
{
// Decrement the server's accept space
if s.accept_space.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |v| if v == 0 { None } else { Some(v - 1) }).is_err() {
// Reject if no space
// - Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST, 0, &[]);
}
else {
// - Add the quad as a proto-connection and send the SYN-ACK
let pc = ProtoConnection::new(hdr.sequence_number);
quad.send_packet(pc.sent_seq, pc.seen_seq, FLAG_SYN|FLAG_ACK, hdr.window_size, &[]);
PROTO_CONNECTIONS.insert(quad, pc);
}
}
else
{
// Send a RST
quad.send_packet(hdr.acknowledgement_number, hdr.sequence_number, FLAG_RST|(!hdr.flags & FLAG_ACK), 0, &[]);
}
}
// Otherwise, drop
}
#[derive(Copy,Clone,PartialOrd,PartialEq,Ord,Eq)]
struct Quad
{
local_addr: Address,
local_port: u16,
remote_addr: Address,
remote_port: u16,
}
impl ::core::fmt::Debug for Quad
{
fn fmt(&self, f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
write!(f, "Quad({:?}:{} -> {:?}:{})", self.local_addr, self.local_port, self.remote_addr, self.remote_port)
}
}
impl Quad
{
fn new(local_addr: Address, local_port: u16, remote_addr: Address, remote_port: u16) -> Quad
|
fn send_packet(&self, seq: u32, ack: u32, flags: u8, window_size: u16, data: &[u8])
{
// Make a header
// TODO: Any options required?
let options_bytes = &[];
let opts_len_rounded = ((options_bytes.len() + 3) / 4) * 4;
let hdr = PktHeader {
source_port: self.local_port,
dest_port: self.remote_port,
sequence_number: seq,
acknowledgement_number: ack,
data_offset: ((5 + opts_len_rounded/4) << 4) as u8 | 0,
flags: flags,
window_size: window_size,
checksum: 0, // To be filled afterwards
urgent_pointer: 0,
}.as_bytes();
// TODO: Calculate and fill in the checksum (currently left as zero)
// Create sparse packet chain
let data_pkt = SparsePacket::new_root(data);
// - Padding required to make the header a multiple of 4 bytes long
let opt_pad_pkt = SparsePacket::new_chained(&[0; 3][.. opts_len_rounded - options_bytes.len()], &data_pkt);
let opt_pkt = SparsePacket::new_chained(options_bytes, &opt_pad_pkt);
let hdr_pkt = SparsePacket::new_chained(&hdr, &opt_pkt);
// Pass packet downstream
match self.local_addr
{
Address::Ipv4(a) => crate::ipv4::send_packet(a, self.remote_addr.unwrap_ipv4(), IPV4_PROTO_TCP, hdr_pkt),
}
}
}
#[derive(Debug)]
struct PktHeader
{
source_port: u16,
dest_port: u16,
sequence_number: u32,
acknowledgement_number: u32,
/// Packed: top 4 bits are header size in 4byte units, bottom 4 are reserved
data_offset: u8,
/// Bitfield:
/// 0: FIN
/// 1: SYN
/// 2: RST
/// 3: PSH
/// 4: ACK
/// 5: URG
/// 6: ECE
/// 7: CWR
flags: u8,
window_size: u16,
checksum: u16,
urgent_pointer: u16,
//options: [u8],
}
const FLAG_FIN: u8 = 1 << 0;
const FLAG_SYN: u8 = 1 << 1;
const FLAG_RST: u8 = 1 << 2;
const FLAG_PSH: u8 = 1 << 3;
const FLAG_ACK: u8 = 1 << 4;
impl PktHeader
{
fn read(reader: &mut ::nic::PacketReader) -> Result<Self, ()>
{
Ok(PktHeader {
source_port: reader.read_u16n()?,
dest_port: reader.read_u16n()?,
sequence_number: reader.read_u32n()?,
acknowledgement_number: reader.read_u32n()?,
data_offset: reader.read_u8()?,
flags: reader.read_u8()?,
window_size: reader.read_u16n()?,
checksum: reader.read_u16n()?,
urgent_pointer: reader.read_u16n()?,
})
// TODO: Check checksum?
}
fn get_header_size(&self) -> usize {
(self.data_offset >> 4) as usize * 4
}
fn as_bytes(&self) -> [u8; 5*4]
{
[
(self.source_port >> 8) as u8,
(self.source_port >> 0) as u8,
(self.dest_port >> 8) as u8,
(self.dest_port >> 0) as u8,
(self.sequence_number >> 24) as u8,
(self.sequence_number >> 16) as u8,
(self.sequence_number >> 8) as u8,
(self.sequence_number >> 0) as u8,
(self.acknowledgement_number >> 24) as u8,
(self.acknowledgement_number >> 16) as u8,
(self.acknowledgement_number >> 8) as u8,
(self.acknowledgement_number >> 0) as u8,
self.data_offset,
self.flags,
(self.window_size >> 8) as u8,
(self.window_size >> 0) as u8,
(self.checksum >> 8) as u8,
(self.checksum >> 0) as u8,
(self.urgent_pointer >> 8) as u8,
(self.urgent_pointer >> 0) as u8,
]
}
fn as_u16s(&self) -> [u16; 5*2] {
[
self.source_port,
self.dest_port,
(self.sequence_number >> 16) as u16,
(self.sequence_number >> 0) as u16,
(self.acknowledgement_number >> 16) as u16,
(self.acknowledgement_number >> 0) as u16,
(self.data_offset as u16) << 8 | (self.flags as u16),
self.window_size,
self.checksum,
self.urgent_pointer,
]
}
fn checksum(&self) -> u16 {
::ipv4::calculate_checksum(self.as_u16s().iter().cloned())
}
}
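// Sketch (not in the original file): `checksum()` above only sums the fixed
// header words. A conformant TCP checksum also covers the IPv4 pseudo-header
// (source/destination address, protocol, segment length) plus options and
// payload. Assumes `calculate_checksum` accepts any iterator of u16 words,
// as in the call above.
#[allow(dead_code)]
fn checksum_v4_sketch(hdr: &PktHeader, src: [u8; 4], dst: [u8; 4], tcp_len: u16) -> u16
{
 let pseudo = [
  (src[0] as u16) << 8 | src[1] as u16,
  (src[2] as u16) << 8 | src[3] as u16,
  (dst[0] as u16) << 8 | dst[1] as u16,
  (dst[2] as u16) << 8 | dst[3] as u16,
  IPV4_PROTO_TCP as u16,
  tcp_len,
 ];
 // Option and payload words would be chained on here as well.
 ::ipv4::calculate_checksum( pseudo.iter().cloned().chain(hdr.as_u16s().iter().cloned()) )
}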
struct Connection
{
state: ConnectionState,
/// Sequence number of the next expected remote byte
next_rx_seq: u32,
/// Last ACKed sequence number
last_rx_ack: u32,
/// Received bytes
rx_buffer: RxBuffer,
/// Sequence number of the first byte in the RX buffer
rx_buffer_seq: u32,
rx_window_size_max: u32,
rx_window_size: u32,
/// Sequence number of last transmitted byte
last_tx_seq: u32,
/// Buffer of transmitted but not ACKed bytes
tx_buffer: RingBuf<u8>,
/// Offset of bytes actually sent (not just buffered)
tx_bytes_sent: usize,
/// Last received transmit window size
tx_window_size: u32,
}
#[derive(Copy,Clone,Debug,PartialEq)]
enum ConnectionState
{
//Closed, // Unused
SynSent, // SYN sent by local, waiting for SYN-ACK
//SynReceived, // Server only, handled by PROTO_CONNECTIONS
Established,
FinWait1, // FIN sent, waiting for reply (ACK or FIN)
FinWait2, // sent FIN acked, waiting for FIN from peer
Closing, // Waiting for ACK of FIN (FIN sent and received)
TimeWait, // Waiting for timeout after local close
ForceClose, // RST received, waiting for user close
CloseWait, // FIN received, waiting for user to close (error set, wait for node close)
LastAck, // FIN sent and received, waiting for ACK
Finished,
}
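// Rough mapping onto the RFC 793 state diagram (comment only):
// active close: Established -> FinWait1 -> (FinWait2 or Closing) -> TimeWait -> Finished
// passive close: Established -> CloseWait -> LastAck -> Finished
// reset by peer: any state -> ForceClose -> Finished once the user closes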
impl Connection
{
/// Create a new connection from the final ACK of a SYN / SYN-ACK / ACK handshake
fn new_inbound(hdr: &PktHeader) -> Self
{
Connection {
state: ConnectionState::Established,
next_rx_seq: hdr.sequence_number,
last_rx_ack: hdr.sequence_number,
rx_buffer_seq: hdr.sequence_number,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: hdr.acknowledgement_number,
tx_buffer: RingBuf::new(2048),//hdr.window_size as usize),
tx_bytes_sent: 0,
tx_window_size: hdr.window_size as u32,
}
}
fn new_outbound(quad: &Quad, sequence_number: u32) -> Self
{
log_trace!("Connection::new_outbound({:?}, {:#x})", quad, sequence_number);
let mut rv = Connection {
state: ConnectionState::SynSent,
next_rx_seq: 0,
last_rx_ack: 0,
rx_buffer_seq: 0,
rx_buffer: RxBuffer::new(2*DEF_WINDOW_SIZE as usize),
rx_window_size_max: MAX_WINDOW_SIZE, // Can be updated by the user
rx_window_size: DEF_WINDOW_SIZE,
last_tx_seq: sequence_number,
tx_buffer: RingBuf::new(2048),
tx_bytes_sent: 0,
tx_window_size: 0,//hdr.window_size as u32,
};
rv.send_packet(quad, FLAG_SYN, &[]);
rv
}
/// Handle inbound data
fn handle(&mut self, quad: &Quad, hdr: &PktHeader, mut pkt: ::nic::PacketReader)
{
match self.state
{
//ConnectionState::Closed => return,
ConnectionState::Finished => return,
_ => {},
}
// Synchronisation request
if hdr.flags & FLAG_SYN != 0 {
// TODO: Send an ACK of the last received byte (should this be conditional?)
if self.last_rx_ack != self.next_rx_seq {
}
//self.next_rx_seq = hdr.sequence_number;
}
// ACK of sent data
if hdr.flags & FLAG_ACK != 0 {
let in_flight = (self.last_tx_seq - hdr.acknowledgement_number) as usize;
if in_flight > self.tx_buffer.len() {
// TODO: Error, something funky has happened
}
else {
let n_bytes = self.tx_buffer.len() - in_flight;
log_debug!("{:?} ACK {} bytes", quad, n_bytes);
for _ in 0.. n_bytes {
self.tx_buffer.pop_front();
}
}
}
// Update the window size if it changes
if self.tx_window_size != hdr.window_size as u32 {
self.tx_window_size = hdr.window_size as u32;
}
let new_state = match self.state
{
//ConnectionState::Closed => return,
// SYN sent by local, waiting for SYN-ACK
ConnectionState::SynSent => {
if hdr.flags & FLAG_SYN != 0 {
self.next_rx_seq += 1;
if hdr.flags & FLAG_ACK != 0 {
// Now established
// TODO: Send ACK back
self.send_ack(quad, "SYN-ACK");
ConnectionState::Established
}
else {
// Why did we get a plain SYN in this state?
self.state
}
}
else {
// Ignore non-SYN
self.state
}
},
ConnectionState::Established =>
if hdr.flags & FLAG_RST != 0 {
// RST received, do an unclean close (reset by peer)
// TODO: Signal to user that the connection is closing (error)
ConnectionState::ForceClose
}
else if hdr.flags & FLAG_FIN != 0 {
// FIN received, start a clean shutdown
self.next_rx_seq += 1;
// TODO: Signal to user that the connection is closing (EOF)
ConnectionState::CloseWait
}
else {
if pkt.remain() == 0 {
// Pure ACK, no change
if hdr.flags == FLAG_ACK {
log_trace!("{:?} ACK only", quad);
}
else if self.next_rx_seq != hdr.sequence_number {
log_trace!("{:?} Empty packet, unexpected sequence number {:x} != {:x}", quad, hdr.sequence_number, self.next_rx_seq);
}
else {
// Counts as one byte
self.next_rx_seq += 1;
self.send_ack(quad, "Empty");
}
}
else if hdr.sequence_number - self.next_rx_seq + pkt.remain() as u32 > MAX_WINDOW_SIZE {
// Completely out of sequence
}
else {
// In sequence.
let mut start_ofs = (hdr.sequence_number - self.next_rx_seq) as i32;
while start_ofs < 0 {
pkt.read_u8().unwrap();
start_ofs += 1;
}
let mut ofs = start_ofs as usize;
while let Ok(b) = pkt.read_u8() {
match self.rx_buffer.insert( (self.next_rx_seq - self.rx_buffer_seq) as usize + ofs, &[b])
{
Ok(_) => {},
Err(e) => {
log_error!("{:?} RX buffer push {:?}", quad, e);
break;
},
}
ofs += 1;
}
// Better idea: Have an ACK point and a window point; the buffer is double the window.
// Once the window point reaches 25% of the window from the ACK point, send a window update.
if start_ofs == 0 {
self.next_rx_seq += ofs as u32;
// Calculate a maximum window size based on how much space is left in the buffer
let buffered_len = self.next_rx_seq - self.rx_buffer_seq; // How much data the user has buffered
let cur_max_window = 2*self.rx_window_size_max - buffered_len; // NOTE: 2* for some flex so the window can stay at max size
if cur_max_window < self.rx_window_size {
// Reduce the window size and send an ACK (with the updated size)
while cur_max_window < self.rx_window_size {
self.rx_window_size /= 2;
}
self.send_ack(quad, "Constrain window");
}
else if self.next_rx_seq - self.last_rx_ack > self.rx_window_size/2 {
// Send an ACK now, we've received a burst of data
self.send_ack(quad, "Data burst");
}
else {
// TODO: Schedule an ACK in a few hundred milliseconds
}
}
if hdr.flags & FLAG_PSH != 0 {
// TODO: Prod the user that there's new data?
}
}
self.state
},
ConnectionState::CloseWait => {
// Ignore all packets while waiting for the user to complete teardown
self.state
},
ConnectionState::LastAck => // Waiting for the final ACK of the FIN / FIN-ACK / ACK exchange
if hdr.flags & FLAG_ACK != 0 {
ConnectionState::Finished
}
else {
self.state
},
ConnectionState::FinWait1 => // FIN sent, waiting for reply (ACK or FIN)
if hdr.flags & FLAG_FIN != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
self.send_ack(quad, "FIN");
ConnectionState::Closing
}
else if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::FinWait2
}
else {
self.state
},
ConnectionState::FinWait2 =>
if hdr.flags & FLAG_FIN != 0 { // Got a FIN after the ACK, close
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::Closing =>
if hdr.flags & FLAG_ACK != 0 {
// TODO: Check the sequence number vs the sequence for the FIN
ConnectionState::TimeWait
}
else {
self.state
},
ConnectionState::ForceClose => self.state,
ConnectionState::TimeWait => self.state,
ConnectionState::Finished => return,
};
self.state_update(quad, new_state);
}
fn state_update(&mut self, quad: &Quad, new_state: ConnectionState)
{
if self.state != new_state
{
log_trace!("{:?} {:?} -> {:?}", quad, self.state, new_state);
self.state = new_state;
// TODO: If transitioning to `Finished`, release the local port?
// - Only for client connections.
if let ConnectionState::Finished = self.state
{
release_port(&quad.local_addr, quad.local_port);
}
}
}
fn state_to_error(&self) -> Result<(), ConnError>
{
match self.state
{
ConnectionState::SynSent => {
todo!("(quad=?) send/recv before established");
},
ConnectionState::Established => Ok( () ),
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => Err( ConnError::LocalClosed ),
ConnectionState::ForceClose => Err( ConnError::RemoteReset ),
ConnectionState::CloseWait | ConnectionState::LastAck => Err( ConnError::RemoteClosed ),
ConnectionState::Finished => Err( ConnError::LocalClosed ),
}
}
fn send_data(&mut self, quad: &Quad, buf: &[u8]) -> Result<usize, ConnError>
{
// TODO: Is it valid to send before the connection is fully established?
self.state_to_error()?;
// 1. Determine how much data we can send (based on the TX window)
let max_len = usize::saturating_sub(self.tx_window_size as usize, self.tx_buffer.len());
let rv = ::core::cmp::min(buf.len(), max_len);
// Add the data to the TX buffer
for &b in &buf[..rv] {
self.tx_buffer.push_back(b).expect("Incorrectly calculated `max_len` in tcp::Connection::send_data");
}
// If the buffer is full enough, do a send
if self.tx_buffer.len() - self.tx_bytes_sent > 1400 /*|| self.first_tx_time.map(|t| now() - t > MAX_TX_DELAY).unwrap_or(false)*/
{
// Trigger a TX
self.flush_send(quad);
}
else
{
// Kick a short timer, which will send data after it expires
// - Keep kicking the timer as data flows through
// - Have a maximum elapsed time with no packet sent.
//if self.tx_timer.reset(MIN_TX_DELAY) == timer::ResetResult::WasStopped
//{
// self.first_tx_time = Some(now());
//}
}
todo!("{:?} send_data( min({}, {})={} )", quad, max_len, buf.len(), rv);
}
fn flush_send(&mut self, quad: &Quad)
{
loop
{
let nbytes = self.tx_buffer.len() - self.tx_bytes_sent;
todo!("{:?} tx {}", quad, nbytes);
}
//self.first_tx_time = None;
}
fn recv_data(&mut self, _quad: &Quad, buf: &mut [u8]) -> Result<usize, ConnError>
{
self.state_to_error()?;
//let valid_len = self.rx_buffer.valid_len();
//let acked_len = u32::wrapping_sub(self.next_rx_seq, self.rx_buffer_seq);
//let len = usize::min(valid_len, buf.len());
Ok( self.rx_buffer.take(buf) )
}
fn send_packet(&mut self, quad: &Quad, flags: u8, data: &[u8])
{
log_debug!("{:?} send_packet({:02x} {}b)", quad, flags, data.len());
quad.send_packet(self.last_tx_seq, self.next_rx_seq, flags, self.rx_window_size as u16, data);
}
fn send_ack(&mut self, quad: &Quad, msg: &str)
{
log_debug!("{:?} send_ack({:?})", quad, msg);
// - TODO: Cancel any pending ACK
// - Send a new ACK
self.send_packet(quad, FLAG_ACK, &[]);
}
fn close(&mut self, quad: &Quad) -> Result<(), ConnError>
{
let new_state = match self.state
{
ConnectionState::SynSent => {
todo!("{:?} close before established", quad);
},
ConnectionState::FinWait1
| ConnectionState::FinWait2
| ConnectionState::Closing
| ConnectionState::TimeWait => return Err( ConnError::LocalClosed ),
ConnectionState::LastAck => return Err( ConnError::RemoteClosed ),
ConnectionState::Finished => return Err( ConnError::LocalClosed ),
ConnectionState::CloseWait => {
self.send_packet(quad, FLAG_FIN|FLAG_ACK, &[]);
ConnectionState::LastAck
},
ConnectionState::ForceClose => {
ConnectionState::Finished
},
ConnectionState::Established => {
self.send_packet(quad, FLAG_FIN, &[]);
ConnectionState::FinWait1
},
};
self.state_update(quad, new_state);
Ok( () )
}
}
struct ProtoConnection
{
seen_seq: u32,
sent_seq: u32,
}
impl ProtoConnection
{
fn new(seen_seq: u32) -> ProtoConnection
{
ProtoConnection {
seen_seq: seen_seq,
sent_seq: 1, // TODO: Random
}
}
}
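// Sketch only: `sent_seq` stays a fixed 1 until the TODO above is resolved.
// A real stack randomises the ISN; RFC 6528 suggests a keyed hash over the
// quad plus a ~4us clock, roughly:
//
// fn initial_seq(quad: &Quad) -> u32 {
// clock_4us().wrapping_add( keyed_hash(&SECRET, quad) )
// }
//
// `clock_4us`, `keyed_hash` and `SECRET` are hypothetical names, not APIs
// from this codebase.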
struct Server
{
// Number of connections that can still be accepted
accept_space: AtomicUsize,
// Established connections waiting for the user to accept
accept_queue: AtomicRingBuf<Quad>,
}
pub struct ConnectionHandle(Quad);
#[derive(Debug)]
pub enum ConnError
{
NoRoute,
LocalClosed,
RemoteRefused,
RemoteClosed,
RemoteReset,
NoPortAvailable,
}
impl ConnectionHandle
{
pub fn connect(addr: Address, port: u16) -> Result<ConnectionHandle, ConnError>
{
log_trace!("ConnectionHandle::connect({:?}, {})", addr, port);
// 1. Determine the local address for this remote address
let local_addr = match get_outbound_ip_for(&addr)
{
Some(a) => a,
None => return Err(ConnError::NoRoute),
};
// 2. Pick a local port
let local_port = match allocate_port(&local_addr)
{
Some(p) => p,
None => return Err(ConnError::NoPortAvailable),
};
// 3. Create the quad and allocate the connection structure
let quad = Quad::new(local_addr, local_port, addr, port, );
log_trace!("ConnectionHandle::connect: quad={:?}", quad);
// 4. Send the opening SYN (by creating the outbound connection structure)
let conn = Connection::new_outbound(&quad, 0x10000u32);
CONNECTIONS.insert(quad, Mutex::new(conn));
Ok( ConnectionHandle(quad) )
}
pub fn send_data | {
Quad {
local_addr, local_port, remote_addr, remote_port
}
} | identifier_body |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
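// Recap of the pass above (comment only, not original code): (1) compare
// assigned vs ideal harvesters per base to build mineral deficit slots,
// pulling surplus workers back into `idle_workers`; (2) do the same for gas
// buildings; (3) re-assign each idle worker - gas deficits first, then
// mineral deficits, then any mineral patch near a base.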
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag| !mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) { | }
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
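// Illustrative pattern (not original code): the same "has ability -> in cast
// range -> command" gating generalises to any targeted ability:
//
// if unit.has_ability(ability) && unit.in_ability_cast_range(ability, target, 0.0) {
// unit.command(ability, Target::Pos(target.position()), false);
// }
//
// `unit`, `ability` and `target` stand in for whatever the caller has.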
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if !(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
} | builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
} | random_line_split |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag|!mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn | (&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if!(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
}
| train | identifier_name |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() | else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag|!mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self
.units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if!(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
}
| {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} | conditional_block |
reaper-rush.rs | #[macro_use]
extern crate clap;
use rand::prelude::*;
use rust_sc2::prelude::*;
use std::{cmp::Ordering, collections::HashSet};
#[bot]
#[derive(Default)]
struct ReaperRushAI {
reapers_retreat: HashSet<u64>,
last_loop_distributed: u32,
}
impl Player for ReaperRushAI {
fn on_start(&mut self) -> SC2Result<()> {
if let Some(townhall) = self.units.my.townhalls.first() {
// Setting rallypoint for command center
townhall.smart(Target::Pos(self.start_center), false);
// Ordering scv on initial 50 minerals
townhall.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
// Splitting workers to closest mineral crystals
for u in &self.units.my.workers {
if let Some(mineral) = self.units.mineral_fields.closest(u) {
u.gather(mineral.tag(), false);
}
}
Ok(())
}
fn on_step(&mut self, _iteration: usize) -> SC2Result<()> {
self.distribute_workers();
self.build();
self.train();
self.execute_micro();
Ok(())
}
fn get_player_settings(&self) -> PlayerSettings {
PlayerSettings::new(Race::Terran).with_name("RustyReapers")
}
}
impl ReaperRushAI {
const DISTRIBUTION_DELAY: u32 = 8;
fn distribute_workers(&mut self) {
if self.units.my.workers.is_empty() {
return;
}
let mut idle_workers = self.units.my.workers.idle();
// Check distribution delay if there aren't any idle workers
let game_loop = self.state.observation.game_loop();
let last_loop = &mut self.last_loop_distributed;
if idle_workers.is_empty() && *last_loop + Self::DISTRIBUTION_DELAY > game_loop {
return;
}
*last_loop = game_loop;
// Distribute
let mineral_fields = &self.units.mineral_fields;
if mineral_fields.is_empty() {
return;
}
let bases = self.units.my.townhalls.ready();
if bases.is_empty() {
return;
}
let mut deficit_minings = Units::new();
let mut deficit_geysers = Units::new();
// Distributing mineral workers
for base in &bases {
match base.assigned_harvesters().cmp(&base.ideal_harvesters()) {
Ordering::Less => (0..(base.ideal_harvesters().unwrap()
- base.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_minings.push(base.clone());
}),
Ordering::Greater => {
let local_minerals = mineral_fields
.iter()
.closer(11.0, base)
.map(|m| m.tag())
.collect::<Vec<u64>>();
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
local_minerals.contains(&target_tag)
|| (u.is_carrying_minerals() && target_tag == base.tag())
})
})
.iter()
.take(
(base.assigned_harvesters().unwrap() - base.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
}
}
// Distributing gas workers
self.units
.my
.gas_buildings
.iter()
.ready()
.filter(|g| g.vespene_contents().map_or(false, |vespene| vespene > 0))
.for_each(
|gas| match gas.assigned_harvesters().cmp(&gas.ideal_harvesters()) {
Ordering::Less => (0..(gas.ideal_harvesters().unwrap()
- gas.assigned_harvesters().unwrap()))
.for_each(|_| {
deficit_geysers.push(gas.clone());
}),
Ordering::Greater => {
idle_workers.extend(
self.units
.my
.workers
.filter(|u| {
u.target_tag().map_or(false, |target_tag| {
target_tag == gas.tag()
|| (u.is_carrying_vespene()
&& target_tag == bases.closest(gas).unwrap().tag())
})
})
.iter()
.take(
(gas.assigned_harvesters().unwrap() - gas.ideal_harvesters().unwrap())
as usize,
)
.cloned(),
);
}
_ => {}
},
);
// Distributing idle workers
let minerals_near_base = if idle_workers.len() > deficit_minings.len() + deficit_geysers.len() {
let minerals = mineral_fields.filter(|m| bases.iter().any(|base| base.is_closer(11.0, *m)));
if minerals.is_empty() {
None
} else {
Some(minerals)
}
} else {
None
};
for u in &idle_workers {
if let Some(closest) = deficit_geysers.closest(u) {
let tag = closest.tag();
deficit_geysers.remove(tag);
u.gather(tag, false);
} else if let Some(closest) = deficit_minings.closest(u) {
u.gather(
mineral_fields
.closer(11.0, closest)
.max(|m| m.mineral_contents().unwrap_or(0))
.unwrap()
.tag(),
false,
);
let tag = closest.tag();
deficit_minings.remove(tag);
} else if u.is_idle() {
if let Some(mineral) = minerals_near_base.as_ref().and_then(|ms| ms.closest(u)) {
u.gather(mineral.tag(), false);
}
}
}
}
fn get_builder(&self, pos: Point2, mineral_tags: &[u64]) -> Option<&Unit> {
self.units
.my
.workers
.iter()
.filter(|u| {
!(u.is_constructing()
|| u.is_returning() || u.is_carrying_resource()
|| (u.is_gathering() && u.target_tag().map_or(true, |tag|!mineral_tags.contains(&tag))))
})
.closest(pos)
}
fn build(&mut self) {
if self.minerals < 75 {
return;
}
let mineral_tags = self
.units
.mineral_fields
.iter()
.map(|u| u.tag())
.collect::<Vec<u64>>();
let main_base = self.start_location.towards(self.game_info.map_center, 8.0);
if self.counter().count(UnitTypeId::Refinery) < 2
&& self.counter().ordered().count(UnitTypeId::Refinery) == 0
&& self.can_afford(UnitTypeId::Refinery, false)
{
let start_location = self.start_location;
if let Some(geyser) = self.find_gas_placement(start_location) {
if let Some(builder) = self.get_builder(geyser.position(), &mineral_tags) {
builder.build_gas(geyser.tag(), false);
self.subtract_resources(UnitTypeId::Refinery, false);
}
}
}
if self.supply_left < 3
&& self.supply_cap < 200
&& self.counter().ordered().count(UnitTypeId::SupplyDepot) == 0
&& self.can_afford(UnitTypeId::SupplyDepot, false)
{
if let Some(location) =
self.find_placement(UnitTypeId::SupplyDepot, main_base, Default::default())
{
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::SupplyDepot, location, false);
self.subtract_resources(UnitTypeId::SupplyDepot, false);
return;
}
}
}
if self.counter().all().count(UnitTypeId::Barracks) < 4
&& self.can_afford(UnitTypeId::Barracks, false)
{
if let Some(location) = self.find_placement(
UnitTypeId::Barracks,
main_base,
PlacementOptions {
step: 4,
..Default::default()
},
) {
if let Some(builder) = self.get_builder(location, &mineral_tags) {
builder.build(UnitTypeId::Barracks, location, false);
self.subtract_resources(UnitTypeId::Barracks, false);
}
}
}
}
fn train(&mut self) | .units
.my
.structures
.iter()
.find(|u| u.type_id() == UnitTypeId::Barracks && u.is_ready() && u.is_almost_idle())
{
barracks.train(UnitTypeId::Reaper, false);
self.subtract_resources(UnitTypeId::Reaper, true);
}
}
}
fn throw_mine(&self, reaper: &Unit, target: &Unit) -> bool {
if reaper.has_ability(AbilityId::KD8ChargeKD8Charge)
&& reaper.in_ability_cast_range(AbilityId::KD8ChargeKD8Charge, target, 0.0)
{
reaper.command(
AbilityId::KD8ChargeKD8Charge,
Target::Pos(target.position()),
false,
);
true
} else {
false
}
}
fn execute_micro(&mut self) {
// Lower ready depots
self.units
.my
.structures
.iter()
.of_type(UnitTypeId::SupplyDepot)
.ready()
.for_each(|s| s.use_ability(AbilityId::MorphSupplyDepotLower, false));
// Reapers micro
let reapers = self.units.my.units.of_type(UnitTypeId::Reaper);
if reapers.is_empty() {
return;
}
let targets = {
let ground_targets = self.units.enemy.all.ground();
let ground_attackers = ground_targets.filter(|e| e.can_attack_ground());
if ground_attackers.is_empty() {
ground_targets
} else {
ground_attackers
}
};
for u in &reapers {
let is_retreating = self.reapers_retreat.contains(&u.tag());
if is_retreating {
if u.health_percentage().unwrap() > 0.75 {
self.reapers_retreat.remove(&u.tag());
}
} else if u.health_percentage().unwrap() < 0.5 {
self.reapers_retreat.insert(u.tag());
}
match targets.closest(u) {
Some(closest) => {
if self.throw_mine(u, closest) {
return;
}
if is_retreating || u.on_cooldown() {
match targets
.iter()
.filter(|t| t.in_range(u, t.speed() + if is_retreating { 2.0 } else { 0.5 }))
.closest(u)
{
Some(closest_attacker) => {
let flee_position = {
let pos = u.position().towards(closest_attacker.position(), -u.speed());
if self.is_pathable(pos) {
pos
} else {
*u.position()
.neighbors8()
.iter()
.filter(|p| self.is_pathable(**p))
.furthest(closest_attacker)
.unwrap_or(&self.start_location)
}
};
u.move_to(Target::Pos(flee_position), false);
}
None => {
if !(is_retreating || u.in_range(&closest, 0.0)) {
u.move_to(Target::Pos(closest.position()), false);
}
}
}
} else {
match targets.iter().in_range_of(u, 0.0).min_by_key(|t| t.hits()) {
Some(target) => u.attack(Target::Tag(target.tag()), false),
None => u.move_to(Target::Pos(closest.position()), false),
}
}
}
None => {
let pos = if is_retreating {
u.position()
} else {
self.enemy_start
};
u.move_to(Target::Pos(pos), false);
}
}
}
}
}
fn main() -> SC2Result<()> {
let app = clap_app!(RustyReapers =>
(version: crate_version!())
(author: crate_authors!())
(@arg ladder_server: --LadderServer +takes_value)
(@arg opponent_id: --OpponentId +takes_value)
(@arg host_port: --GamePort +takes_value)
(@arg player_port: --StartPort +takes_value)
(@arg game_step: -s --step
+takes_value
default_value("2")
"Sets game step for bot"
)
(@subcommand local =>
(about: "Runs local game vs Computer")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race
+takes_value
"Sets opponent race"
)
(@arg difficulty: -d --difficulty
+takes_value
"Sets opponent diffuculty"
)
(@arg ai_build: --("ai-build")
+takes_value
"Sets opponent build"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
(@arg realtime: --realtime "Enables realtime mode")
)
(@subcommand human =>
(about: "Runs game Human vs Bot")
(@arg map: -m --map
+takes_value
)
(@arg race: -r --race *
+takes_value
"Sets human race"
)
(@arg name: --name
+takes_value
"Sets human name"
)
(@arg sc2_version: --("sc2-version")
+takes_value
"Sets sc2 version"
)
(@arg save_replay: --("save-replay")
+takes_value
"Sets path to save replay"
)
)
)
.get_matches();
let game_step = match app.value_of("game_step") {
Some("0") => panic!("game_step must be X >= 1"),
Some(step) => step.parse::<u32>().expect("Can't parse game_step"),
None => unreachable!(),
};
let mut bot = ReaperRushAI::default();
bot.set_game_step(game_step);
const LADDER_MAPS: &[&str] = &[
"DeathauraLE",
"EternalEmpireLE",
"EverDreamLE",
"GoldenWallLE",
"IceandChromeLE",
"PillarsofGoldLE",
"SubmarineLE",
];
let mut rng = thread_rng();
match app.subcommand() {
("local", Some(sub)) => run_vs_computer(
&mut bot,
Computer::new(
sub.value_of("race").map_or(Race::Random, |race| {
race.parse().expect("Can't parse computer race")
}),
sub.value_of("difficulty")
.map_or(Difficulty::VeryEasy, |difficulty| {
difficulty.parse().expect("Can't parse computer difficulty")
}),
sub.value_of("ai_build")
.map(|ai_build| ai_build.parse().expect("Can't parse computer build")),
),
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: sub.is_present("realtime"),
save_replay_as: sub.value_of("save_replay"),
},
),
("human", Some(sub)) => run_vs_human(
&mut bot,
PlayerSettings {
race: sub
.value_of("race")
.unwrap()
.parse()
.expect("Can't parse human race"),
name: sub.value_of("name"),
..Default::default()
},
sub.value_of("map")
.unwrap_or_else(|| LADDER_MAPS.choose(&mut rng).unwrap()),
LaunchOptions {
sc2_version: sub.value_of("sc2_version"),
realtime: true,
save_replay_as: sub.value_of("save_replay"),
},
),
_ => run_ladder_game(
&mut bot,
app.value_of("ladder_server").unwrap_or("127.0.0.1"),
app.value_of("host_port").expect("GamePort must be specified"),
app.value_of("player_port")
.expect("StartPort must be specified")
.parse()
.expect("Can't parse StartPort"),
app.value_of("opponent_id"),
),
}
}
| {
if self.minerals < 50 || self.supply_left == 0 {
return;
}
if self.supply_workers < 22 && self.can_afford(UnitTypeId::SCV, true) {
if let Some(cc) = self
.units
.my
.townhalls
.iter()
.find(|u| u.is_ready() && u.is_almost_idle())
{
cc.train(UnitTypeId::SCV, false);
self.subtract_resources(UnitTypeId::SCV, true);
}
}
if self.can_afford(UnitTypeId::Reaper, true) {
if let Some(barracks) = self | identifier_body |
lib.rs | )))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
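// Illustrative sketch (not part of the original crate): a minimal PageProvider
// backed by a pre-allocated pool of pages. `FixedPool` is a hypothetical name,
// and `Vec` is used for brevity -- a no_std build would keep an intrusive free
// list instead, and a real provider would hand out page-aligned frames from
// the OS or a kernel frame allocator.
pub struct FixedPool<'a> {
    free: Vec<&'a mut ObjectPage<'a>>,
}
impl<'a> PageProvider<'a> for FixedPool<'a> {
    fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>> {
        // Hand out the most recently released page first (LIFO keeps caches warm).
        self.free.pop()
    }
    fn release_page(&mut self, page: &'a mut ObjectPage<'a>) {
        self.free.push(page);
    }
}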
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn get_max_size(current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
/// Tries to locate a slab allocator.
///
/// Returns either an index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Returns a null pointer in case the zone allocator cannot satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
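/// Illustrative usage sketch (not part of the original crate): round-trips a
/// 24-byte object through the zone. The request lands in the 32-byte size
/// class via `try_acquire_slab`; a null pointer signals allocation failure.
pub unsafe fn usage_example(&mut self) {
    let layout = Layout::from_size_align(24, 8).unwrap();
    let p = self.allocate(layout);
    if !p.is_null() {
        self.deallocate(p, layout);
    }
}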
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `layout` - Size and alignment of the block, as passed to `allocate`.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else {
continue;
}
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
/// Holds allocated data.
///
/// Objects live within `data`, and the metadata tracks each object's status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 384 tracking bits (48 bytes) we do waste some space at the end of every page for 8-byte
/// allocations, but from 12-byte objects onwards this is okay.
bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass the full `Layout` here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> | let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
}
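// Worked example of the scan above (illustrative numbers): for an 8-byte size
// class, suppose bitfield[0] == 0b0111, i.e. objects 0..=2 are allocated. Then
//   negated = !0b0111 -> trailing_zeros() == 3, so first_free == 3,
//   idx     = 0 * 64 + 3 == 3,
//   offset  = 3 * 8 == 24 bytes into `data`,
//   addr    = page base + 24, which must also satisfy layout.align().
// The pair (3, addr) is returned only if the offset still lies inside the
// 4032-byte data area.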
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
/// Sets | {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0; | identifier_body |
lib.rs | )))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn | (current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
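// Illustrative mapping (not in the original file) of requests to size classes:
//   get_slab_idx(1)    == Some(0)  -> 8-byte class
//   get_slab_idx(24)   == Some(2)  -> 32-byte class
//   get_slab_idx(4032) == Some(9)  -> largest class
//   get_slab_idx(4033) == None     -> no slab can satisfy the request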
/// Tries to locate a slab allocator.
///
/// Returns either an index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Returns a null pointer in case the zone allocator cannot satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `layout` - Size and alignment of the block, as passed to `allocate`.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else {
continue;
}
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
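// Worked example of the page-mask recovery above (illustrative address): with
// BASE_PAGE_SIZE == 4096, a pointer 0x7f30_0000_1058 masked with !(4096 - 1)
// yields 0x7f30_0000_1000 -- the base of the ObjectPage the object was carved
// from, whose header then handles the actual deallocation.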
}
/// Holds allocated data.
///
/// Objects live within `data`, and the metadata tracks each object's status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 384 tracking bits (48 bytes) we do waste some space at the end of every page for 8-byte
/// allocations, but from 12-byte objects onwards this is okay.
bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass the full `Layout` here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0;
let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
}
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
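// Worked example (illustrative): for idx == 70, base_idx == 70 / 64 == 1 and
// bit_idx == 70 % 64 == 6, so object 70 is tracked by bit 6 of bitfield[1].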
/// Sets | get_max_size | identifier_name |
lib.rs | )))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn get_max_size(current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
/// Tries to locate a slab allocator.
///
/// Returns either an index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Returns a null pointer in case the zone allocator cannot satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `layout` - Size and alignment of the block, as passed to `allocate`.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
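// Sketch of the flow the commented-out `reallocate` above describes
// (illustrative; the numbers are hypothetical): growing a 16-byte block to 24
// bytes cannot reuse the slot, because get_max_size(16) == Some(16) < 24, so
// the slow path allocates from the 32-byte class, copies the old 16 bytes,
// and frees the original block. Shrinking within the same class is free.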
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else |
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
/// Holds allocated data.
///
/// Objects live within `data`, and the metadata tracks each object's status.
/// Currently, `bitfield`, `next` and `prev` pointer should fit inside
/// a single cache-line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 48 bits we do waste some space at the end of every page for 8 bytes allocations.
/// but 12 bytes on-wards is okay.
bitfield: [u64; 6],
}
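// Capacity arithmetic behind the note above (illustrative): the data area is
// 4096 - 64 == 4032 bytes and the bitfield tracks 6 * 64 == 384 objects. An
// 8-byte class could fit 4032 / 8 == 504 slots, so 504 - 384 == 120 slots
// (960 bytes) at the end of each page are untrackable; from 12-byte objects
// onwards, 4032 / 12 == 336 <= 384, so every slot is covered.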
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass the full `Layout` here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0;
let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
}
/// Check if the current `idx` is allocated.
///
/// # Notes
/// In case `idx` is 3 and allocation size of slab is
/// 8. The corresponding object would start at &data + 3 * 8.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
/// Sets | {
continue;
} | conditional_block |
lib.rs | provider)))
}
}
unsafe impl GlobalAlloc for SafeZoneAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align().is_power_of_two());
self.0.lock().allocate(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
//let ptr = NonNull::new_unchecked(ptr);
self.0.lock().deallocate(ptr, layout);
}
}
/// The memory backing as used by the SCAllocator.
///
/// A client that wants to use the zone or size class allocators
/// has to provide this interface and stick an implementation of it
/// into every SCAllocator.
pub trait PageProvider<'a>: Send {
fn allocate_page(&mut self) -> Option<&'a mut ObjectPage<'a>>;
fn release_page(&mut self, &'a mut ObjectPage<'a>);
}
/// A zone allocator.
///
/// Has a bunch of size class allocators and can serve
/// allocation requests for many different (MAX_SIZE_CLASSES) object sizes
/// (by selecting the right slab allocator).
pub struct ZoneAllocator<'a> {
pager: &'a Mutex<PageProvider<'a>>,
slabs: [SCAllocator<'a>; MAX_SIZE_CLASSES],
}
impl<'a> ZoneAllocator<'a> {
pub const MAX_ALLOC_SIZE: usize = 4032;
#[cfg(feature = "unstable")]
pub const fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> { | SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
#[cfg(not(feature = "unstable"))]
pub fn new(pager: &'a Mutex<PageProvider<'a>>) -> ZoneAllocator<'a> {
ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager),
SCAllocator::new(32, pager),
SCAllocator::new(64, pager),
SCAllocator::new(128, pager),
SCAllocator::new(256, pager),
SCAllocator::new(512, pager),
SCAllocator::new(1024, pager),
SCAllocator::new(2048, pager),
SCAllocator::new(4032, pager),
],
}
}
/// Return maximum size an object of size `current_size` can use.
///
/// Used to optimize `realloc`.
fn get_max_size(current_size: usize) -> Option<usize> {
match current_size {
0...8 => Some(8),
9...16 => Some(16),
17...32 => Some(32),
33...64 => Some(64),
65...128 => Some(128),
129...256 => Some(256),
257...512 => Some(512),
513...1024 => Some(1024),
1025...2048 => Some(2048),
2049...4032 => Some(4032),
_ => None,
}
}
/// Figure out index into zone array to get the correct slab allocator for that size.
fn get_slab_idx(requested_size: usize) -> Option<usize> {
match requested_size {
0...8 => Some(0),
9...16 => Some(1),
17...32 => Some(2),
33...64 => Some(3),
65...128 => Some(4),
129...256 => Some(5),
257...512 => Some(6),
513...1024 => Some(7),
1025...2048 => Some(8),
2049...4032 => Some(9),
_ => None,
}
}
/// Tries to locate a slab allocator.
///
/// Returns either an index into the slab array or None in case
/// the requested allocation size can not be satisfied by
/// any of the available slabs.
fn try_acquire_slab(&mut self, size: usize) -> Option<usize> {
ZoneAllocator::get_slab_idx(size).map(|idx| {
if self.slabs[idx].size == 0 {
self.slabs[idx].size = size;
}
idx
})
}
/// Refills the SCAllocator in slabs at `idx` with a ObjectPage.
///
/// # TODO
/// * Panics in case we're OOM (should probably return error).
fn refill_slab_allocator<'b>(&'b mut self, idx: usize) {
match self.pager.lock().allocate_page() {
Some(new_head) => self.slabs[idx].insert_slab(new_head),
None => panic!("OOM"),
};
}
/// Allocate a pointer to a block of memory of size `size` with alignment `align`.
///
/// Returns a null pointer in case the zone allocator cannot satisfy the allocation
/// of the requested size or if we do not have enough memory.
/// In case we are out of memory we try to refill the slab using our local pager
/// and re-try the allocation request once more before we give up.
pub unsafe fn allocate(&mut self, layout: Layout) -> *mut u8 {
match self.try_acquire_slab(layout.size()) {
Some(idx) => {
let mut p = self.slabs[idx].allocate(layout);
if p.is_null() {
self.refill_slab_allocator(idx);
p = self.slabs[idx].allocate(layout);
}
p
}
None => ptr::null_mut(),
}
}
/// Deallocates a pointer to a block of memory previously allocated by `allocate`.
///
/// # Arguments
/// * `ptr` - Address of the memory location to free.
/// * `layout` - Size and alignment of the block, as passed to `allocate`.
///
pub unsafe fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
match self.try_acquire_slab(layout.size()) {
Some(idx) => self.slabs[idx].deallocate(ptr, layout),
None => panic!(
"Unable to find slab allocator for size ({}) with ptr {:?}.",
layout.size(),
ptr
),
}
}
unsafe fn copy(dest: *mut u8, src: *const u8, n: usize) {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
/*pub unsafe fn reallocate<'b>(&'b mut self, ptr: *mut u8, old_size: usize, size: usize, align: usize) -> Option<*mut u8> {
// Return immediately in case we can still fit the new request in the current buffer
match ZoneAllocator::get_max_size(old_size) {
Some(max_size) => {
if max_size >= size {
return Some(ptr);
}
()
},
None => ()
};
// Otherwise allocate, copy, free:
self.allocate(size, align).map(|new| {
ZoneAllocator::copy(new, ptr, old_size);
self.deallocate(NonNull::new_unchecked(ptr as *mut u8), old_size, align);
new
})
}*/
}
/// A list of ObjectPage.
struct ObjectPageList<'a> {
/// Points to the head of the list.
head: Option<&'a mut ObjectPage<'a>>,
/// Number of elements in the list.
pub elements: usize,
}
impl<'a> ObjectPageList<'a> {
#[cfg(feature = "unstable")]
const fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
#[cfg(not(feature = "unstable"))]
fn new() -> ObjectPageList<'a> {
ObjectPageList {
head: None,
elements: 0,
}
}
fn iter_mut<'b>(&'b mut self) -> ObjectPageIterMut<'a> {
let m = match self.head {
None => Rawlink::none(),
Some(ref mut m) => Rawlink::some(*m),
};
ObjectPageIterMut { head: m }
}
/// Inserts `new_head` at the front of the list.
fn insert_front<'b>(&'b mut self, mut new_head: &'a mut ObjectPage<'a>) {
match self.head {
None => {
new_head.prev = Rawlink::none();
self.head = Some(new_head);
}
Some(ref mut head) => {
new_head.prev = Rawlink::none();
head.prev = Rawlink::some(new_head);
mem::swap(head, &mut new_head);
head.next = Rawlink::some(new_head);
}
}
self.elements += 1;
}
/// Removes `slab_page` from the list.
fn remove_from_list<'b, 'c>(&'b mut self, slab_page: &'c mut ObjectPage<'a>) {
unsafe {
match slab_page.prev.resolve_mut() {
None => {
self.head = slab_page.next.resolve_mut();
}
Some(prev) => {
prev.next = match slab_page.next.resolve_mut() {
None => Rawlink::none(),
Some(next) => Rawlink::some(next),
};
}
}
match slab_page.next.resolve_mut() {
None => (),
Some(next) => {
next.prev = match slab_page.prev.resolve_mut() {
None => Rawlink::none(),
Some(prev) => Rawlink::some(prev),
};
}
}
}
self.elements -= 1;
}
/// Does the list contain `s`?
fn has_objectpage<'b>(&'b mut self, s: &'a ObjectPage<'a>) -> bool {
for slab_page in self.iter_mut() {
if slab_page as *const ObjectPage == s as *const ObjectPage {
return true;
}
}
false
}
}
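// Illustrative test (not in the original file), mirroring `iter_empty_list`
// below: insertion at the front is an O(1) pointer update and is reflected in
// the element count.
#[test]
fn insert_front_counts_elements() {
    let mut page: ObjectPage = Default::default();
    let mut list = ObjectPageList::new();
    list.insert_front(&mut page);
    assert_eq!(list.elements, 1);
}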
/// Iterate over all the pages inside a slab allocator
struct ObjectPageIterMut<'a> {
head: Rawlink<ObjectPage<'a>>,
}
impl<'a> Iterator for ObjectPageIterMut<'a> {
type Item = &'a mut ObjectPage<'a>;
#[inline]
fn next(&mut self) -> Option<&'a mut ObjectPage<'a>> {
unsafe {
self.head.resolve_mut().map(|next| {
self.head = match next.next.resolve_mut() {
None => Rawlink::none(),
Some(ref mut sp) => Rawlink::some(*sp),
};
next
})
}
}
}
/// A slab allocator allocates elements of a fixed size.
///
/// It has a list of ObjectPage stored inside `slabs` from which
/// it allocates memory.
pub struct SCAllocator<'a> {
/// Allocation size.
size: usize,
/// Memory backing store, to request new ObjectPage.
pager: &'a Mutex<PageProvider<'a>>,
/// List of ObjectPage.
slabs: ObjectPageList<'a>,
}
#[test]
pub fn iter_empty_list() {
let mut new_head1: ObjectPage = Default::default();
let mut l = ObjectPageList::new();
l.insert_front(&mut new_head1);
for p in l.iter_mut() {}
}
impl<'a> SCAllocator<'a> {
/// Create a new SCAllocator.
#[cfg(feature = "unstable")]
pub const fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Create a new SCAllocator.
#[cfg(not(feature = "unstable"))]
pub fn new(size: usize, pager: &'a Mutex<PageProvider<'a>>) -> SCAllocator<'a> {
// const_assert!(size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE);
SCAllocator {
size: size,
pager: pager,
slabs: ObjectPageList::new(),
}
}
/// Return object size of this allocator.
pub fn size(&self) -> usize {
self.size
}
/// Try to allocate a new ObjectPage and insert it.
///
/// # TODO
/// * Amount is currently ignored.
/// * Panics on OOM (should return error!)
fn refill_slab<'b>(&'b mut self, amount: usize) {
let mut pager = self.pager.lock();
for i in 0..amount {
match pager.allocate_page() {
Some(new_head) => {
self.insert_slab(new_head);
}
None => panic!("OOM"),
}
}
}
/// Add a new ObjectPage.
pub fn insert_slab<'b>(&'b mut self, new_head: &'a mut ObjectPage<'a>) {
self.slabs.insert_front(new_head);
}
/// Tries to allocate a block of memory with respect to the `alignment`.
///
/// Only searches within already allocated slab pages.
fn try_allocate_from_pagelist<'b>(&'b mut self, layout: Layout) -> *mut u8 {
let size = self.size;
for (idx, slab_page) in self.slabs.iter_mut().enumerate() {
let ptr = slab_page.allocate(layout);
if !ptr.is_null() {
return ptr;
} else {
continue;
}
}
ptr::null_mut()
}
/// Allocates a block of memory with respect to `alignment`.
///
/// In case of failure will try to grow the slab allocator by requesting
/// additional pages and re-try the allocation once more before we give up.
pub fn allocate<'b>(&'b mut self, layout: Layout) -> *mut u8 {
debug!(
"SCAllocator({}) is trying to allocate {:?}",
self.size, layout
);
assert!(layout.size() <= self.size);
assert!(self.size <= (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
assert!(new_layout.size() >= layout.size());
let ptr = self.try_allocate_from_pagelist(new_layout);
if ptr.is_null() {
self.refill_slab(1);
return self.try_allocate_from_pagelist(layout);
}
debug!(
"SCAllocator({}) allocated ptr=0x{:x}",
self.size, ptr as usize
);
return ptr;
}
/// Deallocates a previously allocated block.
///
/// # Bug
/// This never releases memory in case the ObjectPage are provided by the zone.
pub fn deallocate<'b>(&'b mut self, ptr: *mut u8, layout: Layout) {
debug!(
"SCAllocator({}) is trying to deallocate ptr = 0x{:x} layout={:?}",
self.size, ptr as usize, layout
);
assert!(layout.size() <= self.size);
let page = (ptr as usize) & !(BASE_PAGE_SIZE - 1) as usize;
let slab_page = unsafe { mem::transmute::<VAddr, &'a mut ObjectPage>(page) };
assert!(self.size < (BASE_PAGE_SIZE as usize - CACHE_LINE_SIZE));
let new_layout = unsafe { Layout::from_size_align_unchecked(self.size, layout.align()) };
slab_page.deallocate(ptr, new_layout);
// Drop page in case it is empty and not the last
if slab_page.is_empty() && self.slabs.elements > 1 {
self.slabs.remove_from_list(slab_page);
let mut pager = self.pager.lock();
pager.release_page(slab_page);
}
}
}
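// Hedged usage sketch (not part of the original source): one allocate/free
// round-trip through the size class above. `PageProvider::new()` is a
// hypothetical constructor; the real backing store may be built differently.
#[cfg(test)]
mod sc_allocator_sketch {
    use super::*;
    #[test]
    fn allocate_then_deallocate() {
        let pager = Mutex::new(PageProvider::new());
        let mut sc = SCAllocator::new(16, &pager);
        let layout = Layout::from_size_align(16, 8).unwrap();
        // The first call finds no pages, refills one, then retries
        let ptr = sc.allocate(layout);
        assert!(!ptr.is_null());
        // Returning the block flips its bit back in the page's bitfield
        sc.deallocate(ptr, layout);
    }
}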
/// Holds allocated data.
///
/// Objects live within `data`, and the metadata tracks each object's status.
/// Currently the `bitfield`, `next` and `prev` pointers should fit inside
/// a single cache line.
#[repr(packed)]
pub struct ObjectPage<'a> {
/// Holds memory objects.
data: [u8; 4096 - 64],
/// Next element in list (used by `ObjectPageList`).
next: Rawlink<ObjectPage<'a>>,
prev: Rawlink<ObjectPage<'a>>,
/// A bit-field to track free/allocated memory within `data`.
///
/// # Notes
/// * With only 384 bits we do waste some space at the end of every page for
///   8-byte allocations, but 12 bytes onwards is okay.
bitfield: [u64; 6],
}
impl<'a> Default for ObjectPage<'a> {
fn default() -> ObjectPage<'a> {
unsafe { mem::zeroed() }
}
}
unsafe impl<'a> Send for ObjectPage<'a> {}
unsafe impl<'a> Sync for ObjectPage<'a> {}
impl<'a> fmt::Debug for ObjectPage<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ObjectPage")
}
}
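// Illustrative checks (not in the original source) for the layout above and
// the first-fit scan below. The size check assumes `Rawlink` is pointer-sized
// and a 64-bit target, so 4032 + 8 + 8 + 48 bytes fill a 4 KiB page exactly.
#[cfg(test)]
mod object_page_sketch {
    use super::*;
    #[test]
    fn object_page_fills_a_base_page() {
        assert_eq!(mem::size_of::<ObjectPage<'_>>(), 4096);
    }
    #[test]
    fn first_free_bit_example() {
        // Word 0b0111 marks slots 0..=2 allocated, so the negation's first
        // set bit - reported by trailing_zeros - is free slot index 3.
        let word: u64 = 0b0111;
        let first_free = (!word).trailing_zeros() as usize;
        assert_eq!(first_free, 3);
        // For an 8-byte size class this is byte offset 3 * 8 = 24 in `data`
        assert_eq!(first_free * 8, 24);
    }
}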
impl<'a> ObjectPage<'a> {
/// Tries to find a free block of memory that satisfies `alignment` requirement.
///
/// # Notes
/// * We pass size here to be able to calculate the resulting address within `data`.
fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> {
unsafe {
for (base_idx, b) in self.bitfield.iter().enumerate() {
let bitval = *b;
if bitval == u64::max_value() {
continue;
} else {
let negated = !bitval;
let first_free = negated.trailing_zeros() as usize;
let idx: usize = base_idx * 64 + first_free;
let offset = idx * layout.size();
let offset_inside_data_area =
offset <= (BASE_PAGE_SIZE - CACHE_LINE_SIZE - layout.size());
if !offset_inside_data_area {
return None;
}
let addr: usize = ((self as *const ObjectPage) as usize) + offset;
let alignment_ok = addr % layout.align() == 0;
let block_is_free = bitval & (1 << first_free) == 0;
if alignment_ok && block_is_free {
return Some((idx, addr));
}
}
}
}
None
}
/// Check if the current `idx` is allocated.
///
/// # Notes
/// If `idx` is 3 and the allocation size of the slab is 8, the
/// corresponding object would start at `&data + 3 * 8`.
fn is_allocated(&mut self, idx: usize) -> bool {
let base_idx = idx / 64;
let bit_idx = idx % 64;
(self.bitfield[base_idx] & (1 << bit_idx)) > 0
}
/// Sets the bit | ZoneAllocator {
pager: pager,
slabs: [
SCAllocator::new(8, pager),
SCAllocator::new(16, pager), | random_line_split |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits();
self.file_properties().frame_length
}
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
}
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
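// Hedged usage sketches (not part of the original file). The first drains the
// batched edit-log stream defined above; the second pairs `retired_edits` and
// `perform_edits` from the `EditableAnimation` impl below. Both assume an
// async executor, and that `AnimationEdit`/`RetiredEdit` implement Debug.
#[allow(dead_code)]
async fn dump_edit_log(animation: &StreamAnimation) {
    let mut edits = animation.read_edit_log(0..animation.get_num_edits());
    while let Some(edit) = edits.next().await {
        println!("{:?}", edit);
    }
}
#[allow(dead_code)]
async fn watch_one_edit(animation: &StreamAnimation, edit: AnimationEdit) -> Option<RetiredEdit> {
    // Subscribe first so the retired edit is buffered for this receiver
    let mut retired = animation.retired_edits();
    // perform_edits blocks until the edit has been processed by the core
    animation.perform_edits(vec![edit]);
    retired.next().await
}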
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn | (&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (i.e., the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
}
| assign_element_id | identifier_name |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits();
self.file_properties().frame_length
}
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len()!= 0 && fetched.len() == 0 && next_response.is_none() |
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn assign_element_id(&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (i.e., the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
}
| {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
} | conditional_block |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
};
let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits(); |
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
}
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn assign_element_id(&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (i.e., the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
} | self.file_properties().frame_length
} | random_line_split |
stream_animation.rs | use super::stream_layer::*;
use super::stream_animation_core::*;
use crate::traits::*;
use crate::storage::*;
use crate::storage::file_properties::*;
use crate::storage::layer_properties::*;
use ::desync::*;
use flo_stream::*;
use itertools::*;
use futures::prelude::*;
use futures::task::{Poll};
use futures::stream;
use futures::stream::{BoxStream};
use std::sync::*;
use std::ops::{Range};
use std::time::{Duration};
use std::collections::{HashMap};
///
/// Animation that sends its updates to a storage stream
///
pub struct StreamAnimation {
/// The core, where the actual work is done
core: Arc<Desync<StreamAnimationCore>>,
/// The publisher for the edits to this animation
edit_publisher: Publisher<Arc<Vec<AnimationEdit>>>,
/// Available synchronous requests
idle_sync_requests: Desync<Vec<Desync<Option<Vec<StorageResponse>>>>>,
}
impl StreamAnimation {
///
/// Creates a new stream animation. The result is the animation implementation and the
/// stream of requests to be sent to the storage layer
///
pub fn new<ConnectStream: FnOnce(BoxStream<'static, Vec<StorageCommand>>) -> BoxStream<'static, Vec<StorageResponse>>>(connect_stream: ConnectStream) -> StreamAnimation | let core = Arc::new(Desync::new(core));
// Anything published to the editor is piped into the core
pipe_in(Arc::clone(&core), edit_publisher.subscribe(), |core, edits: Arc<Vec<AnimationEdit>>| {
async move {
// Edits require some pre-processing: assign the IDs, perform undo actions and write to the log (note that undo edits are performed before serialization)
let mut edits = core.assign_ids_to_edits(&*edits).await;
core.process_undo_edits(&mut edits).await;
core.serialize_edits_to_log(&edits).await;
// Perform the edits to retire them
let retired = core.perform_edits(edits).await;
// Clean up the edit publishers, in case any aren't being listened to any more
core.retired_edit_senders.retain(|sender| sender.count_subscribers() > 0);
// Send the edits as retired
for retired_sender in core.retired_edit_senders.iter_mut() {
retired_sender.publish(retired.clone()).await;
}
}.boxed()
});
// Build the animation
StreamAnimation {
core: core,
idle_sync_requests: Desync::new(vec![]),
edit_publisher: edit_publisher
}
}
///
/// Performs an asynchronous request on a storage layer for this animation
///
pub (super) fn request_async<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> impl Future<Output=Option<Vec<StorageResponse>>> {
request_core_async(&self.core, request.into_iter().collect())
}
///
/// Performs a synchronous request on the storage layer for this animation
///
/// Synchronous requests are fairly slow, so should be avoided in inner loops
///
pub (super) fn request_sync<Commands: Send+IntoIterator<Item=StorageCommand>>(&self, request: Commands) -> Option<Vec<StorageResponse>> {
request_core_sync(Arc::clone(&self.core), request.into_iter().collect())
}
///
/// Waits for any pending edits on this animation to complete
///
pub (super) fn wait_for_edits(&self) {
// Force a desync to wait for the when_empty future to complete
let when_empty = self.edit_publisher.republish().when_empty();
// Create a desync and wait for the 'when_empty' signal to show up (indicating all the edits have been sent to the core)
let wait_for_edits = Desync::new(());
let _ = wait_for_edits.future_desync(move |_| async move { when_empty.await; }.boxed());
// Synchronise after the future has completed
wait_for_edits.sync(|_| { });
// Synchronise with the animation core so that all the edits are performed
self.core.sync(|_| { });
}
///
/// Retrieves the current file properties for the animation
///
fn file_properties(&self) -> FileProperties {
// Retrieve the properties from storage (and update the version we have stored if there is one)
let mut response = self.request_sync(vec![StorageCommand::ReadAnimationProperties]).unwrap_or_else(|| vec![]);
let properties;
match response.pop() {
Some(StorageResponse::NotFound) => {
// File properties are not set
properties = FileProperties::default();
}
Some(StorageResponse::AnimationProperties(props)) => {
// Deserialize the file properties
properties = FileProperties::deserialize(&mut props.chars()).expect("Could not parse file properties");
}
unknown => panic!("Unexpected response {:?} while reading file properties", unknown)
}
properties
}
}
impl Animation for StreamAnimation {
///
/// Retrieves the frame size of this animation
///
fn size(&self) -> (f64, f64) {
self.wait_for_edits();
self.file_properties().size
}
///
/// Retrieves the length of this animation
///
fn duration(&self) -> Duration {
self.wait_for_edits();
self.file_properties().duration
}
///
/// Retrieves the duration of a single frame
///
fn frame_length(&self) -> Duration {
self.wait_for_edits();
self.file_properties().frame_length
}
///
/// Retrieves the IDs of the layers in this object
///
fn get_layer_ids(&self) -> Vec<u64> {
self.wait_for_edits();
let layer_responses = self.request_sync(vec![StorageCommand::ReadLayers]).unwrap_or_else(|| vec![]);
layer_responses
.into_iter()
.flat_map(|response| {
match response {
StorageResponse::LayerProperties(id, properties) => Some((id, LayerProperties::deserialize(&mut properties.chars())?)),
_ => None
}
})
.sorted_by(|(id_a, layer_a), (id_b, layer_b)| {
if layer_a.ordering == layer_b.ordering {
id_a.cmp(&id_b)
} else {
layer_a.ordering.cmp(&layer_b.ordering)
}
})
.map(|(id, _props)| id)
.collect()
}
///
/// Retrieves the layer with the specified ID from this animation
///
fn get_layer_with_id(&self, layer_id: u64) -> Option<Arc<dyn Layer>> {
self.wait_for_edits();
// Read the properties for the specified layer
let layer_properties = self.request_sync(vec![StorageCommand::ReadLayerProperties(layer_id)]);
if let Some(StorageResponse::LayerProperties(_, serialized)) = layer_properties.and_then(|mut props| props.pop()) {
if let Some(layer_properties) = LayerProperties::deserialize(&mut serialized.chars()) {
// Found the layer
Some(Arc::new(StreamLayer::new(Arc::clone(&self.core), layer_id, layer_properties)))
} else {
// Can't deserialize the layer properties
None
}
} else {
// Layer does not exist
None
}
}
///
/// Retrieves the total number of edits that have been performed on this animation
///
fn get_num_edits(&self) -> usize {
self.wait_for_edits();
let mut response = self.request_sync(vec![StorageCommand::ReadEditLogLength]).unwrap_or_else(|| vec![]);
match response.pop() {
Some(StorageResponse::NumberOfEdits(num_edits)) => num_edits,
_ => panic!("Unexpected response while reading number of edits")
}
}
///
/// Reads from the edit log for this animation
///
fn read_edit_log<'a>(&'a self, range: Range<usize>) -> BoxStream<'a, AnimationEdit> {
self.wait_for_edits();
// Clamp the range of edits to the maximum number of edits
let max_edit = self.get_num_edits();
let range = if range.end > max_edit {
range.start..max_edit
} else {
range
};
// Generate a stream to read from the edit log as we go
let per_request = 20;
let mut remaining = range;
let mut fetched = vec![];
let mut next_response = None;
stream::poll_fn(move |context| {
loop {
if remaining.len() != 0 && fetched.len() == 0 && next_response.is_none() {
// Fetch up to per_request items for each request
let num_to_fetch = remaining.len();
let num_to_fetch = if num_to_fetch > per_request { per_request } else { num_to_fetch };
let fetch_range = (remaining.start)..(remaining.start + num_to_fetch);
// Start polling for the next batch
next_response = Some(self.request_async(vec![StorageCommand::ReadEdits(fetch_range)]));
remaining = (remaining.start+num_to_fetch)..(remaining.end);
}
if let Some(next) = fetched.pop() {
// Just returning the batch we've already fetched
return Poll::Ready(Some(next));
} else if let Some(mut waiting) = next_response.take() {
// Try to retrieve the next item from the batch
let poll_response = waiting.poll_unpin(context);
match poll_response {
Poll::Pending => {
// Keep waiting for the response
next_response = Some(waiting);
return Poll::Pending
},
Poll::Ready(response) => {
// Load the edits into the fetched array
let mut response = response.unwrap_or(vec![]);
while let Some(response) = response.pop() {
// Ignore everything that's not an edit (we have no way to do error handling here)
if let StorageResponse::Edit(_num, serialized_edit) = response {
// Store edits that deserialize successfully on the fetched list
if let Some(edit) = AnimationEdit::deserialize(&mut serialized_edit.chars()) {
fetched.push(edit)
}
}
}
}
}
} else if remaining.len() == 0 {
// Reached the end of the stream
return Poll::Ready(None);
}
}
}).fuse().boxed()
}
}
impl EditableAnimation for StreamAnimation {
///
/// Assigns a new unique ID for creating a new motion
///
/// This ID will not have been used so far and will not be used again, and can be used as the ID for the MotionElement vector element.
///
fn assign_element_id(&self) -> ElementId {
// Create a queue to run the 'assign element ID' future on
let core = Arc::clone(&self.core);
// Perform the request and retrieve the result
core.future_desync(|core| core.assign_element_id(ElementId::Unassigned).boxed())
.sync().unwrap()
}
///
/// Retrieves a sink that can be used to send edits for this animation
///
/// Edits are supplied as groups (stored in a vec) so that it's possible to ensure that
/// a set of related edits are performed atomically
///
fn edit(&self) -> Publisher<Arc<Vec<AnimationEdit>>> {
self.edit_publisher.republish()
}
///
/// Sends a set of edits straight to this animation
///
/// (Note that these are not always published to the publisher)
///
fn perform_edits(&self, edits: Vec<AnimationEdit>) {
// Get a publisher to send the edits to (this editor does send its edits to the publisher)
let mut publisher = self.edit_publisher.republish();
// Get an idle sync request desync
// We use desync instead of the futures executor as the executor will panic if we are called from within another future
// (desync provides a way around this problem)
let sync_request = self.idle_sync_requests.sync(|reqs| {
let next_request = reqs.pop();
if let Some(next_request) = next_request {
next_request
} else {
let req = Desync::new(None);
req
}
});
// Queue a request
sync_request.future_desync(move |_| {
async move {
// Publish the edits
publisher.publish(Arc::new(edits)).await;
}.boxed()
}).sync().ok();
// Return the sync_request to the pool
self.idle_sync_requests.desync(move |reqs| { reqs.push(sync_request) });
// Wait for the edits to complete
self.wait_for_edits();
}
///
/// Returns a stream of edits as they are being retired (i.e., the edits that are now visible on the animation)
///
fn retired_edits(&self) -> BoxStream<'static, RetiredEdit> {
// Create a channel to send edits through
let mut sender = Publisher::new(10);
let receiver = sender.subscribe();
// Add to the list in the core
self.core.sync(move |core| {
core.retired_edit_senders.push(sender);
});
// Box up the receiver to create the result
receiver.boxed()
}
///
/// Flushes any caches this might have (forces reload from data storage)
///
fn flush_caches(&self) {
self.core.desync(|core| {
core.cached_keyframe = None;
});
}
}
| {
// Create the storage requests. When the storage layer is running behind, we'll buffer up to 10 of these
let mut requests = Publisher::new(10);
let commands = requests.subscribe().boxed();
let storage_responses = connect_stream(commands);
let mut edit_publisher = Publisher::new(10);
let storage_connection = StorageConnection::new(requests, storage_responses);
// The core is used to actually execute the requests
let core = StreamAnimationCore {
storage_connection: storage_connection,
next_element_id: None,
cached_layers: HashMap::new(),
cached_keyframe: None,
brush_defn: None,
brush_props: None,
path_brush_defn: None,
path_brush_props: None,
retired_edit_senders: vec![],
}; | identifier_body |
config.rs | :tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl ConfigField for $struct_name {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
$(
stringify!($field_name) => self.$field_name.set(rem, value),
)*
_ => Err(DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
}
}
fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
$(
let key = format!(concat!("{}.", stringify!($field_name)), key_prefix);
let desc = concat!($($d),*).trim();
self.$field_name.visit(v, key.as_str(), desc);
)*
}
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
}
}
config_namespace! {
/// Options related to catalog and directory scanning
pub struct CatalogOptions {
/// Whether the default catalog and schema should be created automatically.
pub create_default_catalog_and_schema: bool, default = true
/// The default catalog name - this impacts what SQL queries use if not specified
pub default_catalog: String, default = "datafusion".to_string()
/// The default schema name - this impacts what SQL queries use if not specified
pub default_schema: String, default = "public".to_string()
/// Should DataFusion provide access to `information_schema`
/// virtual tables for displaying schema information
pub information_schema: bool, default = false
/// Location scanned to load tables for `default` schema
pub location: Option<String>, default = None
/// Type of `TableProvider` to use when loading `default` schema
pub format: Option<String>, default = None
/// If the file has a header
pub has_header: bool, default = false
}
}
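// Hedged sketch (not in the original file): exercising the `ConfigField::set`
// impl the macro generates for `CatalogOptions` above. Assumes `bool` and
// `String` implement `ConfigField` by parsing the string value, as the macro
// requires of every field type.
#[cfg(test)]
mod catalog_options_sketch {
    use super::*;
    #[test]
    fn set_routes_dotted_keys_to_fields() {
        let mut opts = CatalogOptions::default();
        opts.set("information_schema", "true").unwrap();
        opts.set("default_schema", "analytics").unwrap();
        assert!(opts.information_schema);
        assert_eq!(opts.default_schema, "analytics");
        // Unknown field names surface as an Internal error, not a panic
        assert!(opts.set("no_such_field", "x").is_err());
    }
}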
config_namespace! {
/// Options related to SQL parser
pub struct SqlParserOptions {
/// When set to true, SQL parser will parse float as decimal type
pub parse_float_as_decimal: bool, default = false
/// When set to true, the SQL parser will normalize identifiers (converting them to lowercase when not quoted)
pub enable_ident_normalization: bool, default = true
/// Configure the SQL dialect used by DataFusion's parser; supported values include: Generic,
/// MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, MsSQL, ClickHouse, BigQuery, and Ansi.
pub dialect: String, default = "generic".to_string()
}
}
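// Hedged sketch: the parser options are plain fields, so the dialect can be
// set directly or through the string-keyed `ConfigField` path shown above.
#[cfg(test)]
mod sql_parser_options_sketch {
    use super::*;
    #[test]
    fn dialect_can_be_set_by_key() {
        let mut opts = SqlParserOptions::default();
        opts.set("dialect", "PostgreSQL").unwrap();
        assert_eq!(opts.dialect, "PostgreSQL");
    }
}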
config_namespace! {
/// Options related to query execution
pub struct ExecutionOptions {
/// Default batch size while creating new batches, it's especially useful for
/// buffer-in-memory batches since creating tiny batches would result in too much
/// metadata memory consumption
pub batch_size: usize, default = 8192
/// When set to true, record batches will be examined between each operator and
/// small batches will be coalesced into larger batches. This is helpful when there
/// are highly selective filters or joins that could produce tiny output batches. The
/// target batch size is determined by the `datafusion.execution.batch_size` setting
pub coalesce_batches: bool, default = true
/// Should DataFusion collect statistics after listing files
pub collect_statistics: bool, default = false
/// Number of partitions for query execution. Increasing partitions can increase
/// concurrency.
///
/// Defaults to the number of CPU cores on the system
pub target_partitions: usize, default = num_cpus::get()
/// The default time zone
///
/// Some functions, e.g. `EXTRACT(HOUR from SOME_TIME)`, shift the underlying datetime
/// according to this time zone, and then extract the hour
pub time_zone: Option<String>, default = Some("+00:00".into())
/// Parquet options
pub parquet: ParquetOptions, default = Default::default()
/// Aggregate options
pub aggregate: AggregateOptions, default = Default::default()
/// Fan-out during initial physical planning.
///
/// This is mostly use to plan `UNION` children in parallel.
///
/// Defaults to the number of CPU cores on the system
pub planning_concurrency: usize, default = num_cpus::get()
}
}
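// Hedged sketch: both parallelism knobs above default to the machine's core
// count, so a freshly constructed `ExecutionOptions` should reflect that.
#[cfg(test)]
mod execution_options_sketch {
    use super::*;
    #[test]
    fn parallelism_defaults_track_cpu_count() {
        let opts = ExecutionOptions::default();
        assert_eq!(opts.target_partitions, num_cpus::get());
        assert_eq!(opts.planning_concurrency, num_cpus::get());
    }
}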
config_namespace! {
/// Options related to reading of parquet files
pub struct ParquetOptions {
/// If true, reads the Parquet data page level metadata (the
/// Page Index), if present, to reduce the I/O and number of
/// rows decoded.
pub enable_page_index: bool, default = true
/// If true, the parquet reader attempts to skip entire row groups based
/// on the predicate in the query and the metadata (min/max values) stored in
/// the parquet file
pub pruning: bool, default = true
/// If true, the parquet reader skip the optional embedded metadata that may be in
/// the file Schema. This setting can help avoid schema conflicts when querying
/// multiple parquet files with schemas containing compatible types but different metadata
pub skip_metadata: bool, default = true
/// If specified, the parquet reader will try and fetch the last `size_hint`
/// bytes of the parquet file optimistically. If not specified, two reads are required:
/// One read to fetch the 8-byte parquet footer and
/// another to fetch the metadata length encoded in the footer
pub metadata_size_hint: Option<usize>, default = None
/// If true, filter expressions are applied during the parquet decoding operation to
/// reduce the number of rows decoded
pub pushdown_filters: bool, default = false
/// If true, filter expressions evaluated during the parquet decoding operation
/// will be reordered heuristically to minimize the cost of evaluation. If false,
/// the filters are applied in the same order as written in the query
pub reorder_filters: bool, default = false
}
}
config_namespace! {
/// Options related to aggregate execution
pub struct AggregateOptions {
/// Specifies the threshold for using `ScalarValue`s to update
/// accumulators during high-cardinality aggregations for each input batch.
///
/// The aggregation is considered high-cardinality if the number of affected groups
/// is greater than or equal to `batch_size / scalar_update_factor`. In such cases,
/// `ScalarValue`s are utilized for updating accumulators, rather than the default
/// batch-slice approach. This can lead to performance improvements.
///
/// By adjusting the `scalar_update_factor`, you can balance the trade-off between
/// more efficient accumulator updates and the number of groups affected.
pub scalar_update_factor: usize, default = 10
}
}
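// Worked example (illustrative) of the threshold described above: with the
// default batch_size of 8192 and scalar_update_factor of 10, an input batch
// touching at least 819 groups (8192 / 10) takes the ScalarValue update path.
#[cfg(test)]
mod scalar_update_factor_sketch {
    #[test]
    fn threshold_arithmetic() {
        let batch_size: usize = 8192;
        let scalar_update_factor: usize = 10;
        assert_eq!(batch_size / scalar_update_factor, 819);
    }
}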
config_namespace! {
/// Options related to query optimization
pub struct OptimizerOptions {
/// When set to true, the physical plan optimizer will try to add round robin
/// repartitioning to increase parallelism to leverage more CPU cores
pub enable_round_robin_repartition: bool, default = true
/// When set to true, the optimizer will insert filters before a join between
/// a nullable and non-nullable column to filter out nulls on the nullable side. This
/// filter can add additional overhead when the file format does not fully support
/// predicate push down.
pub filter_null_join_keys: bool, default = false
/// Should DataFusion repartition data using the aggregate keys to execute aggregates
/// in parallel using the provided `target_partitions` level
pub repartition_aggregations: bool, default = true
/// Minimum total file size in bytes to perform file scan repartitioning.
pub repartition_file_min_size: usize, default = 10 * 1024 * 1024
/// Should DataFusion repartition data using the join keys to execute joins in parallel
/// using the provided `target_partitions` level
pub repartition_joins: bool, default = true
/// Should DataFusion allow symmetric hash joins for unbounded data sources even when
/// its inputs do not have any ordering or filtering. If the flag is not enabled,
/// the SymmetricHashJoin operator will be unable to prune its internal buffers,
/// resulting in certain join types - such as Full, Left, LeftAnti, LeftSemi, Right,
/// RightAnti, and RightSemi - being produced only at the end of the execution.
/// This is not typical in stream processing. Additionally, without proper design for
/// long-running execution, all types of joins may encounter out-of-memory errors.
pub allow_symmetric_joins_without_pruning: bool, default = true
/// When set to `true`, file groups will be repartitioned to achieve maximum parallelism.
/// Currently Parquet and CSV formats are supported.
///
/// If set to `true`, all files will be repartitioned evenly (i.e., a single large file
/// might be partitioned into smaller chunks) for parallel scanning.
/// If set to `false`, different files will be read in parallel, but repartitioning won't
/// happen within a single file.
pub repartition_file_scans: bool, default = true
/// Should DataFusion repartition data using the partitions keys to execute window
/// functions in parallel using the provided `target_partitions` level
pub repartition_windows: bool, default = true
/// Should DataFusion execute sorts in a per-partition fashion and merge
/// afterwards instead of coalescing first and sorting globally.
/// When this flag is enabled, plans in the form below
///
/// ```text
/// "SortExec: [a@0 ASC]",
/// " CoalescePartitionsExec",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
/// would turn into the plan below which performs better in multithreaded environments
///
/// ```text
/// "SortPreservingMergeExec: [a@0 ASC]",
/// " SortExec: [a@0 ASC]",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
pub repartition_sorts: bool, default = true
/// When set to true, the logical plan optimizer will produce warning
/// messages if any optimization rules produce errors and then proceed to the next
/// rule. When set to false, any rules that produce errors will cause the query to fail
pub skip_failed_rules: bool, default = false
/// Number of times that the optimizer will attempt to optimize the plan
pub max_passes: usize, default = 3
/// When set to true, the physical plan optimizer will run a top down
/// process to reorder the join keys
pub top_down_join_key_reordering: bool, default = true
/// When set to true, the physical plan optimizer will prefer HashJoin over SortMergeJoin.
/// HashJoin can work more efficiently than SortMergeJoin but consumes more memory
pub prefer_hash_join: bool, default = true
/// The maximum estimated size in bytes for one input side of a HashJoin
/// will be collected into a single partition
pub hash_join_single_partition_threshold: usize, default = 1024 * 1024
}
}
config_namespace! {
/// Options controlling explain output
pub struct ExplainOptions {
/// When set to true, the explain statement will only print logical plans
pub logical_plan_only: bool, default = false
/// When set to true, the explain statement will only print physical plans
pub physical_plan_only: bool, default = false
}
}
/// A key value pair, with a corresponding description
#[derive(Debug)]
pub struct ConfigEntry {
/// A unique string to identify this config value
pub key: String,
/// The value if any
pub value: Option<String>,
/// A description of this configuration entry
pub description: &'static str,
}
/// Configuration options struct, able to store both built-in configuration and custom options
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct ConfigOptions {
/// Catalog options
pub catalog: CatalogOptions,
/// Execution options
pub execution: ExecutionOptions,
/// Optimizer options
pub optimizer: OptimizerOptions,
/// SQL parser options
pub sql_parser: SqlParserOptions,
/// Explain options
pub explain: ExplainOptions,
/// Optional extensions registered using [`Extensions::insert`]
pub extensions: Extensions,
}
impl ConfigField for ConfigOptions {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
// Extensions are handled in the public `ConfigOptions::set`
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
"catalog" => self.catalog.set(rem, value),
"execution" => self.execution.set(rem, value),
"optimizer" => self.optimizer.set(rem, value),
"explain" => self.explain.set(rem, value),
"sql_parser" => self.sql_parser.set(rem, value),
_ => Err(DataFusionError::Internal(format!(
"Config value \"{key}\" not found on ConfigOptions"
))),
}
}
fn visit<V: Visit>(&self, v: &mut V, _key_prefix: &str, _description: &'static str) {
self.catalog.visit(v, "datafusion.catalog", "");
self.execution.visit(v, "datafusion.execution", "");
self.optimizer.visit(v, "datafusion.optimizer", "");
self.explain.visit(v, "datafusion.explain", "");
self.sql_parser.visit(v, "datafusion.sql_parser", "");
}
}
impl ConfigOptions {
/// Creates a new [`ConfigOptions`] with default values
pub fn new() -> Self {
Self::default()
}
/// Set extensions to provided value
pub fn with_extensions(mut self, extensions: Extensions) -> Self {
self.extensions = extensions;
self
}
/// Set a configuration option
pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (prefix, key) = key.split_once('.').ok_or_else(|| {
DataFusionError::External(
format!("could not find config namespace for key \"{key}\"",).into(),
)
})?;
if prefix == "datafusion" {
return ConfigField::set(self, key, value);
}
let e = self.extensions.0.get_mut(prefix);
let e = e.ok_or_else(|| {
DataFusionError::External(
format!("Could not find config namespace \"{prefix}\"",).into(),
)
})?;
e.0.set(key, value)
}
/// Create new ConfigOptions struct, taking values from
/// environment variables where possible.
///
/// For example, setting `DATAFUSION_EXECUTION_BATCH_SIZE` will
/// control `datafusion.execution.batch_size`.
pub fn from_env() -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
// Extract the names of all fields and then look up the corresponding
// environment variables. This isn't hugely efficient but avoids
// ambiguity between `a.b` and `a_b` which would both correspond
// to an environment variable of `A_B`
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
let env = key.to_uppercase().replace('.', "_");
if let Some(var) = std::env::var_os(env) {
ret.set(&key, var.to_string_lossy().as_ref())?;
}
}
Ok(ret)
}
/// Create new ConfigOptions struct, taking values from a string hash map.
///
/// Only the built-in configurations will be extracted from the hash map
/// and other key value pairs will be ignored.
pub fn from_string_hash_map(settings: HashMap<String, String>) -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
if let Some(var) = settings.get(&key) {
ret.set(&key, var)?;
}
}
Ok(ret)
}
/// Returns the [`ConfigEntry`] stored within this [`ConfigOptions`]
pub fn entries(&self) -> Vec<ConfigEntry> {
struct Visitor(Vec<ConfigEntry>);
impl Visit for Visitor {
fn some<V: Display>(
&mut self,
key: &str,
value: V,
description: &'static str,
) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: Some(value.to_string()),
description,
})
}
fn none(&mut self, key: &str, description: &'static str) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: None,
description,
})
}
}
let mut v = Visitor(vec![]);
self.visit(&mut v, "datafusion", "");
v.0.extend(self.extensions.0.values().flat_map(|e| e.0.entries()));
v.0
}
/// Generate documentation that can be included in the user guide
pub fn generate_config_markdown() -> String {
use std::fmt::Write as _;
let mut s = Self::default();
// Normalize for display
s.execution.target_partitions = 0;
s.execution.planning_concurrency = 0;
let mut docs = "| key | default | description |\n".to_string();
docs += "|-----|---------|-------------|\n";
let mut entries = s.entries();
entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
for entry in entries {
let _ = writeln!(
&mut docs,
"| {} | {} | {} |",
entry.key,
entry.value.as_deref().unwrap_or("NULL"),
entry.description
);
}
docs
}
}
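// Usage sketch for the API above (illustration only; the key and value are
// arbitrary). Dotted keys are routed through the namespace structs, and
// `from_env` accepts the same keys spelled as environment variables.
#[allow(dead_code)]
fn config_options_usage() -> Result<()> {
    let mut options = ConfigOptions::new();
    // The "datafusion." prefix selects the built-in namespaces declared above.
    options.set("datafusion.execution.batch_size", "1024")?;
    assert_eq!(options.execution.batch_size, 1024);
    // Equivalent via the environment: DATAFUSION_EXECUTION_BATCH_SIZE=1024.
    std::env::set_var("DATAFUSION_EXECUTION_BATCH_SIZE", "1024");
    let from_env = ConfigOptions::from_env()?;
    assert_eq!(from_env.execution.batch_size, 1024);
    Ok(())
}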
/// [`ConfigExtension`] provides a mechanism to store third-party configuration within DataFusion
///
/// Unfortunately associated constants are not currently object-safe, and so this
/// extends the object-safe [`ExtensionOptions`]
pub trait ConfigExtension: ExtensionOptions {
/// Configuration namespace prefix to use
///
/// All values under this will be prefixed with `$PREFIX + "."`
const PREFIX: &'static str;
}
/// An object-safe API for storing arbitrary configuration
pub trait ExtensionOptions: Send + Sync + std::fmt::Debug + 'static {
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any(&self) -> &dyn Any;
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Return a deep clone of this [`ExtensionOptions`]
///
/// It is important this does not share mutable state to avoid consistency issues
/// with configuration changing whilst queries are executing
fn cloned(&self) -> Box<dyn ExtensionOptions>;
/// Set the given `key`, `value` pair
fn set(&mut self, key: &str, value: &str) -> Result<()>;
/// Returns the [`ConfigEntry`] stored in this [`ExtensionOptions`]
fn entries(&self) -> Vec<ConfigEntry>;
}
/// A type-safe container for [`ConfigExtension`]
#[derive(Debug, Default, Clone)]
pub struct Extensions(BTreeMap<&'static str, ExtensionBox>);
impl Extensions {
/// Create a new, empty [`Extensions`]
pub fn new() -> Self {
Self(BTreeMap::new())
}
/// Registers a [`ConfigExtension`] with this [`ConfigOptions`]
pub fn insert<T: ConfigExtension>(&mut self, extension: T) {
assert_ne!(T::PREFIX, "datafusion");
let e = ExtensionBox(Box::new(extension));
self.0.insert(T::PREFIX, e);
}
/// Retrieves the extension of the given type if any
pub fn get<T: ConfigExtension>(&self) -> Option<&T> | {
self.0.get(T::PREFIX)?.0.as_any().downcast_ref()
} | identifier_body |
|
config.rs | down_filters: bool, default = false
/// If true, filter expressions evaluated during the parquet decoding operation
/// will be reordered heuristically to minimize the cost of evaluation. If false,
/// the filters are applied in the same order as written in the query
pub reorder_filters: bool, default = false
}
}
config_namespace! {
/// Options related to aggregate execution
pub struct AggregateOptions {
/// Specifies the threshold for using `ScalarValue`s to update
/// accumulators during high-cardinality aggregations for each input batch.
///
/// The aggregation is considered high-cardinality if the number of affected groups
/// is greater than or equal to `batch_size / scalar_update_factor`. In such cases,
/// `ScalarValue`s are utilized for updating accumulators, rather than the default
/// batch-slice approach. This can lead to performance improvements.
///
/// By adjusting the `scalar_update_factor`, you can balance the trade-off between
/// more efficient accumulator updates and the number of groups affected.
pub scalar_update_factor: usize, default = 10
}
}
config_namespace! {
/// Options related to query optimization
pub struct OptimizerOptions {
/// When set to true, the physical plan optimizer will try to add round robin
/// repartitioning to increase parallelism to leverage more CPU cores
pub enable_round_robin_repartition: bool, default = true
/// When set to true, the optimizer will insert filters before a join between
/// a nullable and non-nullable column to filter out nulls on the nullable side. This
/// filter can add additional overhead when the file format does not fully support
/// predicate push down.
pub filter_null_join_keys: bool, default = false
/// Should DataFusion repartition data using the aggregate keys to execute aggregates
/// in parallel using the provided `target_partitions` level
pub repartition_aggregations: bool, default = true
/// Minimum total file size in bytes to perform file scan repartitioning.
pub repartition_file_min_size: usize, default = 10 * 1024 * 1024
/// Should DataFusion repartition data using the join keys to execute joins in parallel
/// using the provided `target_partitions` level
pub repartition_joins: bool, default = true
/// Should DataFusion allow symmetric hash joins for unbounded data sources even when
/// its inputs do not have any ordering or filtering. If the flag is not enabled,
/// the SymmetricHashJoin operator will be unable to prune its internal buffers,
/// resulting in certain join types - such as Full, Left, LeftAnti, LeftSemi, Right,
/// RightAnti, and RightSemi - being produced only at the end of the execution.
/// This is not typical in stream processing. Additionally, without proper design for
/// long-running execution, all types of joins may encounter out-of-memory errors.
pub allow_symmetric_joins_without_pruning: bool, default = true
/// When set to `true`, file groups will be repartitioned to achieve maximum parallelism.
/// Currently Parquet and CSV formats are supported.
///
/// If set to `true`, all files will be repartitioned evenly (i.e., a single large file
/// might be partitioned into smaller chunks) for parallel scanning.
/// If set to `false`, different files will be read in parallel, but repartitioning won't
/// happen within a single file.
pub repartition_file_scans: bool, default = true
/// Should DataFusion repartition data using the partitions keys to execute window
/// functions in parallel using the provided `target_partitions` level
pub repartition_windows: bool, default = true
/// Should DataFusion execute sorts in a per-partition fashion and merge
/// afterwards instead of coalescing first and sorting globally.
/// When this flag is enabled, plans in the form below
///
/// ```text
/// "SortExec: [a@0 ASC]",
/// " CoalescePartitionsExec",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
/// would turn into the plan below which performs better in multithreaded environments
///
/// ```text
/// "SortPreservingMergeExec: [a@0 ASC]",
/// " SortExec: [a@0 ASC]",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
pub repartition_sorts: bool, default = true
/// When set to true, the logical plan optimizer will produce warning
/// messages if any optimization rules produce errors and then proceed to the next
/// rule. When set to false, any rules that produce errors will cause the query to fail
pub skip_failed_rules: bool, default = false
/// Number of times that the optimizer will attempt to optimize the plan
pub max_passes: usize, default = 3
/// When set to true, the physical plan optimizer will run a top down
/// process to reorder the join keys
pub top_down_join_key_reordering: bool, default = true
/// When set to true, the physical plan optimizer will prefer HashJoin over SortMergeJoin.
/// HashJoin can work more efficiently than SortMergeJoin but consumes more memory
pub prefer_hash_join: bool, default = true
/// The maximum estimated size in bytes for one input side of a HashJoin
/// will be collected into a single partition
pub hash_join_single_partition_threshold: usize, default = 1024 * 1024
}
}
config_namespace! {
/// Options controlling explain output
pub struct ExplainOptions {
/// When set to true, the explain statement will only print logical plans
pub logical_plan_only: bool, default = false
/// When set to true, the explain statement will only print physical plans
pub physical_plan_only: bool, default = false
}
}
/// A key value pair, with a corresponding description
#[derive(Debug)]
pub struct ConfigEntry {
/// A unique string to identify this config value
pub key: String,
/// The value if any
pub value: Option<String>,
/// A description of this configuration entry
pub description: &'static str,
}
/// Configuration options struct, able to store both built-in configuration and custom options
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct ConfigOptions {
/// Catalog options
pub catalog: CatalogOptions,
/// Execution options
pub execution: ExecutionOptions,
/// Optimizer options
pub optimizer: OptimizerOptions,
/// SQL parser options
pub sql_parser: SqlParserOptions,
/// Explain options
pub explain: ExplainOptions,
/// Optional extensions registered using [`Extensions::insert`]
pub extensions: Extensions,
}
impl ConfigField for ConfigOptions {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
// Extensions are handled in the public `ConfigOptions::set`
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
"catalog" => self.catalog.set(rem, value),
"execution" => self.execution.set(rem, value),
"optimizer" => self.optimizer.set(rem, value),
"explain" => self.explain.set(rem, value),
"sql_parser" => self.sql_parser.set(rem, value),
_ => Err(DataFusionError::Internal(format!(
"Config value \"{key}\" not found on ConfigOptions"
))),
}
}
fn visit<V: Visit>(&self, v: &mut V, _key_prefix: &str, _description: &'static str) {
self.catalog.visit(v, "datafusion.catalog", "");
self.execution.visit(v, "datafusion.execution", "");
self.optimizer.visit(v, "datafusion.optimizer", "");
self.explain.visit(v, "datafusion.explain", "");
self.sql_parser.visit(v, "datafusion.sql_parser", "");
}
}
impl ConfigOptions {
/// Creates a new [`ConfigOptions`] with default values
pub fn new() -> Self {
Self::default()
}
/// Set extensions to provided value
pub fn with_extensions(mut self, extensions: Extensions) -> Self {
self.extensions = extensions;
self
}
/// Set a configuration option
pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (prefix, key) = key.split_once('.').ok_or_else(|| {
DataFusionError::External(
format!("could not find config namespace for key \"{key}\"",).into(),
)
})?;
if prefix == "datafusion" {
return ConfigField::set(self, key, value);
}
let e = self.extensions.0.get_mut(prefix);
let e = e.ok_or_else(|| {
DataFusionError::External(
format!("Could not find config namespace \"{prefix}\"",).into(),
)
})?;
e.0.set(key, value)
}
/// Create new ConfigOptions struct, taking values from
/// environment variables where possible.
///
/// For example, setting `DATAFUSION_EXECUTION_BATCH_SIZE` will
/// control `datafusion.execution.batch_size`.
pub fn from_env() -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
// Extract the names of all fields and then look up the corresponding
// environment variables. This isn't hugely efficient but avoids
// ambiguity between `a.b` and `a_b` which would both correspond
// to an environment variable of `A_B`
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
let env = key.to_uppercase().replace('.', "_");
if let Some(var) = std::env::var_os(env) {
ret.set(&key, var.to_string_lossy().as_ref())?;
}
}
Ok(ret)
}
/// Create new ConfigOptions struct, taking values from a string hash map.
///
/// Only the built-in configurations will be extracted from the hash map
/// and other key value pairs will be ignored.
pub fn from_string_hash_map(settings: HashMap<String, String>) -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
if let Some(var) = settings.get(&key) {
ret.set(&key, var)?;
}
}
Ok(ret)
}
/// Returns the [`ConfigEntry`] stored within this [`ConfigOptions`]
pub fn entries(&self) -> Vec<ConfigEntry> {
struct Visitor(Vec<ConfigEntry>);
impl Visit for Visitor {
fn some<V: Display>(
&mut self,
key: &str,
value: V,
description: &'static str,
) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: Some(value.to_string()),
description,
})
}
fn none(&mut self, key: &str, description: &'static str) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: None,
description,
})
}
}
let mut v = Visitor(vec![]);
self.visit(&mut v, "datafusion", "");
v.0.extend(self.extensions.0.values().flat_map(|e| e.0.entries()));
v.0
}
/// Generate documentation that can be included in the user guide
pub fn generate_config_markdown() -> String {
use std::fmt::Write as _;
let mut s = Self::default();
// Normalize for display
s.execution.target_partitions = 0;
s.execution.planning_concurrency = 0;
let mut docs = "| key | default | description |\n".to_string();
docs += "|-----|---------|-------------|\n";
let mut entries = s.entries();
entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
for entry in entries {
let _ = writeln!(
&mut docs,
"| {} | {} | {} |",
entry.key,
entry.value.as_deref().unwrap_or("NULL"),
entry.description
);
}
docs
}
}
/// [`ConfigExtension`] provides a mechanism to store third-party configuration within DataFusion
///
/// Unfortunately associated constants are not currently object-safe, and so this
/// extends the object-safe [`ExtensionOptions`]
pub trait ConfigExtension: ExtensionOptions {
/// Configuration namespace prefix to use
///
/// All values under this will be prefixed with `$PREFIX + "."`
const PREFIX: &'static str;
}
/// An object-safe API for storing arbitrary configuration
pub trait ExtensionOptions: Send + Sync + std::fmt::Debug + 'static {
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any(&self) -> &dyn Any;
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Return a deep clone of this [`ExtensionOptions`]
///
/// It is important this does not share mutable state to avoid consistency issues
/// with configuration changing whilst queries are executing
fn cloned(&self) -> Box<dyn ExtensionOptions>;
/// Set the given `key`, `value` pair
fn set(&mut self, key: &str, value: &str) -> Result<()>;
/// Returns the [`ConfigEntry`] stored in this [`ExtensionOptions`]
fn entries(&self) -> Vec<ConfigEntry>;
}
/// A type-safe container for [`ConfigExtension`]
#[derive(Debug, Default, Clone)]
pub struct Extensions(BTreeMap<&'static str, ExtensionBox>);
impl Extensions {
/// Create a new, empty [`Extensions`]
pub fn new() -> Self {
Self(BTreeMap::new())
}
/// Registers a [`ConfigExtension`] with this [`ConfigOptions`]
pub fn insert<T: ConfigExtension>(&mut self, extension: T) {
assert_ne!(T::PREFIX, "datafusion");
let e = ExtensionBox(Box::new(extension));
self.0.insert(T::PREFIX, e);
}
/// Retrieves the extension of the given type if any
pub fn get<T: ConfigExtension>(&self) -> Option<&T> {
self.0.get(T::PREFIX)?.0.as_any().downcast_ref()
}
/// Retrieves the extension of the given type if any
pub fn get_mut<T: ConfigExtension>(&mut self) -> Option<&mut T> {
let e = self.0.get_mut(T::PREFIX)?;
e.0.as_any_mut().downcast_mut()
}
}
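// Sketch of a minimal third-party extension (hypothetical type; real code
// would usually generate this boilerplate with the `extensions_options!`
// macro defined later in this file).
#[derive(Debug, Clone, Default)]
struct ExampleExtension {
    enabled: bool,
}

impl ConfigExtension for ExampleExtension {
    const PREFIX: &'static str = "example";
}

impl ExtensionOptions for ExampleExtension {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn as_any_mut(&mut self) -> &mut dyn Any {
        self
    }
    fn cloned(&self) -> Box<dyn ExtensionOptions> {
        Box::new(self.clone())
    }
    fn set(&mut self, key: &str, value: &str) -> Result<()> {
        match key {
            "enabled" => {
                self.enabled = value
                    .parse()
                    .map_err(|e| DataFusionError::External(Box::new(e)))?;
                Ok(())
            }
            _ => Err(DataFusionError::Internal(format!(
                "Config value \"{key}\" not found on ExampleExtension"
            ))),
        }
    }
    fn entries(&self) -> Vec<ConfigEntry> {
        vec![ConfigEntry {
            key: format!("{}.enabled", Self::PREFIX),
            value: Some(self.enabled.to_string()),
            description: "Example flag (illustration only)",
        }]
    }
}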
#[derive(Debug)]
struct ExtensionBox(Box<dyn ExtensionOptions>);
impl Clone for ExtensionBox {
fn clone(&self) -> Self {
Self(self.0.cloned())
}
}
/// A trait implemented by `config_namespace` and for field types that provides
/// the ability to walk and mutate the configuration tree
trait ConfigField {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str);
fn set(&mut self, key: &str, value: &str) -> Result<()>;
}
impl<F: ConfigField + Default> ConfigField for Option<F> {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
match self {
Some(s) => s.visit(v, key, description),
None => v.none(key, description),
}
}
fn set(&mut self, key: &str, value: &str) -> Result<()> {
self.get_or_insert_with(Default::default).set(key, value)
}
}
macro_rules! config_field {
($t:ty) => {
impl ConfigField for $t {
fn visit<V: Visit>(&self, v: &mut V, key: &str, description: &'static str) {
v.some(key, self, description)
}
fn set(&mut self, _: &str, value: &str) -> Result<()> {
*self = value.parse().map_err(|e| {
DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new(DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
}
}
};
}
config_field!(String);
config_field!(bool);
config_field!(usize);
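// Illustration (hypothetical helper): after the expansions above, `String`,
// `bool` and `usize` leaf fields all parse from strings via `ConfigField::set`;
// the key argument is ignored at the leaf level.
#[allow(dead_code)]
fn leaf_field_parse_example() -> Result<()> {
    let mut batch_size = 0usize;
    ConfigField::set(&mut batch_size, "", "8192")?;
    assert_eq!(batch_size, 8192);
    Ok(())
}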
/// An implementation trait used to recursively walk configuration
trait Visit {
fn some<V: Display>(&mut self, key: &str, value: V, description: &'static str);
fn none(&mut self, key: &str, description: &'static str);
}
/// Convenience macro to create [`ExtensionOptions`].
///
/// The created structure implements the following traits:
///
/// - [`Clone`]
/// - [`Debug`]
/// - [`Default`]
/// - [`ExtensionOptions`]
///
/// # Usage
/// The syntax is:
///
/// ```text
/// extensions_options! {
/// /// Struct docs (optional).
/// [<vis>] struct <StructName> {
/// /// Field docs (optional)
/// [<vis>] <field_name>: <field_type>, default = <default_value>
///
/// ... more fields
/// }
/// }
/// ```
///
/// The placeholders are:
/// - `[<vis>]`: Optional visibility modifier like `pub` or `pub(crate)`.
/// - `<StructName>`: Struct name like `MyStruct`.
/// - `<field_name>`: Field name like `my_field`.
/// - `<field_type>`: Field type like `u8`.
/// - `<default_value>`: Default value matching the field type like `42`.
///
/// # Example
/// ```
/// use datafusion_common::extensions_options;
///
/// extensions_options! {
/// /// My own config options.
/// pub struct MyConfig {
/// /// Should "foo" be replaced by "bar"?
/// pub foo_to_bar: bool, default = true
///
/// /// How many "baz" should be created?
/// pub baz_count: usize, default = 1337
/// }
/// }
/// ```
///
///
/// [`Debug`]: std::fmt::Debug
/// [`ExtensionOptions`]: crate::config::ExtensionOptions
#[macro_export]
macro_rules! extensions_options {
(
$(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
impl $crate::config::ExtensionOptions for $struct_name {
fn as_any(&self) -> &dyn ::std::any::Any {
self
}
fn as_any_mut(&mut self) -> &mut dyn ::std::any::Any {
self
}
fn cloned(&self) -> Box<dyn $crate::config::ExtensionOptions> {
Box::new(self.clone())
}
fn set(&mut self, key: &str, value: &str) -> $crate::Result<()> {
match key {
$(
stringify!($field_name) => {
self.$field_name = value.parse().map_err(|e| {
$crate::DataFusionError::Context(
format!(concat!("Error parsing {} as ", stringify!($t),), value),
Box::new($crate::DataFusionError::External(Box::new(e))),
)
})?;
Ok(())
} | )*
_ => Err($crate::DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
} | random_line_split |
|
config.rs | $(#[doc = $struct_d:tt])*
$vis:vis struct $struct_name:ident {
$(
$(#[doc = $d:tt])*
$field_vis:vis $field_name:ident : $field_type:ty, default = $default:expr
)*$(,)*
}
) => {
$(#[doc = $struct_d])*
#[derive(Debug, Clone)]
#[non_exhaustive]
$vis struct $struct_name{
$(
$(#[doc = $d])*
$field_vis $field_name : $field_type,
)*
}
impl ConfigField for $struct_name {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
$(
stringify!($field_name) => self.$field_name.set(rem, value),
)*
_ => Err(DataFusionError::Internal(
format!(concat!("Config value \"{}\" not found on ", stringify!($struct_name)), key)
))
}
}
fn visit<V: Visit>(&self, v: &mut V, key_prefix: &str, _description: &'static str) {
$(
let key = format!(concat!("{}.", stringify!($field_name)), key_prefix);
let desc = concat!($($d),*).trim();
self.$field_name.visit(v, key.as_str(), desc);
)*
}
}
impl Default for $struct_name {
fn default() -> Self {
Self {
$($field_name: $default),*
}
}
}
}
}
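// Minimal illustrative invocation of the macro above (this namespace is
// hypothetical and not used by DataFusion): it expands to a struct with
// `Default` and `ConfigField` impls, so dotted-key `set` and visitation work.
config_namespace! {
    /// Example namespace used only to illustrate `config_namespace!`.
    pub struct DemoOptions {
        /// An example knob
        pub answer: usize, default = 42
    }
}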
config_namespace! {
/// Options related to catalog and directory scanning
pub struct CatalogOptions {
/// Whether the default catalog and schema should be created automatically.
pub create_default_catalog_and_schema: bool, default = true
/// The default catalog name - this impacts what SQL queries use if not specified
pub default_catalog: String, default = "datafusion".to_string()
/// The default schema name - this impacts what SQL queries use if not specified
pub default_schema: String, default = "public".to_string()
/// Should DataFusion provide access to `information_schema`
/// virtual tables for displaying schema information
pub information_schema: bool, default = false
/// Location scanned to load tables for `default` schema
pub location: Option<String>, default = None
/// Type of `TableProvider` to use when loading `default` schema
pub format: Option<String>, default = None
/// If the file has a header
pub has_header: bool, default = false
}
}
config_namespace! {
/// Options related to SQL parser
pub struct SqlParserOptions {
/// When set to true, the SQL parser will parse floats as decimal type
pub parse_float_as_decimal: bool, default = false
/// When set to true, the SQL parser will normalize identifiers (convert them to lowercase when not quoted)
pub enable_ident_normalization: bool, default = true
/// Configure the SQL dialect used by DataFusion's parser; supported values include: Generic,
/// MySQL, PostgreSQL, Hive, SQLite, Snowflake, Redshift, MsSQL, ClickHouse, BigQuery, and Ansi.
pub dialect: String, default = "generic".to_string()
}
}
config_namespace! {
/// Options related to query execution
pub struct ExecutionOptions {
/// Default batch size while creating new batches. This is especially useful for
/// buffered in-memory batches, since creating tiny batches would result in too much
/// metadata memory consumption
pub batch_size: usize, default = 8192
/// When set to true, record batches will be examined between each operator and
/// small batches will be coalesced into larger batches. This is helpful when there
/// are highly selective filters or joins that could produce tiny output batches. The
/// target batch size is determined by the `batch_size` configuration setting
pub coalesce_batches: bool, default = true
/// Should DataFusion collect statistics after listing files
pub collect_statistics: bool, default = false
/// Number of partitions for query execution. Increasing partitions can increase
/// concurrency.
///
/// Defaults to the number of CPU cores on the system
pub target_partitions: usize, default = num_cpus::get()
/// The default time zone
///
/// Some functions, e.g. `EXTRACT(HOUR from SOME_TIME)`, shift the underlying datetime
/// according to this time zone, and then extract the hour
pub time_zone: Option<String>, default = Some("+00:00".into())
/// Parquet options
pub parquet: ParquetOptions, default = Default::default()
/// Aggregate options
pub aggregate: AggregateOptions, default = Default::default()
/// Fan-out during initial physical planning.
///
/// This is mostly used to plan `UNION` children in parallel.
///
/// Defaults to the number of CPU cores on the system
pub planning_concurrency: usize, default = num_cpus::get()
}
}
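// Worked example of the `time_zone` semantics documented above (values are
// illustrative): with the default "+00:00", EXTRACT(HOUR FROM TIMESTAMP
// '2023-01-01T23:30:00Z') yields 23; setting the option to "+02:00" shifts
// the same instant to 01:30 local time first, so the same query yields 1.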
config_namespace! {
/// Options related to reading of parquet files
pub struct ParquetOptions {
/// If true, reads the Parquet data page level metadata (the
/// Page Index), if present, to reduce the I/O and number of
/// rows decoded.
pub enable_page_index: bool, default = true
/// If true, the parquet reader attempts to skip entire row groups based
/// on the predicate in the query and the metadata (min/max values) stored in
/// the parquet file
pub pruning: bool, default = true
/// If true, the parquet reader skips the optional embedded metadata that may be in
/// the file Schema. This setting can help avoid schema conflicts when querying
/// multiple parquet files with schemas containing compatible types but different metadata
pub skip_metadata: bool, default = true
/// If specified, the parquet reader will try and fetch the last `size_hint`
/// bytes of the parquet file optimistically. If not specified, two reads are required:
/// One read to fetch the 8-byte parquet footer and
/// another to fetch the metadata length encoded in the footer
pub metadata_size_hint: Option<usize>, default = None
/// If true, filter expressions are applied during the parquet decoding operation to
/// reduce the number of rows decoded
pub pushdown_filters: bool, default = false
/// If true, filter expressions evaluated during the parquet decoding operation
/// will be reordered heuristically to minimize the cost of evaluation. If false,
/// the filters are applied in the same order as written in the query
pub reorder_filters: bool, default = false
}
}
config_namespace! {
/// Options related to aggregate execution
pub struct AggregateOptions {
/// Specifies the threshold for using `ScalarValue`s to update
/// accumulators during high-cardinality aggregations for each input batch.
///
/// The aggregation is considered high-cardinality if the number of affected groups
/// is greater than or equal to `batch_size / scalar_update_factor`. In such cases,
/// `ScalarValue`s are utilized for updating accumulators, rather than the default
/// batch-slice approach. This can lead to performance improvements.
///
/// By adjusting the `scalar_update_factor`, you can balance the trade-off between
/// more efficient accumulator updates and the number of groups affected.
pub scalar_update_factor: usize, default = 10
}
}
config_namespace! {
/// Options related to query optimization
pub struct OptimizerOptions {
/// When set to true, the physical plan optimizer will try to add round robin
/// repartitioning to increase parallelism to leverage more CPU cores
pub enable_round_robin_repartition: bool, default = true
/// When set to true, the optimizer will insert filters before a join between
/// a nullable and non-nullable column to filter out nulls on the nullable side. This
/// filter can add additional overhead when the file format does not fully support
/// predicate push down.
pub filter_null_join_keys: bool, default = false
/// Should DataFusion repartition data using the aggregate keys to execute aggregates
/// in parallel using the provided `target_partitions` level
pub repartition_aggregations: bool, default = true
/// Minimum total file size in bytes to perform file scan repartitioning.
pub repartition_file_min_size: usize, default = 10 * 1024 * 1024
/// Should DataFusion repartition data using the join keys to execute joins in parallel
/// using the provided `target_partitions` level
pub repartition_joins: bool, default = true
/// Should DataFusion allow symmetric hash joins for unbounded data sources even when
/// its inputs do not have any ordering or filtering. If the flag is not enabled,
/// the SymmetricHashJoin operator will be unable to prune its internal buffers,
/// resulting in certain join types - such as Full, Left, LeftAnti, LeftSemi, Right,
/// RightAnti, and RightSemi - being produced only at the end of the execution.
/// This is not typical in stream processing. Additionally, without proper design for
/// long-running execution, all types of joins may encounter out-of-memory errors.
pub allow_symmetric_joins_without_pruning: bool, default = true
/// When set to `true`, file groups will be repartitioned to achieve maximum parallelism.
/// Currently Parquet and CSV formats are supported.
///
/// If set to `true`, all files will be repartitioned evenly (i.e., a single large file
/// might be partitioned into smaller chunks) for parallel scanning.
/// If set to `false`, different files will be read in parallel, but repartitioning won't
/// happen within a single file.
pub repartition_file_scans: bool, default = true
/// Should DataFusion repartition data using the partitions keys to execute window
/// functions in parallel using the provided `target_partitions` level
pub repartition_windows: bool, default = true
/// Should DataFusion execute sorts in a per-partition fashion and merge
/// afterwards instead of coalescing first and sorting globally.
/// When this flag is enabled, plans in the form below
///
/// ```text
/// "SortExec: [a@0 ASC]",
/// " CoalescePartitionsExec",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
/// would turn into the plan below which performs better in multithreaded environments
///
/// ```text
/// "SortPreservingMergeExec: [a@0 ASC]",
/// " SortExec: [a@0 ASC]",
/// " RepartitionExec: partitioning=RoundRobinBatch(8), input_partitions=1",
/// ```
pub repartition_sorts: bool, default = true
/// When set to true, the logical plan optimizer will produce warning
/// messages if any optimization rules produce errors and then proceed to the next
/// rule. When set to false, any rules that produce errors will cause the query to fail
pub skip_failed_rules: bool, default = false
/// Number of times that the optimizer will attempt to optimize the plan
pub max_passes: usize, default = 3
/// When set to true, the physical plan optimizer will run a top down
/// process to reorder the join keys
pub top_down_join_key_reordering: bool, default = true
/// When set to true, the physical plan optimizer will prefer HashJoin over SortMergeJoin.
/// HashJoin can work more efficiently than SortMergeJoin but consumes more memory
pub prefer_hash_join: bool, default = true
/// The maximum estimated size in bytes for one input side of a HashJoin
/// will be collected into a single partition
pub hash_join_single_partition_threshold: usize, default = 1024 * 1024
}
}
config_namespace! {
/// Options controlling explain output
pub struct ExplainOptions {
/// When set to true, the explain statement will only print logical plans
pub logical_plan_only: bool, default = false
/// When set to true, the explain statement will only print physical plans
pub physical_plan_only: bool, default = false
}
}
/// A key value pair, with a corresponding description
#[derive(Debug)]
pub struct ConfigEntry {
/// A unique string to identify this config value
pub key: String,
/// The value if any
pub value: Option<String>,
/// A description of this configuration entry
pub description: &'static str,
}
/// Configuration options struct, able to store both built-in configuration and custom options
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct ConfigOptions {
/// Catalog options
pub catalog: CatalogOptions,
/// Execution options
pub execution: ExecutionOptions,
/// Optimizer options
pub optimizer: OptimizerOptions,
/// SQL parser options
pub sql_parser: SqlParserOptions,
/// Explain options
pub explain: ExplainOptions,
/// Optional extensions registered using [`Extensions::insert`]
pub extensions: Extensions,
}
impl ConfigField for ConfigOptions {
fn set(&mut self, key: &str, value: &str) -> Result<()> {
// Extensions are handled in the public `ConfigOptions::set`
let (key, rem) = key.split_once('.').unwrap_or((key, ""));
match key {
"catalog" => self.catalog.set(rem, value),
"execution" => self.execution.set(rem, value),
"optimizer" => self.optimizer.set(rem, value),
"explain" => self.explain.set(rem, value),
"sql_parser" => self.sql_parser.set(rem, value),
_ => Err(DataFusionError::Internal(format!(
"Config value \"{key}\" not found on ConfigOptions"
))),
}
}
fn visit<V: Visit>(&self, v: &mut V, _key_prefix: &str, _description: &'static str) {
self.catalog.visit(v, "datafusion.catalog", "");
self.execution.visit(v, "datafusion.execution", "");
self.optimizer.visit(v, "datafusion.optimizer", "");
self.explain.visit(v, "datafusion.explain", "");
self.sql_parser.visit(v, "datafusion.sql_parser", "");
}
}
impl ConfigOptions {
/// Creates a new [`ConfigOptions`] with default values
pub fn new() -> Self {
Self::default()
}
/// Set extensions to provided value
pub fn with_extensions(mut self, extensions: Extensions) -> Self {
self.extensions = extensions;
self
}
/// Set a configuration option
pub fn set(&mut self, key: &str, value: &str) -> Result<()> {
let (prefix, key) = key.split_once('.').ok_or_else(|| {
DataFusionError::External(
format!("could not find config namespace for key \"{key}\"",).into(),
)
})?;
if prefix == "datafusion" {
return ConfigField::set(self, key, value);
}
let e = self.extensions.0.get_mut(prefix);
let e = e.ok_or_else(|| {
DataFusionError::External(
format!("Could not find config namespace \"{prefix}\"",).into(),
)
})?;
e.0.set(key, value)
}
/// Create new ConfigOptions struct, taking values from
/// environment variables where possible.
///
/// For example, setting `DATAFUSION_EXECUTION_BATCH_SIZE` will
/// control `datafusion.execution.batch_size`.
pub fn from_env() -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
// Extract the names of all fields and then look up the corresponding
// environment variables. This isn't hugely efficient but avoids
// ambiguity between `a.b` and `a_b` which would both correspond
// to an environment variable of `A_B`
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
let env = key.to_uppercase().replace('.', "_");
if let Some(var) = std::env::var_os(env) {
ret.set(&key, var.to_string_lossy().as_ref())?;
}
}
Ok(ret)
}
/// Create new ConfigOptions struct, taking values from a string hash map.
///
/// Only the built-in configurations will be extracted from the hash map
/// and other key value pairs will be ignored.
pub fn from_string_hash_map(settings: HashMap<String, String>) -> Result<Self> {
struct Visitor(Vec<String>);
impl Visit for Visitor {
fn some<V: Display>(&mut self, key: &str, _: V, _: &'static str) {
self.0.push(key.to_string())
}
fn none(&mut self, key: &str, _: &'static str) {
self.0.push(key.to_string())
}
}
let mut keys = Visitor(vec![]);
let mut ret = Self::default();
ret.visit(&mut keys, "datafusion", "");
for key in keys.0 {
if let Some(var) = settings.get(&key) {
ret.set(&key, var)?;
}
}
Ok(ret)
}
/// Returns the [`ConfigEntry`] stored within this [`ConfigOptions`]
pub fn entries(&self) -> Vec<ConfigEntry> {
struct Visitor(Vec<ConfigEntry>);
impl Visit for Visitor {
fn some<V: Display>(
&mut self,
key: &str,
value: V,
description: &'static str,
) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: Some(value.to_string()),
description,
})
}
fn | (&mut self, key: &str, description: &'static str) {
self.0.push(ConfigEntry {
key: key.to_string(),
value: None,
description,
})
}
}
let mut v = Visitor(vec![]);
self.visit(&mut v, "datafusion", "");
v.0.extend(self.extensions.0.values().flat_map(|e| e.0.entries()));
v.0
}
/// Generate documentation that can be included in the user guide
pub fn generate_config_markdown() -> String {
use std::fmt::Write as _;
let mut s = Self::default();
// Normalize for display
s.execution.target_partitions = 0;
s.execution.planning_concurrency = 0;
let mut docs = "| key | default | description |\n".to_string();
docs += "|-----|---------|-------------|\n";
let mut entries = s.entries();
entries.sort_unstable_by(|a, b| a.key.cmp(&b.key));
for entry in entries {
let _ = writeln!(
&mut docs,
"| {} | {} | {} |",
entry.key,
entry.value.as_deref().unwrap_or("NULL"),
entry.description
);
}
docs
}
}
/// [`ConfigExtension`] provides a mechanism to store third-party configuration within DataFusion
///
/// Unfortunately associated constants are not currently object-safe, and so this
/// extends the object-safe [`ExtensionOptions`]
pub trait ConfigExtension: ExtensionOptions {
/// Configuration namespace prefix to use
///
/// All values under this will be prefixed with `$PREFIX + "."`
const PREFIX: &'static str;
}
/// An object-safe API for storing arbitrary configuration
pub trait ExtensionOptions: Send + Sync + std::fmt::Debug + 'static {
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any(&self) -> &dyn Any;
/// Return `self` as [`Any`]
///
/// This is needed until trait upcasting is stabilised
fn as_any_mut(&mut self) -> &mut dyn Any;
/// Return a deep clone of this [`ExtensionOptions`]
///
/// It is important this does not share mutable state to avoid consistency issues
/// with configuration changing whilst queries are executing
fn cloned(&self) -> Box<dyn ExtensionOptions>;
/// Set the given `key`, `value` pair
fn set(&mut self, key: &str, value: &str) -> Result<()>;
/// Returns the [`ConfigEntry`] stored in this [`ExtensionOptions`]
fn entries(&self) -> Vec<ConfigEntry>;
}
/// A type-safe container for [`ConfigExtension`]
#[derive(Debug, Default, Clone)]
pub struct Extensions(BTreeMap<&'static str, ExtensionBox>);
impl Extensions {
/// Create a new, empty [`Extensions`]
pub fn new() -> Self {
Self(BTreeMap::new())
}
/// Registers a [`ConfigExtension`] with this [`ConfigOptions`]
pub fn insert<T: ConfigExtension>(&mut self, extension: T) {
assert_ne!(T::PREFIX, "datafusion");
let e = ExtensionBox(Box::new(extension));
self.0.insert(T::PREFIX, e);
}
/// Retrieves the extension of the given type if any
pub fn get<T: ConfigExtension>(&self) -> Option<&T> {
self.0.get(T::PREFIX)?.0.as_ | none | identifier_name |
glium_backend.rs | );
let rect = glium::Rect {
left: 0,
bottom: 0,
width: img.size.width,
height: img.size.height,
};
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels.clone(),
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
self.textures[texture].write(rect, raw);
}
/// Make a new internal texture using image data.
pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex {
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels,
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Update or construct textures based on changes in atlas cache.
pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>(
&mut self,
atlas_cache: &mut AtlasCache<T>,
) {
for a in atlas_cache.atlases_mut() {
let idx = a.texture();
// If there are sheets in the atlas that don't have corresponding textures yet,
// construct those now.
while idx >= self.texture_count() {
self.make_empty_texture(a.size().width, a.size().height);
}
// Write the updated texture atlas to internal texture.
a.update_texture(|buf, idx| self.write_to_texture(buf, idx));
}
}
fn process_events(&mut self, core: &mut Core<V>) -> bool {
self.keypress.clear();
// polling and handling the events received by the window
let mut event_list = Vec::new();
self.events.poll_events(|event| event_list.push(event));
for e in event_list {
match e {
Event::WindowEvent {
ref event,
window_id,
}
if window_id == self.display.gl_window().id() =>
{
match event {
&WindowEvent::CloseRequested => return false,
&WindowEvent::CursorMoved { position,.. } => {
let position =
position.to_physical(self.display.gl_window().get_hidpi_factor());
let pos = self.zoom.screen_to_canvas(
self.window_size,
self.canvas.size(),
Point2D::new(position.x as f32, position.y as f32),
);
core.input_mouse_move(pos.x as i32, pos.y as i32);
}
&WindowEvent::MouseInput { state, button,.. } => core.input_mouse_button(
match button {
glutin::MouseButton::Left => MouseButton::Left,
glutin::MouseButton::Right => MouseButton::Right,
_ => MouseButton::Middle,
},
state == glutin::ElementState::Pressed,
),
&WindowEvent::ReceivedCharacter(c) => core.input_char(c),
&WindowEvent::KeyboardInput {
input:
glutin::KeyboardInput {
state,
scancode,
virtual_keycode,
..
},
..
} => {
self.keypress.push(KeyEvent {
state,
scancode: scancode as u8,
virtual_keycode,
});
let is_down = state == glutin::ElementState::Pressed;
use glium::glutin::VirtualKeyCode::*;
if let Some(vk) = match virtual_keycode {
Some(Tab) => Some(Keycode::Tab),
Some(LShift) | Some(RShift) => Some(Keycode::Shift),
Some(LControl) | Some(RControl) => Some(Keycode::Ctrl),
Some(NumpadEnter) | Some(Return) => Some(Keycode::Enter),
Some(Back) => Some(Keycode::Backspace),
Some(Delete) => Some(Keycode::Del),
Some(Numpad8) | Some(Up) => Some(Keycode::Up),
Some(Numpad2) | Some(Down) => Some(Keycode::Down),
Some(Numpad4) | Some(Left) => Some(Keycode::Left),
Some(Numpad6) | Some(Right) => Some(Keycode::Right),
_ => None,
} {
core.input_key_state(vk, is_down);
}
}
_ => (),
}
}
// Events in other windows, ignore
Event::WindowEvent { .. } => {}
Event::Awakened => {
// TODO: Suspend/awaken behavior
}
Event::DeviceEvent { .. } => {}
Event::Suspended(_) => {}
}
}
true
}
/// Return the next keypress event if there is one.
pub fn poll_key(&mut self) -> Option<KeyEvent> { self.keypress.pop() }
fn render(&mut self, core: &mut Core<V>) {
let mut target = self.canvas.get_framebuffer_target(&self.display);
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = target.get_dimensions();
for batch in core.end_frame() {
// building the uniforms
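// The matrix below maps pixel coordinates (origin at the top-left,
// x right, y down) into OpenGL clip space (-1..1 with y up), so draw
// batches can be specified directly in screen pixels.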
let uniforms = uniform! {
matrix: [
[2.0 / w as f32, 0.0, 0.0, -1.0],
[0.0, -2.0 / h as f32, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0f32]
],
tex: glium::uniforms::Sampler::new(&self.textures[batch.texture])
.magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest),
};
let vertex_buffer =
{ glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() };
// building the index buffer
let index_buffer = glium::IndexBuffer::new(
&self.display,
PrimitiveType::TrianglesList,
&batch.triangle_indices,
).unwrap();
let params = glium::draw_parameters::DrawParameters {
scissor: batch.clip.map(|clip| glium::Rect {
left: clip.origin.x as u32,
bottom: h - (clip.origin.y + clip.size.height) as u32,
width: clip.size.width as u32,
height: clip.size.height as u32,
}),
blend: glium::Blend::alpha_blending(),
..Default::default()
};
target
.draw(
&vertex_buffer,
&index_buffer,
&self.program,
&uniforms,
¶ms,
)
.unwrap();
}
}
fn update_window_size(&mut self) {
let (w, h) = get_size(&self.display);
self.window_size = Size2D::new(w, h);
}
/// Display the backend and read input events.
pub fn update(&mut self, core: &mut Core<V>) -> bool {
self.update_window_size();
self.render(core);
self.canvas.draw(&self.display, self.zoom);
self.process_events(core)
}
/// Return an image for the current contents of the screen.
pub fn screenshot(&self) -> ImageBuffer { self.canvas.screenshot() }
}
/// Type for key events not handled by Vitral.
#[derive(Debug)]
pub struct KeyEvent {
/// Was the key pressed or released
pub state: glutin::ElementState,
/// Layout-dependent keycode
pub virtual_keycode: Option<glutin::VirtualKeyCode>,
/// Keyboard layout independent hardware scancode for the key
pub scancode: u8,
}
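// Usage sketch (hypothetical caller): draining the key events that Vitral
// did not consume, e.g. to implement application-level shortcuts.
//
//     while let Some(KeyEvent { state, virtual_keycode, .. }) = backend.poll_key() {
//         if state == glutin::ElementState::Pressed {
//             println!("pressed {:?}", virtual_keycode);
//         }
//     }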
/// Shader program for the `DefaultVertex` type
pub const DEFAULT_SHADER: glium::program::SourceCode = glium::program::SourceCode {
vertex_shader: "
#version 150 core
uniform mat4 matrix;
in vec2 pos;
in vec4 color;
in vec2 tex_coord;
out vec4 v_color;
out vec2 v_tex_coord;
void main() {
gl_Position = vec4(pos, 0.0, 1.0) * matrix;
v_color = color;
v_tex_coord = tex_coord;
}",
fragment_shader: "
#version 150 core
uniform sampler2D tex;
in vec4 v_color;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
// Discard fully transparent pixels to keep them from
// writing into the depth buffer.
if (tex_color.a == 0.0) discard;
f_color = v_color * tex_color;
}",
tessellation_control_shader: None,
tessellation_evaluation_shader: None,
geometry_shader: None,
};
/// A regular vertex that implements exactly the fields used by Vitral.
#[derive(Copy, Clone)]
pub struct DefaultVertex {
/// 2D position
pub pos: [f32; 2],
/// Texture coordinates
pub tex_coord: [f32; 2],
/// RGBA color
pub color: Color,
}
implement_vertex!(DefaultVertex, pos, tex_coord, color);
impl Vertex for DefaultVertex {
fn new(pos: Point2D<f32>, tex_coord: Point2D<f32>, color: Color) -> Self {
DefaultVertex {
pos: [pos.x, pos.y],
tex_coord: [tex_coord.x, tex_coord.y],
color,
}
}
}
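// Usage sketch (hypothetical function): building a vertex via the trait
// constructor above. The position is in canvas pixels and the texture
// coordinate addresses the atlas in the 0..1 range; `color` is whatever
// RGBA type `Color` aliases in this crate.
#[allow(dead_code)]
fn default_vertex_example(color: Color) -> DefaultVertex {
    DefaultVertex::new(Point2D::new(10.0, 20.0), Point2D::new(0.0, 0.0), color)
}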
/// A deferred rendering buffer for pixel-perfect display.
struct Canvas {
size: Size2D<u32>,
buffer: glium::texture::SrgbTexture2d,
depth_buffer: glium::framebuffer::DepthRenderBuffer,
shader: glium::Program,
}
impl Canvas {
pub fn new(display: &glium::Display, width: u32, height: u32) -> Canvas {
let shader = program!(
display,
150 => {
vertex: "
#version 150 core
in vec2 pos;
in vec2 tex_coord;
out vec2 v_tex_coord;
void main() {
v_tex_coord = tex_coord;
gl_Position = vec4(pos, 0.0, 1.0);
}",
fragment: "
#version 150 core
uniform sampler2D tex;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
tex_color.a = 1.0;
f_color = tex_color;
}"})
.unwrap();
let buffer = glium::texture::SrgbTexture2d::empty(display, width, height).unwrap();
let depth_buffer = glium::framebuffer::DepthRenderBuffer::new(
display,
glium::texture::DepthFormat::F32,
width,
height,
).unwrap();
Canvas {
size: Size2D::new(width, height),
buffer,
depth_buffer,
shader,
}
}
/// Get the render target to the pixel-perfect framebuffer.
pub fn get_framebuffer_target(
&mut self,
display: &glium::Display,
) -> glium::framebuffer::SimpleFrameBuffer {
glium::framebuffer::SimpleFrameBuffer::with_depth_buffer(
display,
&self.buffer,
&self.depth_buffer,
).unwrap()
}
pub fn draw(&mut self, display: &glium::Display, zoom: CanvasZoom) {
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = get_size(display);
// Build the geometry for the on-screen rectangle.
let s_rect = zoom.fit_canvas(Size2D::new(w, h), self.size);
let (sx, sy) = (s_rect.origin.x, s_rect.origin.y);
let (sw, sh) = (s_rect.size.width, s_rect.size.height);
// XXX: This could use glium::Surface::blit_whole_color_to instead of
// the handmade blitting, but that was buggy on Windows around
// 2015-03.
let vertices = {
#[derive(Copy, Clone)]
struct BlitVertex {
pos: [f32; 2],
tex_coord: [f32; 2],
}
implement_vertex!(BlitVertex, pos, tex_coord);
glium::VertexBuffer::new(
display,
&[
BlitVertex {
pos: [sx, sy],
tex_coord: [0.0, 0.0],
},
BlitVertex {
pos: [sx + sw, sy],
tex_coord: [1.0, 0.0],
},
BlitVertex {
pos: [sx + sw, sy + sh],
tex_coord: [1.0, 1.0],
},
BlitVertex {
pos: [sx, sy + sh],
tex_coord: [0.0, 1.0],
},
],
).unwrap()
};
let indices = glium::IndexBuffer::new(
display,
glium::index::PrimitiveType::TrianglesList,
&[0u16, 1, 2, 0, 2, 3],
).unwrap();
// Set up the rest of the draw parameters.
let mut params: glium::DrawParameters = Default::default();
// Set an explicit viewport to apply the custom resolution that fixes
// pixel perfect rounding errors.
params.viewport = Some(glium::Rect {
left: 0,
bottom: 0,
width: w,
height: h,
});
// TODO: Option to use smooth filter & non-pixel-perfect scaling
let mag_filter = glium::uniforms::MagnifySamplerFilter::Nearest;
let uniforms = glium::uniforms::UniformsStorage::new(
"tex",
glium::uniforms::Sampler(
&self.buffer,
glium::uniforms::SamplerBehavior {
magnify_filter: mag_filter,
minify_filter: glium::uniforms::MinifySamplerFilter::Linear,
..Default::default()
},
),
);
// Draw the graphics buffer to the window.
target
.draw(&vertices, &indices, &self.shader, &uniforms, ¶ms)
.unwrap();
target.finish().unwrap();
}
pub fn | size | identifier_name |
|
glium_backend.rs | ///
/// The custom shader must support a uniform named `tex` for texture data.
pub fn start<'a, S, P>(
width: u32,
height: u32,
title: S,
shader: P,
) -> Result<Backend<V>, Box<Error>>
where
S: Into<String>,
P: Into<glium::program::ProgramCreationInput<'a>>,
{
let events = glutin::EventsLoop::new();
let window = glutin::WindowBuilder::new().with_title(title);
let context = glutin::ContextBuilder::new()
.with_gl(glutin::GlRequest::Specific(glutin::Api::OpenGl, (3, 2)));
let display = glium::Display::new(window, context, &events)?;
let program = glium::Program::new(&display, shader.into())?;
{
// Start the window as a good fit on the primary monitor.
// Don't make it a completely fullscreen window, that might put the window title bar
// outside the screen.
const BUFFER: u32 = 8;
let monitor_size = display
.gl_window()
.window()
.get_primary_monitor()
.get_dimensions();
let monitor_size = Size2D::new(monitor_size.width as u32, monitor_size.height as u32);
let mut dim = Size2D::new(width, height);
while dim.width + width <= monitor_size.width - BUFFER
&& dim.height + height <= monitor_size.height - BUFFER
{
dim.width += width;
dim.height += height;
}
display
.gl_window()
.set_inner_size(LogicalSize::new(dim.width as f64, dim.height as f64));
display.gl_window().set_position(LogicalPosition::new(
(monitor_size.width - dim.width) as f64 / 2.0,
(monitor_size.height - dim.height) as f64 / 2.0,
));
}
Ok(Backend::new(display, events, program, width, height))
}
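// For the default vertex type, a caller could bring the backend up roughly like
// this (a sketch: the window size and title are arbitrary, error handling elided):
// let mut backend: Backend<DefaultVertex> =
//     Backend::start(640, 360, "demo", DEFAULT_SHADER).unwrap();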
/// Return the pixel resolution of the backend.
///
/// Note that this is the logical size which will stay the same even when the
/// desktop window is resized.
pub fn canvas_size(&self) -> Size2D<u32> { self.canvas.size }
/// Return the current number of textures.
pub fn texture_count(&self) -> usize { self.textures.len() }
/// Make a new empty internal texture.
///
/// The new `TextureIndex` must equal the value `self.texture_count()` would have returned
/// just before calling this.
pub fn make_empty_texture(&mut self, width: u32, height: u32) -> TextureIndex {
let tex = glium::texture::SrgbTexture2d::empty(&self.display, width, height).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Rewrite an internal texture.
pub fn write_to_texture(&mut self, img: &ImageBuffer, texture: TextureIndex) {
assert!(
texture < self.textures.len(),
"Trying to write nonexistent texture"
);
let rect = glium::Rect {
left: 0,
bottom: 0,
width: img.size.width,
height: img.size.height,
};
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels.clone(),
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
self.textures[texture].write(rect, raw);
}
/// Make a new internal texture using image data.
pub fn make_texture(&mut self, img: ImageBuffer) -> TextureIndex {
let mut raw = glium::texture::RawImage2d::from_raw_rgba(
img.pixels,
(img.size.width, img.size.height),
);
raw.format = glium::texture::ClientFormat::U8U8U8U8;
let tex = glium::texture::SrgbTexture2d::new(&self.display, raw).unwrap();
self.textures.push(tex);
self.textures.len() - 1
}
/// Update or construct textures based on changes in atlas cache.
pub fn sync_with_atlas_cache<T: Eq + Hash + Clone + Debug>(
&mut self,
atlas_cache: &mut AtlasCache<T>,
) {
for a in atlas_cache.atlases_mut() {
let idx = a.texture();
// If there are sheets in the atlas that don't have corresponding textures yet,
// construct those now.
while idx >= self.texture_count() {
self.make_empty_texture(a.size().width, a.size().height);
}
// Write the updated texture atlas to internal texture.
a.update_texture(|buf, idx| self.write_to_texture(buf, idx));
}
}
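// Note: the sync above leans on the invariant documented at `make_empty_texture`:
// atlas sheet N always lands at `self.textures[N]`. That holds because textures
// are only ever appended, never removed or reordered.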
fn process_events(&mut self, core: &mut Core<V>) -> bool {
self.keypress.clear();
// polling and handling the events received by the window
let mut event_list = Vec::new();
self.events.poll_events(|event| event_list.push(event));
for e in event_list {
match e {
Event::WindowEvent {
ref event,
window_id,
}
if window_id == self.display.gl_window().id() =>
{
match event {
&WindowEvent::CloseRequested => return false,
&WindowEvent::CursorMoved { position, .. } => {
let position =
position.to_physical(self.display.gl_window().get_hidpi_factor());
let pos = self.zoom.screen_to_canvas(
self.window_size,
self.canvas.size(),
Point2D::new(position.x as f32, position.y as f32),
);
core.input_mouse_move(pos.x as i32, pos.y as i32);
}
&WindowEvent::MouseInput { state, button, .. } => core.input_mouse_button(
match button {
glutin::MouseButton::Left => MouseButton::Left,
glutin::MouseButton::Right => MouseButton::Right,
_ => MouseButton::Middle,
},
state == glutin::ElementState::Pressed,
),
&WindowEvent::ReceivedCharacter(c) => core.input_char(c),
&WindowEvent::KeyboardInput {
input:
glutin::KeyboardInput {
state,
scancode,
virtual_keycode,
..
},
..
} => {
self.keypress.push(KeyEvent {
state,
scancode: scancode as u8,
virtual_keycode,
});
let is_down = state == glutin::ElementState::Pressed;
use glium::glutin::VirtualKeyCode::*;
if let Some(vk) = match virtual_keycode {
Some(Tab) => Some(Keycode::Tab),
Some(LShift) | Some(RShift) => Some(Keycode::Shift),
Some(LControl) | Some(RControl) => Some(Keycode::Ctrl),
Some(NumpadEnter) | Some(Return) => Some(Keycode::Enter),
Some(Back) => Some(Keycode::Backspace),
Some(Delete) => Some(Keycode::Del),
Some(Numpad8) | Some(Up) => Some(Keycode::Up),
Some(Numpad2) | Some(Down) => Some(Keycode::Down),
Some(Numpad4) | Some(Left) => Some(Keycode::Left),
Some(Numpad6) | Some(Right) => Some(Keycode::Right),
_ => None,
} {
core.input_key_state(vk, is_down);
}
}
_ => (),
}
}
// Events in other windows, ignore
Event::WindowEvent { .. } => {}
Event::Awakened => {
// TODO: Suspend/awaken behavior
}
Event::DeviceEvent { .. } => {}
Event::Suspended(_) => {}
}
}
true
}
/// Return the next keypress event if there is one.
pub fn poll_key(&mut self) -> Option<KeyEvent> { self.keypress.pop() }
fn render(&mut self, core: &mut Core<V>) {
let mut target = self.canvas.get_framebuffer_target(&self.display);
target.clear_color(0.0, 0.0, 0.0, 0.0);
let (w, h) = target.get_dimensions();
for batch in core.end_frame() {
// building the uniforms
let uniforms = uniform! {
matrix: [
[2.0 / w as f32, 0.0, 0.0, -1.0],
[0.0, -2.0 / h as f32, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0f32]
],
tex: glium::uniforms::Sampler::new(&self.textures[batch.texture])
.magnify_filter(glium::uniforms::MagnifySamplerFilter::Nearest),
};
let vertex_buffer =
{ glium::VertexBuffer::new(&self.display, &batch.vertices).unwrap() };
// building the index buffer
let index_buffer = glium::IndexBuffer::new(
&self.display,
PrimitiveType::TrianglesList,
&batch.triangle_indices,
).unwrap();
let params = glium::draw_parameters::DrawParameters {
scissor: batch.clip.map(|clip| glium::Rect {
left: clip.origin.x as u32,
bottom: h - (clip.origin.y + clip.size.height) as u32,
width: clip.size.width as u32,
height: clip.size.height as u32,
}),
blend: glium::Blend::alpha_blending(),
..Default::default()
};
target
.draw(
&vertex_buffer,
&index_buffer,
&self.program,
&uniforms,
¶ms,
)
.unwrap();
}
}
fn update_window_size(&mut self) {
let (w, h) = get_size(&self.display);
self.window_size = Size2D::new(w, h);
}
/// Display the backend and read input events.
pub fn update(&mut self, core: &mut Core<V>) -> bool {
self.update_window_size();
self.render(core);
self.canvas.draw(&self.display, self.zoom);
self.process_events(core)
}
/// Return an image for the current contents of the screen.
pub fn screenshot(&self) -> ImageBuffer { self.canvas.screenshot() }
}
/// Type for key events not handled by Vitral.
#[derive(Debug)]
pub struct KeyEvent {
/// Was the key pressed or released
pub state: glutin::ElementState,
/// Layout-dependent keycode
pub virtual_keycode: Option<glutin::VirtualKeyCode>,
/// Keyboard layout independent hardware scancode for the key
pub scancode: u8,
}
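// A sketch of how a caller might drain the buffered key events once per frame,
// after `update` has run `process_events`; `drain_keys` is illustrative, not
// part of the backend's API (the `V` bounds mirror how `Backend` is used above):
fn drain_keys<V: Vertex + glium::Vertex>(backend: &mut Backend<V>) {
    while let Some(key) = backend.poll_key() {
        // `state` distinguishes press from release; `virtual_keycode` is layout-dependent.
        println!("key {:?} scancode {} ({:?})", key.virtual_keycode, key.scancode, key.state);
    }
}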
/// Shader program for the `DefaultVertex` type
pub const DEFAULT_SHADER: glium::program::SourceCode = glium::program::SourceCode {
vertex_shader: "
#version 150 core
uniform mat4 matrix;
in vec2 pos;
in vec4 color;
in vec2 tex_coord;
out vec4 v_color;
out vec2 v_tex_coord;
void main() {
gl_Position = vec4(pos, 0.0, 1.0) * matrix;
v_color = color;
v_tex_coord = tex_coord;
}",
fragment_shader: "
#version 150 core
uniform sampler2D tex;
in vec4 v_color;
in vec2 v_tex_coord;
out vec4 f_color;
void main() {
vec4 tex_color = texture(tex, v_tex_coord);
// Discard fully transparent pixels to keep them from
// writing into the depth buffer.
if (tex_color.a == 0.0) discard;
f_color = v_color * tex_color;
}",
tessellation_control_shader: None,
tessellation_evaluation_shader: None,
geometry_shader: None,
};
/// A regular vertex that implements exactly the fields used by Vitral.
#[derive(Copy, Clone)]
pub struct DefaultVertex {
/// 2D position
pub pos: [f32; 2],
/// Texture coordinates
pub tex_coord: [f32; 2],
/// RGBA color
pub color: Color,
}
implement_vertex!(DefaultVertex, pos, tex_coord, color);
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct RaftServer {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
// vec::with_capacity allocates but leaves length 0, so the index assignments
// in leaderAppendEntriesResponse would be out of bounds; allocate one zeroed
// slot per server instead.
nextIndex: vec::from_elem(servers.len(), 0),
matchIndex: vec::from_elem(servers.len(), 0),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
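// Strict majority: with N servers a candidate needs floor(N/2)+1 votes, i.e. votes > N/2.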
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term != rpc.prevLogTerm {
return fail;
}
} else {
return fail;
}
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term != entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
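// Missing from the handler above is the paper's step 5: on success the follower
// should also pull its commit point forward from the leader's, roughly
// if rpc.leaderCommitIndex > self.commitIndex {
//     self.commitIndex = std::cmp::min(rpc.leaderCommitIndex, self.log.len() as int - 1);
// }
// (a sketch, kept commented out to match the file as written).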
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
}
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// Raft's up-to-date check (section 5.4.1): the candidate wins the vote if its
// last log term is higher, or the terms tie and its log is at least as long.
let lastLogIndex = self.log.len() as int - 1;
let lastLogTerm = if lastLogIndex < 0 { 0 } else { self.log[lastLogIndex].term };
let mut voteGranted = false;
if rpc.lastLogTerm > lastLogTerm ||
(rpc.lastLogTerm == lastLogTerm && rpc.lastLogIndex >= lastLogIndex) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true;
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
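// The commit rule quoted in the leaderStep comment is never applied in this
// file. A sketch of what it could look like, in the file's own style
// (advanceCommitIndex is a suggested helper, not existing code; a real leader
// would call it after every successful append response):
fn advanceCommitIndex(&mut self) {
let mut n = self.commitIndex + 1;
while n < self.log.len() as int {
// Count self plus every follower whose matchIndex has reached n.
let mut replicas = 1;
for i in range(0, self.matchIndex.len() as int) {
if self.matchIndex[i] >= n {
replicas += 1;
}
}
if replicas > (self.servers.len() / 2) as int && self.log[n].term == self.currentTerm {
self.commitIndex = n;
}
n += 1;
}
}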
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
// Guard the empty-log case: report index -1 / term 0 so a fresh candidate
// does not index past the end of its own log.
let lastLogIndex = self.log.len() as int - 1;
let lastLogTerm = if lastLogIndex < 0 { 0 } else { self.log[lastLogIndex].term };
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: lastLogTerm
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
// Same empty-log guard as convertToCandidate; leaderCommitIndex carries the
// leader's commitIndex per the paper, not the last log index.
let lastLogIndex = self.log.len() as int - 1;
let prevLogTerm = if lastLogIndex < 0 { 0 } else { self.log[lastLogIndex].term };
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: prevLogTerm,
entries: ~[], leaderCommitIndex: self.commitIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
// Randomized in [500, 1000) so that split elections tend to break on retry;
// drawing a uint avoids a negative remainder skewing the range.
let val: uint = rand::random();
self.electionTimeout = (val % 500) as int + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len() != 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// static config of 5 servers on ports 9000-9004; <port> should be one of these
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}

server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct RaftServer {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term!= rpc.prevLogTerm {
return fail;
}
} else |
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term!= entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
}
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
let lastLogIndex = (self.log.len() - 1) as int;
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: self.log[lastLogIndex].term
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
let lastLogIndex = (self.log.len() - 1) as int;
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: self.log[lastLogIndex].term,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
let val:int = rand::random();
self.electionTimeout = val % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len()!= 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}
| {
return fail;
} | conditional_block |
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct RaftServer {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term!= rpc.prevLogTerm {
return fail;
}
} else {
return fail;
}
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term!= entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc |
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
self.nextIndex[followerIndex] = rpc.logIndex;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
let lastLogIndex = (self.log.len() - 1) as int;
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: self.log[lastLogIndex].term
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
let lastLogIndex = (self.log.len() - 1) as int;
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: self.log[lastLogIndex].term,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
let val:int = rand::random();
self.electionTimeout = val % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len()!= 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}
| {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
} | identifier_body |
server.rs | #[feature(struct_variant)];
#[feature(macro_rules)];
use std::io::net::ip::{Ipv4Addr, SocketAddr};
use std::io::net::udp::{UdpSocket, UdpStream};
use std::io::timer;
use udptransport::UdpTransport;
use transport::RaftRpcTransport;
use rpc::{ServerId, LogEntry, AppendEntries, AppendEntriesResponse, RequestVote, RequestVoteResponse, RaftRpc};
use rpc::{AppendEntriesRpc, AppendEntriesResponseRpc, RequestVoteRpc, RequestVoteResponseRpc};
use std::os;
use std::vec;
use std::rand;
#[path="./rust-osc/osc.rs"]
mod osc;
mod udptransport;
mod transport;
mod rpc;
enum ServerType {
Follower,
Candidate,
Leader
}
struct | {
currentTerm: int,
votedFor: Option<ServerId>,
log: ~[LogEntry],
commitIndex: int,
lastApplied: int,
serverType: ServerType,
electionTimeout: int,
receivedVotes: int,
// Leader state:
// for each server, index of the next log entry to send to that
// server, initialized to last log index + 1
nextIndex: ~[int],
// for each server, index of highest log entry known to be
// replicated on server, initialized to 0
matchIndex: ~[int],
// current set of servers
servers: ~[ServerId],
// serverId corresponding to self
serverId: ServerId,
// transport layer to send RPC's over
transport: ~RaftRpcTransport
}
impl RaftServer {
fn new(transport: ~RaftRpcTransport, serverId: ServerId, servers: ~[ServerId]) -> RaftServer {
return RaftServer {
currentTerm: 0,
votedFor: None,
log: ~[],
commitIndex: 0,
lastApplied: 0,
electionTimeout: 0,
receivedVotes: 0,
serverType: Follower,
nextIndex: vec::with_capacity(servers.len()),
matchIndex: vec::with_capacity(servers.len()),
servers: servers,
serverId: serverId,
transport: transport
}
}
fn run(&mut self) {
loop {
match self.serverType {
Candidate => self.candidateStep(),
Follower => self.followerStep(),
Leader => self.leaderStep()
}
}
}
// Act as a candidate
// if votes received from a majority of servers become leader
// if appendentries received from new leader convert to follower
// if election times out try again
fn candidateStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.candidateRespond(rpc),
None => {}
}
if self.receivedVotes > (self.servers.len()/2) as int {
self.convertToLeader();
}
}
// Respond as a candidate to a given RPC
// RequestVoteResponse with success means we get a vote :D
// AppendEntries with term >= our term means we lost T_T
fn candidateRespond(&mut self, rpc: RaftRpc) {
match rpc {
RequestVoteResponse(rvr) => self.candidateRequestVoteResponse(rvr),
AppendEntries(ae) => self.candidateAppendEntries(ae),
_ => {}
};
}
fn candidateRequestVoteResponse(&mut self, rpc: RequestVoteResponseRpc) {
if rpc.voteGranted {
// TODO check to see if the server already voted for us this cycle
self.receivedVotes += 1;
}
}
fn candidateAppendEntries(&mut self, rpc: AppendEntriesRpc) {
if rpc.term >= self.currentTerm {
// we lost the election... D:
self.convertToFollower();
}
// pretend we didn't hear them whether or not they won, the resend will occur anyway
}
// Update the server when it is a Follower
// Paper:
// Respond to RPCs from candidates and leaders
// If election timeout elapses without receiving AppendEntries RPC
// or granting vote to candidate: convert to candidate
fn followerStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.followerRespond(rpc),
None => {}
}
self.electionTimeout -= 1;
if self.electionTimeout < 0 {
self.convertToCandidate();
}
}
// Respond to an incoming RPC as a follower
fn followerRespond(&mut self, rpc: RaftRpc) {
let response = match rpc {
AppendEntries(ref ae) => Some(self.followerAppendEntries(ae)),
RequestVote(ref rv) => Some(self.followerRequestVote(rv)),
_ => None
};
match response {
// send response to original rpc sender
Some(responseRpc) => self.transport.sendRpc(rpc.sender(), &responseRpc),
None => {}
}
}
// As a follower, handle an appendEntries RPC
fn followerAppendEntries(&mut self, rpc: &AppendEntriesRpc) -> RaftRpc {
let fail = AppendEntriesResponse(AppendEntriesResponseRpc{sender: self.serverId, term: self.currentTerm, success: false, logIndex: 0});
if rpc.term < self.currentTerm {
return fail;
}
// If log doesn't contain an entry with matching term return false
if rpc.prevLogIndex < self.log.len() as int {
if self.log[rpc.prevLogIndex].term!= rpc.prevLogTerm {
return fail;
}
} else {
return fail;
}
// 3. If an existing entry conflicts with a new one delete the
// existing entry and all that follow it
let startLogIndex = rpc.prevLogIndex+1;
for logOffset in range(0, rpc.entries.len()) {
let logIndex = startLogIndex + logOffset as int;
let entry = rpc.entries[logOffset].clone();
if logIndex < self.log.len() as int {
if self.log[logIndex].term!= entry.term {
// delete it and all following
self.log.truncate(logIndex as uint);
self.log.push(entry);
}
} else {
self.log.push(entry);
}
}
return AppendEntriesResponse(AppendEntriesResponseRpc {
sender: self.serverId, term: self.currentTerm, success: true,
logIndex: (self.log.len() - 1) as int
});
}
// As a follower handle a requestVote rpc
// From paper:
// 1. Reply false if term < currentTerm
// 2. If votedFor is null or candidateId and candidate's log is
// at least as up-to-date as receiver's log, grant vote
fn followerRequestVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
let fail = RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: false});
if rpc.term < self.currentTerm {
return fail;
}
// if we haven't voted for anything or we voted for candidate
match self.votedFor {
None => {
return self.followerVote(rpc);
},
Some(id) if rpc.candidateId == id => {
return self.followerVote(rpc);
}
_ => {
return fail;
}
}
}
fn followerVote(&mut self, rpc: &RequestVoteRpc) -> RaftRpc {
// if the candidate's log is at least as up-to-date as ours vote for them
let mut voteGranted = false;
let lastLogIndex = (self.log.len() - 1) as int;
if self.log.len() == 0 || (rpc.lastLogIndex >= lastLogIndex &&
rpc.lastLogTerm >= self.log[lastLogIndex].term) {
self.votedFor = Some(rpc.candidateId);
voteGranted = true
}
return RequestVoteResponse(RequestVoteResponseRpc {sender: self.serverId, term: self.currentTerm, voteGranted: voteGranted});
}
// Update as a leader
// Paper:
// If last log index > nextIndex for a follower send AppendEntries RPC with log entries starting at nextIndex
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
// If there exists an N such that N > commitIndex, a majority of matchIndex >= N and log[N].term == currentTerm
// set commitIndex = N
fn leaderStep(&mut self) {
match self.transport.readIncoming() {
Some(rpc) => self.leaderRespond(rpc),
None => {}
}
}
fn leaderRespond(&mut self, rpc: RaftRpc) {
match rpc {
AppendEntriesResponse(aer) => self.leaderAppendEntriesResponse(aer),
_ => {}
}
}
// If a successful appendEntries is received update nextIndex and matchIndex of follower
// Otherwise decrement nextIndex of follower and retry
fn leaderAppendEntriesResponse(&mut self, rpc: AppendEntriesResponseRpc) {
let followerIndex = self.getServerIndex(rpc.sender);
if rpc.success {
// nextIndex is the next entry to send, one past the last replicated entry
self.nextIndex[followerIndex] = rpc.logIndex + 1;
self.matchIndex[followerIndex] = rpc.logIndex;
} else {
if self.nextIndex[followerIndex] > 0 {
self.nextIndex[followerIndex] -= 1;
}
}
}
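// Illustrative sketch (not part of the original): one way to implement the
// commit rule quoted above. It assumes a `commitIndex` field and that
// `matchIndex` tracks followers only; vintage syntax to match this file.
fn leaderAdvanceCommitIndex(&mut self) {
let mut n = self.commitIndex + 1;
while n < self.log.len() as int {
let mut replicas = 1; // the leader always holds its own entries
for i in range(0, self.matchIndex.len() as int) {
if self.matchIndex[i] >= n { replicas += 1; }
}
// matchIndex is monotone, so once a majority fails it fails for all larger N
if replicas * 2 <= self.servers.len() as int { break; }
// only entries from the current term commit by counting (paper 5.4.2)
if self.log[n].term == self.currentTerm {
self.commitIndex = n;
}
n += 1;
}
// (a fuller implementation would call this from leaderStep after
// handling appendEntries responses)
}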
// Become a candidate (start election)
fn convertToCandidate(&mut self) {
self.serverType = Candidate;
self.currentTerm += 1;
self.receivedVotes = 1; // vote for self
self.setNewTimeout();
// RequestVote {sender: ServerId, term: int, candidateId: ServerId, lastLogIndex: int, lastLogTerm: int},
// guard against an empty log: index -1 / term 0 stand in for "no entries"
let lastLogIndex = (self.log.len() as int) - 1;
let lastLogTerm = if lastLogIndex >= 0 { self.log[lastLogIndex].term } else { 0 };
let requestVote = RequestVote(RequestVoteRpc {
sender: self.serverId, term: self.currentTerm,
candidateId: self.serverId, lastLogIndex: lastLogIndex,
lastLogTerm: lastLogTerm
});
// Broadcast requestVote to all servers
self.broadcastRpc(requestVote);
}
fn convertToFollower(&mut self) {
self.serverType = Follower;
self.setNewTimeout();
}
fn convertToLeader(&mut self) {
self.serverType = Leader;
self.broadcastHeartbeat();
}
fn broadcastRpc(&mut self, rpc: RaftRpc) {
for &serverId in self.servers.iter() {
if serverId == self.serverId {
continue;
}
self.transport.sendRpc(serverId, &rpc);
}
}
fn broadcastHeartbeat(&mut self) {
// Send an empty appendEntries to all servers
// AppendEntries {sender: ServerId, term: int, leaderId: ServerId, prevLogIndex: int, entries: ~[LogEntry], leaderCommitIndex: int},
// guard against an empty log, as in convertToCandidate
let lastLogIndex = (self.log.len() as int) - 1;
let lastLogTerm = if lastLogIndex >= 0 { self.log[lastLogIndex].term } else { 0 };
let appendEntries = AppendEntries(AppendEntriesRpc {
sender: self.serverId, term: self.currentTerm, leaderId: self.serverId,
prevLogIndex: lastLogIndex, prevLogTerm: lastLogTerm,
entries: ~[], leaderCommitIndex: lastLogIndex
});
self.broadcastRpc(appendEntries);
}
fn getServerIndex(&self, serverId: ServerId) -> int {
for i in range(0,self.servers.len() as int) {
if self.servers[i] == serverId {
return i;
}
}
return -1;
}
fn setNewTimeout(&mut self) {
// pick a randomized timeout in [500, 1000) ticks; mask off the sign bit
// so a negative random int can't yield a negative remainder
let val: int = rand::random();
self.electionTimeout = (val & 0x7fffffff) % 500 + 500;
}
}
fn getLocalAddr(port: u16) -> SocketAddr {
return SocketAddr {ip: Ipv4Addr(127,0,0,1), port: port};
}
fn main() {
let args = os::args();
if args.len() != 2 {
fail!("usage: {} <port>", args[0]);
}
let port = match from_str::<int>(args[1]) {
Some(val) => val,
None => fail!("usage: {} <port:int>", args[0])
};
let udpTransport = UdpTransport::new();
// config with 5 servers
let servers = vec::from_fn(5, |idx| getLocalAddr(9000 + idx as u16));
let mut server = RaftServer::new(~udpTransport, getLocalAddr(port as u16), servers);
server.run();
}
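// Illustrative note: the hard-coded config above expects five servers on
// local ports 9000-9004, so <port> should be one of those values.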
| RaftServer | identifier_name |
main.rs | map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else |
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
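// For example (illustrative): "https://github.com/rust-lang/rust.git" maps
// to slug "rust-lang/rust" and is cloned bare into "repos/rust-lang/rust".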
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note: this matches the individual "Rollup merge of #..." commits, not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
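// Illustrative example (not from the source): a merge message line such as
// "Auto merge of #12345 - foo:bar, r=alice,bob" yields the handles
// ["alice", "bob"], which `to_author` resolves through the Reviewers table.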
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is one minor version ahead of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
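// Worked example (illustrative): if the map up to 1.49.0 credits alice with
// commits {a, b} and the map up to 1.50.0 credits her with {a, b, c}, the
// difference keeps only {c} for 1.50.0, so each release lists just the
// contributions that are new in it.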
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
| {
new.map.insert(author.clone(), set.clone());
} | conditional_block |
main.rs | from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note: this matches the individual "Rollup merge of #..." commits, not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is one minor version ahead of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
let name = entry.name().unwrap();
if name.ends_with(".path") {
let url = name.replace(".path", ".url");
let url = submodule_cfg.get_string(&url).unwrap();
path_to_url.insert(entry.value().unwrap().to_owned(), url);
}
}
let mut submodules = Vec::new();
let tree = at.tree()?;
for (path, url) in &path_to_url {
let path = Path::new(&path);
let entry = tree.get_path(&path);
// the submodule may not actually exist
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
assert_eq!(entry.kind().unwrap(), git2::ObjectType::Commit);
submodules.push(Submodule {
path: path.to_owned(),
commit: entry.id(),
repository: url.to_owned(),
});
}
submodules.retain(|s| {
let is_rust =
s.repository.contains("rust-lang") || s.repository.contains("rust-lang-nursery");
let exclude = vec![
"https://github.com/rust-lang/llvm.git",
"https://github.com/rust-lang/llvm-project.git",
"https://github.com/rust-lang/lld.git",
"https://github.com/rust-lang-nursery/clang.git",
"https://github.com/rust-lang-nursery/lldb.git",
"https://github.com/rust-lang/libuv.git",
"https://github.com/rust-lang/gyp.git",
"https://github.com/rust-lang/jemalloc.git",
"https://github.com/rust-lang/compiler-rt.git",
"https://github.com/rust-lang/hoedown.git",
];
is_rust
&& !exclude.contains(&s.repository.as_str())
&& !exclude.contains(&&*format!("{}.git", s.repository))
});
Ok(submodules)
}
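// Illustrative example (not from the source): a .gitmodules entry such as
// [submodule "src/tools/cargo"]
// path = src/tools/cargo
// url = https://github.com/rust-lang/cargo.git
// yields the pair ("src/tools/cargo", url), and the tree lookup above
// resolves the submodule commit pinned at that path.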
fn | modules_file | identifier_name |
|
main.rs | map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) { | .or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else {
new.map.insert(author.clone(), set.clone());
}
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new();
out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
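// Illustrative example (not from the source): a trailer line like
// "Co-authored-by: Jane Doe <jane@example.com>" is captured as
// Author { name: "Jane Doe", email: "jane@example.com" }.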
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note: this matches the individual "Rollup merge of #..." commits, not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is one minor version ahead of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
| self.map
.entry(author) | random_line_split |
main.rs | map: HashMap::new(),
}
}
fn add(&mut self, author: Author, commit: Oid) {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.insert(commit);
}
fn iter(&self) -> impl Iterator<Item = (&Author, usize)> {
self.map.iter().map(|(k, v)| (k, v.len()))
}
fn extend(&mut self, other: Self) {
for (author, set) in other.map {
self.map
.entry(author)
.or_insert_with(HashSet::new)
.extend(set);
}
}
#[must_use]
fn difference(&self, other: &AuthorMap) -> AuthorMap {
let mut new = AuthorMap::new();
new.map.reserve(self.map.len());
for (author, set) in self.map.iter() {
if let Some(other_set) = other.map.get(&author) {
let diff: HashSet<_> = set.difference(other_set).cloned().collect();
if !diff.is_empty() {
new.map.insert(author.clone(), diff);
}
} else {
new.map.insert(author.clone(), set.clone());
}
}
new
}
}
fn git(args: &[&str]) -> Result<String, Box<dyn std::error::Error>> | out.stdout.unwrap().read_to_end(&mut stdout).unwrap();
Ok(String::from_utf8_lossy(&stdout).into_owned())
}
lazy_static::lazy_static! {
static ref UPDATED: Mutex<HashSet<String>> = Mutex::new(HashSet::new());
}
fn update_repo(url: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
let mut slug = url;
let prefix = "https://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "git://github.com/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let prefix = "https://git.chromium.org/";
if slug.starts_with(prefix) {
slug = &slug[prefix.len()..];
}
let suffix = ".git";
if slug.ends_with(suffix) {
slug = &slug[..slug.len() - suffix.len()];
}
let path_s = format!("repos/{}", slug);
let path = PathBuf::from(&path_s);
if !UPDATED.lock().unwrap().insert(slug.to_string()) {
return Ok(path);
}
if path.exists() {
if should_update() {
// we know for sure the path_s does *not* contain .git as we strip it, so this is a safe
// temp directory
let tmp = format!("{}.git", path_s);
std::fs::rename(&path, &tmp)?;
git(&[
"clone",
"--bare",
"--dissociate",
"--reference",
&tmp,
&url,
&path_s,
])?;
std::fs::remove_dir_all(&tmp)?;
}
} else {
git(&["clone", "--bare", &url, &path_s])?;
}
Ok(path)
}
fn should_update() -> bool {
std::env::args_os().nth(1).unwrap_or_default() == "--refresh"
}
#[derive(Clone)]
pub struct VersionTag {
name: String,
version: Version,
raw_tag: String,
commit: Oid,
in_progress: bool,
}
impl fmt::Display for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
impl std::hash::Hash for VersionTag {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.version.hash(state);
}
}
impl cmp::Eq for VersionTag {}
impl cmp::PartialEq for VersionTag {
fn eq(&self, other: &Self) -> bool {
self.version == other.version
}
}
impl cmp::PartialOrd for VersionTag {
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(&other))
}
}
impl cmp::Ord for VersionTag {
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.version.cmp(&other.version)
}
}
impl fmt::Debug for VersionTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.version)
}
}
fn get_versions(repo: &Repository) -> Result<Vec<VersionTag>, Box<dyn std::error::Error>> {
let tags = repo
.tag_names(None)?
.into_iter()
.filter_map(|v| v)
.map(|v| v.to_owned())
.collect::<Vec<_>>();
let mut versions = tags
.iter()
.filter_map(|tag| {
Version::parse(&tag)
.or_else(|_| Version::parse(&format!("{}.0", tag)))
.ok()
.map(|v| VersionTag {
name: format!("Rust {}", v),
version: v,
raw_tag: tag.clone(),
commit: repo
.revparse_single(&tag)
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: false,
})
})
.collect::<Vec<_>>();
versions.sort();
Ok(versions)
}
fn commit_coauthors(commit: &Commit) -> Vec<Author> {
let mut coauthors = vec![];
if let Some(msg) = commit.message_raw() {
lazy_static::lazy_static! {
static ref RE: Regex =
RegexBuilder::new(r"^Co-authored-by: (?P<name>.*) <(?P<email>.*)>")
.case_insensitive(true)
.build()
.unwrap();
}
for line in msg.lines().rev() {
if line.starts_with("Co-authored-by") {
if let Some(caps) = RE.captures(line) {
coauthors.push(Author {
name: caps["name"].to_string(),
email: caps["email"].to_string(),
});
}
}
}
}
coauthors
}
fn build_author_map(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
match build_author_map_(repo, reviewers, mailmap, from, to) {
Ok(o) => Ok(o),
Err(err) => Err(ErrorContext(
format!(
"build_author_map(repo={}, from={:?}, to={:?})",
repo.path().display(),
from,
to
),
err,
))?,
}
}
// Note: this matches the individual "Rollup merge of #..." commits, not the bors merge commit of a rollup
fn is_rollup_commit(commit: &Commit) -> bool {
let summary = commit.summary().unwrap_or("");
summary.starts_with("Rollup merge of #")
}
fn parse_bors_reviewer(
reviewers: &Reviewers,
repo: &Repository,
commit: &Commit,
) -> Result<Option<Vec<Author>>, ErrorContext> {
if commit.author().name_bytes() != b"bors" || commit.committer().name_bytes() != b"bors" {
if commit.committer().name_bytes() != b"GitHub" || !is_rollup_commit(commit) {
return Ok(None);
}
}
// Skip non-merge commits
if commit.parents().count() == 1 {
return Ok(None);
}
let to_author = |list: &str| -> Result<Vec<Author>, ErrorContext> {
list.trim_end_matches('.')
.split(|c| c == ',' || c == '+')
.map(|r| r.trim_start_matches('@'))
.map(|r| r.trim_end_matches('`'))
.map(|r| r.trim())
.filter(|r| !r.is_empty())
.filter(|r| *r != "<try>")
.inspect(|r| {
if !r.chars().all(|c| {
c.is_alphabetic() || c.is_digit(10) || c == '-' || c == '_' || c == '='
}) {
eprintln!(
"warning: to_author for {} contained non-alphabetic characters: {:?}",
commit.id(),
r
);
}
})
.map(|r| {
reviewers.to_author(r).map_err(|e| {
ErrorContext(
format!("reviewer: {:?}, commit: {}", r, commit.id()),
e.into(),
)
})
})
.flat_map(|r| r.transpose())
.collect::<Result<Vec<_>, ErrorContext>>()
};
let message = commit.message().unwrap_or("");
let mut reviewers = if let Some(line) = message.lines().find(|l| l.contains(" r=")) {
let start = line.find("r=").unwrap() + 2;
let end = line[start..]
.find(' ')
.map(|pos| pos + start)
.unwrap_or(line.len());
to_author(&line[start..end])?
} else if let Some(line) = message.lines().find(|l| l.starts_with("Reviewed-by: ")) {
let line = &line["Reviewed-by: ".len()..];
to_author(&line)?
} else {
// old bors didn't include r=
if message != "automated merge\n" {
panic!(
"expected reviewer for bors merge commit {} in {:?}, message: {:?}",
commit.id(),
repo.path(),
message
);
}
return Ok(None);
};
reviewers.sort();
reviewers.dedup();
Ok(Some(reviewers))
}
fn build_author_map_(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
from: &str,
to: &str,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let mut walker = repo.revwalk()?;
if repo.revparse_single(to).is_err() {
// If a commit is not found, try fetching it.
git(&[
"--git-dir",
repo.path().to_str().unwrap(),
"fetch",
"origin",
to,
])?;
}
if from == "" {
let to = repo.revparse_single(to)?.peel_to_commit()?.id();
walker.push(to)?;
} else {
walker.push_range(&format!("{}..{}", from, to))?;
}
let mut author_map = AuthorMap::new();
for oid in walker {
let oid = oid?;
let commit = repo.find_commit(oid)?;
let mut commit_authors = Vec::new();
if !is_rollup_commit(&commit) {
// We ignore the author of rollup-merge commits, and account for
// that author once by counting the reviewer of all bors merges. For
// rollups, we consider that this is the most relevant person, which
// is usually the case.
//
// Otherwise, a single rollup with N PRs attributes N commits to the author of the
// rollup, which isn't fair.
commit_authors.push(Author::from_sig(commit.author()));
}
match parse_bors_reviewer(&reviewers, &repo, &commit) {
Ok(Some(reviewers)) => commit_authors.extend(reviewers),
Ok(None) => {}
Err(ErrorContext(msg, e)) => {
if e.is::<reviewers::UnknownReviewer>() {
eprintln!("Unknown reviewer: {}", ErrorContext(msg, e));
} else {
return Err(ErrorContext(msg, e).into());
}
}
}
commit_authors.extend(commit_coauthors(&commit));
for author in commit_authors {
let author = mailmap.canonicalize(&author);
author_map.add(author, oid);
}
}
Ok(author_map)
}
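// Note (illustrative): "{from}..{to}" above uses git range semantics, i.e.
// commits reachable from `to` but not from `from`, so a bounded walk covers
// only the commits added since the `from` revision.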
fn mailmap_from_repo(repo: &git2::Repository) -> Result<Mailmap, Box<dyn std::error::Error>> {
let file = String::from_utf8(
repo.revparse_single("master")?
.peel_to_commit()?
.tree()?
.get_name(".mailmap")
.unwrap()
.to_object(&repo)?
.peel_to_blob()?
.content()
.into(),
)?;
Mailmap::from_string(file)
}
fn up_to_release(
repo: &Repository,
reviewers: &Reviewers,
mailmap: &Mailmap,
to: &VersionTag,
) -> Result<AuthorMap, Box<dyn std::error::Error>> {
let to_commit = repo.find_commit(to.commit).map_err(|e| {
ErrorContext(
format!(
"find_commit: repo={}, commit={}",
repo.path().display(),
to.commit
),
Box::new(e),
)
})?;
let modules = get_submodules(&repo, &to_commit)?;
let mut author_map = build_author_map(&repo, &reviewers, &mailmap, "", &to.raw_tag)
.map_err(|e| ErrorContext(format!("Up to {}", to), e))?;
for module in &modules {
if let Ok(path) = update_repo(&module.repository) {
let subrepo = Repository::open(&path)?;
let submap = build_author_map(
&subrepo,
&reviewers,
&mailmap,
"",
&module.commit.to_string(),
)?;
author_map.extend(submap);
}
}
Ok(author_map)
}
fn generate_thanks() -> Result<BTreeMap<VersionTag, AuthorMap>, Box<dyn std::error::Error>> {
let path = update_repo("https://github.com/rust-lang/rust.git")?;
let repo = git2::Repository::open(&path)?;
let mailmap = mailmap_from_repo(&repo)?;
let reviewers = Reviewers::new()?;
let mut versions = get_versions(&repo)?;
let last_full_stable = versions
.iter()
.rfind(|v| v.raw_tag.ends_with(".0"))
.unwrap()
.version
.clone();
versions.push(VersionTag {
name: String::from("Beta"),
version: {
let mut last = last_full_stable.clone();
last.minor += 1;
last
},
raw_tag: String::from("beta"),
commit: repo
.revparse_single("beta")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
versions.push(VersionTag {
name: String::from("Master"),
version: {
// master is one minor version ahead of beta, which we just pushed
let mut last = last_full_stable.clone();
last.minor += 2;
last
},
raw_tag: String::from("master"),
commit: repo
.revparse_single("master")
.unwrap()
.peel_to_commit()
.unwrap()
.id(),
in_progress: true,
});
let mut version_map = BTreeMap::new();
let mut cache = HashMap::new();
for (idx, version) in versions.iter().enumerate() {
let previous = if let Some(v) = idx.checked_sub(1).map(|idx| &versions[idx]) {
v
} else {
let author_map = build_author_map(&repo, &reviewers, &mailmap, "", &version.raw_tag)?;
version_map.insert(version.clone(), author_map);
continue;
};
eprintln!("Processing {:?} to {:?}", previous, version);
cache.insert(
version,
up_to_release(&repo, &reviewers, &mailmap, &version)?,
);
let previous = match cache.remove(&previous) {
Some(v) => v,
None => up_to_release(&repo, &reviewers, &mailmap, &previous)?,
};
let current = cache.get(&version).unwrap();
// Remove commits reachable from the previous release.
let only_current = current.difference(&previous);
version_map.insert(version.clone(), only_current);
}
Ok(version_map)
}
fn run() -> Result<(), Box<dyn std::error::Error>> {
let by_version = generate_thanks()?;
let mut all_time = by_version.values().next().unwrap().clone();
for map in by_version.values().skip(1) {
all_time.extend(map.clone());
}
site::render(by_version, all_time)?;
Ok(())
}
fn main() {
if let Err(err) = run() {
eprintln!("Error: {}", err);
let mut cur = &*err;
while let Some(cause) = cur.source() {
eprintln!("\tcaused by: {}", cause);
cur = cause;
}
std::mem::drop(cur);
std::process::exit(1);
}
}
#[derive(Debug)]
struct Submodule {
path: PathBuf,
commit: Oid,
// url
repository: String,
}
fn get_submodules(
repo: &Repository,
at: &Commit,
) -> Result<Vec<Submodule>, Box<dyn std::error::Error>> {
let submodule_cfg = modules_file(&repo, &at)?;
let submodule_cfg = Config::parse(&submodule_cfg)?;
let mut path_to_url = HashMap::new();
let entries = submodule_cfg.entries(None)?;
for entry in &entries {
let entry = entry?;
| {
let mut cmd = Command::new("git");
cmd.args(args);
cmd.stdout(Stdio::piped());
let out = cmd.spawn();
let mut out = match out {
Ok(v) => v,
Err(err) => {
panic!("Failed to spawn command `{:?}`: {:?}", cmd, err);
}
};
let status = out.wait().expect("waited");
if !status.success() {
eprintln!("failed to run `git {:?}`: {:?}", args, status);
return Err(std::io::Error::from(std::io::ErrorKind::Other).into());
}
let mut stdout = Vec::new(); | identifier_body |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
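// The loop above stops expanding as soon as the best remaining candidate
// scores below `lower_bound()`, i.e. the worst entry currently kept in
// `nearest`; that cutoff is what keeps the expansion bounded by `ef`.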
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedily searches for the entry point of level `target_level`.
/// Beam size is 1.
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assumes all scores are similarities. Larger score = closer points.
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn point_level(&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
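/// A minimal usage sketch (identifiers here are illustrative, not from this
/// file):
///
/// ```ignore
/// // assumes a built `graph: GraphLayers<GraphLinksRam>` and a `scorer`
/// let top_10 = graph.search(10, 64, scorer); // explore ef = 64 candidates
/// ```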
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> |
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType],
top: usize,
vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expected entry point to exist");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
}
| {
Ok(atomic_save_bin(path, self)?)
} | identifier_body |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
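/// Expansion stops as soon as the best remaining candidate scores below the
/// lower bound of the current result set.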
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedily searches for the entry point of level `target_level`.
/// Beam size is 1.
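/// Only the single best point found so far is carried from one level down to
/// the next.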
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assumes all scores are similarities. Larger score = closer points.
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn | (&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> {
Ok(atomic_save_bin(path, self)?)
}
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType],
top: usize,
vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expected entry point to exist");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
}
| point_level | identifier_name |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
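/// `limit` (the level's M) caps how many links are scored per expansion step.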
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedily searches for the entry point of level `target_level`.
/// Beam size is 1.
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assumes all scores are similarities. Larger score = closer points.
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn point_level(&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
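/// Loads a graph saved in the current two-file layout; on failure it falls
/// back to the legacy single-file format, converting it and writing the new
/// links file as a side effect.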
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => | Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> {
Ok(atomic_save_bin(path, self)?)
}
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType],
top: usize,
vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expected entry point to exist");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
}
| {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else { | conditional_block |
graph_layers.rs | use std::cmp::max;
use std::path::{Path, PathBuf};
use common::fixed_length_priority_queue::FixedLengthPriorityQueue;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::graph_links::{GraphLinks, GraphLinksMmap};
use crate::common::file_operations::{atomic_save_bin, read_bin, FileStorageError};
use crate::common::mmap_ops;
use crate::common::utils::rev_range;
use crate::entry::entry_point::OperationResult;
use crate::index::hnsw_index::entry_points::EntryPoints;
use crate::index::hnsw_index::graph_links::GraphLinksConverter;
use crate::index::hnsw_index::point_scorer::FilteredScorer;
use crate::index::hnsw_index::search_context::SearchContext;
use crate::index::visited_pool::{VisitedList, VisitedPool};
use crate::types::PointOffsetType;
use crate::vector_storage::ScoredPointOffset;
pub type LinkContainer = Vec<PointOffsetType>;
pub type LinkContainerRef<'a> = &'a [PointOffsetType];
pub type LayersContainer = Vec<LinkContainer>;
pub const HNSW_GRAPH_FILE: &str = "graph.bin";
pub const HNSW_LINKS_FILE: &str = "links.bin";
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayersBackwardCompatibility {
pub(super) max_level: usize,
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
pub(super) links_layers: Vec<LayersContainer>,
pub(super) entry_points: EntryPoints,
}
#[derive(Deserialize, Serialize, Debug)]
pub struct GraphLayers<TGraphLinks: GraphLinks> {
pub(super) m: usize,
pub(super) m0: usize,
pub(super) ef_construct: usize,
#[serde(skip)]
pub(super) links: TGraphLinks,
pub(super) entry_points: EntryPoints,
#[serde(skip)]
pub(super) visited_pool: VisitedPool,
}
pub trait GraphLayersBase {
fn get_visited_list_from_pool(&self) -> VisitedList;
fn return_visited_list_to_pool(&self, visited_list: VisitedList);
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, f: F)
where
F: FnMut(PointOffsetType);
/// Get M based on current level
fn get_m(&self, level: usize) -> usize;
/// Greedy search for closest points within a single graph layer
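/// Candidates are expanded best-first, ordered by their similarity score.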
fn _search_on_level(
&self,
searcher: &mut SearchContext,
level: usize,
visited_list: &mut VisitedList,
points_scorer: &mut FilteredScorer,
) {
let limit = self.get_m(level);
let mut points_ids: Vec<PointOffsetType> = Vec::with_capacity(2 * limit);
while let Some(candidate) = searcher.candidates.pop() {
if candidate.score < searcher.lower_bound() {
break;
}
points_ids.clear();
self.links_map(candidate.idx, level, |link| {
if !visited_list.check_and_update_visited(link) {
points_ids.push(link);
}
});
let scores = points_scorer.score_points(&mut points_ids, limit);
scores
.iter()
.copied()
.for_each(|score_point| searcher.process_candidate(score_point));
}
}
fn search_on_level(
&self,
level_entry: ScoredPointOffset,
level: usize,
ef: usize,
points_scorer: &mut FilteredScorer,
) -> FixedLengthPriorityQueue<ScoredPointOffset> {
let mut visited_list = self.get_visited_list_from_pool();
visited_list.check_and_update_visited(level_entry.idx);
let mut search_context = SearchContext::new(level_entry, ef);
self._search_on_level(&mut search_context, level, &mut visited_list, points_scorer);
self.return_visited_list_to_pool(visited_list);
search_context.nearest
}
/// Greedily searches for the entry point of level `target_level`.
/// Beam size is 1.
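/// The `while changed` loop below is plain hill climbing: it keeps jumping to
/// a better-scoring neighbour until none improves the current point.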
fn search_entry(
&self,
entry_point: PointOffsetType,
top_level: usize,
target_level: usize,
points_scorer: &mut FilteredScorer,
) -> ScoredPointOffset {
let mut links: Vec<PointOffsetType> = Vec::with_capacity(2 * self.get_m(0));
let mut current_point = ScoredPointOffset {
idx: entry_point,
score: points_scorer.score_point(entry_point),
};
for level in rev_range(top_level, target_level) {
let limit = self.get_m(level);
let mut changed = true;
while changed {
changed = false;
links.clear();
self.links_map(current_point.idx, level, |link| {
links.push(link);
});
let scores = points_scorer.score_points(&mut links, limit);
scores.iter().copied().for_each(|score_point| {
if score_point.score > current_point.score {
changed = true;
current_point = score_point;
}
});
}
}
current_point
}
}
impl<TGraphLinks: GraphLinks> GraphLayersBase for GraphLayers<TGraphLinks> {
fn get_visited_list_from_pool(&self) -> VisitedList {
self.visited_pool.get(self.links.num_points())
}
fn return_visited_list_to_pool(&self, visited_list: VisitedList) {
self.visited_pool.return_back(visited_list);
}
fn links_map<F>(&self, point_id: PointOffsetType, level: usize, mut f: F)
where
F: FnMut(PointOffsetType),
{
for link in self.links.links(point_id, level) {
f(*link);
}
}
fn get_m(&self, level: usize) -> usize {
if level == 0 {
self.m0
} else {
self.m
}
}
}
/// Object contains links between nodes for HNSW search
///
/// Assumes all scores are similarities. Larger score = closer points.
impl<TGraphLinks: GraphLinks> GraphLayers<TGraphLinks> {
pub fn point_level(&self, point_id: PointOffsetType) -> usize {
self.links.point_level(point_id)
}
pub fn search(
&self,
top: usize,
ef: usize,
mut points_scorer: FilteredScorer,
) -> Vec<ScoredPointOffset> {
let entry_point = match self
.entry_points
.get_entry_point(|point_id| points_scorer.check_vector(point_id))
{
None => return vec![],
Some(ep) => ep,
};
let zero_level_entry = self.search_entry(
entry_point.point_id,
entry_point.level,
0,
&mut points_scorer,
);
let nearest = self.search_on_level(zero_level_entry, 0, max(top, ef), &mut points_scorer);
nearest.into_iter().take(top).collect_vec()
}
pub fn get_path(path: &Path) -> PathBuf {
path.join(HNSW_GRAPH_FILE)
}
pub fn get_links_path(path: &Path) -> PathBuf {
path.join(HNSW_LINKS_FILE)
}
pub fn num_points(&self) -> usize {
self.links.num_points()
}
}
impl<TGraphLinks> GraphLayers<TGraphLinks>
where
TGraphLinks: GraphLinks,
{
pub fn load(graph_path: &Path, links_path: &Path) -> OperationResult<Self> {
let try_self: Result<Self, FileStorageError> = if links_path.exists() {
read_bin(graph_path)
} else {
Err(FileStorageError::generic(format!(
"Links file does not exists: {links_path:?}"
)))
};
match try_self {
Ok(mut slf) => {
let links = TGraphLinks::load_from_file(links_path)?;
slf.links = links;
Ok(slf)
}
Err(err) => {
let try_legacy: Result<GraphLayersBackwardCompatibility, _> = read_bin(graph_path);
if let Ok(legacy) = try_legacy {
log::debug!("Converting legacy graph to new format");
let mut converter = GraphLinksConverter::new(legacy.links_layers);
converter.save_as(links_path)?;
let links = TGraphLinks::from_converter(converter)?;
let slf = Self {
m: legacy.m,
m0: legacy.m0,
ef_construct: legacy.ef_construct,
links,
entry_points: legacy.entry_points,
visited_pool: VisitedPool::new(),
};
slf.save(graph_path)?;
Ok(slf)
} else {
Err(err)?
}
}
}
}
pub fn save(&self, path: &Path) -> OperationResult<()> {
Ok(atomic_save_bin(path, self)?)
}
}
impl GraphLayers<GraphLinksMmap> {
pub fn prefault_mmap_pages(&self, path: &Path) -> Option<mmap_ops::PrefaultMmapPages> {
self.links.prefault_mmap_pages(path)
}
}
#[cfg(test)]
mod tests {
use std::fs::File;
use std::io::Write;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::SeedableRng;
use tempfile::Builder;
use super::*;
use crate::data_types::vectors::VectorElementType;
use crate::fixtures::index_fixtures::{
random_vector, FakeFilterContext, TestRawScorerProducer,
};
use crate::index::hnsw_index::graph_links::GraphLinksRam;
use crate::index::hnsw_index::tests::create_graph_layer_fixture;
use crate::spaces::metric::Metric;
use crate::spaces::simple::{CosineMetric, DotProductMetric};
fn search_in_graph<TGraphLinks: GraphLinks>(
query: &[VectorElementType], | vector_storage: &TestRawScorerProducer<CosineMetric>,
graph: &GraphLayers<TGraphLinks>,
) -> Vec<ScoredPointOffset> {
let fake_filter_context = FakeFilterContext {};
let raw_scorer = vector_storage.get_raw_scorer(query.to_owned());
let scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let ef = 16;
graph.search(top, ef, scorer)
}
const M: usize = 8;
#[test]
fn test_search_on_level() {
let dim = 8;
let m = 8;
let ef_construct = 32;
let entry_points_num = 10;
let num_vectors = 10;
let mut rng = StdRng::seed_from_u64(42);
let vector_holder =
TestRawScorerProducer::<DotProductMetric>::new(dim, num_vectors, &mut rng);
let mut graph_layers = GraphLayers {
m,
m0: 2 * m,
ef_construct,
links: GraphLinksRam::default(),
entry_points: EntryPoints::new(entry_points_num),
visited_pool: VisitedPool::new(),
};
let mut graph_links = vec![vec![Vec::new()]; num_vectors];
graph_links[0][0] = vec![1, 2, 3, 4, 5, 6];
graph_layers.links =
GraphLinksRam::from_converter(GraphLinksConverter::new(graph_links.clone())).unwrap();
let linking_idx: PointOffsetType = 7;
let fake_filter_context = FakeFilterContext {};
let added_vector = vector_holder.vectors.get(linking_idx).to_vec();
let raw_scorer = vector_holder.get_raw_scorer(added_vector);
let mut scorer = FilteredScorer::new(raw_scorer.as_ref(), Some(&fake_filter_context));
let nearest_on_level = graph_layers.search_on_level(
ScoredPointOffset {
idx: 0,
score: scorer.score_point(0),
},
0,
32,
&mut scorer,
);
assert_eq!(nearest_on_level.len(), graph_links[0][0].len() + 1);
for nearest in &nearest_on_level {
// eprintln!("nearest = {:#?}", nearest);
assert_eq!(
nearest.score,
scorer.score_internal(linking_idx, nearest.idx)
)
}
}
#[test]
fn test_save_and_load() {
let num_vectors = 100;
let dim = 8;
let top = 5;
let mut rng = StdRng::seed_from_u64(42);
let dir = Builder::new().prefix("graph_dir").tempdir().unwrap();
let links_path = GraphLayers::<GraphLinksRam>::get_links_path(dir.path());
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
false,
&mut rng,
Some(&links_path),
);
let query = random_vector(&mut rng, dim);
let res1 = search_in_graph(&query, top, &vector_holder, &graph_layers);
let path = GraphLayers::<GraphLinksRam>::get_path(dir.path());
graph_layers.save(&path).unwrap();
let graph2 = GraphLayers::<GraphLinksRam>::load(&path, &links_path).unwrap();
let res2 = search_in_graph(&query, top, &vector_holder, &graph2);
assert_eq!(res1, res2)
}
#[test]
fn test_add_points() {
let num_vectors = 1000;
let dim = 8;
let mut rng = StdRng::seed_from_u64(42);
type M = CosineMetric;
let (vector_holder, graph_layers) =
create_graph_layer_fixture::<M, _>(num_vectors, M, dim, false, &mut rng, None);
let main_entry = graph_layers
.entry_points
.get_entry_point(|_x| true)
.expect("Expected entry point to exist");
assert!(main_entry.level > 0);
let num_levels = (0..num_vectors)
.map(|i| graph_layers.links.point_level(i as PointOffsetType))
.max()
.unwrap();
assert_eq!(main_entry.level, num_levels);
let total_links_0 = (0..num_vectors)
.map(|i| graph_layers.links.links(i as PointOffsetType, 0).len())
.sum::<usize>();
eprintln!("total_links_0 = {total_links_0:#?}");
eprintln!("num_vectors = {num_vectors:#?}");
assert!(total_links_0 > 0);
assert!(total_links_0 as f64 / num_vectors as f64 > M as f64);
let top = 5;
let query = random_vector(&mut rng, dim);
let processed_query = M::preprocess(&query).unwrap_or_else(|| query.clone());
let mut reference_top = FixedLengthPriorityQueue::new(top);
for idx in 0..vector_holder.vectors.len() as PointOffsetType {
let vec = &vector_holder.vectors.get(idx);
reference_top.push(ScoredPointOffset {
idx,
score: M::similarity(vec, &processed_query),
});
}
let graph_search = search_in_graph(&query, top, &vector_holder, &graph_layers);
assert_eq!(reference_top.into_vec(), graph_search);
}
#[test]
#[ignore]
fn test_draw_hnsw_graph() {
let dim = 2;
let num_vectors = 500;
let mut rng = StdRng::seed_from_u64(42);
let (vector_holder, graph_layers) = create_graph_layer_fixture::<CosineMetric, _>(
num_vectors,
M,
dim,
true,
&mut rng,
None,
);
let graph_json = serde_json::to_string_pretty(&graph_layers).unwrap();
let vectors_json = serde_json::to_string_pretty(
&(0..vector_holder.vectors.len() as PointOffsetType)
.map(|point_id| vector_holder.vectors.get(point_id).to_vec())
.collect_vec(),
)
.unwrap();
let mut file = File::create("graph.json").unwrap();
file.write_all(
format!("{{ \"graph\": {graph_json}, \n \"vectors\": {vectors_json} }}").as_bytes(),
)
.unwrap();
}
} | top: usize, | random_line_split |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
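// Reinterprets a `&mut [Color]` as `&mut [u32]` in place; sound only because
// the assertions below pin `Color` to the exact size and alignment of `u32`
// (see `#[repr(C, align(4))]` on `Color`).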
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width *4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
let needed_size = width * height * 4 * 12 / 10; // over-allocate by ~20% so repeated resizes rarely reallocate
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> {
0..(self.height as i32)
}
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
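// A minimal end-to-end sketch (the file name is illustrative):
//
//     let mut img = Image::new(&Vec2i::new(64, 64));
//     img.clear(&Color::gray(0));
//     draw_rect(&mut img, &Vec2i::new(8, 8), &Vec2i::new(16, 16),
//               &Color::rgba(255, 0, 0, 128)); // semi-transparent red
//     img.save_png(Path::new("out.png")).unwrap();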
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
set_pixel(image, &pos, &blend(&color, &get_pixel(image, &pos)));
}
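// Shared kernel for copy/blend: clips `src` placed at `pos` against `dst`,
// then walks the overlapping rows of both u32 buffers, applying `f` to each
// (dst, src) pixel pair.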
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
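/// Nearest-neighbour integer upscale: every source pixel is replicated into a
/// `scale` x `scale` block, with the result clipped against `dst`.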
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
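// Unlike `draw_rect` above, `rect` overwrites pixels without blending, which
// makes it the cheaper choice for fully opaque fills.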
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast blend on integer numbers without gamma correction and premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
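/// Derivation of the integer form used below: with `a = A / 255`, "source
/// over" compositing is
///   a_out = a_src + a_dst * (1 - a_src)
///   c_out = (c_src * a_src + c_dst * a_dst * (1 - a_src)) / a_out
/// `outa` stores 255^2 * a_out, so each channel expression and the final
/// `outa / 255` recover the 0-255 range using integer arithmetic only.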
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Works on f32 with gamma correction of 2.2 power. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending + https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
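/// Channels are linearized with `powf(2.2)` before mixing and re-encoded with
/// `powf(1.0 / 2.2)` afterwards, so blending happens in (approximately)
/// linear light rather than on gamma-encoded values.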
pub fn ideal_blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8 | )
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
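// Returns the range of tile indices `k` for which a tile of length `len`
// placed at `pos + k * len` can still intersect `[0, size)`, found by a
// simple linear scan in both directions.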
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
pos_offset -= 1;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
pos_offset += 1;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
} | random_line_split |
|
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
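// In-place reinterpretation of `[Color]` as `[u32]`; the size/alignment
// assertions below are what make the pointer cast sound.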
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width *4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
        let needed_size = width * height * 4 * 12 / 10; // 20% headroom so repeated resizes rarely reallocate
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> {
0..(self.height as i32)
}
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
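// A minimal end-to-end sketch of the `Image` API above: allocate, paint one
// pixel, save. The output path is a placeholder, and `set_pixel` is the free
// function defined later in this file.
#[cfg(test)]
#[test]
fn write_small_png() -> Result<(), std::io::Error> {
    let mut img = Image::new(&Vec2i::new(4, 4));
    img.clear(&Color::rgb(20, 20, 20));
    set_pixel(&mut img, &Vec2i::new(1, 1), &Color::rgb(255, 0, 0));
    img.save_png(Path::new("tiny.png"))
}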
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
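// `to_u32`/`from_u32` pack the channels little-endian, so the value is always
// a << 24 | b << 16 | g << 8 | r; on a little-endian target the in-memory
// bytes are [r, g, b, a], which is what keeps the `convert` transmute above
// consistent with `#[repr(C)] Color`. A hedged round-trip check:
#[cfg(test)]
#[test]
fn color_u32_round_trip() {
    let c = Color::rgba(0x11, 0x22, 0x33, 0x44);
    assert_eq!(c.to_u32(), 0x4433_2211);
    let back = Color::from_u32(c.to_u32());
    assert_eq!((back.r, back.g, back.b, back.a), (0x11, 0x22, 0x33, 0x44));
}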
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
    set_pixel(image, pos, &blend(color, &get_pixel(image, pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
            if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
        if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
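// Hedged usage sketch for `place_image_scaled`: nearest-neighbor upscaling,
// where each source pixel becomes a `scale` x `scale` block; clipping against
// the destination bounds is handled by the `intersect_range` calls above.
#[cfg(test)]
#[test]
fn scaled_placement_expands_pixels() {
    let mut src = Image::new(&Vec2i::new(2, 2));
    set_pixel(&mut src, &Vec2i::new(0, 0), &Color::rgb(255, 0, 0));
    let mut dst = Image::new(&Vec2i::new(10, 10));
    place_image_scaled(&mut dst, &src, &Vec2i::new(1, 1), 3);
    // The red source pixel should cover dst (1,1)..(4,4).
    let hit = get_pixel(&dst, &Vec2i::new(3, 3));
    assert_eq!(hit.to_u32(), Color::rgb(255, 0, 0).to_u32());
}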
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast integer blend without gamma correction or premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 | else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Blends in f32 with gamma correction (power 2.2). Sources: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending and https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn ideal_blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8
)
}
}
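// Why `ideal_blend` exists: blending 50%-alpha white over black with the raw
// 8-bit `blend` above yields mid-gray (~128), while blending in linear light
// (gamma 2.2) yields the perceptually brighter ~0.5^(1/2.2) * 255 = ~186.
// A hedged comparison sketch:
#[cfg(test)]
#[test]
fn gamma_corrected_blend_is_brighter() {
    let half_white = Color::rgba(255, 255, 255, 128);
    let black = Color::rgb(0, 0, 0);
    let naive = blend(&half_white, &black);
    let linear = ideal_blend(&half_white, &black);
    assert!(linear.r > naive.r); // roughly 186 vs 128
}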
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
                    pos_offset -= 1;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
                    pos_offset += 1;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
}
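// Hedged sketch: tiling a 16x16 texture across a canvas at 2x scale. With
// both repeat flags set, `calc_range_for_repeated_line` widens the tile index
// range until `pos + i * len` leaves the visible span, so the canvas edges
// stay covered even for a negative starting offset.
#[cfg(test)]
#[test]
fn tiling_covers_the_whole_canvas() {
    let mut canvas = Image::new(&Vec2i::new(64, 48));
    let mut tile = Image::new(&Vec2i::new(16, 16));
    tile.clear(&Color::rgb(0, 255, 0));
    place_repeated_scaled_image(&mut canvas, &tile, &Vec2i::new(-5, -5), 2, true, true);
    let corner = get_pixel(&canvas, &Vec2i::new(63, 47));
    assert_eq!(corner.to_u32(), Color::rgb(0, 255, 0).to_u32());
}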
| {
Color::rgba(0, 0, 0, 0)
} | conditional_block |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
    fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width * 4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
        let needed_size = width * height * 4 * 12 / 10; // 20% headroom so repeated resizes rarely reallocate
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> |
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
    set_pixel(image, pos, &blend(color, &get_pixel(image, pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
            if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
        if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
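// Hedged sketch contrasting the two compositing entry points above:
// `place_image` copies source pixels verbatim (alpha included), while
// `draw_image` alpha-blends them over the destination via `blend`.
#[cfg(test)]
#[test]
fn place_overwrites_while_draw_blends() {
    let mut canvas = Image::new(&Vec2i::new(8, 8));
    canvas.clear(&Color::rgb(0, 0, 255));
    let mut sprite = Image::new(&Vec2i::new(4, 4));
    sprite.clear(&Color::rgba(255, 0, 0, 128));
    place_image(&mut canvas, &sprite, &Vec2i::new(0, 0));
    assert_eq!(get_pixel(&canvas, &Vec2i::new(0, 0)).a, 128); // copied verbatim
    canvas.clear(&Color::rgb(0, 0, 255));
    draw_image(&mut canvas, &sprite, &Vec2i::new(0, 0));
    assert_eq!(get_pixel(&canvas, &Vec2i::new(0, 0)).a, 255); // blended over opaque
}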
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
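// Hedged sketch for `function_for_all_pixels`: the callback receives (x, y)
// pixel coordinates in row-major order, so a horizontal gradient is one line.
#[cfg(test)]
#[test]
fn gradient_fill() {
    let mut img = Image::new(&Vec2i::new(256, 1));
    function_for_all_pixels(&mut img, |x, _y| Color::gray(x as u8));
    assert_eq!(get_pixel(&img, &Vec2i::new(200, 0)).r, 200);
}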
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast integer blend without gamma correction or premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Blends in f32 with gamma correction (power 2.2). Sources: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending and https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn ideal_blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8
)
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
                    pos_offset -= 1;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
                    pos_offset += 1;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
}
| {
0..(self.height as i32)
} | identifier_body |
image.rs | use core::ops::Range;
use crate::ImageTrait;
use crate::vec::*;
use crate::rangetools::*;
use std::path::Path;
use static_assertions::*;
pub enum PixelPos {
R,
G,
B,
A,
}
pub fn convert(slice: &mut [Color]) -> &mut [u32] {
assert_eq_size!(Color, u32);
assert_eq_align!(Color, u32);
unsafe { std::slice::from_raw_parts_mut(slice.as_mut_ptr() as *mut u32, slice.len()) }
}
#[derive(Clone)]
pub struct Image {
pub buffer: Vec<u8>,
pub width: usize,
pub height: usize,
}
impl ImageTrait for Image {
    fn get_rgba8_buffer(&self) -> &[u8] { &self.buffer[0..(self.height * self.width * 4)] }
fn get_width(&self) -> usize { self.width }
fn get_height(&self) -> usize { self.height }
}
#[derive(Clone, Debug)]
#[repr(C, align(4))]
pub struct Color {
pub r: u8,
pub g: u8,
pub b: u8,
pub a: u8,
}
impl Image {
pub fn get_u32_buffer(&self) -> &[u32] {
let len = self.height * self.width;
let buffer = &self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn get_u32_mut_buffer(&mut self) -> &mut [u32] {
let len = self.height * self.width;
let buffer = &mut self.buffer[0..(len * 4)];
unsafe {
let (prefix, shorts, suffix) = buffer.align_to_mut();
assert!(prefix.is_empty());
assert!(suffix.is_empty());
shorts
}
}
pub fn new(size: &Vec2i) -> Image {
let width = size.x as usize;
let height = size.y as usize;
Image {
buffer: vec![0; width * height * 4],
width,
height,
}
}
pub fn resize_lazy(&mut self, size: &Vec2i) {
let width = size.x as usize;
let height = size.y as usize;
        let needed_size = width * height * 4 * 12 / 10; // 20% headroom so repeated resizes rarely reallocate
if self.buffer.len() < needed_size {
self.buffer.resize(needed_size, 0);
}
self.width = width;
self.height = height;
}
#[inline]
pub fn clear(&mut self, color: &Color) {
let color = color.to_u32();
for pix in self.get_u32_mut_buffer() {
*pix = color;
}
}
#[inline]
pub fn get_rect(&self) -> Rect2i {
Rect2i {
min: Vec2i::default(),
max: Vec2i::new(self.width as i32, self.height as i32),
}
}
#[inline]
pub fn range_x(&self) -> Range<i32> {
0..(self.width as i32)
}
#[inline]
pub fn range_y(&self) -> Range<i32> {
0..(self.height as i32)
}
pub fn save_png(&self, path: &Path) -> Result<(), std::io::Error> {
use std::fs::File;
use std::io::BufWriter;
let file = File::create(path)?;
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, self.width as u32, self.height as u32);
encoder.set_color(png::ColorType::RGBA);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header()?;
writer.write_image_data(&self.buffer)?;
Ok(())
}
}
impl Color {
#[inline]
pub fn rgba(r: u8, g: u8, b: u8, a: u8) -> Color {
Color { r, g, b, a }
}
#[inline]
pub fn rgba_f64(r: f64, g: f64, b: f64, a: f64) -> Color {
Color {
r: (r * 255.0) as u8,
g: (g * 255.0) as u8,
b: (b * 255.0) as u8,
a: (a * 255.0) as u8,
}
}
#[inline]
pub fn to_rgba_f64(&self) -> (f64, f64, f64, f64) {
(
self.r as f64 / 255.0,
self.g as f64 / 255.0,
self.b as f64 / 255.0,
self.a as f64 / 255.0,
)
}
#[inline]
pub fn rgb(r: u8, g: u8, b: u8) -> Color {
Color::rgba(r, g, b, 255)
}
#[inline]
pub fn gray(rgb: u8) -> Color {
Color::rgb(rgb, rgb, rgb)
}
#[inline]
pub fn from_u32(v: u32) -> Self {
let res = u32::to_le_bytes(v);
Color::rgba(res[0], res[1], res[2], res[3])
}
#[inline]
pub fn to_u32(&self) -> u32 {
u32::from_le_bytes([self.r, self.g, self.b, self.a])
}
}
#[inline]
pub fn get_pixel(image: &Image, pos: &Vec2i) -> Color {
Color::from_u32(image.get_u32_buffer()[pos.x as usize + pos.y as usize * image.width])
}
#[inline]
pub fn set_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
let width = image.width;
image.get_u32_mut_buffer()[pos.x as usize + pos.y as usize * width] = color.to_u32();
}
#[inline]
pub fn draw_pixel(image: &mut Image, pos: &Vec2i, color: &Color) {
    set_pixel(image, pos, &blend(color, &get_pixel(image, pos)));
}
fn for_two_images<F: Fn(&mut u32, &u32)>(dst: &mut Image, src: &Image, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&src.range_y(), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&src.range_x(), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_x_range = offset_range(&src_x_range, src_y_range.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
let src_slice = &src_buf[range_to_usize(&src_x_range)];
for (pix_dst, pix_src) in dst_slice.iter_mut().zip(src_slice.iter()) {
f(pix_dst, pix_src);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
src_x_range = offset_range(&src_x_range, src_width);
}
}
pub fn place_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| *pix_dst = *pix_src);
}
pub fn place_image_scaled(dst: &mut Image, src: &Image, pos: &Vec2i, scale: i32) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..(src.height as i32 * scale)), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..(src.width as i32 * scale)), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let src_y_range = offset_range(&dst_y_range, -pos.y);
let src_x_range = offset_range(&dst_x_range, -pos.x);
let src_y_range_slice = div_range(&src_y_range, scale);
let src_x_range_slice = div_range(&src_x_range, scale);
let dst_width = dst.width as i32;
let src_width = src.width as i32;
let mut dst_pos_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let mut src_pos_range = offset_range(&src_x_range_slice, src_y_range_slice.start * src_width);
let dst_buf = dst.get_u32_mut_buffer();
let src_buf = src.get_u32_buffer();
let mut current_y = src_y_range.start / scale;
for src_y in src_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_pos_range)];
let src_slice = &src_buf[range_to_usize(&src_pos_range)];
let mut current_x = src_x_range_slice.start;
let mut src_iter = src_slice.iter();
let mut pix_src = src_iter.next().unwrap();
for (pix_dst, src_x) in dst_slice.iter_mut().zip(src_x_range.clone()) {
            if src_x / scale != current_x {
pix_src = src_iter.next().unwrap();
current_x = src_x / scale;
}
*pix_dst = *pix_src;
}
dst_pos_range = offset_range(&dst_pos_range, dst_width);
        if src_y / scale != current_y {
src_pos_range = offset_range(&src_pos_range, src_width);
current_y = src_y / scale;
}
}
}
pub fn draw_image(dst: &mut Image, src: &Image, pos: &Vec2i) {
for_two_images(dst, src, pos, |pix_dst, pix_src| {
*pix_dst = blend(&Color::from_u32(*pix_src), &Color::from_u32(*pix_dst)).to_u32();
});
}
#[inline]
pub fn function_for_all_pixels<F: FnMut(usize, usize) -> Color>(image: &mut Image, mut f: F) {
let height = image.height;
let width = image.width;
let mut iter = image.get_u32_mut_buffer().iter_mut();
for y in 0..height {
for x in 0..width {
let color = f(x, y);
if let Some(c) = iter.next() {
*c = color.to_u32();
}
}
}
}
fn for_image_and_rect<F: Fn(&mut u32)>(dst: &mut Image, rect_size: &Vec2i, pos: &Vec2i, f: F) {
let dst_y_range = intersect_range(
&dst.range_y(),
&offset_range(&(0..rect_size.y), pos.y)
);
let dst_x_range = intersect_range(
&dst.range_x(),
&offset_range(&(0..rect_size.x), pos.x)
);
if dst_x_range.end == dst_x_range.start {
return;
}
let dst_width = dst.width as i32;
let mut dst_x_range = offset_range(&dst_x_range, dst_y_range.start * dst_width);
let dst_buf = dst.get_u32_mut_buffer();
for _ in dst_y_range {
let dst_slice = &mut dst_buf[range_to_usize(&dst_x_range)];
for pix_dst in dst_slice.iter_mut() {
f(pix_dst);
}
dst_x_range = offset_range(&dst_x_range, dst_width);
}
}
#[inline]
pub fn draw_rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
for_image_and_rect(image, size, pos, |pix| {
*pix = blend(&color, &Color::from_u32(*pix)).to_u32();
});
}
#[inline]
pub fn rect(image: &mut Image, pos: &Vec2i, size: &Vec2i, color: &Color) {
let color = color.to_u32();
for_image_and_rect(image, size, pos, |pix| *pix = color);
}
#[inline]
/// Fast integer blend without gamma correction or premultiplied alpha. Source: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
pub fn blend(src: &Color, dst: &Color) -> Color {
let srca = src.a as i32;
let dsta = dst.a as i32;
let outa = (srca + dsta) * 255 - srca * dsta;
macro_rules! blend {
($src:expr, $dst:expr) => {
((255 * ($src as i32) * srca + ($dst as i32) * dsta * (255 - srca)) / outa) as u8
};
}
if outa == 0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa / 255) as u8
)
}
}
#[inline]
/// Blends in f32 with gamma correction (power 2.2). Sources: https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending and https://en.wikipedia.org/wiki/Alpha_compositing#Composing_alpha_blending_with_gamma_correction
pub fn | (src: &Color, dst: &Color) -> Color {
let srca = src.a as f32 / 255.0;
let dsta = dst.a as f32 / 255.0;
let outa = 1. - (1. - srca) * (1. - dsta);
macro_rules! blend {
($src:expr, $dst:expr) => {
(((($src as f32 / 255.0).powf(2.2) * srca + ($dst as f32 / 255.0).powf(2.2) * dsta * (1.0 - srca)) / outa).powf(1. / 2.2) * 255.0) as u8
};
}
if outa == 0.0 {
Color::rgba(0, 0, 0, 0)
} else {
Color::rgba(
blend!(src.r, dst.r),
blend!(src.g, dst.g),
blend!(src.b, dst.b),
(outa * 255.0) as u8
)
}
}
pub fn place_repeated_scaled_image(image: &mut Image, repeated_image: &Image, pos: &Vec2i, scale: i32, repeat_x: bool, repeat_y: bool) {
let size = Vec2i::new(repeated_image.get_width() as i32, repeated_image.get_height() as i32) * scale;
let range_x = calc_range_for_repeated_line(repeat_x, pos.x, size.x, image.get_width() as i32);
let range_y = calc_range_for_repeated_line(repeat_y, pos.y, size.y, image.get_height() as i32);
for y in range_y {
for x in range_x.clone() {
place_image_scaled(image, repeated_image, &(Vec2i::new(
x * size.x,
y * size.y
) + pos), scale);
}
}
fn calc_range_for_repeated_line(repeat: bool, pos: i32, len: i32, size: i32) -> std::ops::Range<i32> {
if repeat {
let minus = {
let mut pos_offset = 0;
while pos + pos_offset * len >= -len {
                    pos_offset -= 1;
}
pos_offset
};
let plus = {
let mut pos_offset = 0;
while pos + pos_offset * len < size {
                    pos_offset += 1;
}
pos_offset
};
minus..plus
} else {
0i32..1i32
}
}
}
| ideal_blend | identifier_name |
modular.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use authentication::perform_authentication;
use futures::{
future::{self, TryFutureExt},
Future, Stream, StreamExt, TryStreamExt,
};
use std::sync::Arc;
use tokio::sync::broadcast::{channel as event_channel, Sender as Broadcaster};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use crate::{
common::{
authentication::{
self, AuthenticationError, AuthenticationHandler, AuthenticationHandlingError,
},
protocol::{
negotiation::{self, NegotiationError, NegotiationService},
request_handler::RequestClientHandler,
traits::{
SerializedTunnelRegistry, ServiceRegistry, TunnelNamingError, TunnelRegistrationError,
TunnelRegistry,
},
tunnel::{
self, id::TunnelIDGenerator, Tunnel, TunnelDownlink, TunnelError, TunnelId,
TunnelIncomingType, TunnelName,
},
RouteAddress, Router,
},
},
util::tunnel_stream::WrappedStream,
};
pub struct ModularDaemon<TTunnel> {
  service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
  tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
  router: Arc<dyn Router + Send + Sync + 'static>,
  request_handler: Arc<RequestClientHandler>,
  authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
  tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
// event hooks
pub tunnel_connected: Broadcaster<(TunnelId, Arc<TTunnel>)>,
pub tunnel_authenticated: Broadcaster<(TunnelId, TunnelName, Arc<TTunnel>)>,
pub tunnel_disconnected:
Broadcaster<(TunnelId, Option<TunnelName> /*, DisconnectReason? */)>,
}
impl<TTunnel> ModularDaemon<TTunnel> {
pub fn requests<'a>(&'a self) -> &Arc<RequestClientHandler> {
&self.request_handler
}
fn authenticate_tunnel<'a>(
self: &Arc<Self>,
tunnel: tunnel::ArcTunnel<'a>,
shutdown: &CancellationToken,
) -> impl Future<Output = Result<Option<(tunnel::TunnelName, tunnel::ArcTunnel<'a>)>, anyhow::Error>>
+ 'a {
let shutdown = shutdown.clone();
let authentication_handler = Arc::clone(&self.authentication_handler);
async move {
let result = perform_authentication(
authentication_handler.as_ref(),
tunnel.as_ref(),
&shutdown.into(),
)
.await;
match result {
Err(AuthenticationError::Handling(AuthenticationHandlingError::FatalApplicationError(
fatal_error,
))) => {
tracing::error!(reason=?fatal_error, "Authentication encountered fatal error!");
anyhow::Context::context(
Err(fatal_error),
"Fatal error encountered while handling authentication",
)
}
Err(AuthenticationError::Handling(handling_error)) => {
// Non-fatal handling errors are passed to tracing and close the tunnel
tracing::warn!(
reason = (&handling_error as &dyn std::error::Error),
"Tunnel closed due to authentication handling failure"
);
Ok(None)
}
Err(AuthenticationError::Remote(remote_error)) => {
tracing::debug!(
reason = (&remote_error as &dyn std::error::Error),
"Tunnel closed due to remote authentication failure"
);
Ok(None)
}
Ok(tunnel_name) => Ok(Some((tunnel_name, tunnel))),
}
}
}
}
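// The three broadcast fields above are plain tokio broadcast senders, so
// observers subscribe directly. A hedged sketch, assuming some concrete
// `TTunnel` and an existing `daemon: Arc<ModularDaemon<TTunnel>>`:
//
//   let mut connected = daemon.tunnel_connected.subscribe();
//   tokio::spawn(async move {
//     while let Ok((id, _tunnel)) = connected.recv().await {
//       tracing::info!(?id, "tunnel connected");
//     }
//   });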
impl<TTunnel> ModularDaemon<TTunnel>
where
  Self: 'static,
{
pub fn new(
    service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
    tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
    router: Arc<dyn Router + Send + Sync + 'static>,
    authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
    tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
) -> Self {
Self {
request_handler: Arc::new(RequestClientHandler::new(
Arc::clone(&tunnel_registry),
Arc::clone(&service_registry),
Arc::clone(&router),
)),
service_registry,
tunnel_registry,
router,
authentication_handler,
tunnel_id_generator,
// For event handlers, we simply drop the receive sides,
// as new ones can be made with Sender::subscribe(&self)
tunnel_connected: event_channel(32).0,
tunnel_authenticated: event_channel(32).0,
tunnel_disconnected: event_channel(32).0,
}
}
/// Run the server against a tunnel_source.
///
/// This can be performed concurrently against multiple sources, with a shared server instance.
/// The implementation assumes that shutdown_request_listener will also halt the tunnel_source.
pub fn run<TunnelSource, TIntoTunnel>(
self: Arc<Self>,
tunnel_source: TunnelSource,
shutdown_request_listener: CancellationToken,
) -> tokio::task::JoinHandle<()>
where
    TunnelSource: Stream<Item = TIntoTunnel> + Send + 'static,
    TIntoTunnel: Into<TTunnel>,
    TTunnel: Tunnel + 'static,
{
let this = Arc::clone(&self);
// Pipeline phases:
    // Attach baggage - Arcs must be cloned once per incoming tunnel if that tunnel needs access to them.
    // The baggage-attachment phase takes the initial Arc items and clones them per-stream.
    // It also generates a u64 ID for the tunnel, using a naive interlocked/atomic counter.
let pipeline = tunnel_source
.take_until({
let shutdown_request_listener = shutdown_request_listener.clone();
async move { shutdown_request_listener.cancelled().await }
})
.scan(
(this, shutdown_request_listener),
|(this, shutdown_request_listener), tunnel| {
let id = this.tunnel_id_generator.next();
let tunnel: TTunnel = tunnel.into();
future::ready(Some((
tunnel,
id,
this.clone(),
shutdown_request_listener.clone(),
)))
},
);
// Tunnel Lifecycle - Sub-pipeline performed by futures on a per-tunnel basis
    // This could be done at the stream level, but Rust-Analyzer struggles to
    // resolve stream associated types at this level.
let pipeline = pipeline.for_each_concurrent(
None,
|(tunnel, id, this, shutdown_request_listener)| async move {
let tunnel = Arc::new(tunnel);
if let Err(e) = this
.tunnel_lifecycle(id, tunnel, shutdown_request_listener)
.await
{
tracing::debug!(error=?e, "tunnel lifetime exited with error");
}
},
);
// Spawn an instrumented task for the server which will return
// when all connections shut down and the tunnel source closes
tokio::task::spawn(pipeline.instrument(tracing::span!(tracing::Level::INFO, "modular_server")))
}
}
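// Hedged sketch of driving `run`: the caller owns the CancellationToken and,
// per the doc comment above, is also responsible for making the tunnel source
// stop on shutdown. `make_tunnel_source()` is a placeholder for any `Stream`
// of incoming tunnels.
//
//   let shutdown = CancellationToken::new();
//   let server = daemon.clone().run(make_tunnel_source(), shutdown.clone());
//   // ... later, on SIGINT or similar:
//   shutdown.cancel();
//   server.await.expect("server task panicked");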
#[derive(thiserror::Error, Debug)]
enum TunnelLifecycleError {
#[error(transparent)]
RegistrationError(#[from] TunnelRegistrationError),
#[error(transparent)]
RegistryNamingError(#[from] TunnelNamingError),
#[error(transparent)]
RequestProcessingError(RequestProcessingError),
#[error("Authentication refused to remote by either breach of protocol or invalid/inadequate credentials")]
AuthenticationRefused,
#[error("Fatal error encountered in tunnel lifecycle: {0:?}")]
FatalError(anyhow::Error),
}
#[derive(thiserror::Error, Debug)]
enum RequestProcessingError {
#[error("Protocol version mismatch")]
UnsupportedProtocolVersion,
#[error("Tunnel error encountered: {0}")]
TunnelError(TunnelError),
#[error(transparent)]
FatalError(anyhow::Error),
}
impl From<RequestProcessingError> for TunnelLifecycleError {
fn from(e: RequestProcessingError) -> TunnelLifecycleError {
match e {
RequestProcessingError::FatalError(fatal_error) => {
TunnelLifecycleError::FatalError(fatal_error)
}
non_fatal => TunnelLifecycleError::RequestProcessingError(non_fatal),
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
  TTunnel: Tunnel + 'static,
{
fn tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
  ) -> impl Future<Output = Result<(), TunnelLifecycleError>> + 'static {
async move {
// A registry mutex that prevents us from racing when calling the registry for
// this particular tunnel entry. This should also be enforced at the registry level.
      let serialized_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static> =
        Arc::new(SerializedTunnelRegistry::new(Arc::clone(&self.tunnel_registry)));
// Tunnel registration - The tunnel registry is called to imbue the tunnel with an ID
{
let tunnel_registry = Arc::clone(&serialized_registry);
Self::register_tunnel(id, Arc::clone(&tunnel), tunnel_registry)
.instrument(tracing::span!(tracing::Level::DEBUG, "registration",?id))
}.await?;
// Send tunnel_connected event once the tunnel is successfully registered to its ID
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self.tunnel_connected.send((id, tunnel.clone()));
// From here on, any failure must trigger attempted deregistration of the tunnel,
// So further phases return their result to check for failures, which then result
// in a deregistration call.
// Phases resume in registered_tunnel_lifecycle.
let tunnel_registry = Arc::clone(&serialized_registry);
match self.registered_tunnel_lifecycle(id, tunnel, shutdown, tunnel_registry).await {
Ok(lifecycle_result) => Ok(lifecycle_result),
Err(e) => {
let deregistered = serialized_registry.deregister_tunnel(id).await.ok();
match &e {
&TunnelLifecycleError::AuthenticationRefused => tracing::debug!(err=?e, record=?deregistered, "Deregistered due to authentication refusal"),
e => tracing::info!(err=?e, record=?deregistered, "Deregistered due to lifecycle error")
}
Err(e)
}
}
    }.instrument(tracing::span!(tracing::Level::DEBUG, "tunnel", ?id))
}
async fn registered_tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
    serialized_tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
) -> Result<(), TunnelLifecycleError> {
// Authenticate connections - Each connection will be piped into the authenticator,
// which has the option of declining the connection, and may save additional metadata.
let tunnel_authentication = {
self
.authenticate_tunnel(tunnel.clone(), &shutdown)
.instrument(tracing::span!(tracing::Level::DEBUG, "authentication",?id))
.map_err(TunnelLifecycleError::FatalError)
};
let tunnel_name = match tunnel_authentication.await? {
Some((tunnel_name, _tunnel_dyn)) => tunnel_name,
None => |
};
// Tunnel naming - The tunnel registry is notified of the authenticator-provided tunnel name
{
let tunnel_registry = Arc::clone(&serialized_tunnel_registry);
Self::name_tunnel(id, tunnel_name.clone(), tunnel_registry).instrument(tracing::span!(
tracing::Level::DEBUG,
"naming",
?id
))
}
.await?;
// Send tunnel_authenticated event for the newly-named tunnel, once the registry is aware of it
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self
.tunnel_authenticated
.send((id, tunnel_name.clone(), tunnel.clone()));
// Process incoming requests until the incoming channel is closed.
{
let service_registry = Arc::clone(&self.service_registry);
Self::handle_incoming_requests(
id,
tunnel
.downlink()
.await
.ok_or(TunnelLifecycleError::RequestProcessingError(
RequestProcessingError::TunnelError(TunnelError::ConnectionClosed),
))?,
service_registry,
shutdown,
)
.instrument(tracing::span!(
tracing::Level::DEBUG,
"request_handling",
?id
))
}
.await?;
// Deregister closed tunnels after graceful exit
let _record = serialized_tunnel_registry.deregister_tunnel(id).await;
// TODO: Find a way to call self.tunnel_disconnected automatically, and simplify deregistration code path
// Otherwise, these deregister calls are an absurd amount of complexity.
// Maybe use drop semantics paired with a cancellation token and a task?
Ok(())
}
// Process incoming requests until the incoming channel is closed.
// Await a tunnel closure request from the host, or for the tunnel to close on its own.
// A tunnel has "closed on its own" if incoming closes *or* outgoing requests fail with
// a notification that the outgoing channel has been closed.
//
// The request handler for this side should be configured to send a close request for
// the tunnel with the given ID when it sees a request fail due to tunnel closure.
// TODO: configure request handler (?) to do that using a std::sync::Weak<ModularDaemon>.
async fn handle_incoming_requests<TDownlink: TunnelDownlink>(
id: TunnelId,
mut incoming: TDownlink,
    service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError> {
let negotiator = Arc::new(NegotiationService::new(service_registry));
incoming
.as_stream()
// Stop accepting new requests after a graceful shutdown is requested
.take_until(shutdown.clone().cancelled())
.map_err(|e: TunnelError| RequestProcessingError::TunnelError(e))
.scan((negotiator, shutdown), |(negotiator, shutdown), link| {
let res = link.map(|content| (Arc::clone(&*negotiator), shutdown.clone(), content));
future::ready(Some(res))
})
.try_for_each_concurrent(None, |(negotiator, shutdown, link)| {
Self::handle_incoming_request(id, link, negotiator, shutdown)
})
.await?;
Ok(())
}
async fn handle_incoming_request<Services>(
id: TunnelId,
link: TunnelIncomingType,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError>
where
    Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match link {
tunnel::TunnelIncomingType::BiStream(link) => {
Self::handle_incoming_request_bistream(id, link, negotiator, shutdown).await
}
}
}
async fn handle_incoming_request_bistream<Services>(
tunnel_id: TunnelId,
link: WrappedStream,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken, // TODO: Respond to shutdown listener requests
) -> Result<(), RequestProcessingError>
where
    Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match negotiator.negotiate(link, tunnel_id).await {
// Tunnels established on an invalid negotiation protocol are useless; consider this fatal
Err(NegotiationError::UnsupportedProtocolVersion) => {
Err(RequestProcessingError::UnsupportedProtocolVersion)
}
// Protocol violations are not considered fatal, as they do not affect other links
// They do still destroy the current link, however.
Err(NegotiationError::ProtocolViolation) => Ok(()),
Err(NegotiationError::ReadError) => Ok(()),
Err(NegotiationError::WriteError) => Ok(()),
// Generic refusal for when a service doesn't accept a route for whatever reason
Err(NegotiationError::Refused) => {
tracing::debug!("Refused remote protocol request");
Ok(())
}
// Lack of support for a service is just a more specific refusal
Err(NegotiationError::UnsupportedServiceVersion) => {
tracing::debug!("Refused request due to unsupported service version");
Ok(())
}
Err(NegotiationError::ApplicationError(e)) => {
tracing::warn!(err=?e, "Refused request due to application error in negotiation");
Ok(())
}
Err(NegotiationError::FatalError(e)) => {
tracing::error!(err=?e, "Refused request due to fatal application error in negotiation");
Err(RequestProcessingError::FatalError(
NegotiationError::FatalError(e).into(),
))
}
Ok((link, route_addr, service)) => {
if shutdown.is_cancelled() {
// Drop services post-negotiation if the connection is awaiting
// shutdown, instead of handing them to the service to be performed.
return Ok(());
}
let route_addr: RouteAddress = route_addr;
let service: negotiation::ArcService = service;
match service
.handle(route_addr.clone(), Box::new(link), tunnel_id)
.await
{
// TODO: Figure out which of these should be considered fatal to the tunnel, if any
Err(e) => {
tracing::debug!(
address = route_addr.as_str(),
error =?e,
"Protocol Service responded with non-fatal error"
);
Ok(())
}
Ok(()) => {
tracing::trace!(
address = route_addr.as_str(),
"Protocol Service reported success"
);
Ok(())
}
}
}
}
}
async fn register_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel: Arc<TTunnel>,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelRegistrationError>
where
    TTunnelRegistry: std::ops::Deref + Send + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let registration = async move {
tunnel_registry
.register_tunnel(id, tunnel)
.map_err(|e| match e {
TunnelRegistrationError::IdOccupied(id) => {
tracing::error!(?id, "ID occupied; dropping tunnel");
TunnelRegistrationError::IdOccupied(id)
}
TunnelRegistrationError::NameOccupied(name) => {
// This error indicates that the tunnel registry is reporting names incorrectly, or
// holding entries from prior launches beyond the lifetime of the server that created them
tracing::error!(
"Name reported as occupied, but we haven't named this tunnel yet; dropping tunnel"
);
TunnelRegistrationError::NameOccupied(name)
}
TunnelRegistrationError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel registration");
TunnelRegistrationError::ApplicationError(e)
}
})
.await
};
tokio::spawn(registration).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelRegistrationError::ApplicationError(anyhow::Error::msg("Registration task cancelled"))
}
})?
}
async fn name_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel_name: TunnelName,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelNamingError>
where
    TTunnelRegistry: std::ops::Deref + Send + Sync + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let naming = async move {
tunnel_registry
.deref()
.name_tunnel(id, tunnel_name)
.map_err(|e| match e {
// If a tunnel registry wishes to keep a tunnel alive past a naming clash, it
// must rename the existing tunnel then name the new one, and report Ok here.
TunnelNamingError::NameOccupied(name) => {
tracing::error!(?id, "Name reports as occupied; dropping tunnel");
TunnelNamingError::NameOccupied(name)
}
TunnelNamingError::TunnelNotRegistered(id) => {
// This indicates out-of-order processing on per-tunnel events in the registry
          // To solve this, the tunnel registry must complete event processing in-order
// for events produced by a given tunnel's lifetime. The simplest way is to
// serialize all registry changes using a tokio::task with an ordered channel.
tracing::error!("Tunnel reported as not registered from naming task");
TunnelNamingError::TunnelNotRegistered(id)
}
TunnelNamingError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel naming");
TunnelNamingError::ApplicationError(e)
}
})
.await
};
tokio::spawn(naming).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelNamingError::ApplicationError(anyhow::Error::msg("Naming task cancelled"))
}
})?
}
}
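// The pipeline comment in `run` mentions a "naive interlocked/atomic counter"
// behind `TunnelIDGenerator`. A hedged sketch of such a generator; the real
// trait lives in `tunnel::id` and only the `next()` call seen in `run` is
// assumed here:
//
//   use std::sync::atomic::{AtomicU64, Ordering};
//
//   struct MonotonicIdGenerator(AtomicU64);
//
//   impl MonotonicIdGenerator {
//     fn next_raw(&self) -> u64 {
//       self.0.fetch_add(1, Ordering::Relaxed)
//     }
//   }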
| {
let _ = serialized_tunnel_registry.deregister_tunnel(id).await;
return Ok(());
} | conditional_block |
modular.rs | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use authentication::perform_authentication;
use futures::{
future::{self, TryFutureExt},
Future, Stream, StreamExt, TryStreamExt,
};
use std::sync::Arc;
use tokio::sync::broadcast::{channel as event_channel, Sender as Broadcaster};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use crate::{
common::{
authentication::{
self, AuthenticationError, AuthenticationHandler, AuthenticationHandlingError,
},
protocol::{
negotiation::{self, NegotiationError, NegotiationService},
request_handler::RequestClientHandler,
traits::{
SerializedTunnelRegistry, ServiceRegistry, TunnelNamingError, TunnelRegistrationError,
TunnelRegistry,
},
tunnel::{
self, id::TunnelIDGenerator, Tunnel, TunnelDownlink, TunnelError, TunnelId,
TunnelIncomingType, TunnelName,
},
RouteAddress, Router,
},
},
util::tunnel_stream::WrappedStream,
};
pub struct ModularDaemon<TTunnel> {
  service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
  tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
  router: Arc<dyn Router + Send + Sync + 'static>,
  request_handler: Arc<RequestClientHandler>,
  authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
  tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
// event hooks
pub tunnel_connected: Broadcaster<(TunnelId, Arc<TTunnel>)>,
pub tunnel_authenticated: Broadcaster<(TunnelId, TunnelName, Arc<TTunnel>)>,
pub tunnel_disconnected:
Broadcaster<(TunnelId, Option<TunnelName> /*, DisconnectReason? */)>,
}
impl<TTunnel> ModularDaemon<TTunnel> {
pub fn requests<'a>(&'a self) -> &Arc<RequestClientHandler> {
&self.request_handler
}
fn authenticate_tunnel<'a>(
self: &Arc<Self>,
tunnel: tunnel::ArcTunnel<'a>,
shutdown: &CancellationToken,
) -> impl Future<Output = Result<Option<(tunnel::TunnelName, tunnel::ArcTunnel<'a>)>, anyhow::Error>>
+ 'a {
let shutdown = shutdown.clone();
let authentication_handler = Arc::clone(&self.authentication_handler);
async move {
let result = perform_authentication(
authentication_handler.as_ref(),
tunnel.as_ref(),
&shutdown.into(),
)
.await;
match result {
Err(AuthenticationError::Handling(AuthenticationHandlingError::FatalApplicationError(
fatal_error,
))) => {
tracing::error!(reason=?fatal_error, "Authentication encountered fatal error!");
anyhow::Context::context(
Err(fatal_error),
"Fatal error encountered while handling authentication",
)
}
Err(AuthenticationError::Handling(handling_error)) => {
// Non-fatal handling errors are passed to tracing and close the tunnel
tracing::warn!(
reason = (&handling_error as &dyn std::error::Error),
"Tunnel closed due to authentication handling failure"
);
Ok(None)
}
Err(AuthenticationError::Remote(remote_error)) => {
tracing::debug!(
reason = (&remote_error as &dyn std::error::Error),
"Tunnel closed due to remote authentication failure"
);
Ok(None)
}
Ok(tunnel_name) => Ok(Some((tunnel_name, tunnel))),
}
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
  Self: 'static,
{
pub fn new(
    service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
    tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
    router: Arc<dyn Router + Send + Sync + 'static>,
    authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
    tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
) -> Self {
Self {
request_handler: Arc::new(RequestClientHandler::new(
Arc::clone(&tunnel_registry),
Arc::clone(&service_registry),
Arc::clone(&router),
)),
service_registry,
tunnel_registry,
router,
authentication_handler,
tunnel_id_generator,
// For event handlers, we simply drop the receive sides,
// as new ones can be made with Sender::subscribe(&self)
tunnel_connected: event_channel(32).0,
tunnel_authenticated: event_channel(32).0,
tunnel_disconnected: event_channel(32).0,
}
}
/// Run the server against a tunnel_source.
///
/// This can be performed concurrently against multiple sources, with a shared server instance.
/// The implementation assumes that shutdown_request_listener will also halt the tunnel_source.
pub fn run<TunnelSource, TIntoTunnel>(
self: Arc<Self>,
tunnel_source: TunnelSource,
shutdown_request_listener: CancellationToken,
) -> tokio::task::JoinHandle<()>
where
    TunnelSource: Stream<Item = TIntoTunnel> + Send + 'static,
    TIntoTunnel: Into<TTunnel>,
    TTunnel: Tunnel + 'static,
{
let this = Arc::clone(&self);
// Pipeline phases:
    // Attach baggage - Arcs must be cloned once per incoming tunnel if that tunnel needs access to them.
    // The baggage-attachment phase takes the initial Arc items and clones them per-stream.
    // It also generates a u64 ID for the tunnel, using a naive interlocked/atomic counter.
let pipeline = tunnel_source
.take_until({
let shutdown_request_listener = shutdown_request_listener.clone();
async move { shutdown_request_listener.cancelled().await }
})
.scan(
(this, shutdown_request_listener),
|(this, shutdown_request_listener), tunnel| {
let id = this.tunnel_id_generator.next();
let tunnel: TTunnel = tunnel.into();
future::ready(Some((
tunnel,
id,
this.clone(),
shutdown_request_listener.clone(),
)))
},
);
// Tunnel Lifecycle - Sub-pipeline performed by futures on a per-tunnel basis
    // This could be done at the stream level, but Rust-Analyzer struggles to
    // resolve stream associated types at this level.
let pipeline = pipeline.for_each_concurrent(
None,
|(tunnel, id, this, shutdown_request_listener)| async move {
let tunnel = Arc::new(tunnel);
if let Err(e) = this
.tunnel_lifecycle(id, tunnel, shutdown_request_listener)
.await
{
tracing::debug!(error=?e, "tunnel lifetime exited with error");
}
},
);
// Spawn an instrumented task for the server which will return
// when all connections shut down and the tunnel source closes
tokio::task::spawn(pipeline.instrument(tracing::span!(tracing::Level::INFO, "modular_server")))
}
}
#[derive(thiserror::Error, Debug)]
enum TunnelLifecycleError {
#[error(transparent)]
RegistrationError(#[from] TunnelRegistrationError),
#[error(transparent)]
RegistryNamingError(#[from] TunnelNamingError),
#[error(transparent)]
RequestProcessingError(RequestProcessingError),
#[error("Authentication refused to remote by either breach of protocol or invalid/inadequate credentials")]
AuthenticationRefused,
#[error("Fatal error encountered in tunnel lifecycle: {0:?}")]
FatalError(anyhow::Error),
}
#[derive(thiserror::Error, Debug)]
enum RequestProcessingError {
#[error("Protocol version mismatch")]
UnsupportedProtocolVersion,
#[error("Tunnel error encountered: {0}")]
TunnelError(TunnelError),
#[error(transparent)]
FatalError(anyhow::Error),
}
impl From<RequestProcessingError> for TunnelLifecycleError {
fn from(e: RequestProcessingError) -> TunnelLifecycleError {
match e {
RequestProcessingError::FatalError(fatal_error) => {
TunnelLifecycleError::FatalError(fatal_error)
}
non_fatal => TunnelLifecycleError::RequestProcessingError(non_fatal),
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
  TTunnel: Tunnel + 'static,
{
fn tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
  ) -> impl Future<Output = Result<(), TunnelLifecycleError>> + 'static {
async move {
// A registry mutex that prevents us from racing when calling the registry for
// this particular tunnel entry. This should also be enforced at the registry level.
      let serialized_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static> =
        Arc::new(SerializedTunnelRegistry::new(Arc::clone(&self.tunnel_registry)));
// Tunnel registration - The tunnel registry is called to imbue the tunnel with an ID
{
let tunnel_registry = Arc::clone(&serialized_registry);
Self::register_tunnel(id, Arc::clone(&tunnel), tunnel_registry)
.instrument(tracing::span!(tracing::Level::DEBUG, "registration",?id))
}.await?;
// Send tunnel_connected event once the tunnel is successfully registered to its ID
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self.tunnel_connected.send((id, tunnel.clone()));
// From here on, any failure must trigger attempted deregistration of the tunnel,
// So further phases return their result to check for failures, which then result
// in a deregistration call.
// Phases resume in registered_tunnel_lifecycle.
let tunnel_registry = Arc::clone(&serialized_registry);
match self.registered_tunnel_lifecycle(id, tunnel, shutdown, tunnel_registry).await {
Ok(lifecycle_result) => Ok(lifecycle_result),
Err(e) => {
let deregistered = serialized_registry.deregister_tunnel(id).await.ok();
match &e {
&TunnelLifecycleError::AuthenticationRefused => tracing::debug!(err=?e, record=?deregistered, "Deregistered due to authentication refusal"),
e => tracing::info!(err=?e, record=?deregistered, "Deregistered due to lifecycle error")
}
Err(e)
}
}
}.instrument(tracing::span!(tracing::Level::DEBUG, "tunnel",?id))
}
async fn registered_tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
serialized_tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync +'static>,
) -> Result<(), TunnelLifecycleError> {
// Authenticate connections - Each connection will be piped into the authenticator,
// which has the option of declining the connection, and may save additional metadata.
let tunnel_authentication = {
self
.authenticate_tunnel(tunnel.clone(), &shutdown)
.instrument(tracing::span!(tracing::Level::DEBUG, "authentication",?id))
.map_err(TunnelLifecycleError::FatalError)
};
let tunnel_name = match tunnel_authentication.await? {
Some((tunnel_name, _tunnel_dyn)) => tunnel_name,
None => {
let _ = serialized_tunnel_registry.deregister_tunnel(id).await;
return Ok(());
}
};
// Tunnel naming - The tunnel registry is notified of the authenticator-provided tunnel name
{
let tunnel_registry = Arc::clone(&serialized_tunnel_registry);
Self::name_tunnel(id, tunnel_name.clone(), tunnel_registry).instrument(tracing::span!(
tracing::Level::DEBUG,
"naming",
?id
))
}
.await?;
// Send tunnel_authenticated event for the newly-named tunnel, once the registry is aware of it
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self
.tunnel_authenticated
.send((id, tunnel_name.clone(), tunnel.clone()));
// Process incoming requests until the incoming channel is closed.
{
let service_registry = Arc::clone(&self.service_registry);
Self::handle_incoming_requests(
id,
tunnel
.downlink()
.await
.ok_or(TunnelLifecycleError::RequestProcessingError(
RequestProcessingError::TunnelError(TunnelError::ConnectionClosed),
))?,
service_registry,
shutdown,
)
.instrument(tracing::span!(
tracing::Level::DEBUG,
"request_handling",
?id
))
}
.await?;
// Deregister closed tunnels after graceful exit
let _record = serialized_tunnel_registry.deregister_tunnel(id).await;
// TODO: Find a way to call self.tunnel_disconnected automatically, and simplify deregistration code path
// Otherwise, these deregister calls are an absurd amount of complexity.
// Maybe use drop semantics paired with a cancellation token and a task?
Ok(())
}
// Process incoming requests until the incoming channel is closed.
// Await a tunnel closure request from the host, or for the tunnel to close on its own.
// A tunnel has "closed on its own" if incoming closes *or* outgoing requests fail with
// a notification that the outgoing channel has been closed.
//
// The request handler for this side should be configured to send a close request for
// the tunnel with the given ID when it sees a request fail due to tunnel closure.
// TODO: configure request handler (?) to do that using a std::sync::Weak<ModularDaemon>.
async fn handle_incoming_requests<TDownlink: TunnelDownlink>(
id: TunnelId,
mut incoming: TDownlink,
service_registry: Arc<dyn ServiceRegistry + Send + Sync +'static>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError> {
let negotiator = Arc::new(NegotiationService::new(service_registry));
incoming
.as_stream()
// Stop accepting new requests after a graceful shutdown is requested
.take_until(shutdown.clone().cancelled())
.map_err(|e: TunnelError| RequestProcessingError::TunnelError(e))
.scan((negotiator, shutdown), |(negotiator, shutdown), link| {
let res = link.map(|content| (Arc::clone(&*negotiator), shutdown.clone(), content));
future::ready(Some(res))
})
.try_for_each_concurrent(None, |(negotiator, shutdown, link)| {
Self::handle_incoming_request(id, link, negotiator, shutdown)
})
.await?;
Ok(())
}
async fn handle_incoming_request<Services>(
id: TunnelId,
link: TunnelIncomingType,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync +?Sized +'static, | }
}
}
async fn handle_incoming_request_bistream<Services>(
tunnel_id: TunnelId,
link: WrappedStream,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken, // TODO: Respond to shutdown listener requests
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync +?Sized +'static,
{
match negotiator.negotiate(link, tunnel_id).await {
// Tunnels established on an invalid negotiation protocol are useless; consider this fatal
Err(NegotiationError::UnsupportedProtocolVersion) => {
Err(RequestProcessingError::UnsupportedProtocolVersion)
}
// Protocol violations are not considered fatal, as they do not affect other links
// They do still destroy the current link, however.
Err(NegotiationError::ProtocolViolation) => Ok(()),
Err(NegotiationError::ReadError) => Ok(()),
Err(NegotiationError::WriteError) => Ok(()),
// Generic refusal for when a service doesn't accept a route for whatever reason
Err(NegotiationError::Refused) => {
tracing::debug!("Refused remote protocol request");
Ok(())
}
// Lack of support for a service is just a more specific refusal
Err(NegotiationError::UnsupportedServiceVersion) => {
tracing::debug!("Refused request due to unsupported service version");
Ok(())
}
Err(NegotiationError::ApplicationError(e)) => {
tracing::warn!(err=?e, "Refused request due to application error in negotiation");
Ok(())
}
Err(NegotiationError::FatalError(e)) => {
tracing::error!(err=?e, "Refused request due to fatal application error in negotiation");
Err(RequestProcessingError::FatalError(
NegotiationError::FatalError(e).into(),
))
}
Ok((link, route_addr, service)) => {
if shutdown.is_cancelled() {
// Drop services post-negotiation if the connection is awaiting
// shutdown, instead of handing them to the service to be performed.
return Ok(());
}
let route_addr: RouteAddress = route_addr;
let service: negotiation::ArcService = service;
match service
.handle(route_addr.clone(), Box::new(link), tunnel_id)
.await
{
// TODO: Figure out which of these should be considered fatal to the tunnel, if any
Err(e) => {
tracing::debug!(
address = route_addr.as_str(),
error =?e,
"Protocol Service responded with non-fatal error"
);
Ok(())
}
Ok(()) => {
tracing::trace!(
address = route_addr.as_str(),
"Protocol Service reported success"
);
Ok(())
}
}
}
}
}
async fn register_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel: Arc<TTunnel>,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelRegistrationError>
where
TTunnelRegistry: std::ops::Deref + Send +'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let registration = async move {
tunnel_registry
.register_tunnel(id, tunnel)
.map_err(|e| match e {
TunnelRegistrationError::IdOccupied(id) => {
tracing::error!(?id, "ID occupied; dropping tunnel");
TunnelRegistrationError::IdOccupied(id)
}
TunnelRegistrationError::NameOccupied(name) => {
// This error indicates that the tunnel registry is reporting names incorrectly, or
// holding entries from prior launches beyond the lifetime of the server that created them
tracing::error!(
"Name reported as occupied, but we haven't named this tunnel yet; dropping tunnel"
);
TunnelRegistrationError::NameOccupied(name)
}
TunnelRegistrationError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel registration");
TunnelRegistrationError::ApplicationError(e)
}
})
.await
};
tokio::spawn(registration).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelRegistrationError::ApplicationError(anyhow::Error::msg("Registration task cancelled"))
}
})?
}
async fn name_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel_name: TunnelName,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelNamingError>
where
TTunnelRegistry: std::ops::Deref + Send + Sync +'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let naming = async move {
tunnel_registry
.deref()
.name_tunnel(id, tunnel_name)
.map_err(|e| match e {
// If a tunnel registry wishes to keep a tunnel alive past a naming clash, it
// must rename the existing tunnel then name the new one, and report Ok here.
TunnelNamingError::NameOccupied(name) => {
tracing::error!(?id, "Name reports as occupied; dropping tunnel");
TunnelNamingError::NameOccupied(name)
}
TunnelNamingError::TunnelNotRegistered(id) => {
// This indicates out-of-order processing on per-tunnel events in the registry
// To solve this, the tunnel registry task complete event processing in-order
// for events produced by a given tunnel's lifetime. The simplest way is to
// serialize all registry changes using a tokio::task with an ordered channel.
tracing::error!("Tunnel reported as not registered from naming task");
TunnelNamingError::TunnelNotRegistered(id)
}
TunnelNamingError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel naming");
TunnelNamingError::ApplicationError(e)
}
})
.await
};
tokio::spawn(naming).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelNamingError::ApplicationError(anyhow::Error::msg("Naming task cancelled"))
}
})?
}
} | {
match link {
tunnel::TunnelIncomingType::BiStream(link) => {
Self::handle_incoming_request_bistream(id, link, negotiator, shutdown).await | random_line_split |
// modular.rs
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license OR Apache 2.0
use authentication::perform_authentication;
use futures::{
future::{self, TryFutureExt},
Future, Stream, StreamExt, TryStreamExt,
};
use std::sync::Arc;
use tokio::sync::broadcast::{channel as event_channel, Sender as Broadcaster};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use crate::{
common::{
authentication::{
self, AuthenticationError, AuthenticationHandler, AuthenticationHandlingError,
},
protocol::{
negotiation::{self, NegotiationError, NegotiationService},
request_handler::RequestClientHandler,
traits::{
SerializedTunnelRegistry, ServiceRegistry, TunnelNamingError, TunnelRegistrationError,
TunnelRegistry,
},
tunnel::{
self, id::TunnelIDGenerator, Tunnel, TunnelDownlink, TunnelError, TunnelId,
TunnelIncomingType, TunnelName,
},
RouteAddress, Router,
},
},
util::tunnel_stream::WrappedStream,
};
pub struct ModularDaemon<TTunnel> {
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
request_handler: Arc<RequestClientHandler>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
// event hooks
pub tunnel_connected: Broadcaster<(TunnelId, Arc<TTunnel>)>,
pub tunnel_authenticated: Broadcaster<(TunnelId, TunnelName, Arc<TTunnel>)>,
pub tunnel_disconnected:
Broadcaster<(TunnelId, Option<TunnelName> /*, DisconnectReason? */)>,
}
impl<TTunnel> ModularDaemon<TTunnel> {
pub fn requests<'a>(&'a self) -> &Arc<RequestClientHandler> {
&self.request_handler
}
fn authenticate_tunnel<'a>(
self: &Arc<Self>,
tunnel: tunnel::ArcTunnel<'a>,
shutdown: &CancellationToken,
) -> impl Future<Output = Result<Option<(tunnel::TunnelName, tunnel::ArcTunnel<'a>)>, anyhow::Error>>
+ 'a {
let shutdown = shutdown.clone();
let authentication_handler = Arc::clone(&self.authentication_handler);
async move {
let result = perform_authentication(
authentication_handler.as_ref(),
tunnel.as_ref(),
&shutdown.into(),
)
.await;
match result {
Err(AuthenticationError::Handling(AuthenticationHandlingError::FatalApplicationError(
fatal_error,
))) => {
tracing::error!(reason=?fatal_error, "Authentication encountered fatal error!");
anyhow::Context::context(
Err(fatal_error),
"Fatal error encountered while handling authentication",
)
}
Err(AuthenticationError::Handling(handling_error)) => {
// Non-fatal handling errors are passed to tracing and close the tunnel
tracing::warn!(
reason = (&handling_error as &dyn std::error::Error),
"Tunnel closed due to authentication handling failure"
);
Ok(None)
}
Err(AuthenticationError::Remote(remote_error)) => {
tracing::debug!(
reason = (&remote_error as &dyn std::error::Error),
"Tunnel closed due to remote authentication failure"
);
Ok(None)
}
Ok(tunnel_name) => Ok(Some((tunnel_name, tunnel))),
}
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
Self: 'static,
{
pub fn new(
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
router: Arc<dyn Router + Send + Sync + 'static>,
authentication_handler: Arc<dyn AuthenticationHandler + Send + Sync + 'static>,
tunnel_id_generator: Arc<dyn TunnelIDGenerator + Send + Sync + 'static>,
) -> Self {
Self {
request_handler: Arc::new(RequestClientHandler::new(
Arc::clone(&tunnel_registry),
Arc::clone(&service_registry),
Arc::clone(&router),
)),
service_registry,
tunnel_registry,
router,
authentication_handler,
tunnel_id_generator,
// For event handlers, we simply drop the receive sides,
// as new ones can be made with Sender::subscribe(&self); see the sketch below
tunnel_connected: event_channel(32).0,
tunnel_authenticated: event_channel(32).0,
tunnel_disconnected: event_channel(32).0,
}
}
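// Illustrative sketch only (hypothetical helper, not part of the original
// API): consuming the `tunnel_connected` hook. Each `subscribe` call makes
// an independent receiver, so observers can attach or detach at any time.
#[allow(dead_code)]
fn log_connections(&self)
where
    TTunnel: Send + Sync + 'static,
{
    let mut events = self.tunnel_connected.subscribe();
    tokio::spawn(async move {
        // recv() yields Err once the channel closes (or on lag); either ends the loop.
        while let Ok((id, _tunnel)) = events.recv().await {
            tracing::info!(?id, "tunnel connected");
        }
    });
}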
/// Run the server against a tunnel_source.
///
/// This can be performed concurrently against multiple sources, with a shared server instance.
/// The implementation assumes that shutdown_request_listener will also halt the tunnel_source.
pub fn run<TunnelSource, TIntoTunnel>(
self: Arc<Self>,
tunnel_source: TunnelSource,
shutdown_request_listener: CancellationToken,
) -> tokio::task::JoinHandle<()>
where
TunnelSource: Stream<Item = TIntoTunnel> + Send + 'static,
TIntoTunnel: Into<TTunnel>,
TTunnel: Tunnel + 'static,
{
let this = Arc::clone(&self);
// Pipeline phases:
// Attach baggage - Arcs need to be cloned once per incoming tunnel, if they need to access it
// The baggage attachment phase takes the initial Arc items and clones them per-stream
// This also generates a u64 as an ID for this tunnel, using a naive interlocked/atomic counter
let pipeline = tunnel_source
.take_until({
let shutdown_request_listener = shutdown_request_listener.clone();
async move { shutdown_request_listener.cancelled().await }
})
.scan(
(this, shutdown_request_listener),
|(this, shutdown_request_listener), tunnel| {
let id = this.tunnel_id_generator.next();
let tunnel: TTunnel = tunnel.into();
future::ready(Some((
tunnel,
id,
this.clone(),
shutdown_request_listener.clone(),
)))
},
);
// Tunnel Lifecycle - Sub-pipeline performed by futures on a per-tunnel basis
// This could be done at the stream level, but Rust-Analyzer's type system struggles
// to understand stream associated types at this level.
let pipeline = pipeline.for_each_concurrent(
None,
|(tunnel, id, this, shutdown_request_listener)| async move {
let tunnel = Arc::new(tunnel);
if let Err(e) = this
.tunnel_lifecycle(id, tunnel, shutdown_request_listener)
.await
{
tracing::debug!(error=?e, "tunnel lifetime exited with error");
}
},
);
// Spawn an instrumented task for the server which will return
// when all connections shut down and the tunnel source closes
tokio::task::spawn(pipeline.instrument(tracing::span!(tracing::Level::INFO, "modular_server")))
}
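/// A minimal usage sketch for `run` (assumption: the host builds the daemon
/// and its tunnel source elsewhere). Cancelling the token requests a graceful
/// stop; awaiting the join handle then waits for connections to drain.
#[allow(dead_code)]
async fn run_example<TunnelSource>(self: Arc<Self>, tunnel_source: TunnelSource)
where
    TunnelSource: Stream<Item = TTunnel> + Send + 'static,
    TTunnel: Tunnel + 'static,
{
    let shutdown = CancellationToken::new();
    let server = self.run(tunnel_source, shutdown.clone());
    // ... later, e.g. on SIGINT: request a stop and wait for the drain.
    shutdown.cancel();
    let _ = server.await;
}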
}
#[derive(thiserror::Error, Debug)]
enum TunnelLifecycleError {
#[error(transparent)]
RegistrationError(#[from] TunnelRegistrationError),
#[error(transparent)]
RegistryNamingError(#[from] TunnelNamingError),
#[error(transparent)]
RequestProcessingError(RequestProcessingError),
#[error("Authentication refused to remote by either breach of protocol or invalid/inadequate credentials")]
AuthenticationRefused,
#[error("Fatal error encountered in tunnel lifecycle: {0:?}")]
FatalError(anyhow::Error),
}
#[derive(thiserror::Error, Debug)]
enum RequestProcessingError {
#[error("Protocol version mismatch")]
UnsupportedProtocolVersion,
#[error("Tunnel error encountered: {0}")]
TunnelError(TunnelError),
#[error(transparent)]
FatalError(anyhow::Error),
}
impl From<RequestProcessingError> for TunnelLifecycleError {
fn from(e: RequestProcessingError) -> TunnelLifecycleError {
match e {
RequestProcessingError::FatalError(fatal_error) => {
TunnelLifecycleError::FatalError(fatal_error)
}
non_fatal => TunnelLifecycleError::RequestProcessingError(non_fatal),
}
}
}
impl<TTunnel> ModularDaemon<TTunnel>
where
TTunnel: Tunnel + 'static,
{
fn tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
) -> impl Future<Output = Result<(), TunnelLifecycleError>> + 'static {
async move {
// A registry mutex that prevents us from racing when calling the registry for
// this particular tunnel entry. This should also be enforced at the registry level.
let serialized_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static> =
Arc::new(SerializedTunnelRegistry::new(Arc::clone(&self.tunnel_registry)));
// Tunnel registration - The tunnel registry is called to imbue the tunnel with an ID
{
let tunnel_registry = Arc::clone(&serialized_registry);
Self::register_tunnel(id, Arc::clone(&tunnel), tunnel_registry)
.instrument(tracing::span!(tracing::Level::DEBUG, "registration", ?id))
}.await?;
// Send tunnel_connected event once the tunnel is successfully registered to its ID
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self.tunnel_connected.send((id, tunnel.clone()));
// From here on, any failure must trigger attempted deregistration of the tunnel,
// so further phases return their result to check for failures, which then result
// in a deregistration call.
// Phases resume in registered_tunnel_lifecycle.
let tunnel_registry = Arc::clone(&serialized_registry);
match self.registered_tunnel_lifecycle(id, tunnel, shutdown, tunnel_registry).await {
Ok(lifecycle_result) => Ok(lifecycle_result),
Err(e) => {
let deregistered = serialized_registry.deregister_tunnel(id).await.ok();
match &e {
&TunnelLifecycleError::AuthenticationRefused => tracing::debug!(err=?e, record=?deregistered, "Deregistered due to authentication refusal"),
e => tracing::info!(err=?e, record=?deregistered, "Deregistered due to lifecycle error")
}
Err(e)
}
}
}.instrument(tracing::span!(tracing::Level::DEBUG, "tunnel", ?id))
}
async fn registered_tunnel_lifecycle(
self: Arc<Self>,
id: TunnelId,
tunnel: Arc<TTunnel>,
shutdown: CancellationToken,
serialized_tunnel_registry: Arc<dyn TunnelRegistry + Send + Sync + 'static>,
) -> Result<(), TunnelLifecycleError> {
// Authenticate connections - Each connection will be piped into the authenticator,
// which has the option of declining the connection, and may save additional metadata.
let tunnel_authentication = {
self
.authenticate_tunnel(tunnel.clone(), &shutdown)
.instrument(tracing::span!(tracing::Level::DEBUG, "authentication", ?id))
.map_err(TunnelLifecycleError::FatalError)
};
let tunnel_name = match tunnel_authentication.await? {
Some((tunnel_name, _tunnel_dyn)) => tunnel_name,
None => {
let _ = serialized_tunnel_registry.deregister_tunnel(id).await;
return Ok(());
}
};
// Tunnel naming - The tunnel registry is notified of the authenticator-provided tunnel name
{
let tunnel_registry = Arc::clone(&serialized_tunnel_registry);
Self::name_tunnel(id, tunnel_name.clone(), tunnel_registry).instrument(tracing::span!(
tracing::Level::DEBUG,
"naming",
?id
))
}
.await?;
// Send tunnel_authenticated event for the newly-named tunnel, once the registry is aware of it
// Ignore error as it occurs only when no receivers exist to read the event
let _ = self
.tunnel_authenticated
.send((id, tunnel_name.clone(), tunnel.clone()));
// Process incoming requests until the incoming channel is closed.
{
let service_registry = Arc::clone(&self.service_registry);
Self::handle_incoming_requests(
id,
tunnel
.downlink()
.await
.ok_or(TunnelLifecycleError::RequestProcessingError(
RequestProcessingError::TunnelError(TunnelError::ConnectionClosed),
))?,
service_registry,
shutdown,
)
.instrument(tracing::span!(
tracing::Level::DEBUG,
"request_handling",
?id
))
}
.await?;
// Deregister closed tunnels after graceful exit
let _record = serialized_tunnel_registry.deregister_tunnel(id).await;
// TODO: Find a way to call self.tunnel_disconnected automatically, and simplify deregistration code path
// Otherwise, these deregister calls are an absurd amount of complexity.
// Maybe use drop semantics paired with a cancellation token and a task?
Ok(())
}
// Process incoming requests until the incoming channel is closed.
// Await a tunnel closure request from the host, or for the tunnel to close on its own.
// A tunnel has "closed on its own" if incoming closes *or* outgoing requests fail with
// a notification that the outgoing channel has been closed.
//
// The request handler for this side should be configured to send a close request for
// the tunnel with the given ID when it sees a request fail due to tunnel closure.
// TODO: configure request handler (?) to do that using a std::sync::Weak<ModularDaemon>.
async fn handle_incoming_requests<TDownlink: TunnelDownlink>(
id: TunnelId,
mut incoming: TDownlink,
service_registry: Arc<dyn ServiceRegistry + Send + Sync + 'static>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError> {
let negotiator = Arc::new(NegotiationService::new(service_registry));
incoming
.as_stream()
// Stop accepting new requests after a graceful shutdown is requested
.take_until({
// `cancelled()` borrows the token, so keep a clone alive for the future
let shutdown = shutdown.clone();
async move { shutdown.cancelled().await }
})
.map_err(|e: TunnelError| RequestProcessingError::TunnelError(e))
.scan((negotiator, shutdown), |(negotiator, shutdown), link| {
let res = link.map(|content| (Arc::clone(&*negotiator), shutdown.clone(), content));
future::ready(Some(res))
})
.try_for_each_concurrent(None, |(negotiator, shutdown, link)| {
Self::handle_incoming_request(id, link, negotiator, shutdown)
})
.await?;
Ok(())
}
async fn handle_incoming_request<Services>(
id: TunnelId,
link: TunnelIncomingType,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken,
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match link {
tunnel::TunnelIncomingType::BiStream(link) => {
Self::handle_incoming_request_bistream(id, link, negotiator, shutdown).await
}
}
}
async fn handle_incoming_request_bistream<Services>(
tunnel_id: TunnelId,
link: WrappedStream,
negotiator: Arc<NegotiationService<Services>>,
shutdown: CancellationToken, // TODO: Respond to shutdown listener requests
) -> Result<(), RequestProcessingError>
where
Services: ServiceRegistry + Send + Sync + ?Sized + 'static,
{
match negotiator.negotiate(link, tunnel_id).await {
// Tunnels established on an invalid negotiation protocol are useless; consider this fatal
Err(NegotiationError::UnsupportedProtocolVersion) => {
Err(RequestProcessingError::UnsupportedProtocolVersion)
}
// Protocol violations are not considered fatal, as they do not affect other links
// They do still destroy the current link, however.
Err(NegotiationError::ProtocolViolation) => Ok(()),
Err(NegotiationError::ReadError) => Ok(()),
Err(NegotiationError::WriteError) => Ok(()),
// Generic refusal for when a service doesn't accept a route for whatever reason
Err(NegotiationError::Refused) => {
tracing::debug!("Refused remote protocol request");
Ok(())
}
// Lack of support for a service is just a more specific refusal
Err(NegotiationError::UnsupportedServiceVersion) => {
tracing::debug!("Refused request due to unsupported service version");
Ok(())
}
Err(NegotiationError::ApplicationError(e)) => {
tracing::warn!(err=?e, "Refused request due to application error in negotiation");
Ok(())
}
Err(NegotiationError::FatalError(e)) => {
tracing::error!(err=?e, "Refused request due to fatal application error in negotiation");
Err(RequestProcessingError::FatalError(
NegotiationError::FatalError(e).into(),
))
}
Ok((link, route_addr, service)) => {
if shutdown.is_cancelled() {
// Drop services post-negotiation if the connection is awaiting
// shutdown, instead of handing them to the service to be performed.
return Ok(());
}
let route_addr: RouteAddress = route_addr;
let service: negotiation::ArcService = service;
match service
.handle(route_addr.clone(), Box::new(link), tunnel_id)
.await
{
// TODO: Figure out which of these should be considered fatal to the tunnel, if any
Err(e) => {
tracing::debug!(
address = route_addr.as_str(),
error = ?e,
"Protocol Service responded with non-fatal error"
);
Ok(())
}
Ok(()) => {
tracing::trace!(
address = route_addr.as_str(),
"Protocol Service reported success"
);
Ok(())
}
}
}
}
}
async fn register_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel: Arc<TTunnel>,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelRegistrationError>
where
TTunnelRegistry: std::ops::Deref + Send + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let registration = async move {
tunnel_registry
.register_tunnel(id, tunnel)
.map_err(|e| match e {
TunnelRegistrationError::IdOccupied(id) => {
tracing::error!(?id, "ID occupied; dropping tunnel");
TunnelRegistrationError::IdOccupied(id)
}
TunnelRegistrationError::NameOccupied(name) => {
// This error indicates that the tunnel registry is reporting names incorrectly, or
// holding entries from prior launches beyond the lifetime of the server that created them
tracing::error!(
"Name reported as occupied, but we haven't named this tunnel yet; dropping tunnel"
);
TunnelRegistrationError::NameOccupied(name)
}
TunnelRegistrationError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel registration");
TunnelRegistrationError::ApplicationError(e)
}
})
.await
};
tokio::spawn(registration).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelRegistrationError::ApplicationError(anyhow::Error::msg("Registration task cancelled"))
}
})?
}
async fn name_tunnel<TTunnelRegistry>(
id: TunnelId,
tunnel_name: TunnelName,
tunnel_registry: TTunnelRegistry,
) -> Result<(), TunnelNamingError>
where
TTunnelRegistry: std::ops::Deref + Send + Sync + 'static,
<TTunnelRegistry as std::ops::Deref>::Target: TunnelRegistry + Send + Sync,
{
let naming = async move {
tunnel_registry
.deref()
.name_tunnel(id, tunnel_name)
.map_err(|e| match e {
// If a tunnel registry wishes to keep a tunnel alive past a naming clash, it
// must rename the existing tunnel then name the new one, and report Ok here.
TunnelNamingError::NameOccupied(name) => {
tracing::error!(?id, "Name reported as occupied; dropping tunnel");
TunnelNamingError::NameOccupied(name)
}
TunnelNamingError::TunnelNotRegistered(id) => {
// This indicates out-of-order processing on per-tunnel events in the registry
// To solve this, the tunnel registry must complete event processing in order
// for events produced by a given tunnel's lifetime. The simplest way is to
// serialize all registry changes using a tokio::task with an ordered channel,
// as sketched after this impl block.
tracing::error!("Tunnel reported as not registered from naming task");
TunnelNamingError::TunnelNotRegistered(id)
}
TunnelNamingError::ApplicationError(e) => {
tracing::error!(err=?e, "ApplicationError in tunnel naming");
TunnelNamingError::ApplicationError(e)
}
})
.await
};
tokio::spawn(naming).await.map_err(|e| {
if e.is_panic() {
std::panic::resume_unwind(e.into_panic());
} else {
TunnelNamingError::ApplicationError(anyhow::Error::msg("Naming task cancelled"))
}
})?
}
}
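// Sketch of the in-order registry processing suggested in `name_tunnel`
// above: a single task owns the queue and awaits each job before taking the
// next, so mutations apply in strict submission order. Jobs are modeled here
// as boxed futures; a real registry would send typed commands instead.
// (Illustrative only; must be called from within a tokio runtime.)
#[allow(dead_code)]
fn spawn_serialized_executor(
) -> tokio::sync::mpsc::UnboundedSender<std::pin::Pin<Box<dyn Future<Output = ()> + Send>>> {
    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<
        std::pin::Pin<Box<dyn Future<Output = ()> + Send>>,
    >();
    tokio::spawn(async move {
        while let Some(job) = rx.recv().await {
            job.await; // strict FIFO: the next job starts only after this one finishes
        }
    });
    tx
}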
// pixel_format.rs
// The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff.
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid when flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000.
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size != 32 {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
}
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn write<W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?;
writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask
)?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use the extension for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set; use the DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet (but see the const fn sketch below)
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
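// On modern Rust, a const fn can do what the macro above intended; a minimal
// sketch (hypothetical helper, not part of the original API):
#[allow(dead_code)]
const fn u32_code(w: &[u8; 4]) -> u32 {
    (w[0] as u32) | ((w[1] as u32) << 8) | ((w[2] as u32) << 16) | ((w[3] as u32) << 24)
}
// e.g. u32_code(b"DXT1") == 0x31545844, matching FourCC::DXT1 below.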
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
}
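// A round-trip sketch exercising `read` and `write` above (test-only
// addition, not part of the original file):
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn pixel_format_round_trip() {
        let pf = PixelFormat {
            flags: PixelFormatFlags::RGB | PixelFormatFlags::ALPHA_PIXELS,
            rgb_bit_count: Some(32),
            r_bit_mask: Some(0x00ff_0000),
            g_bit_mask: Some(0x0000_ff00),
            b_bit_mask: Some(0x0000_00ff),
            a_bit_mask: Some(0xff00_0000),
            ..Default::default()
        };
        let mut buf = Vec::new();
        pf.write(&mut buf).unwrap();
        // Eight little-endian u32 fields -> always 32 bytes on the wire.
        assert_eq!(buf.len(), 32);
        let parsed = PixelFormat::read(&buf[..]).unwrap();
        assert_eq!(parsed.rgb_bit_count, Some(32));
        assert_eq!(parsed.r_bit_mask, Some(0x00ff_0000));
        assert_eq!(parsed.a_bit_mask, Some(0xff00_0000));
    }
}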
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size!= 32 |
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn write<W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?;
writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask
)?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use extention for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
}
| {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
} | conditional_block |
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size!= 32 {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
}
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn write<W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?; | )?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use extention for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncomressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
} | writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask | random_line_split |
pixel_format.rs | // The MIT License (MIT)
//
// Copyright (c) 2018 Michael Dilger
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use super::{D3DFormat, DataFormat, DxgiFormat};
use crate::error::*;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::fmt;
use std::io::{Read, Write};
#[derive(Clone)]
pub struct PixelFormat {
/// Size of this structure in bytes; set to 32
pub size: u32,
/// Values which indicate what type of data is in the surface
pub flags: PixelFormatFlags,
/// Codes for specifying compressed or custom formats.
pub fourcc: Option<FourCC>,
/// Number of bits in an RGB (possibly including alpha) format. Valid when
/// flags includes RGB or LUMINANCE.
pub rgb_bit_count: Option<u32>,
/// Red (or Y) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the red mask would be 0x00ff0000.
pub r_bit_mask: Option<u32>,
/// Green (or U) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the green mask would be 0x0000ff00.
pub g_bit_mask: Option<u32>,
/// Blue (or V) mask for reading color data. For instance, given the A8R8G8B8 format,
/// the blue mask would be 0x000000ff
pub b_bit_mask: Option<u32>,
/// Alpha mask for reading alpha data. Valid of flags includes ALPHA_PIXELS or ALPHA.
/// For instance, given the A8R8G8B8 format, the alpha mask would be 0xff000000
pub a_bit_mask: Option<u32>,
}
impl PixelFormat {
pub fn read<R: Read>(mut r: R) -> Result<PixelFormat, Error> {
let size = r.read_u32::<LittleEndian>()?;
if size!= 32 {
return Err(Error::InvalidField("Pixel format struct size".to_owned()));
}
let flags = PixelFormatFlags::from_bits_truncate(r.read_u32::<LittleEndian>()?);
let fourcc = r.read_u32::<LittleEndian>()?;
let rgb_bit_count = r.read_u32::<LittleEndian>()?;
let r_bit_mask = r.read_u32::<LittleEndian>()?;
let g_bit_mask = r.read_u32::<LittleEndian>()?;
let b_bit_mask = r.read_u32::<LittleEndian>()?;
let a_bit_mask = r.read_u32::<LittleEndian>()?;
Ok(PixelFormat {
size,
flags,
fourcc: if flags.contains(PixelFormatFlags::FOURCC) {
Some(FourCC(fourcc))
} else {
None
},
rgb_bit_count: if flags.contains(PixelFormatFlags::RGB)
|| flags.contains(PixelFormatFlags::LUMINANCE)
{
Some(rgb_bit_count)
} else {
None
},
r_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(r_bit_mask)
} else {
None
},
g_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(g_bit_mask)
} else {
None
},
b_bit_mask: if flags.contains(PixelFormatFlags::RGB) {
Some(b_bit_mask)
} else {
None
},
a_bit_mask: if flags.contains(PixelFormatFlags::ALPHA_PIXELS)
|| flags.contains(PixelFormatFlags::ALPHA)
{
Some(a_bit_mask)
} else {
None
},
})
}
pub fn | <W: Write>(&self, w: &mut W) -> Result<(), Error> {
w.write_u32::<LittleEndian>(self.size)?;
w.write_u32::<LittleEndian>(self.flags.bits())?;
w.write_u32::<LittleEndian>(self.fourcc.as_ref().unwrap_or(&FourCC(0)).0)?;
w.write_u32::<LittleEndian>(self.rgb_bit_count.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.r_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.g_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.b_bit_mask.unwrap_or(0))?;
w.write_u32::<LittleEndian>(self.a_bit_mask.unwrap_or(0))?;
Ok(())
}
}
impl fmt::Debug for PixelFormat {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, " Pixel Format:")?;
writeln!(f, " flags: {:?}", self.flags)?;
writeln!(f, " fourcc: {:?}", self.fourcc)?;
writeln!(f, " bits_per_pixel: {:?}", self.rgb_bit_count)?;
writeln!(
f,
" RGBA bitmasks: {:?}, {:?}, {:?}, {:?}",
self.r_bit_mask, self.g_bit_mask, self.b_bit_mask, self.a_bit_mask
)?;
Ok(())
}
}
impl Default for PixelFormat {
fn default() -> PixelFormat {
PixelFormat {
size: 32, // must be 32
flags: PixelFormatFlags::empty(),
fourcc: None,
rgb_bit_count: None,
r_bit_mask: None,
g_bit_mask: None,
b_bit_mask: None,
a_bit_mask: None,
}
}
}
impl From<D3DFormat> for PixelFormat {
fn from(format: D3DFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB);
pf.rgb_bit_count = Some(bpp as u32)
} else if let Some(fourcc) = format.get_fourcc() {
pf.flags.insert(PixelFormatFlags::FOURCC);
pf.fourcc = Some(fourcc);
}
if let Some(abitmask) = format.a_bit_mask() {
pf.flags.insert(PixelFormatFlags::ALPHA_PIXELS);
pf.a_bit_mask = Some(abitmask);
}
pf.r_bit_mask = format.r_bit_mask();
pf.g_bit_mask = format.g_bit_mask();
pf.b_bit_mask = format.b_bit_mask();
pf
}
}
impl From<DxgiFormat> for PixelFormat {
fn from(format: DxgiFormat) -> PixelFormat {
let mut pf: PixelFormat = Default::default();
if let Some(bpp) = format.get_bits_per_pixel() {
pf.flags.insert(PixelFormatFlags::RGB); // means uncompressed
pf.rgb_bit_count = Some(bpp as u32)
}
pf.fourcc = Some(FourCC(FourCC::DX10)); // we always use the DX10 extension for Dxgi
pf.flags.insert(PixelFormatFlags::FOURCC);
// flags::ALPHA_PIXELS is not set, use DX10 extension.
// r_bit_mask, g_bit_mask, b_bit_mask and a_bit_mask are not set.
// FIXME - we may need to set these in some circumstances.
pf
}
}
bitflags! {
pub struct PixelFormatFlags: u32 {
/// Texture contains alpha data.
const ALPHA_PIXELS = 0x1;
/// Alpha channel only uncompressed data (used in older DDS files)
const ALPHA = 0x2;
/// Texture contains compressed RGB data.
const FOURCC = 0x4;
/// Texture contains uncompressed RGB data.
const RGB = 0x40;
/// YUV uncompressed data (used in older DDS files)
const YUV = 0x200;
/// Single channel color uncompressed data (used in older DDS files)
const LUMINANCE = 0x20000;
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct FourCC(pub u32);
// generate little-endian u32 from 4 bytes
// rust is not ready for this yet
/*
macro_rules! u32_code {
($w:expr) => {
((($w[0] as u32) << 0) |
(($w[1] as u32) << 8) |
(($w[2] as u32) << 16) |
(($w[3] as u32) << 24) |
((*$w as [u8; 4])[0] as u32 * 0))
}
}
*/
impl FourCC {
pub const NONE: u32 = 0;
// D3D formats
pub const DXT1: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const DXT2: u32 = 0x32545844; //u32_code!(b"DXT2");
pub const DXT3: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const DXT4: u32 = 0x34545844; //u32_code!(b"DXT4");
pub const DXT5: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const R8G8_B8G8: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const A16B16G16R16: u32 = 36;
pub const Q16W16V16U16: u32 = 110;
pub const R16F: u32 = 111;
pub const G16R16F: u32 = 112;
pub const A16B16G16R16F: u32 = 113;
pub const R32F: u32 = 114;
pub const G32R32F: u32 = 115;
pub const A32B32G32R32F: u32 = 116;
pub const UYVY: u32 = 0x59565955; //u32_code!(b"UYVY");
pub const YUY2: u32 = 0x32595559; //u32_code!(b"YUY2");
pub const CXV8U8: u32 = 117;
pub const ATI1: u32 = 0x31495441; //u32_code!(b"ATI1"); // BC4 unorm
pub const ATI2: u32 = 0x32495441; //u32_code!(b"ATI2"); // BC5 unorm
pub const DX10: u32 = 0x30315844; //u32_code!(b"DX10");
// DXGI formats (different names, often for same things)
pub const BC1_UNORM: u32 = 0x31545844; //u32_code!(b"DXT1");
pub const BC2_UNORM: u32 = 0x33545844; //u32_code!(b"DXT3");
pub const BC3_UNORM: u32 = 0x35545844; //u32_code!(b"DXT5");
pub const BC4_UNORM: u32 = 0x55344342; //u32_code!(b"BC4U");
pub const BC4_SNORM: u32 = 0x53344342; //u32_code!(b"BC4S");
pub const BC5_UNORM: u32 = 0x32495441; //u32_code!(b"ATI2");
pub const BC5_SNORM: u32 = 0x53354342; //u32_code!(b"BC5S");
pub const R8G8_B8G8_UNORM: u32 = 0x47424752; //u32_code!(b"RGBG");
pub const G8R8_G8B8_UNORM: u32 = 0x42475247; //u32_code!(b"GRGB");
pub const R16G16B16A16_UNORM: u32 = 36;
pub const R16G16B16A16_SNORM: u32 = 110;
pub const R16_FLOAT: u32 = 111;
pub const R16G16_FLOAT: u32 = 112;
pub const R16G16B16A16_FLOAT: u32 = 113;
pub const R32_FLOAT: u32 = 114;
pub const R32G32_FLOAT: u32 = 115;
pub const R32G32B32A32_FLOAT: u32 = 116;
}
| write | identifier_name |
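// Hedged round-trip sketch for the PixelFormat read/write pair above: the struct
// serializes to the fixed 32-byte DDS_PIXELFORMAT layout of eight little-endian
// u32s and parses back unchanged. Assumes the surrounding crate's PixelFormat is
// in scope; the helper name is illustrative only.
fn pixel_format_roundtrip() {
    use std::io::Cursor;
    let pf = PixelFormat::default();
    let mut buf = Vec::new();
    pf.write(&mut buf).expect("write failed");
    assert_eq!(buf.len(), 32); // the size field must read 32 or read() rejects it
    let parsed = PixelFormat::read(Cursor::new(buf)).expect("read failed");
    assert_eq!(parsed.size, 32);
    assert!(parsed.fourcc.is_none()); // FOURCC flag unset, so the field is dropped
}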
main.rs | #![feature(proc_macro)]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_rtfm as rtfm;
extern crate stm32f30x_hal as hal;
extern crate ls010b7dh01;
extern crate rn4870;
extern crate embedded_graphics as graphics;
extern crate panic_abort;
extern crate nb;
mod display;
mod ble;
use cortex_m::asm;
use cortex_m::peripheral::syst::SystClkSource;
use rtfm::{app, Threshold};
use hal::prelude::*;
use hal::timer;
use hal::timer::Timer;
use hal::spi::Spi;
use hal::serial;
use hal::serial::Serial;
use hal::delay::Delay;
use hal::gpio::{gpiob, gpioc, Input, Output, PullUp, PushPull, AF7};
use ls010b7dh01::Ls010b7dh01;
use graphics::prelude::*;
use graphics::primitives::{Circle, Line, Rect};
use graphics::fonts::{Font, Font6x8};
use graphics::transform::Transform;
use graphics::image::Image1BPP;
app! {
device: hal::stm32f30x,
resources: {
static TOGGLE: bool = false;
static TIME: u8 = 0;
static STATE: State = State::Time;
static EXTI: hal::stm32f30x::EXTI;
static RESET_BLE: bool = true;
static REDRAW: bool = true;
static DRAW_BUFFER: [u8; 16] = [32; 16];
static BUFFER_POS: u8 = 0;
// Late Resources
static EXTCOMIN: display::Extcomin;
static DISPLAY: display::Display;
static BLE: ble::Ble;
},
tasks: {
TIM7: {
path: tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY],
},
SYS_TICK: {
path: sys_tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY,
TIME, BLE, RESET_BLE, STATE, REDRAW,
DRAW_BUFFER],
},
USART1_EXTI25: {
path: ble_message,
resources: [BLE, DRAW_BUFFER, BUFFER_POS],
},
EXTI9_5: {
enabled: true,
priority: 1,
path: exti9_5,
resources: [STATE, EXTI],
},
EXTI15_10: {
path: exti15_10,
resources: [STATE, EXTI],
},
},
}
pub enum State {
Ble,
Time,
Face,
}
fn init(mut p: init::Peripherals, _r: init::Resources) -> init::LateResources {
let mut rcc = p.device.RCC.constrain();
let mut flash = p.device.FLASH.constrain();
let mut gpioa = p.device.GPIOA.split(&mut rcc.ahb);
let mut gpiob = p.device.GPIOB.split(&mut rcc.ahb);
let mut gpioc = p.device.GPIOC.split(&mut rcc.ahb); | rcc.apb2.rstr().modify(|_, w| w.syscfgrst().clear_bit());
// Enable systick
p.core.SYST.set_clock_source(SystClkSource::Core);
p.core.SYST.set_reload(16_000_000);
p.core.SYST.enable_interrupt();
p.core.SYST.enable_counter();
// Set up our clocks & timer & delay
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut timer = Timer::tim7(p.device.TIM7, 1.hz(), clocks, &mut rcc.apb1);
//timer.listen(timer::Event::TimeOut);
let mut delay = Delay::new(p.core.SYST, clocks);
// Set up our GPIO pins
let disp_en = gpiob.pb2.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let extcomin = gpiob.pb1.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let cs = gpiob.pb0.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let mut v5_en = gpioa.pa3.into_push_pull_output(
&mut gpioa.moder,
&mut gpioa.otyper,
);
let reset_ble = gpiob.pb5.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let sck = gpioa.pa5.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let miso = gpioa.pa6.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let mosi = gpioa.pa7.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let tx = gpiob.pb6.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let rx = gpiob.pb7.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let button_1 = gpiob.pb8.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_2 = gpiob.pb9.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_3 = gpioc.pc13.into_pull_up_input(
&mut gpioc.moder,
&mut gpioc.pupdr,
);
// Set up our display
let mode = ls010b7dh01::MODE;
let spi = Spi::spi1(
p.device.SPI1,
(sck, miso, mosi),
mode,
1.mhz(),
clocks,
&mut rcc.apb2,
);
let mut display = Ls010b7dh01::new(spi, cs, disp_en);
// Set up our BLE
let mut serial = Serial::usart1(
p.device.USART1,
(tx, rx),
115_200.bps(),
clocks,
&mut rcc.apb2,
);
serial.listen(serial::Event::Rxne); // TODO: Serial interrupts?
let mut ble = rn4870::Rn4870::new(serial, reset_ble);
// Set the default values
v5_en.set_high();
display.enable();
// Set up syscfg to link GPIO to EXTI
p.device.SYSCFG.exticr3.modify(|_, w| unsafe {
w.bits(0x11)
/* This does not work
w.exti8().bits(0b001) // Port b
.exti9().bits(0b001) // Port b
*/
});
p.device.SYSCFG.exticr4.modify(|_, w| unsafe {
w.exti13().bits(0b010) // Port c
});
p.device.EXTI.imr1.modify(|_, w| {
w.mr8().set_bit().mr9().set_bit().mr13().set_bit()
});
p.device.EXTI.ftsr1.modify(|_, w| {
w.tr8().set_bit().tr9().set_bit().tr13().set_bit()
});
init::LateResources {
DISPLAY: display,
EXTCOMIN: extcomin,
BLE: ble,
EXTI: p.device.EXTI,
}
}
fn idle() -> ! {
loop {
rtfm::wfi();
}
}
fn ble_message(_t: &mut Threshold, mut r: USART1_EXTI25::Resources) {
let res = r.BLE.read_raw();
match res {
Ok(n) => {
if n < 32 {
return
}
(*r.DRAW_BUFFER)[*r.BUFFER_POS as usize] = n;
*r.BUFFER_POS += 1;
if *r.BUFFER_POS == 16 {
*r.BUFFER_POS = 0;
}
}
Err(nb::Error::Other(_)) => {
r.BLE.handle_error(|uart| { uart.clear_overflow_error(); } );
}
Err(nb::Error::WouldBlock) => {}
}
}
fn exti9_5(_t: &mut Threshold, mut r: EXTI9_5::Resources) {
if r.EXTI.pr1.read().pr8().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr8().set_bit());
*r.STATE = State::Ble;
}
if r.EXTI.pr1.read().pr9().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr9().set_bit());
*r.STATE = State::Time;
}
}
fn exti15_10(_t: &mut Threshold, mut r: EXTI15_10::Resources) {
if r.EXTI.pr1.read().pr13().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr13().set_bit());
*r.STATE = State::Face;
}
}
fn tick(_t: &mut Threshold, mut r: TIM7::Resources) {
}
fn sys_tick(_t: &mut Threshold, mut r: SYS_TICK::Resources) {
let toggle = *r.TOGGLE;
let extcomin = &mut *r.EXTCOMIN;
if *r.RESET_BLE {
r.BLE.hard_reset_on();
*r.RESET_BLE = false;
} else {
r.BLE.hard_reset_off();
}
match *r.STATE {
State::Ble => {
r.DISPLAY.clear();
//let s = String::from_utf8_lossy(&*r.DRAW_BUFFER);
unsafe {
let s = &*(&*r.DRAW_BUFFER as *const [u8] as *const str);
r.DISPLAY.draw(Font6x8::render_str(s).translate((5, 50)).into_iter());
r.DISPLAY.flush_buffer();
}
}
State::Time => {
*r.REDRAW = true;
draw_time(&mut *r.DISPLAY, *r.TIME);
*r.TIME += 1;
if *r.TIME == 60 {
*r.TIME = 0;
}
}
State::Face => {
if *r.REDRAW {
draw_face(&mut *r.DISPLAY);
*r.REDRAW = false;
}
}
}
// Toggle extcomin manually
if toggle {
(*extcomin).set_high();
} else {
(*extcomin).set_low();
}
*r.TOGGLE = !toggle;
}
fn draw_face(mut display: &mut display::Display) {
display.clear();
let bpp = Image1BPP::new(include_bytes!("../data/face_1bpp_neg.raw"), 120, 120)
.translate((0, 0));
display.draw(bpp.into_iter());
display.flush_buffer();
}
fn draw_time(mut display: &mut display::Display, time: u8) {
display.clear();
/*
let values = [
(125, 65), (124, 71), (123, 77), (122, 83), (119, 89),
(116, 94), (113, 100), (109, 105), (105, 109), (100, 113),
(95, 116), (89, 119), (83, 122), (77, 123), (71, 124),
(65, 125), (59, 124), (53, 123), (47, 122), (41, 119),
(36, 116), (30, 113), (25, 109), (21, 105), (17, 100),
(14, 95), (11, 89), (8, 83), (7, 77), (6, 71),
(5, 65), (6, 59), (7, 53), (8, 47), (11, 41),
(14, 36), (17, 30), (21, 25), (25, 21), (30, 17),
(35, 14), (41, 11), (47, 8), (53, 7), (59, 6),
(65, 5), (71, 6), (77, 7), (83, 8), (89, 11),
(94, 14), (100, 17), (105, 21), (109, 25), (113, 30),
(116, 35), (119, 41), (122, 47), (123, 53), (124, 59),
];
*/
let values =[(109, 64), (108, 68), (108, 73), (106, 77), (105, 82), (102, 86), (100, 90), (97, 94), (94, 97), (90, 100), (86, 102), (82, 105), (77, 106), (73, 108), (68, 108), (64, 109), (60, 108), (55, 108), (51, 106), (46, 105), (42, 102), (38, 100), (34, 97), (31, 94), (28, 90), (26, 86), (23, 82), (22, 77), (20, 73), (20, 68), (19, 64), (20, 60), (20, 55), (22, 51), (23, 46), (26, 42), (28, 38), (31, 34), (34, 31), (38, 28), (42, 26), (46, 23), (51, 22), (55, 20), (60, 20), (64, 19), (68, 20), (73, 20), (77, 22), (82, 23), (86, 26), (90, 28), (94, 31), (97, 34), (100, 38), (102, 42), (105, 46), (106, 51), (108, 55), (108, 60)];
let digits = [(116, 60), (108, 87), (88, 107), (61, 115), (34, 107), (14, 87), (6, 60), (14, 33), (34, 13), (61, 5), (88, 13), (108, 33)];
display.draw(Font6x8::render_str("3").translate(digits[0]).into_iter());
display.draw(Font6x8::render_str("4").translate(digits[1]).into_iter());
display.draw(Font6x8::render_str("5").translate(digits[2]).into_iter());
display.draw(Font6x8::render_str("6").translate(digits[3]).into_iter());
display.draw(Font6x8::render_str("7").translate(digits[4]).into_iter());
display.draw(Font6x8::render_str("8").translate(digits[5]).into_iter());
display.draw(Font6x8::render_str("9").translate(digits[6]).into_iter());
display.draw(Font6x8::render_str("10").translate(digits[7]).into_iter());
display.draw(Font6x8::render_str("11").translate(digits[8]).into_iter());
display.draw(Font6x8::render_str("12").translate(digits[9]).into_iter());
display.draw(Font6x8::render_str("1").translate(digits[10]).into_iter());
display.draw(Font6x8::render_str("2").translate(digits[11]).into_iter());
display.draw(Line::new((65, 65), values[time as usize], 1).into_iter());
display.flush_buffer();
}
fn draw_buffer(buffer: &[u8]) {
} |
// Enable the syscfg
rcc.apb2.enr().modify(|_, w| w.syscfgen().enabled());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().set_bit()); | random_line_split |
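// Hedged sketch of the EXTI acknowledgement pattern used by the handlers above:
// STM32 pending-register bits are write-1-to-clear, so `w.pr8().set_bit()` inside
// `modify` clears line 8 rather than raising it; forgetting the acknowledge
// re-enters the ISR forever. Logic-only model with a plain bitmask for PR1:
fn ack_and_dispatch(pr1: &mut u32, state: &mut State) {
    const LINE8: u32 = 1 << 8;
    if *pr1 & LINE8 != 0 {
        *pr1 &= !LINE8; // what the hardware does when 1 is written to the bit
        *state = State::Ble;
    }
}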
main.rs | #![feature(proc_macro)]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_rtfm as rtfm;
extern crate stm32f30x_hal as hal;
extern crate ls010b7dh01;
extern crate rn4870;
extern crate embedded_graphics as graphics;
extern crate panic_abort;
extern crate nb;
mod display;
mod ble;
use cortex_m::asm;
use cortex_m::peripheral::syst::SystClkSource;
use rtfm::{app, Threshold};
use hal::prelude::*;
use hal::timer;
use hal::timer::Timer;
use hal::spi::Spi;
use hal::serial;
use hal::serial::Serial;
use hal::delay::Delay;
use hal::gpio::{gpiob, gpioc, Input, Output, PullUp, PushPull, AF7};
use ls010b7dh01::Ls010b7dh01;
use graphics::prelude::*;
use graphics::primitives::{Circle, Line, Rect};
use graphics::fonts::{Font, Font6x8};
use graphics::transform::Transform;
use graphics::image::Image1BPP;
app! {
device: hal::stm32f30x,
resources: {
static TOGGLE: bool = false;
static TIME: u8 = 0;
static STATE: State = State::Time;
static EXTI: hal::stm32f30x::EXTI;
static RESET_BLE: bool = true;
static REDRAW: bool = true;
static DRAW_BUFFER: [u8; 16] = [32; 16];
static BUFFER_POS: u8 = 0;
// Late Resources
static EXTCOMIN: display::Extcomin;
static DISPLAY: display::Display;
static BLE: ble::Ble;
},
tasks: {
TIM7: {
path: tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY],
},
SYS_TICK: {
path: sys_tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY,
TIME, BLE, RESET_BLE, STATE, REDRAW,
DRAW_BUFFER],
},
USART1_EXTI25: {
path: ble_message,
resources: [BLE, DRAW_BUFFER, BUFFER_POS],
},
EXTI9_5: {
enabled: true,
priority: 1,
path: exti9_5,
resources: [STATE, EXTI],
},
EXTI15_10: {
path: exti15_10,
resources: [STATE, EXTI],
},
},
}
pub enum State {
Ble,
Time,
Face,
}
fn init(mut p: init::Peripherals, _r: init::Resources) -> init::LateResources {
let mut rcc = p.device.RCC.constrain();
let mut flash = p.device.FLASH.constrain();
let mut gpioa = p.device.GPIOA.split(&mut rcc.ahb);
let mut gpiob = p.device.GPIOB.split(&mut rcc.ahb);
let mut gpioc = p.device.GPIOC.split(&mut rcc.ahb);
// Enable the syscfg
rcc.apb2.enr().modify(|_, w| w.syscfgen().enabled());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().set_bit());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().clear_bit());
// Enable systick
p.core.SYST.set_clock_source(SystClkSource::Core);
p.core.SYST.set_reload(16_000_000);
p.core.SYST.enable_interrupt();
p.core.SYST.enable_counter();
// Set up our clocks & timer & delay
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut timer = Timer::tim7(p.device.TIM7, 1.hz(), clocks, &mut rcc.apb1);
//timer.listen(timer::Event::TimeOut);
let mut delay = Delay::new(p.core.SYST, clocks);
// Set up our GPIO pins
let disp_en = gpiob.pb2.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let extcomin = gpiob.pb1.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let cs = gpiob.pb0.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let mut v5_en = gpioa.pa3.into_push_pull_output(
&mut gpioa.moder,
&mut gpioa.otyper,
);
let reset_ble = gpiob.pb5.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let sck = gpioa.pa5.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let miso = gpioa.pa6.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let mosi = gpioa.pa7.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let tx = gpiob.pb6.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let rx = gpiob.pb7.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let button_1 = gpiob.pb8.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_2 = gpiob.pb9.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_3 = gpioc.pc13.into_pull_up_input(
&mut gpioc.moder,
&mut gpioc.pupdr,
);
// Set up our display
let mode = ls010b7dh01::MODE;
let spi = Spi::spi1(
p.device.SPI1,
(sck, miso, mosi),
mode,
1.mhz(),
clocks,
&mut rcc.apb2,
);
let mut display = Ls010b7dh01::new(spi, cs, disp_en);
// Set up our BLE
let mut serial = Serial::usart1(
p.device.USART1,
(tx, rx),
115_200.bps(),
clocks,
&mut rcc.apb2,
);
serial.listen(serial::Event::Rxne); // TODO: Serial interrupts?
let mut ble = rn4870::Rn4870::new(serial, reset_ble);
// Set the default values
v5_en.set_high();
display.enable();
// Set up syscfg to link GPIO to EXTI
p.device.SYSCFG.exticr3.modify(|_, w| unsafe {
w.bits(0x11)
/* This does not work
w.exti8().bits(0b001) // Port b
.exti9().bits(0b001) // Port b
*/
});
p.device.SYSCFG.exticr4.modify(|_, w| unsafe {
w.exti13().bits(0b010) // Port c
});
p.device.EXTI.imr1.modify(|_, w| {
w.mr8().set_bit().mr9().set_bit().mr13().set_bit()
});
p.device.EXTI.ftsr1.modify(|_, w| {
w.tr8().set_bit().tr9().set_bit().tr13().set_bit()
});
init::LateResources {
DISPLAY: display,
EXTCOMIN: extcomin,
BLE: ble,
EXTI: p.device.EXTI,
}
}
fn idle() -> ! {
loop {
rtfm::wfi();
}
}
fn ble_message(_t: &mut Threshold, mut r: USART1_EXTI25::Resources) |
fn exti9_5(_t: &mut Threshold, mut r: EXTI9_5::Resources) {
if r.EXTI.pr1.read().pr8().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr8().set_bit());
*r.STATE = State::Ble;
}
if r.EXTI.pr1.read().pr9().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr9().set_bit());
*r.STATE = State::Time;
}
}
fn exti15_10(_t: &mut Threshold, mut r: EXTI15_10::Resources) {
if r.EXTI.pr1.read().pr13().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr13().set_bit());
*r.STATE = State::Face;
}
}
fn tick(_t: &mut Threshold, mut r: TIM7::Resources) {
}
fn sys_tick(_t: &mut Threshold, mut r: SYS_TICK::Resources) {
let toggle = *r.TOGGLE;
let extcomin = &mut *r.EXTCOMIN;
if *r.RESET_BLE {
r.BLE.hard_reset_on();
*r.RESET_BLE = false;
} else {
r.BLE.hard_reset_off();
}
match *r.STATE {
State::Ble => {
r.DISPLAY.clear();
//let s = String::from_utf8_lossy(&*r.DRAW_BUFFER);
unsafe {
let s = &*(&*r.DRAW_BUFFER as *const [u8] as *const str);
r.DISPLAY.draw(Font6x8::render_str(s).translate((5, 50)).into_iter());
r.DISPLAY.flush_buffer();
}
}
State::Time => {
*r.REDRAW = true;
draw_time(&mut *r.DISPLAY, *r.TIME);
*r.TIME += 1;
if *r.TIME == 60 {
*r.TIME = 0;
}
}
State::Face => {
if *r.REDRAW {
draw_face(&mut *r.DISPLAY);
*r.REDRAW = false;
}
}
}
// Toggle extcomin manually
if toggle {
(*extcomin).set_high();
} else {
(*extcomin).set_low();
}
*r.TOGGLE = !toggle;
}
fn draw_face(mut display: &mut display::Display) {
display.clear();
let bpp = Image1BPP::new(include_bytes!("../data/face_1bpp_neg.raw"), 120, 120)
.translate((0, 0));
display.draw(bpp.into_iter());
display.flush_buffer();
}
fn draw_time(mut display: &mut display::Display, time: u8) {
display.clear();
/*
let values = [
(125, 65), (124, 71), (123, 77), (122, 83), (119, 89),
(116, 94), (113, 100), (109, 105), (105, 109), (100, 113),
(95, 116), (89, 119), (83, 122), (77, 123), (71, 124),
(65, 125), (59, 124), (53, 123), (47, 122), (41, 119),
(36, 116), (30, 113), (25, 109), (21, 105), (17, 100),
(14, 95), (11, 89), (8, 83), (7, 77), (6, 71),
(5, 65), (6, 59), (7, 53), (8, 47), (11, 41),
(14, 36), (17, 30), (21, 25), (25, 21), (30, 17),
(35, 14), (41, 11), (47, 8), (53, 7), (59, 6),
(65, 5), (71, 6), (77, 7), (83, 8), (89, 11),
(94, 14), (100, 17), (105, 21), (109, 25), (113, 30),
(116, 35), (119, 41), (122, 47), (123, 53), (124, 59),
];
*/
let values =[(109, 64), (108, 68), (108, 73), (106, 77), (105, 82), (102, 86), (100, 90), (97, 94), (94, 97), (90, 100), (86, 102), (82, 105), (77, 106), (73, 108), (68, 108), (64, 109), (60, 108), (55, 108), (51, 106), (46, 105), (42, 102), (38, 100), (34, 97), (31, 94), (28, 90), (26, 86), (23, 82), (22, 77), (20, 73), (20, 68), (19, 64), (20, 60), (20, 55), (22, 51), (23, 46), (26, 42), (28, 38), (31, 34), (34, 31), (38, 28), (42, 26), (46, 23), (51, 22), (55, 20), (60, 20), (64, 19), (68, 20), (73, 20), (77, 22), (82, 23), (86, 26), (90, 28), (94, 31), (97, 34), (100, 38), (102, 42), (105, 46), (106, 51), (108, 55), (108, 60)];
let digits = [(116, 60), (108, 87), (88, 107), (61, 115), (34, 107), (14, 87), (6, 60), (14, 33), (34, 13), (61, 5), (88, 13), (108, 33)];
display.draw(Font6x8::render_str("3").translate(digits[0]).into_iter());
display.draw(Font6x8::render_str("4").translate(digits[1]).into_iter());
display.draw(Font6x8::render_str("5").translate(digits[2]).into_iter());
display.draw(Font6x8::render_str("6").translate(digits[3]).into_iter());
display.draw(Font6x8::render_str("7").translate(digits[4]).into_iter());
display.draw(Font6x8::render_str("8").translate(digits[5]).into_iter());
display.draw(Font6x8::render_str("9").translate(digits[6]).into_iter());
display.draw(Font6x8::render_str("10").translate(digits[7]).into_iter());
display.draw(Font6x8::render_str("11").translate(digits[8]).into_iter());
display.draw(Font6x8::render_str("12").translate(digits[9]).into_iter());
display.draw(Font6x8::render_str("1").translate(digits[10]).into_iter());
display.draw(Font6x8::render_str("2").translate(digits[11]).into_iter());
display.draw(Line::new((65, 65), values[time as usize], 1).into_iter());
display.flush_buffer();
}
fn draw_buffer(buffer: &[u8]) {
}
| {
let res = r.BLE.read_raw();
match res {
Ok(n) => {
if n < 32 {
return
}
(*r.DRAW_BUFFER)[*r.BUFFER_POS as usize] = n;
*r.BUFFER_POS += 1;
if *r.BUFFER_POS == 16 {
*r.BUFFER_POS = 0;
}
}
Err(nb::Error::Other(_)) => {
r.BLE.handle_error(|uart| { uart.clear_overflow_error(); } );
}
Err(nb::Error::WouldBlock) => {}
}
} | identifier_body |
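// Hedged model of the nb-based receive in ble_message above: WouldBlock means no
// byte has arrived yet, Other(_) carries a real UART fault (the handler clears an
// overrun flag), and printable bytes (>= 32) land at a wrapping buffer position.
fn pump(read: impl FnOnce() -> nb::Result<u8, ()>, buf: &mut [u8; 16], pos: &mut u8) {
    match read() {
        Ok(b) if b >= 32 => {
            buf[*pos as usize] = b;
            *pos = (*pos + 1) % 16; // same wrap the handler spells with an if
        }
        Ok(_) => {}                      // control byte: dropped
        Err(nb::Error::Other(_)) => {}   // real hardware would clear the error flag here
        Err(nb::Error::WouldBlock) => {} // try again on the next interrupt
    }
}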
main.rs | #![feature(proc_macro)]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_rtfm as rtfm;
extern crate stm32f30x_hal as hal;
extern crate ls010b7dh01;
extern crate rn4870;
extern crate embedded_graphics as graphics;
extern crate panic_abort;
extern crate nb;
mod display;
mod ble;
use cortex_m::asm;
use cortex_m::peripheral::syst::SystClkSource;
use rtfm::{app, Threshold};
use hal::prelude::*;
use hal::timer;
use hal::timer::Timer;
use hal::spi::Spi;
use hal::serial;
use hal::serial::Serial;
use hal::delay::Delay;
use hal::gpio::{gpiob, gpioc, Input, Output, PullUp, PushPull, AF7};
use ls010b7dh01::Ls010b7dh01;
use graphics::prelude::*;
use graphics::primitives::{Circle, Line, Rect};
use graphics::fonts::{Font, Font6x8};
use graphics::transform::Transform;
use graphics::image::Image1BPP;
app! {
device: hal::stm32f30x,
resources: {
static TOGGLE: bool = false;
static TIME: u8 = 0;
static STATE: State = State::Time;
static EXTI: hal::stm32f30x::EXTI;
static RESET_BLE: bool = true;
static REDRAW: bool = true;
static DRAW_BUFFER: [u8; 16] = [32; 16];
static BUFFER_POS: u8 = 0;
// Late Resources
static EXTCOMIN: display::Extcomin;
static DISPLAY: display::Display;
static BLE: ble::Ble;
},
tasks: {
TIM7: {
path: tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY],
},
SYS_TICK: {
path: sys_tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY,
TIME, BLE, RESET_BLE, STATE, REDRAW,
DRAW_BUFFER],
},
USART1_EXTI25: {
path: ble_message,
resources: [BLE, DRAW_BUFFER, BUFFER_POS],
},
EXTI9_5: {
enabled: true,
priority: 1,
path: exti9_5,
resources: [STATE, EXTI],
},
EXTI15_10: {
path: exti15_10,
resources: [STATE, EXTI],
},
},
}
pub enum State {
Ble,
Time,
Face,
}
fn init(mut p: init::Peripherals, _r: init::Resources) -> init::LateResources {
let mut rcc = p.device.RCC.constrain();
let mut flash = p.device.FLASH.constrain();
let mut gpioa = p.device.GPIOA.split(&mut rcc.ahb);
let mut gpiob = p.device.GPIOB.split(&mut rcc.ahb);
let mut gpioc = p.device.GPIOC.split(&mut rcc.ahb);
// Enable the syscfg
rcc.apb2.enr().modify(|_, w| w.syscfgen().enabled());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().set_bit());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().clear_bit());
// Enable systick
p.core.SYST.set_clock_source(SystClkSource::Core);
p.core.SYST.set_reload(16_000_000);
p.core.SYST.enable_interrupt();
p.core.SYST.enable_counter();
// Set up our clocks & timer & delay
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut timer = Timer::tim7(p.device.TIM7, 1.hz(), clocks, &mut rcc.apb1);
//timer.listen(timer::Event::TimeOut);
let mut delay = Delay::new(p.core.SYST, clocks);
// Set up our GPIO pins
let disp_en = gpiob.pb2.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let extcomin = gpiob.pb1.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let cs = gpiob.pb0.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let mut v5_en = gpioa.pa3.into_push_pull_output(
&mut gpioa.moder,
&mut gpioa.otyper,
);
let reset_ble = gpiob.pb5.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let sck = gpioa.pa5.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let miso = gpioa.pa6.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let mosi = gpioa.pa7.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let tx = gpiob.pb6.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let rx = gpiob.pb7.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let button_1 = gpiob.pb8.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_2 = gpiob.pb9.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_3 = gpioc.pc13.into_pull_up_input(
&mut gpioc.moder,
&mut gpioc.pupdr,
);
// Set up our display
let mode = ls010b7dh01::MODE;
let spi = Spi::spi1(
p.device.SPI1,
(sck, miso, mosi),
mode,
1.mhz(),
clocks,
&mut rcc.apb2,
);
let mut display = Ls010b7dh01::new(spi, cs, disp_en);
// Set up our BLE
let mut serial = Serial::usart1(
p.device.USART1,
(tx, rx),
115_200.bps(),
clocks,
&mut rcc.apb2,
);
serial.listen(serial::Event::Rxne); // TODO: Serial interrupts?
let mut ble = rn4870::Rn4870::new(serial, reset_ble);
// Set the default values
v5_en.set_high();
display.enable();
// Set up syscfg to link GPIO to EXTI
p.device.SYSCFG.exticr3.modify(|_, w| unsafe {
w.bits(0x11)
/* This does not work
w.exti8().bits(0b001) // Port b
.exti9().bits(0b001) // Port b
*/
});
p.device.SYSCFG.exticr4.modify(|_, w| unsafe {
w.exti13().bits(0b010) // Port c
});
p.device.EXTI.imr1.modify(|_, w| {
w.mr8().set_bit().mr9().set_bit().mr13().set_bit()
});
p.device.EXTI.ftsr1.modify(|_, w| {
w.tr8().set_bit().tr9().set_bit().tr13().set_bit()
});
init::LateResources {
DISPLAY: display,
EXTCOMIN: extcomin,
BLE: ble,
EXTI: p.device.EXTI,
}
}
fn idle() -> ! {
loop {
rtfm::wfi();
}
}
fn ble_message(_t: &mut Threshold, mut r: USART1_EXTI25::Resources) {
let res = r.BLE.read_raw();
match res {
Ok(n) => {
if n < 32 {
return
}
(*r.DRAW_BUFFER)[*r.BUFFER_POS as usize] = n;
*r.BUFFER_POS += 1;
if *r.BUFFER_POS == 16 |
}
Err(nb::Error::Other(_)) => {
r.BLE.handle_error(|uart| { uart.clear_overflow_error(); } );
}
Err(nb::Error::WouldBlock) => {}
}
}
fn exti9_5(_t: &mut Threshold, mut r: EXTI9_5::Resources) {
if r.EXTI.pr1.read().pr8().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr8().set_bit());
*r.STATE = State::Ble;
}
if r.EXTI.pr1.read().pr9().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr9().set_bit());
*r.STATE = State::Time;
}
}
fn exti15_10(_t: &mut Threshold, mut r: EXTI15_10::Resources) {
if r.EXTI.pr1.read().pr13().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr13().set_bit());
*r.STATE = State::Face;
}
}
fn tick(_t: &mut Threshold, mut r: TIM7::Resources) {
}
fn sys_tick(_t: &mut Threshold, mut r: SYS_TICK::Resources) {
let toggle = *r.TOGGLE;
let extcomin = &mut *r.EXTCOMIN;
if *r.RESET_BLE {
r.BLE.hard_reset_on();
*r.RESET_BLE = false;
} else {
r.BLE.hard_reset_off();
}
match *r.STATE {
State::Ble => {
r.DISPLAY.clear();
//let s = String::from_utf8_lossy(&*r.DRAW_BUFFER);
unsafe {
let s = &*(&*r.DRAW_BUFFER as *const [u8] as *const str);
r.DISPLAY.draw(Font6x8::render_str(s).translate((5, 50)).into_iter());
r.DISPLAY.flush_buffer();
}
}
State::Time => {
*r.REDRAW = true;
draw_time(&mut *r.DISPLAY, *r.TIME);
*r.TIME += 1;
if *r.TIME == 60 {
*r.TIME = 0;
}
}
State::Face => {
if *r.REDRAW {
draw_face(&mut *r.DISPLAY);
*r.REDRAW = false;
}
}
}
// Toggle extcomin manually
if toggle {
(*extcomin).set_high();
} else {
(*extcomin).set_low();
}
*r.TOGGLE = !toggle;
}
fn draw_face(mut display: &mut display::Display) {
display.clear();
let bpp = Image1BPP::new(include_bytes!("../data/face_1bpp_neg.raw"), 120, 120)
.translate((0, 0));
display.draw(bpp.into_iter());
display.flush_buffer();
}
fn draw_time(mut display: &mut display::Display, time: u8) {
display.clear();
/*
let values = [
(125, 65), (124, 71), (123, 77), (122, 83), (119, 89),
(116, 94), (113, 100), (109, 105), (105, 109), (100, 113),
(95, 116), (89, 119), (83, 122), (77, 123), (71, 124),
(65, 125), (59, 124), (53, 123), (47, 122), (41, 119),
(36, 116), (30, 113), (25, 109), (21, 105), (17, 100),
(14, 95), (11, 89), (8, 83), (7, 77), (6, 71),
(5, 65), (6, 59), (7, 53), (8, 47), (11, 41),
(14, 36), (17, 30), (21, 25), (25, 21), (30, 17),
(35, 14), (41, 11), (47, 8), (53, 7), (59, 6),
(65, 5), (71, 6), (77, 7), (83, 8), (89, 11),
(94, 14), (100, 17), (105, 21), (109, 25), (113, 30),
(116, 35), (119, 41), (122, 47), (123, 53), (124, 59),
];
*/
let values =[(109, 64), (108, 68), (108, 73), (106, 77), (105, 82), (102, 86), (100, 90), (97, 94), (94, 97), (90, 100), (86, 102), (82, 105), (77, 106), (73, 108), (68, 108), (64, 109), (60, 108), (55, 108), (51, 106), (46, 105), (42, 102), (38, 100), (34, 97), (31, 94), (28, 90), (26, 86), (23, 82), (22, 77), (20, 73), (20, 68), (19, 64), (20, 60), (20, 55), (22, 51), (23, 46), (26, 42), (28, 38), (31, 34), (34, 31), (38, 28), (42, 26), (46, 23), (51, 22), (55, 20), (60, 20), (64, 19), (68, 20), (73, 20), (77, 22), (82, 23), (86, 26), (90, 28), (94, 31), (97, 34), (100, 38), (102, 42), (105, 46), (106, 51), (108, 55), (108, 60)];
let digits = [(116, 60), (108, 87), (88, 107), (61, 115), (34, 107), (14, 87), (6, 60), (14, 33), (34, 13), (61, 5), (88, 13), (108, 33)];
display.draw(Font6x8::render_str("3").translate(digits[0]).into_iter());
display.draw(Font6x8::render_str("4").translate(digits[1]).into_iter());
display.draw(Font6x8::render_str("5").translate(digits[2]).into_iter());
display.draw(Font6x8::render_str("6").translate(digits[3]).into_iter());
display.draw(Font6x8::render_str("7").translate(digits[4]).into_iter());
display.draw(Font6x8::render_str("8").translate(digits[5]).into_iter());
display.draw(Font6x8::render_str("9").translate(digits[6]).into_iter());
display.draw(Font6x8::render_str("10").translate(digits[7]).into_iter());
display.draw(Font6x8::render_str("11").translate(digits[8]).into_iter());
display.draw(Font6x8::render_str("12").translate(digits[9]).into_iter());
display.draw(Font6x8::render_str("1").translate(digits[10]).into_iter());
display.draw(Font6x8::render_str("2").translate(digits[11]).into_iter());
display.draw(Line::new((65, 65), values[time as usize], 1).into_iter());
display.flush_buffer();
}
fn draw_buffer(buffer: &[u8]) {
}
| {
*r.BUFFER_POS = 0;
} | conditional_block |
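// Hedged note on the unsafe cast in sys_tick above: reinterpreting &[u8] as &str
// is only sound when every byte is valid UTF-8. ble_message rejects bytes < 32
// but still admits 128..=255, which are not valid one-byte UTF-8 sequences, so a
// checked conversion is the safer spelling:
fn as_text(buf: &[u8]) -> &str {
    core::str::from_utf8(buf).unwrap_or("<invalid utf-8>")
}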
main.rs | #![feature(proc_macro)]
#![no_std]
extern crate cortex_m;
extern crate cortex_m_rtfm as rtfm;
extern crate stm32f30x_hal as hal;
extern crate ls010b7dh01;
extern crate rn4870;
extern crate embedded_graphics as graphics;
extern crate panic_abort;
extern crate nb;
mod display;
mod ble;
use cortex_m::asm;
use cortex_m::peripheral::syst::SystClkSource;
use rtfm::{app, Threshold};
use hal::prelude::*;
use hal::timer;
use hal::timer::Timer;
use hal::spi::Spi;
use hal::serial;
use hal::serial::Serial;
use hal::delay::Delay;
use hal::gpio::{gpiob, gpioc, Input, Output, PullUp, PushPull, AF7};
use ls010b7dh01::Ls010b7dh01;
use graphics::prelude::*;
use graphics::primitives::{Circle, Line, Rect};
use graphics::fonts::{Font, Font6x8};
use graphics::transform::Transform;
use graphics::image::Image1BPP;
app! {
device: hal::stm32f30x,
resources: {
static TOGGLE: bool = false;
static TIME: u8 = 0;
static STATE: State = State::Time;
static EXTI: hal::stm32f30x::EXTI;
static RESET_BLE: bool = true;
static REDRAW: bool = true;
static DRAW_BUFFER: [u8; 16] = [32; 16];
static BUFFER_POS: u8 = 0;
// Late Resources
static EXTCOMIN: display::Extcomin;
static DISPLAY: display::Display;
static BLE: ble::Ble;
},
tasks: {
TIM7: {
path: tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY],
},
SYS_TICK: {
path: sys_tick,
resources: [TOGGLE, EXTCOMIN, DISPLAY,
TIME, BLE, RESET_BLE, STATE, REDRAW,
DRAW_BUFFER],
},
USART1_EXTI25: {
path: ble_message,
resources: [BLE, DRAW_BUFFER, BUFFER_POS],
},
EXTI9_5: {
enabled: true,
priority: 1,
path: exti9_5,
resources: [STATE, EXTI],
},
EXTI15_10: {
path: exti15_10,
resources: [STATE, EXTI],
},
},
}
pub enum State {
Ble,
Time,
Face,
}
fn init(mut p: init::Peripherals, _r: init::Resources) -> init::LateResources {
let mut rcc = p.device.RCC.constrain();
let mut flash = p.device.FLASH.constrain();
let mut gpioa = p.device.GPIOA.split(&mut rcc.ahb);
let mut gpiob = p.device.GPIOB.split(&mut rcc.ahb);
let mut gpioc = p.device.GPIOC.split(&mut rcc.ahb);
// Enable the syscfg
rcc.apb2.enr().modify(|_, w| w.syscfgen().enabled());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().set_bit());
rcc.apb2.rstr().modify(|_, w| w.syscfgrst().clear_bit());
// Enable systick
p.core.SYST.set_clock_source(SystClkSource::Core);
p.core.SYST.set_reload(16_000_000);
p.core.SYST.enable_interrupt();
p.core.SYST.enable_counter();
// Set up our clocks & timer & delay
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut timer = Timer::tim7(p.device.TIM7, 1.hz(), clocks, &mut rcc.apb1);
//timer.listen(timer::Event::TimeOut);
let mut delay = Delay::new(p.core.SYST, clocks);
// Set up our GPIO pins
let disp_en = gpiob.pb2.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let extcomin = gpiob.pb1.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let cs = gpiob.pb0.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let mut v5_en = gpioa.pa3.into_push_pull_output(
&mut gpioa.moder,
&mut gpioa.otyper,
);
let reset_ble = gpiob.pb5.into_push_pull_output(
&mut gpiob.moder,
&mut gpiob.otyper,
);
let sck = gpioa.pa5.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let miso = gpioa.pa6.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let mosi = gpioa.pa7.into_af5(&mut gpioa.moder, &mut gpioa.afrl);
let tx = gpiob.pb6.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let rx = gpiob.pb7.into_af7(&mut gpiob.moder, &mut gpiob.afrl);
let button_1 = gpiob.pb8.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_2 = gpiob.pb9.into_pull_up_input(
&mut gpiob.moder,
&mut gpiob.pupdr,
);
let button_3 = gpioc.pc13.into_pull_up_input(
&mut gpioc.moder,
&mut gpioc.pupdr,
);
// Set up our display
let mode = ls010b7dh01::MODE;
let spi = Spi::spi1(
p.device.SPI1,
(sck, miso, mosi),
mode,
1.mhz(),
clocks,
&mut rcc.apb2,
);
let mut display = Ls010b7dh01::new(spi, cs, disp_en);
// Set up our BLE
let mut serial = Serial::usart1(
p.device.USART1,
(tx, rx),
115_200.bps(),
clocks,
&mut rcc.apb2,
);
serial.listen(serial::Event::Rxne); // TODO: Serial interrupts?
let mut ble = rn4870::Rn4870::new(serial, reset_ble);
// Set the default values
v5_en.set_high();
display.enable();
// Set up syscfg to link GPIO to EXTI
p.device.SYSCFG.exticr3.modify(|_, w| unsafe {
w.bits(0x11)
/* This does not work
w.exti8().bits(0b001) // Port b
.exti9().bits(0b001) // Port b
*/
});
p.device.SYSCFG.exticr4.modify(|_, w| unsafe {
w.exti13().bits(0b010) // Port c
});
p.device.EXTI.imr1.modify(|_, w| {
w.mr8().set_bit().mr9().set_bit().mr13().set_bit()
});
p.device.EXTI.ftsr1.modify(|_, w| {
w.tr8().set_bit().tr9().set_bit().tr13().set_bit()
});
init::LateResources {
DISPLAY: display,
EXTCOMIN: extcomin,
BLE: ble,
EXTI: p.device.EXTI,
}
}
fn idle() -> ! {
loop {
rtfm::wfi();
}
}
fn ble_message(_t: &mut Threshold, mut r: USART1_EXTI25::Resources) {
let res = r.BLE.read_raw();
match res {
Ok(n) => {
if n < 32 {
return
}
(*r.DRAW_BUFFER)[*r.BUFFER_POS as usize] = n;
*r.BUFFER_POS += 1;
if *r.BUFFER_POS == 16 {
*r.BUFFER_POS = 0;
}
}
Err(nb::Error::Other(_)) => {
r.BLE.handle_error(|uart| { uart.clear_overflow_error(); } );
}
Err(nb::Error::WouldBlock) => {}
}
}
fn exti9_5(_t: &mut Threshold, mut r: EXTI9_5::Resources) {
if r.EXTI.pr1.read().pr8().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr8().set_bit());
*r.STATE = State::Ble;
}
if r.EXTI.pr1.read().pr9().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr9().set_bit());
*r.STATE = State::Time;
}
}
fn exti15_10(_t: &mut Threshold, mut r: EXTI15_10::Resources) {
if r.EXTI.pr1.read().pr13().bit_is_set() {
r.EXTI.pr1.modify(|_, w| w.pr13().set_bit());
*r.STATE = State::Face;
}
}
fn | (_t: &mut Threshold, mut r: TIM7::Resources) {
}
fn sys_tick(_t: &mut Threshold, mut r: SYS_TICK::Resources) {
let toggle = *r.TOGGLE;
let extcomin = &mut *r.EXTCOMIN;
if *r.RESET_BLE {
r.BLE.hard_reset_on();
*r.RESET_BLE = false;
} else {
r.BLE.hard_reset_off();
}
match *r.STATE {
State::Ble => {
r.DISPLAY.clear();
//let s = String::from_utf8_lossy(&*r.DRAW_BUFFER);
unsafe {
let s = &*(&*r.DRAW_BUFFER as *const [u8] as *const str);
r.DISPLAY.draw(Font6x8::render_str(s).translate((5, 50)).into_iter());
r.DISPLAY.flush_buffer();
}
}
State::Time => {
*r.REDRAW = true;
draw_time(&mut *r.DISPLAY, *r.TIME);
*r.TIME += 1;
if *r.TIME == 60 {
*r.TIME = 0;
}
}
State::Face => {
if *r.REDRAW {
draw_face(&mut *r.DISPLAY);
*r.REDRAW = false;
}
}
}
// Toggle extcomin manually
if toggle {
(*extcomin).set_high();
} else {
(*extcomin).set_low();
}
*r.TOGGLE = !toggle;
}
fn draw_face(mut display: &mut display::Display) {
display.clear();
let bpp = Image1BPP::new(include_bytes!("../data/face_1bpp_neg.raw"), 120, 120)
.translate((0, 0));
display.draw(bpp.into_iter());
display.flush_buffer();
}
fn draw_time(mut display: &mut display::Display, time: u8) {
display.clear();
/*
let values = [
(125, 65), (124, 71), (123, 77), (122, 83), (119, 89),
(116, 94), (113, 100), (109, 105), (105, 109), (100, 113),
(95, 116), (89, 119), (83, 122), (77, 123), (71, 124),
(65, 125), (59, 124), (53, 123), (47, 122), (41, 119),
(36, 116), (30, 113), (25, 109), (21, 105), (17, 100),
(14, 95), (11, 89), (8, 83), (7, 77), (6, 71),
(5, 65), (6, 59), (7, 53), (8, 47), (11, 41),
(14, 36), (17, 30), (21, 25), (25, 21), (30, 17),
(35, 14), (41, 11), (47, 8), (53, 7), (59, 6),
(65, 5), (71, 6), (77, 7), (83, 8), (89, 11),
(94, 14), (100, 17), (105, 21), (109, 25), (113, 30),
(116, 35), (119, 41), (122, 47), (123, 53), (124, 59),
];
*/
let values =[(109, 64), (108, 68), (108, 73), (106, 77), (105, 82), (102, 86), (100, 90), (97, 94), (94, 97), (90, 100), (86, 102), (82, 105), (77, 106), (73, 108), (68, 108), (64, 109), (60, 108), (55, 108), (51, 106), (46, 105), (42, 102), (38, 100), (34, 97), (31, 94), (28, 90), (26, 86), (23, 82), (22, 77), (20, 73), (20, 68), (19, 64), (20, 60), (20, 55), (22, 51), (23, 46), (26, 42), (28, 38), (31, 34), (34, 31), (38, 28), (42, 26), (46, 23), (51, 22), (55, 20), (60, 20), (64, 19), (68, 20), (73, 20), (77, 22), (82, 23), (86, 26), (90, 28), (94, 31), (97, 34), (100, 38), (102, 42), (105, 46), (106, 51), (108, 55), (108, 60)];
let digits = [(116, 60), (108, 87), (88, 107), (61, 115), (34, 107), (14, 87), (6, 60), (14, 33), (34, 13), (61, 5), (88, 13), (108, 33)];
display.draw(Font6x8::render_str("3").translate(digits[0]).into_iter());
display.draw(Font6x8::render_str("4").translate(digits[1]).into_iter());
display.draw(Font6x8::render_str("5").translate(digits[2]).into_iter());
display.draw(Font6x8::render_str("6").translate(digits[3]).into_iter());
display.draw(Font6x8::render_str("7").translate(digits[4]).into_iter());
display.draw(Font6x8::render_str("8").translate(digits[5]).into_iter());
display.draw(Font6x8::render_str("9").translate(digits[6]).into_iter());
display.draw(Font6x8::render_str("10").translate(digits[7]).into_iter());
display.draw(Font6x8::render_str("11").translate(digits[8]).into_iter());
display.draw(Font6x8::render_str("12").translate(digits[9]).into_iter());
display.draw(Font6x8::render_str("1").translate(digits[10]).into_iter());
display.draw(Font6x8::render_str("2").translate(digits[11]).into_iter());
display.draw(Line::new((65, 65), values[time as usize], 1).into_iter());
display.flush_buffer();
}
fn draw_buffer(buffer: &[u8]) {
}
| tick | identifier_name |
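// Hedged sketch of where draw_time's hard-coded endpoint table could come from:
// sixty points on a circle of radius ~45 centred at (64, 64), one per minute,
// starting at 3 o'clock; the values match the table to within a pixel of rounding.
// Float math makes this a host-side generator, not no_std firmware code.
fn minute_points() -> [(i32, i32); 60] {
    let mut pts = [(0, 0); 60];
    for (i, p) in pts.iter_mut().enumerate() {
        let theta = i as f32 * core::f32::consts::TAU / 60.0;
        *p = (
            (64.0 + 45.0 * theta.cos()).round() as i32,
            (64.0 + 45.0 * theta.sin()).round() as i32,
        );
    }
    pts
}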
conv2d_compact.rs | use super::{ConstraintSystem, Scalar, MemoryManager, Memory, TensorAddress, SCALAR_SIZE, BigScalar, RangeFull, Range, RangeFrom, RangeTo, Id, min, Functions, ActivationFunction};
use crate::scalar::power_of_two;
impl ConstraintSystem {
pub fn run_conv2d_compact<T: Scalar>(mem: &MemoryManager, param: &[u32], var_dict: &mut Memory<T>) {
let (mul_result, k_col, packed_size,bit_length,extracted) = (param[0], param[1], param[2], param[3], param[4]);
let (fout, row_out, col_packed) = (mem[mul_result].dim[0],mem[mul_result].dim[1],mem[mul_result].dim[2]);
let row_dim = mem[extracted].dim[2];
let offset = power_of_two::<T>(bit_length - 1);
let mut big_offset = T::zero();
for _ in 0..packed_size + k_col - 1 {
big_offset = (big_offset * T::from_i32(2) + T::one()) * offset;
}
let n_packed = packed_size + k_col - 1;
for layer_out in 0..fout {
//matching result
for r in 0..row_out {
for c in 0..col_packed {
let val = (var_dict[mem[mul_result].at_idx(&[layer_out, r, c]) as usize] + big_offset).to_bytes();
let mut ext = Vec::new();
ext.resize((packed_size + k_col - 1) as usize, T::zero());
for k in 0..(packed_size + k_col - 1) * bit_length {
ext[(k / bit_length) as usize] += T::from_i32(((val[(k/8) as usize] >> (k % 8)) & 1) as i32) * power_of_two(k % bit_length);
}
for k in 0..packed_size + k_col - 1 {
let idx = c * n_packed + k;
if idx >= row_dim {
break
}
var_dict[mem[extracted].at_idx(&[layer_out,r,idx]) as usize] = ext[k as usize] - offset;
}
}
}
}
}
pub fn conv2d_compact(&mut self, input: TensorAddress, output: TensorAddress, weight_rev: TensorAddress, bias: Option<(TensorAddress, u32)>, bit_length: u8, act: ActivationFunction) {
// packing weight
let dim = &self.mem[weight_rev].dim;
let (fout, fin, k_row, k_col) = (dim[0], dim[1], dim[2], dim[3]);
let packed_weight = self.mem.alloc(&[fout, fin, k_row]);
assert!(k_col * (bit_length as u32) <= SCALAR_SIZE);
self.packing_tensor(weight_rev, packed_weight, bit_length, k_col as u8,1, BigScalar::one(), true);
let (row, col) = (self.mem[input].dim[1], self.mem[input].dim[2]);
let packed_size = min((SCALAR_SIZE / (bit_length as u32)).checked_sub(k_col).unwrap(),col);
let col_packed = (col-1)/packed_size + 1;
let packed_layer = self.mem.alloc(&[fin, row, col_packed]);
// packing row of inputs
self.packing_tensor_by_dim(input,&[-1], packed_layer, bit_length, packed_size as u8,1,BigScalar::one(), true);
// splicing output by row
let mut mul_input = Vec::new();
for r in 0..row - k_row + 1 { | }
mul_input.push(mul_input_row);
}
//packing bias
let mut packed_bias: Vec<Vec<TensorAddress>> = Vec::with_capacity(fout as usize);
let mut bias_dim = 0;
let mut bias_scale = 0;
if let Some((b, scale)) = bias {
bias_dim = (col - k_col)/packed_size + 1;
bias_scale = scale;
for layer_out in 0..fout {
let mut packed_bias_row: Vec<TensorAddress> = Vec::with_capacity(((row - k_row)/scale + 1) as usize);
for r in 0..(row - k_row)/scale + 1 {
let packed_bias = self.mem.alloc(&[bias_dim]);
let bias_row = self.mem.save(self.mem[b].at_(&[layer_out, r]));
self.packing_tensor(bias_row, packed_bias, bit_length, packed_size as u8, scale,power_of_two(bit_length as u32 * (k_col - 1)), true);
packed_bias_row.push(packed_bias);
}
packed_bias.push(packed_bias_row);
}
}
let mul_result = self.mem.alloc(&[fout, row - k_row + 1, col_packed]);
for layer_out in 0..fout {
let packed_weight = self.mem.save(self.mem[packed_weight].at_(&[layer_out]));
for r in 0..row - k_row + 1 {
for c in 0..col_packed {
let cur_bias = if c < bias_dim {Some(self.mem[packed_bias[layer_out as usize][(r/bias_scale) as usize]].at_idx(&[c]))} else {None};
self.dot(mul_input[r as usize][c as usize], packed_weight, self.mem[mul_result].at_idx(&[layer_out, r, c]), cur_bias);
}
}
}
// sign extraction
let n_packed = packed_size + k_col - 1;
let extracted_length = (col_packed - 1) * n_packed + ((col-1) % packed_size) + k_col;
let extracted = self.mem.alloc(&[fout, row - k_row + 1, extracted_length]);
self.packing_tensor_by_dim(extracted,&[-1], mul_result, bit_length, n_packed as u8,1,BigScalar::one(), false);
let params = vec![mul_result, k_col, packed_size, bit_length as u32, extracted];
self.compute.push((params.into_boxed_slice(), Functions::ConvCompact));
fn split_tensor<const N:usize>(mem: &mut MemoryManager,tensor: TensorAddress, length: u32, pos: [u32; N]) -> [(Option<TensorAddress>, Option<TensorAddress>); N] {
let fully_packed = mem[tensor].dim[2]/length;
let remainder = mem[tensor].dim[2] % length;
// should not save this
let tmp=mem[tensor].partition(2, length);
let mut res: [(Option<TensorAddress>, Option<TensorAddress>); N] = [(None, None); N];
for i in 0..N - 1 {
if pos[i] == pos[i+1] {
res[i] = (None, None);
continue;
}
let n= fully_packed + if remainder >= pos[i+1] {1} else {0};
let full = if n > 0 {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), RangeTo(..n), Range(pos[i]..pos[i+1])])))
} else {
None
};
let rem = if pos[i] < remainder && remainder < pos[i+1] {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), Id(n), Range(pos[i]..remainder)])))
} else {
None
};
res[i] = (full, rem);
}
res
}
fn extract_sign_part(c: &mut ConstraintSystem, extracted: TensorAddress, bit_length: u8) {
let output = c.mem.alloc(&c.mem[extracted].dim.to_owned());
c.sign(extracted, output, bit_length - 1);
}
let reduced_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeTo(..extracted_length - k_col + 1)]));
if k_col != 1 {
let rem_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeFrom(extracted_length - k_col + 1..)]));
extract_sign_part(self, rem_extract, bit_length);
}
let [(output_full, output_full_rem), (output_part, output_part_rem), (_,_)]= split_tensor(&mut self.mem, output, packed_size, [0, packed_size-(k_col-1), packed_size]);
let [(ext_left, ext_left_rem), (ext_full, ext_full_rem), (ext_right,ext_right_rem), (_,_)]= split_tensor(&mut self.mem, reduced_extract, n_packed, [0, k_col-1, packed_size, n_packed]);
// extract the fully correct part
if let Some(e) = ext_full {
self.activation(e, output_full.unwrap(), bit_length - 1, act);
}
if let Some(e) = ext_full_rem {
self.activation(e, output_full_rem.unwrap(), bit_length - 1, act);
}
//extract left and right sign part
if let Some(e) = ext_left {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_left_rem {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_right {
extract_sign_part(self,e, bit_length);
}
assert_eq!(ext_right_rem, None);
if let Some(left_rem) = ext_left_rem {
if let Some(right) = ext_right {
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[right].dim[2] - 1, k_col - 1]);
let left = self.mem.save(self.mem[ext_left.unwrap()].at(&[RangeFull(), RangeFrom(1..)]));
self.sum_two(right, left, sum_res);
self.activation(sum_res, output_part.unwrap(), bit_length - 1, act);
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[left_rem].dim[2]]);
let right_rem = self.mem.save(self.mem[right].at(&[RangeFull(), Id(self.mem[right].dim[2] - 1), RangeTo(..self.mem[left_rem].dim[2])]));
self.sum_two(right_rem, left_rem, sum_res);
self.activation(sum_res, output_part_rem.unwrap(), bit_length - 1, act);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::scalar::slice_to_scalar;
#[test]
fn conv2d_compact_test() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[2,5,5]);
let weight = x.mem.alloc(&[2,2,3,3]);
let output = x.mem.alloc(&[2,3,3]);
let bias = x.mem.alloc(&[2,3,3]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, Some((bias, 1)), 7, ActivationFunction::Sign);
let mut mem: Vec<BigScalar> = slice_to_scalar(&[1,0,1,-1,0,0,0,-2,4,-1,-4,0,3,-4,0,0,0,1,-1,1,-4,2,3,-1,0,-4,2,2,-3,-1,-1,1,2,-1,1,4,4,2,3,-3,0,3,-2,3,0,2,3,3,-2,2,4,3,3,-4,-4,-1,3,1,4,-2,-2,0,-2,4,-3,0,0,0,-2,0,0,0,0,3,4,-3,-4,-1,-1,-4,3,1,-2,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-3,0,-3,0,1,-4,-1,2,0,0,-4,2,1,3,2,-3,4,-3]);
mem.resize(x.mem.n_var as usize, Scalar::zero());
x.compute(&mut mem);
assert_eq!(mem[87..87+18], slice_to_scalar(&[1,1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,-1,-1]));
x.sort_cons();
assert!(x.verify(&mem));
}
#[test]
fn conv2d_compact_test_small() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[1,4,3]);
let weight = x.mem.alloc(&[1,1,3,3]);
let output = x.mem.alloc(&[1,2,1]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, None, 5,ActivationFunction::Sign);
let mut mem = x.mem.new_memory::<BigScalar>();
x.load_memory(input, &mut mem, &slice_to_scalar(&[1,1,2, 1,2,1, 1,1,1, 1,2,1]));
x.load_memory(weight, &mut mem, &slice_to_scalar(&[1,1,-1, 1,-1,1, 1,1,1]));
x.compute(&mut mem);
assert_eq!(mem[x.mem[output].begin() as usize..x.mem[output].end() as usize], slice_to_scalar(&[1,1]));
x.sort_cons();
assert!(x.verify(&mem));
}
} | let mut mul_input_row = Vec::new();
for c in 0..col_packed {
mul_input_row.push(self.mem.save(self.mem[packed_layer].at(&[RangeFull(), Range(r..r + k_row), Id(c)]))); | random_line_split |
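// Hedged toy model of the packing trick run_conv2d_compact relies on above:
// several b-bit signed limbs share one integer (base 2^b); adding a per-limb
// offset of 2^(b-1) makes every digit non-negative, so plain base-2^b digit
// extraction recovers the limbs — mirroring big_offset in the real code.
fn pack(vals: &[i64], b: u32) -> i64 {
    vals.iter().rev().fold(0, |acc, &v| (acc << b) + v)
}
fn unpack(mut x: i64, n: usize, b: u32) -> Vec<i64> {
    let offset = 1i64 << (b - 1);
    for i in 0..n {
        x += offset << (b * i as u32); // shift every limb into [0, 2^b)
    }
    (0..n)
        .map(|i| ((x >> (b * i as u32)) & ((1i64 << b) - 1)) - offset)
        .collect()
}
// e.g. unpack(pack(&[3, -2], 8), 2, 8) == vec![3, -2]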
conv2d_compact.rs | use super::{ConstraintSystem, Scalar, MemoryManager, Memory, TensorAddress, SCALAR_SIZE, BigScalar, RangeFull, Range, RangeFrom, RangeTo, Id, min, Functions, ActivationFunction};
use crate::scalar::power_of_two;
impl ConstraintSystem {
pub fn run_conv2d_compact<T: Scalar>(mem: &MemoryManager, param: &[u32], var_dict: &mut Memory<T>) {
let (mul_result, k_col, packed_size,bit_length,extracted) = (param[0], param[1], param[2], param[3], param[4]);
let (fout, row_out, col_packed) = (mem[mul_result].dim[0],mem[mul_result].dim[1],mem[mul_result].dim[2]);
let row_dim = mem[extracted].dim[2];
let offset = power_of_two::<T>(bit_length - 1);
let mut big_offset = T::zero();
for _ in 0..packed_size + k_col - 1 {
big_offset = (big_offset * T::from_i32(2) + T::one()) * offset;
}
let n_packed = packed_size + k_col - 1;
for layer_out in 0..fout {
//matching result
for r in 0..row_out {
for c in 0..col_packed {
let val = (var_dict[mem[mul_result].at_idx(&[layer_out, r, c]) as usize] + big_offset).to_bytes();
let mut ext = Vec::new();
ext.resize((packed_size + k_col - 1) as usize, T::zero());
for k in 0..(packed_size + k_col - 1) * bit_length {
ext[(k / bit_length) as usize] += T::from_i32(((val[(k/8) as usize] >> (k % 8)) & 1) as i32) * power_of_two(k % bit_length);
}
for k in 0..packed_size + k_col - 1 {
let idx = c * n_packed + k;
if idx >= row_dim {
break
}
var_dict[mem[extracted].at_idx(&[layer_out,r,idx]) as usize] = ext[k as usize] - offset;
}
}
}
}
}
pub fn conv2d_compact(&mut self, input: TensorAddress, output: TensorAddress, weight_rev: TensorAddress, bias: Option<(TensorAddress, u32)>, bit_length: u8, act: ActivationFunction) | for r in 0..row - k_row + 1 {
let mut mul_input_row = Vec::new();
for c in 0..col_packed {
mul_input_row.push(self.mem.save(self.mem[packed_layer].at(&[RangeFull(), Range(r..r + k_row), Id(c)])));
}
mul_input.push(mul_input_row);
}
//packing bias
let mut packed_bias: Vec<Vec<TensorAddress>> = Vec::with_capacity(fout as usize);
let mut bias_dim = 0;
let mut bias_scale = 0;
if let Some((b, scale)) = bias {
bias_dim = (col - k_col)/packed_size + 1;
bias_scale = scale;
for layer_out in 0..fout {
let mut packed_bias_row: Vec<TensorAddress> = Vec::with_capacity(((row - k_row)/scale + 1) as usize);
for r in 0..(row - k_row)/scale + 1 {
let packed_bias = self.mem.alloc(&[bias_dim]);
let bias_row = self.mem.save(self.mem[b].at_(&[layer_out, r]));
self.packing_tensor(bias_row, packed_bias, bit_length, packed_size as u8, scale,power_of_two(bit_length as u32 * (k_col - 1)), true);
packed_bias_row.push(packed_bias);
}
packed_bias.push(packed_bias_row);
}
}
let mul_result = self.mem.alloc(&[fout, row - k_row + 1, col_packed]);
for layer_out in 0..fout {
let packed_weight = self.mem.save(self.mem[packed_weight].at_(&[layer_out]));
for r in 0..row - k_row + 1 {
for c in 0..col_packed {
let cur_bias = if c < bias_dim {Some(self.mem[packed_bias[layer_out as usize][(r/bias_scale) as usize]].at_idx(&[c]))} else {None};
self.dot(mul_input[r as usize][c as usize], packed_weight, self.mem[mul_result].at_idx(&[layer_out, r, c]), cur_bias);
}
}
}
// sign extraction
let n_packed = packed_size + k_col - 1;
let extracted_length = (col_packed - 1) * n_packed + ((col-1) % packed_size) + k_col;
let extracted = self.mem.alloc(&[fout, row - k_row + 1, extracted_length]);
self.packing_tensor_by_dim(extracted,&[-1], mul_result, bit_length, n_packed as u8,1,BigScalar::one(), false);
let params = vec![mul_result, k_col, packed_size, bit_length as u32, extracted];
self.compute.push((params.into_boxed_slice(), Functions::ConvCompact));
fn split_tensor<const N:usize>(mem: &mut MemoryManager,tensor: TensorAddress, length: u32, pos: [u32; N]) -> [(Option<TensorAddress>, Option<TensorAddress>); N] {
let fully_packed = mem[tensor].dim[2]/length;
let remainder = mem[tensor].dim[2] % length;
// should not save this
let tmp=mem[tensor].partition(2, length);
let mut res: [(Option<TensorAddress>, Option<TensorAddress>); N] = [(None, None); N];
for i in 0..N - 1 {
if pos[i] == pos[i+1] {
res[i] = (None, None);
continue;
}
let n= fully_packed + if remainder >= pos[i+1] {1} else {0};
let full = if n > 0 {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), RangeTo(..n), Range(pos[i]..pos[i+1])])))
} else {
None
};
let rem = if pos[i] < remainder && remainder < pos[i+1] {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), Id(n), Range(pos[i]..remainder)])))
} else {
None
};
res[i] = (full, rem);
}
res
}
fn extract_sign_part(c: &mut ConstraintSystem, extracted: TensorAddress, bit_length: u8) {
let output = c.mem.alloc(&c.mem[extracted].dim.to_owned());
c.sign(extracted, output, bit_length - 1);
}
let reduced_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeTo(..extracted_length - k_col + 1)]));
if k_col != 1 {
let rem_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeFrom(extracted_length - k_col + 1..)]));
extract_sign_part(self, rem_extract, bit_length);
}
let [(output_full, output_full_rem), (output_part, output_part_rem), (_,_)]= split_tensor(&mut self.mem, output, packed_size, [0, packed_size-(k_col-1), packed_size]);
let [(ext_left, ext_left_rem), (ext_full, ext_full_rem), (ext_right,ext_right_rem), (_,_)]= split_tensor(&mut self.mem, reduced_extract, n_packed, [0, k_col-1, packed_size, n_packed]);
// extract the fully correct part
if let Some(e) = ext_full {
self.activation(e, output_full.unwrap(), bit_length - 1, act);
}
if let Some(e) = ext_full_rem {
self.activation(e, output_full_rem.unwrap(), bit_length - 1, act);
}
//extract left and right sign part
if let Some(e) = ext_left {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_left_rem {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_right {
extract_sign_part(self,e, bit_length);
}
assert_eq!(ext_right_rem, None);
if let Some(left_rem) = ext_left_rem {
if let Some(right) = ext_right {
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[right].dim[2] - 1, k_col - 1]);
let left = self.mem.save(self.mem[ext_left.unwrap()].at(&[RangeFull(), RangeFrom(1..)]));
self.sum_two(right, left, sum_res);
self.activation(sum_res, output_part.unwrap(), bit_length - 1, act);
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[left_rem].dim[2]]);
let right_rem = self.mem.save(self.mem[right].at(&[RangeFull(), Id(self.mem[right].dim[2] - 1), RangeTo(..self.mem[left_rem].dim[2])]));
self.sum_two(right_rem, left_rem, sum_res);
self.activation(sum_res, output_part_rem.unwrap(), bit_length - 1, act);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::scalar::slice_to_scalar;
#[test]
fn conv2d_compact_test() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[2,5,5]);
let weight = x.mem.alloc(&[2,2,3,3]);
let output = x.mem.alloc(&[2,3,3]);
let bias = x.mem.alloc(&[2,3,3]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, Some((bias, 1)), 7, ActivationFunction::Sign);
let mut mem: Vec<BigScalar> = slice_to_scalar(&[1,0,1,-1,0,0,0,-2,4,-1,-4,0,3,-4,0,0,0,1,-1,1,-4,2,3,-1,0,-4,2,2,-3,-1,-1,1,2,-1,1,4,4,2,3,-3,0,3,-2,3,0,2,3,3,-2,2,4,3,3,-4,-4,-1,3,1,4,-2,-2,0,-2,4,-3,0,0,0,-2,0,0,0,0,3,4,-3,-4,-1,-1,-4,3,1,-2,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-3,0,-3,0,1,-4,-1,2,0,0,-4,2,1,3,2,-3,4,-3]);
mem.resize(x.mem.n_var as usize, Scalar::zero());
x.compute(&mut mem);
assert_eq!(mem[87..87+18], slice_to_scalar(&[1,1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,-1,-1]));
x.sort_cons();
assert!(x.verify(&mem));
}
#[test]
fn conv2d_compact_test_small() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[1,4,3]);
let weight = x.mem.alloc(&[1,1,3,3]);
let output = x.mem.alloc(&[1,2,1]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, None, 5,ActivationFunction::Sign);
let mut mem = x.mem.new_memory::<BigScalar>();
x.load_memory(input, &mut mem, &slice_to_scalar(&[1,1,2, 1,2,1, 1,1,1, 1,2,1]));
x.load_memory(weight, &mut mem, &slice_to_scalar(&[1,1,-1, 1,-1,1, 1,1,1]));
x.compute(&mut mem);
assert_eq!(mem[x.mem[output].begin() as usize..x.mem[output].end() as usize], slice_to_scalar(&[1,1]));
x.sort_cons();
assert!(x.verify(&mem));
}
} | {
// packing weight
let dim = &self.mem[weight_rev].dim;
let (fout, fin, k_row, k_col) = (dim[0], dim[1], dim[2], dim[3]);
let packed_weight = self.mem.alloc(&[fout, fin, k_row]);
assert!(k_col * (bit_length as u32) <= SCALAR_SIZE);
self.packing_tensor(weight_rev, packed_weight, bit_length, k_col as u8,1, BigScalar::one(), true);
let (row, col) = (self.mem[input].dim[1], self.mem[input].dim[2]);
let packed_size = min((SCALAR_SIZE / (bit_length as u32)).checked_sub(k_col).unwrap(),col);
let col_packed = (col-1)/packed_size + 1;
let packed_layer = self.mem.alloc(&[fin, row, col_packed]);
// packing row of inputs
self.packing_tensor_by_dim(input,&[-1], packed_layer, bit_length, packed_size as u8,1,BigScalar::one(), true);
// splicing output by row
let mut mul_input = Vec::new(); | identifier_body |
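// A standalone sketch of the limb-packing trick that run_conv2d_compact above
// relies on: several signed b-bit values share one big scalar, one value per
// 2^b-wide limb, and a per-limb offset of 2^(b-1) keeps every limb
// non-negative so bit-level extraction works. Plain i128 stands in for the
// crate's BigScalar; pack_signed/unpack_signed are illustrative names, not
// part of the original API.
fn pack_signed(vals: &[i64], bit_length: u32) -> i128 {
    let offset = 1i128 << (bit_length - 1);
    let mut acc = 0i128;
    for &v in vals.iter().rev() {
        // shift everything up one limb, then drop the offset value into limb 0
        acc = (acc << bit_length) + (v as i128 + offset);
    }
    acc
}

fn unpack_signed(mut packed: i128, n: usize, bit_length: u32) -> Vec<i64> {
    let offset = 1i128 << (bit_length - 1);
    let mask = (1i128 << bit_length) - 1;
    (0..n)
        .map(|_| {
            let limb = (packed & mask) - offset; // undo the per-limb offset
            packed >>= bit_length;
            limb as i64
        })
        .collect()
}

fn main() {
    let vals = [3i64, -4, 0, 7];
    let packed = pack_signed(&vals, 8);
    assert_eq!(unpack_signed(packed, vals.len(), 8), vals);
}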
conv2d_compact.rs | use super::{ConstraintSystem, Scalar, MemoryManager, Memory, TensorAddress, SCALAR_SIZE, BigScalar, RangeFull, Range, RangeFrom, RangeTo, Id, min, Functions, ActivationFunction};
use crate::scalar::power_of_two;
impl ConstraintSystem {
pub fn run_conv2d_compact<T: Scalar>(mem: &MemoryManager, param: &[u32], var_dict: &mut Memory<T>) {
let (mul_result, k_col, packed_size,bit_length,extracted) = (param[0], param[1], param[2], param[3], param[4]);
let (fout, row_out, col_packed) = (mem[mul_result].dim[0],mem[mul_result].dim[1],mem[mul_result].dim[2]);
let row_dim = mem[extracted].dim[2];
let offset = power_of_two::<T>(bit_length - 1);
let mut big_offset = T::zero();
for _ in 0..packed_size + k_col - 1 {
big_offset = (big_offset * T::from_i32(2) + T::one()) * offset;
}
let n_packed = packed_size + k_col - 1;
for layer_out in 0..fout {
//matching result
for r in 0..row_out {
for c in 0..col_packed {
let val = (var_dict[mem[mul_result].at_idx(&[layer_out, r, c]) as usize] + big_offset).to_bytes();
let mut ext = Vec::new();
ext.resize((packed_size + k_col - 1) as usize, T::zero());
for k in 0..(packed_size + k_col - 1) * bit_length {
ext[(k / bit_length) as usize] += T::from_i32(((val[(k/8) as usize] >> (k % 8)) & 1) as i32) * power_of_two(k % bit_length);
}
for k in 0..packed_size + k_col - 1 {
let idx = c * n_packed + k;
if idx >= row_dim {
break
}
var_dict[mem[extracted].at_idx(&[layer_out,r,idx]) as usize] = ext[k as usize] - offset;
}
}
}
}
}
pub fn conv2d_compact(&mut self, input: TensorAddress, output: TensorAddress, weight_rev: TensorAddress, bias: Option<(TensorAddress, u32)>, bit_length: u8, act: ActivationFunction) {
// packing weight
let dim = &self.mem[weight_rev].dim;
let (fout, fin, k_row, k_col) = (dim[0], dim[1], dim[2], dim[3]);
let packed_weight = self.mem.alloc(&[fout, fin, k_row]);
assert!(k_col * (bit_length as u32) <= SCALAR_SIZE);
self.packing_tensor(weight_rev, packed_weight, bit_length, k_col as u8,1, BigScalar::one(), true);
let (row, col) = (self.mem[input].dim[1], self.mem[input].dim[2]);
let packed_size = min((SCALAR_SIZE / (bit_length as u32)).checked_sub(k_col).unwrap(),col);
let col_packed = (col-1)/packed_size + 1;
let packed_layer = self.mem.alloc(&[fin, row, col_packed]);
// packing row of inputs
self.packing_tensor_by_dim(input,&[-1], packed_layer, bit_length, packed_size as u8,1,BigScalar::one(), true);
// splicing output by row
let mut mul_input = Vec::new();
for r in 0..row - k_row + 1 {
let mut mul_input_row = Vec::new();
for c in 0..col_packed {
mul_input_row.push(self.mem.save(self.mem[packed_layer].at(&[RangeFull(), Range(r..r + k_row), Id(c)])));
}
mul_input.push(mul_input_row);
}
//packing bias
let mut packed_bias: Vec<Vec<TensorAddress>> = Vec::with_capacity(fout as usize);
let mut bias_dim = 0;
let mut bias_scale = 0;
if let Some((b, scale)) = bias {
bias_dim = (col - k_col)/packed_size + 1;
bias_scale = scale;
for layer_out in 0..fout {
let mut packed_bias_row: Vec<TensorAddress> = Vec::with_capacity(((row - k_row)/scale + 1) as usize);
for r in 0..(row - k_row)/scale + 1 {
let packed_bias = self.mem.alloc(&[bias_dim]);
let bias_row = self.mem.save(self.mem[b].at_(&[layer_out, r]));
self.packing_tensor(bias_row, packed_bias, bit_length, packed_size as u8, scale,power_of_two(bit_length as u32 * (k_col - 1)), true);
packed_bias_row.push(packed_bias);
}
packed_bias.push(packed_bias_row);
}
}
let mul_result = self.mem.alloc(&[fout, row - k_row + 1, col_packed]);
for layer_out in 0..fout {
let packed_weight = self.mem.save(self.mem[packed_weight].at_(&[layer_out]));
for r in 0..row - k_row + 1 {
for c in 0..col_packed {
let cur_bias = if c < bias_dim {Some(self.mem[packed_bias[layer_out as usize][(r/bias_scale) as usize]].at_idx(&[c]))} else {None};
self.dot(mul_input[r as usize][c as usize], packed_weight, self.mem[mul_result].at_idx(&[layer_out, r, c]), cur_bias);
}
}
}
// sign extraction
let n_packed = packed_size + k_col - 1;
let extracted_length = (col_packed - 1) * n_packed + ((col-1) % packed_size) + k_col;
let extracted = self.mem.alloc(&[fout, row - k_row + 1, extracted_length]);
self.packing_tensor_by_dim(extracted,&[-1], mul_result, bit_length, n_packed as u8,1,BigScalar::one(), false);
let params = vec![mul_result, k_col, packed_size, bit_length as u32, extracted];
self.compute.push((params.into_boxed_slice(), Functions::ConvCompact));
fn split_tensor<const N:usize>(mem: &mut MemoryManager,tensor: TensorAddress, length: u32, pos: [u32; N]) -> [(Option<TensorAddress>, Option<TensorAddress>); N] {
let fully_packed = mem[tensor].dim[2]/length;
let remainder = mem[tensor].dim[2] % length;
// should not save this
let tmp = mem[tensor].partition(2, length);
let mut res: [(Option<TensorAddress>, Option<TensorAddress>); N] = [(None, None); N];
for i in 0..N - 1 {
if pos[i] == pos[i+1] {
res[i] = (None, None);
continue;
}
let n = fully_packed + if remainder >= pos[i+1] {1} else {0};
let full = if n > 0 {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), RangeTo(..n), Range(pos[i]..pos[i+1])])))
} else {
None
};
let rem = if pos[i] < remainder && remainder < pos[i+1] {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), Id(n), Range(pos[i]..remainder)])))
} else {
None
};
res[i] = (full, rem);
}
res
}
fn extract_sign_part(c: &mut ConstraintSystem, extracted: TensorAddress, bit_length: u8) {
let output = c.mem.alloc(&c.mem[extracted].dim.to_owned());
c.sign(extracted, output, bit_length - 1);
}
let reduced_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeTo(..extracted_length - k_col + 1)]));
if k_col != 1 {
let rem_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeFrom(extracted_length - k_col + 1..)]));
extract_sign_part(self, rem_extract, bit_length);
}
let [(output_full, output_full_rem), (output_part, output_part_rem), (_,_)]= split_tensor(&mut self.mem, output, packed_size, [0, packed_size-(k_col-1), packed_size]);
let [(ext_left, ext_left_rem), (ext_full, ext_full_rem), (ext_right,ext_right_rem), (_,_)]= split_tensor(&mut self.mem, reduced_extract, n_packed, [0, k_col-1, packed_size, n_packed]);
// extract the fully correct part
if let Some(e) = ext_full {
self.activation(e, output_full.unwrap(), bit_length - 1, act);
}
if let Some(e) = ext_full_rem {
self.activation(e, output_full_rem.unwrap(), bit_length - 1, act);
}
//extract left and right sign part
if let Some(e) = ext_left {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_left_rem {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_right {
extract_sign_part(self,e, bit_length);
}
assert_eq!(ext_right_rem, None);
if let Some(left_rem) = ext_left_rem {
if let Some(right) = ext_right {
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[right].dim[2] - 1, k_col - 1]);
let left = self.mem.save(self.mem[ext_left.unwrap()].at(&[RangeFull(), RangeFrom(1..)]));
self.sum_two(right, left, sum_res);
self.activation(sum_res, output_part.unwrap(), bit_length - 1, act);
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[left_rem].dim[2]]);
let right_rem = self.mem.save(self.mem[right].at(&[RangeFull(), Id(self.mem[right].dim[2] - 1), RangeTo(..self.mem[left_rem].dim[2])]));
self.sum_two(right_rem, left_rem, sum_res);
self.activation(sum_res, output_part_rem.unwrap(), bit_length - 1, act);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::scalar::slice_to_scalar;
#[test]
fn conv2d_compact_test() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[2,5,5]);
let weight = x.mem.alloc(&[2,2,3,3]);
let output = x.mem.alloc(&[2,3,3]);
let bias = x.mem.alloc(&[2,3,3]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, Some((bias, 1)), 7, ActivationFunction::Sign);
let mut mem: Vec<BigScalar> = slice_to_scalar(&[1,0,1,-1,0,0,0,-2,4,-1,-4,0,3,-4,0,0,0,1,-1,1,-4,2,3,-1,0,-4,2,2,-3,-1,-1,1,2,-1,1,4,4,2,3,-3,0,3,-2,3,0,2,3,3,-2,2,4,3,3,-4,-4,-1,3,1,4,-2,-2,0,-2,4,-3,0,0,0,-2,0,0,0,0,3,4,-3,-4,-1,-1,-4,3,1,-2,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-3,0,-3,0,1,-4,-1,2,0,0,-4,2,1,3,2,-3,4,-3]);
mem.resize(x.mem.n_var as usize, Scalar::zero());
x.compute(&mut mem);
assert_eq!(mem[87..87+18], slice_to_scalar(&[1,1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,-1,-1]));
x.sort_cons();
assert!(x.verify(&mem));
}
#[test]
fn | () {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[1,4,3]);
let weight = x.mem.alloc(&[1,1,3,3]);
let output = x.mem.alloc(&[1,2,1]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, None, 5,ActivationFunction::Sign);
let mut mem = x.mem.new_memory::<BigScalar>();
x.load_memory(input, &mut mem, &slice_to_scalar(&[1,1,2, 1,2,1, 1,1,1, 1,2,1]));
x.load_memory(weight, &mut mem, &slice_to_scalar(&[1,1,-1, 1,-1,1, 1,1,1]));
x.compute(&mut mem);
assert_eq!(mem[x.mem[output].begin() as usize..x.mem[output].end() as usize], slice_to_scalar(&[1,1]));
x.sort_cons();
assert!(x.verify(&mem));
}
} | conv2d_compact_test_small | identifier_name |
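// Why the big_offset recurrence in run_conv2d_compact works: with
// offset = 2^(b-1) we have 2 * offset = 2^b, so each step of
//     big_offset = (big_offset * 2 + 1) * offset
// equals big_offset * 2^b + offset -- it slides the accumulated offsets up
// one limb and drops a fresh offset into limb 0. After n steps,
// big_offset = offset * (2^(0*b) + 2^(1*b) + ... + 2^((n-1)*b)), i.e. one
// offset per packed limb. Standalone check, with i128 in place of the
// crate's scalar type:
fn main() {
    let b = 7u32; // bit_length
    let n = 5u32; // packed_size + k_col - 1
    let offset = 1i128 << (b - 1);
    let mut big_offset = 0i128;
    for _ in 0..n {
        big_offset = (big_offset * 2 + 1) * offset;
    }
    let direct: i128 = (0..n).map(|k| offset << (k * b)).sum();
    assert_eq!(big_offset, direct);
}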
conv2d_compact.rs | use super::{ConstraintSystem, Scalar, MemoryManager, Memory, TensorAddress, SCALAR_SIZE, BigScalar, RangeFull, Range, RangeFrom, RangeTo, Id, min, Functions, ActivationFunction};
use crate::scalar::power_of_two;
impl ConstraintSystem {
pub fn run_conv2d_compact<T: Scalar>(mem: &MemoryManager, param: &[u32], var_dict: &mut Memory<T>) {
let (mul_result, k_col, packed_size,bit_length,extracted) = (param[0], param[1], param[2], param[3], param[4]);
let (fout, row_out, col_packed) = (mem[mul_result].dim[0],mem[mul_result].dim[1],mem[mul_result].dim[2]);
let row_dim = mem[extracted].dim[2];
let offset = power_of_two::<T>(bit_length - 1);
let mut big_offset = T::zero();
for _ in 0..packed_size + k_col - 1 {
big_offset = (big_offset * T::from_i32(2) + T::one()) * offset;
}
let n_packed = packed_size + k_col - 1;
for layer_out in 0..fout {
//matching result
for r in 0..row_out {
for c in 0..col_packed {
let val = (var_dict[mem[mul_result].at_idx(&[layer_out, r, c]) as usize] + big_offset).to_bytes();
let mut ext = Vec::new();
ext.resize((packed_size + k_col - 1) as usize, T::zero());
for k in 0..(packed_size + k_col - 1) * bit_length {
ext[(k / bit_length) as usize] += T::from_i32(((val[(k/8) as usize] >> (k % 8)) & 1) as i32) * power_of_two(k % bit_length);
}
for k in 0..packed_size + k_col - 1 {
let idx = c * n_packed + k;
if idx >= row_dim {
break
}
var_dict[mem[extracted].at_idx(&[layer_out,r,idx]) as usize] = ext[k as usize] - offset;
}
}
}
}
}
pub fn conv2d_compact(&mut self, input: TensorAddress, output: TensorAddress, weight_rev: TensorAddress, bias: Option<(TensorAddress, u32)>, bit_length: u8, act: ActivationFunction) {
// packing weight
let dim = &self.mem[weight_rev].dim;
let (fout, fin, k_row, k_col) = (dim[0], dim[1], dim[2], dim[3]);
let packed_weight = self.mem.alloc(&[fout, fin, k_row]);
assert!(k_col * (bit_length as u32) <= SCALAR_SIZE);
self.packing_tensor(weight_rev, packed_weight, bit_length, k_col as u8,1, BigScalar::one(), true);
let (row, col) = (self.mem[input].dim[1], self.mem[input].dim[2]);
let packed_size = min((SCALAR_SIZE / (bit_length as u32)).checked_sub(k_col).unwrap(),col);
let col_packed = (col-1)/packed_size + 1;
let packed_layer = self.mem.alloc(&[fin, row, col_packed]);
// packing row of inputs
self.packing_tensor_by_dim(input,&[-1], packed_layer, bit_length, packed_size as u8,1,BigScalar::one(), true);
// splicing output by row
let mut mul_input = Vec::new();
for r in 0..row - k_row + 1 {
let mut mul_input_row = Vec::new();
for c in 0..col_packed {
mul_input_row.push(self.mem.save(self.mem[packed_layer].at(&[RangeFull(), Range(r..r + k_row), Id(c)])));
}
mul_input.push(mul_input_row);
}
//packing bias
let mut packed_bias: Vec<Vec<TensorAddress>> = Vec::with_capacity(fout as usize);
let mut bias_dim = 0;
let mut bias_scale = 0;
if let Some((b, scale)) = bias {
bias_dim = (col - k_col)/packed_size + 1;
bias_scale = scale;
for layer_out in 0..fout {
let mut packed_bias_row: Vec<TensorAddress> = Vec::with_capacity(((row - k_row)/scale + 1) as usize);
for r in 0..(row - k_row)/scale + 1 {
let packed_bias = self.mem.alloc(&[bias_dim]);
let bias_row = self.mem.save(self.mem[b].at_(&[layer_out, r]));
self.packing_tensor(bias_row, packed_bias, bit_length, packed_size as u8, scale,power_of_two(bit_length as u32 * (k_col - 1)), true);
packed_bias_row.push(packed_bias);
}
packed_bias.push(packed_bias_row);
}
}
let mul_result = self.mem.alloc(&[fout, row - k_row + 1, col_packed]);
for layer_out in 0..fout {
let packed_weight = self.mem.save(self.mem[packed_weight].at_(&[layer_out]));
for r in 0..row - k_row + 1 {
for c in 0..col_packed {
let cur_bias = if c < bias_dim {Some(self.mem[packed_bias[layer_out as usize][(r/bias_scale) as usize]].at_idx(&[c]))} else {None};
self.dot(mul_input[r as usize][c as usize], packed_weight, self.mem[mul_result].at_idx(&[layer_out, r, c]), cur_bias);
}
}
}
// sign extraction
let n_packed = packed_size + k_col - 1;
let extracted_length = (col_packed - 1) * n_packed + ((col-1) % packed_size) + k_col;
let extracted = self.mem.alloc(&[fout, row - k_row + 1, extracted_length]);
self.packing_tensor_by_dim(extracted,&[-1], mul_result, bit_length, n_packed as u8,1,BigScalar::one(), false);
let params = vec![mul_result, k_col, packed_size, bit_length as u32, extracted];
self.compute.push((params.into_boxed_slice(), Functions::ConvCompact));
fn split_tensor<const N:usize>(mem: &mut MemoryManager,tensor: TensorAddress, length: u32, pos: [u32; N]) -> [(Option<TensorAddress>, Option<TensorAddress>); N] {
let fully_packed = mem[tensor].dim[2]/length;
let remainder = mem[tensor].dim[2] % length;
// should not save this
let tmp = mem[tensor].partition(2, length);
let mut res: [(Option<TensorAddress>, Option<TensorAddress>); N] = [(None, None); N];
for i in 0..N - 1 {
if pos[i] == pos[i+1] {
res[i] = (None, None);
continue;
}
let n = fully_packed + if remainder >= pos[i+1] {1} else {0};
let full = if n > 0 {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), RangeTo(..n), Range(pos[i]..pos[i+1])])))
} else {
None
};
let rem = if pos[i] < remainder && remainder < pos[i+1] | else {
None
};
res[i] = (full, rem);
}
res
}
fn extract_sign_part(c: &mut ConstraintSystem, extracted: TensorAddress, bit_length: u8) {
let output = c.mem.alloc(&c.mem[extracted].dim.to_owned());
c.sign(extracted, output, bit_length - 1);
}
let reduced_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeTo(..extracted_length - k_col + 1)]));
if k_col != 1 {
let rem_extract = self.mem.save(self.mem[extracted].at(&[RangeFull(), RangeFull(), RangeFrom(extracted_length - k_col + 1..)]));
extract_sign_part(self, rem_extract, bit_length);
}
let [(output_full, output_full_rem), (output_part, output_part_rem), (_,_)]= split_tensor(&mut self.mem, output, packed_size, [0, packed_size-(k_col-1), packed_size]);
let [(ext_left, ext_left_rem), (ext_full, ext_full_rem), (ext_right,ext_right_rem), (_,_)]= split_tensor(&mut self.mem, reduced_extract, n_packed, [0, k_col-1, packed_size, n_packed]);
// extract the fully correct part
if let Some(e) = ext_full {
self.activation(e, output_full.unwrap(), bit_length - 1, act);
}
if let Some(e) = ext_full_rem {
self.activation(e, output_full_rem.unwrap(), bit_length - 1, act);
}
//extract left and right sign part
if let Some(e) = ext_left {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_left_rem {
extract_sign_part(self,e, bit_length);
}
if let Some(e) = ext_right {
extract_sign_part(self,e, bit_length);
}
assert_eq!(ext_right_rem, None);
if let Some(left_rem) = ext_left_rem {
if let Some(right) = ext_right {
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[right].dim[2] - 1, k_col - 1]);
let left = self.mem.save(self.mem[ext_left.unwrap()].at(&[RangeFull(), RangeFrom(1..)]));
self.sum_two(right, left, sum_res);
self.activation(sum_res, output_part.unwrap(), bit_length - 1, act);
let sum_res = self.mem.alloc(&[fout, row - k_row + 1, self.mem[left_rem].dim[2]]);
let right_rem = self.mem.save(self.mem[right].at(&[RangeFull(), Id(self.mem[right].dim[2] - 1), RangeTo(..self.mem[left_rem].dim[2])]));
self.sum_two(right_rem, left_rem, sum_res);
self.activation(sum_res, output_part_rem.unwrap(), bit_length - 1, act);
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::scalar::slice_to_scalar;
#[test]
fn conv2d_compact_test() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[2,5,5]);
let weight = x.mem.alloc(&[2,2,3,3]);
let output = x.mem.alloc(&[2,3,3]);
let bias = x.mem.alloc(&[2,3,3]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, Some((bias, 1)), 7, ActivationFunction::Sign);
let mut mem: Vec<BigScalar> = slice_to_scalar(&[1,0,1,-1,0,0,0,-2,4,-1,-4,0,3,-4,0,0,0,1,-1,1,-4,2,3,-1,0,-4,2,2,-3,-1,-1,1,2,-1,1,4,4,2,3,-3,0,3,-2,3,0,2,3,3,-2,2,4,3,3,-4,-4,-1,3,1,4,-2,-2,0,-2,4,-3,0,0,0,-2,0,0,0,0,3,4,-3,-4,-1,-1,-4,3,1,-2,0,0,0,-1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-3,0,-3,0,1,-4,-1,2,0,0,-4,2,1,3,2,-3,4,-3]);
mem.resize(x.mem.n_var as usize, Scalar::zero());
x.compute(&mut mem);
assert_eq!(mem[87..87+18], slice_to_scalar(&[1,1,-1,-1,-1,1,1,-1,-1,-1,1,-1,-1,-1,1,-1,-1,-1]));
x.sort_cons();
assert!(x.verify(&mem));
}
#[test]
fn conv2d_compact_test_small() {
let mut x = ConstraintSystem::new();
let input = x.mem.alloc(&[1,4,3]);
let weight = x.mem.alloc(&[1,1,3,3]);
let output = x.mem.alloc(&[1,2,1]);
let weight_rev = x.mem.save(x.mem[weight].reverse(3));
x.conv2d_compact(input, output, weight_rev, None, 5,ActivationFunction::Sign);
let mut mem = x.mem.new_memory::<BigScalar>();
x.load_memory(input, &mut mem, &slice_to_scalar(&[1,1,2, 1,2,1, 1,1,1, 1,2,1]));
x.load_memory(weight, &mut mem, &slice_to_scalar(&[1,1,-1, 1,-1,1, 1,1,1]));
x.compute(&mut mem);
assert_eq!(mem[x.mem[output].begin() as usize..x.mem[output].end() as usize], slice_to_scalar(&[1,1]));
x.sort_cons();
assert!(x.verify(&mem));
}
} | {
Some(mem.save(tmp.at(&[RangeFull(), RangeFull(), Id(n), Range(pos[i]..remainder)])))
} | conditional_block |
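// The idea behind conv2d_compact's dot step: a 1-D convolution is a
// polynomial product, so packing an input row and the *reversed* kernel into
// base-2^b limbs turns a whole window of multiply-adds into one big
// multiplication whose limbs hold the results. Standalone sketch with i128
// and small non-negative values (the real code adds per-limb offsets so that
// signed limbs cannot borrow from their neighbours):
fn main() {
    let b = 8u32;
    let x = [1i128, 2, 3, 4]; // input row
    let w = [1i128, 0, 2]; // kernel, k_col = 3
    let px: i128 = x.iter().enumerate().map(|(i, &v)| v << (i as u32 * b)).sum();
    let pw: i128 = w.iter().rev().enumerate().map(|(j, &v)| v << (j as u32 * b)).sum();
    let prod = px * pw;
    // limb k of the product is sum over i + j = k of x[i] * w_reversed[j]
    let mask = (1i128 << b) - 1;
    let limbs: Vec<i128> = (0..x.len() + w.len() - 1)
        .map(|k| (prod >> (k as u32 * b)) & mask)
        .collect();
    assert_eq!(limbs, vec![2, 4, 7, 10, 3, 4]);
    // limbs k_col-1 .. x.len() are the valid convolution windows
    assert_eq!(limbs[2..4], [7, 10]);
}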
search.rs | use super::context;
use super::Contrapositive;
use super::{Cuts, Db, Steps};
use crate::offset::{OLit, Offset, Sub};
use crate::subst::Ptr as SubPtr;
use crate::{Lit, Rewind};
use alloc::vec::Vec;
use core::{fmt::Display, hash::Hash, ops::Neg};
use log::debug;
pub struct Search<'t, P, C> {
task: Task<'t, P, C>,
ctx: Context<'t, P, C>,
promises: Vec<Promise<Task<'t, P, C>>>,
pub sub: Sub<'t, C>,
proof: Steps<'t, P, C>,
alternatives: Vec<(Alternative<'t, P, C>, Action<'t, P, C>)>,
inferences: usize,
literals: usize,
db: &'t Db<P, C, usize>,
opt: Opt,
}
#[derive(Clone)]
pub struct TaskIter<C: IntoIterator>(core::iter::Skip<C::IntoIter>);
impl<C: IntoIterator> TaskIter<C> {
pub fn new(cl: C) -> Self |
}
impl<C: IntoIterator> Iterator for TaskIter<C> {
type Item = <C::IntoIter as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub type Task<'t, P, C> = TaskIter<super::clause::OClause<'t, Lit<P, C, usize>>>;
pub type Context<'t, P, C> = context::Context<Vec<OLit<'t, P, C>>>;
#[derive(Clone, Debug)]
pub enum Action<'t, P, C> {
Prove,
Reduce(OLit<'t, P, C>, Index),
Extend(OLit<'t, P, C>, Contras<'t, P, C>, Index),
}
impl<'t, P, C> Action<'t, P, C> {
pub fn max_children(&self) -> usize {
use Action::*;
match self {
Prove | Reduce(_, _) => 0,
Extend(_, cs, skip) => cs[*skip].rest.len(),
}
}
}
type Index = usize;
type Contras<'t, P, C> = &'t [Contrapositive<P, C, usize>];
struct Alternative<'t, P, C> {
task: Task<'t, P, C>,
// when we do *not* use cut, then we may need to backtrack to
// contexts that are larger than the current context,
// so we save the whole context here
ctx: Option<Context<'t, P, C>>,
// when we use cut, then we always backtrack to contexts that are
// prefixes of the current context, so in that case,
// storing just a pointer to the context suffices
ctx_ptr: context::Ptr,
promises: Option<Vec<Promise<Task<'t, P, C>>>>,
promises_len: usize,
sub: SubPtr,
proof_len: usize,
}
#[derive(Clone)]
struct Promise<T> {
task: T,
ctx_ptr: context::Ptr,
alt_len: usize,
}
pub struct Opt {
pub lim: usize,
pub cuts: Cuts,
}
impl<'t, P, C> Search<'t, P, C> {
pub fn new(task: Task<'t, P, C>, db: &'t Db<P, C, usize>, opt: Opt) -> Self {
Self {
task,
ctx: Context::default(),
promises: Vec::new(),
sub: Sub::default(),
proof: Steps::new(),
alternatives: Vec::new(),
inferences: 0,
literals: 0,
db,
opt,
}
}
}
type State<'t, P, C> = Result<Action<'t, P, C>, bool>;
impl<'t, P, C> Search<'t, P, C>
where
P: Clone + Display + Eq + Hash + Neg<Output = P>,
C: Clone + Display + Eq,
{
pub fn prove(&mut self) -> Option<&Steps<'t, P, C>> {
let mut action: Action<'t, P, C> = Action::Prove;
loop {
let result = match action {
Action::Prove => match self.task.clone().next() {
Some(lit) => self.chk(lit),
None => self.fulfill_promise(),
},
Action::Reduce(lit, skip) => self.red(lit, skip),
Action::Extend(lit, contras, skip) => self.ext(lit, contras, skip),
};
match result {
Ok(next) => action = next,
Err(true) => return Some(&self.proof),
Err(false) => return None,
}
}
}
pub fn inferences(&self) -> usize {
self.inferences
}
fn chk(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("checks: {}", lit);
debug!("{} {}", self.literals, lit.head());
debug!("lemmas: {}", self.ctx.lemmas.len());
debug!("path: {}", self.ctx.path.len());
self.literals += 1;
let mut lits = self.task.clone();
let mut path = self.ctx.path.iter();
let mut lemmas = self.ctx.lemmas.iter();
if lits.any(|cl| path.any(|pl| pl.eq_mod(&self.sub, &cl))) {
debug!("regularity");
self.try_alternative()
} else if lemmas.any(|lem| lem.eq_mod(&self.sub, &lit)) {
debug!("lemma");
self.proof.push(Action::Prove);
// do not add lit to lemmas, unlike original leanCoP
// furthermore, do not try red/ext steps if we found a lemma,
// because it does not add anything to substitution
// note that Jens said that this might sometimes be counterproductive,
// because adding to the substitution is also beneficial to cut down search space
self.task.next();
Ok(Action::Prove)
} else {
Ok(Action::Reduce(lit, 0))
}
}
fn red(&mut self, lit: OLit<'t, P, C>, skip: usize) -> State<'t, P, C> {
debug!("reduce: {}", lit);
let alternative = Alternative::from(&*self);
for (pidx, pat) in self.ctx.path.iter().rev().enumerate().skip(skip) {
debug!("try reduce: {}", pat);
let sub_dom_len = self.sub.get_dom_len();
if pat.head() != &-lit.head().clone() {
continue;
}
if pat.args().unify(&mut self.sub, lit.args()) {
debug!("reduce succeeded");
self.proof.push(Action::Reduce(lit, pidx));
if !self.opt.cuts.reduction {
let action = Action::Reduce(lit, pidx + 1);
self.alternatives.push((alternative, action));
}
self.ctx.lemmas.push(lit);
self.task.next();
return Ok(Action::Prove);
} else {
self.sub.set_dom_len(sub_dom_len)
}
}
self.ext0(lit)
}
fn ext0(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("extend: {}", lit);
let neg = -lit.head().clone();
match self.db.get(&neg) {
Some(entries) => self.ext(lit, entries, 0),
None => self.try_alternative(),
}
}
fn ext(&mut self, lit: OLit<'t, P, C>, cs: Contras<'t, P, C>, skip: usize) -> State<'t, P, C> {
let alt = Alternative::from(&*self);
let prm = Promise::from(&*self);
let sub = SubPtr::from(&self.sub);
for (eidx, entry) in cs.iter().enumerate().skip(skip) {
debug!(
"try extend {}{} (lit = {}, |path| = {})",
lit.head(),
entry,
lit,
self.ctx.path.len()
);
if self.ctx.path.len() >= self.opt.lim && entry.vars.is_some() {
debug!("path limit reached");
continue;
};
let eargs = Offset::new(sub.dom_max(), &entry.args);
if let Some(vars) = entry.vars {
// we have to add 1 here because the lowest variable is 0
self.sub.set_dom_max(sub.dom_max() + vars + 1)
};
debug!("unify {} ~? {}, sub = {}", eargs, lit.args(), self.sub);
if eargs.unify(&mut self.sub, lit.args()) {
debug!("unify succeeded with {}, sub = {}", entry.rest, self.sub);
self.inferences += 1;
// promise to fulfill the current task
// (if the promise is kept and cut is enabled,
// then all alternatives that came after will be discarded)
self.promises.push(prm);
self.proof.push(Action::Extend(lit, cs, eidx));
let action = Action::Extend(lit, cs, eidx + 1);
// register an alternative (that will be discarded
// if the above promise is kept and cut is enabled)
self.alternatives.push((alt, action));
self.task = Task::new(Offset::new(sub.dom_max(), &entry.rest));
self.ctx.path.push(lit);
return Ok(Action::Prove);
} else {
debug!("unify failed");
self.sub.rewind(&sub)
}
}
self.try_alternative()
}
fn fulfill_promise(&mut self) -> State<'t, P, C> {
debug!("fulfill promise ({} left)", self.promises.len());
let prm = self.promises.pop().ok_or(true)?;
self.task = prm.task;
self.ctx.rewind(prm.ctx_ptr);
if let Some(prev) = self.task.next() {
self.ctx.lemmas.push(prev)
};
if let Some(cut) = self.opt.cuts.extension {
use super::cuts::Cut::*;
let alt_len = match cut {
Exclusive => prm.alt_len + 1,
Inclusive => prm.alt_len,
};
debug!("cut {} alternatives", self.alternatives.len() - alt_len);
assert!(alt_len <= self.alternatives.len());
self.alternatives.truncate(alt_len);
}
Ok(Action::Prove)
}
fn try_alternative(&mut self) -> State<'t, P, C> {
debug!("try alternative ({} left)", self.alternatives.len());
self.alternatives.pop().ok_or(false).map(|(alt, action)| {
self.rewind(alt);
action
})
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Alternative<'t, P, C> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx: if st.opt.cuts.extension.is_none() {
Some(st.ctx.clone())
} else {
None
},
ctx_ptr: context::Ptr::from(&st.ctx),
promises: if st.opt.cuts.extension.is_none() {
Some(st.promises.clone())
} else {
None
},
promises_len: st.promises.len(),
sub: SubPtr::from(&st.sub),
proof_len: st.proof.len(),
}
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Promise<Task<'t, P, C>> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx_ptr: context::Ptr::from(&st.ctx),
alt_len: st.alternatives.len(),
}
}
}
impl<'t, P, C> Rewind<Alternative<'t, P, C>> for Search<'t, P, C> {
fn rewind(&mut self, alt: Alternative<'t, P, C>) {
self.task = alt.task;
if let Some(ctx) = alt.ctx {
self.ctx = ctx;
} else {
self.ctx.rewind(alt.ctx_ptr);
}
if let Some(promises) = alt.promises {
self.promises = promises;
} else {
assert!(self.promises.len() >= alt.promises_len);
self.promises.truncate(alt.promises_len);
}
self.sub.rewind(&alt.sub);
self.proof.truncate(alt.proof_len);
}
}
| {
Self(cl.into_iter().skip(0))
} | identifier_body |
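// The backtracking scheme used throughout search.rs: before a choice point,
// record cheap pointers (here just lengths) into the growable state; to
// backtrack, truncate back to them. This is sound only when every backtrack
// target is a prefix of the current state -- which is exactly why Alternative
// above stores a full Context copy when cut is disabled. Standalone sketch;
// the names are illustrative, not the crate's types.
struct Mark {
    path_len: usize,
    lemmas_len: usize,
}

struct ProverState {
    path: Vec<u32>,
    lemmas: Vec<u32>,
}

impl ProverState {
    fn mark(&self) -> Mark {
        Mark { path_len: self.path.len(), lemmas_len: self.lemmas.len() }
    }
    fn rewind(&mut self, m: &Mark) {
        self.path.truncate(m.path_len);
        self.lemmas.truncate(m.lemmas_len);
    }
}

fn main() {
    let mut st = ProverState { path: vec![1], lemmas: Vec::new() };
    let m = st.mark();
    st.path.push(2); // explore a branch...
    st.lemmas.push(7);
    st.rewind(&m); // ...and undo it on failure
    assert_eq!(st.path, vec![1]);
    assert!(st.lemmas.is_empty());
}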
search.rs | use super::context;
use super::Contrapositive;
use super::{Cuts, Db, Steps};
use crate::offset::{OLit, Offset, Sub};
use crate::subst::Ptr as SubPtr;
use crate::{Lit, Rewind};
use alloc::vec::Vec;
use core::{fmt::Display, hash::Hash, ops::Neg};
use log::debug;
pub struct Search<'t, P, C> {
task: Task<'t, P, C>,
ctx: Context<'t, P, C>,
promises: Vec<Promise<Task<'t, P, C>>>,
pub sub: Sub<'t, C>,
proof: Steps<'t, P, C>,
alternatives: Vec<(Alternative<'t, P, C>, Action<'t, P, C>)>,
inferences: usize,
literals: usize,
db: &'t Db<P, C, usize>,
opt: Opt,
}
#[derive(Clone)]
pub struct TaskIter<C: IntoIterator>(core::iter::Skip<C::IntoIter>);
impl<C: IntoIterator> TaskIter<C> {
pub fn new(cl: C) -> Self {
Self(cl.into_iter().skip(0))
}
}
impl<C: IntoIterator> Iterator for TaskIter<C> {
type Item = <C::IntoIter as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub type Task<'t, P, C> = TaskIter<super::clause::OClause<'t, Lit<P, C, usize>>>;
pub type Context<'t, P, C> = context::Context<Vec<OLit<'t, P, C>>>;
#[derive(Clone, Debug)]
pub enum Action<'t, P, C> {
Prove,
Reduce(OLit<'t, P, C>, Index),
Extend(OLit<'t, P, C>, Contras<'t, P, C>, Index),
}
impl<'t, P, C> Action<'t, P, C> {
pub fn max_children(&self) -> usize {
use Action::*;
match self {
Prove | Reduce(_, _) => 0,
Extend(_, cs, skip) => cs[*skip].rest.len(),
}
}
}
type Index = usize;
type Contras<'t, P, C> = &'t [Contrapositive<P, C, usize>];
struct Alternative<'t, P, C> {
task: Task<'t, P, C>,
// when we do *not* use cut, then we may need to backtrack to
// contexts that are larger than the current context,
// so we save the whole context here
ctx: Option<Context<'t, P, C>>,
// when we use cut, then we always backtrack to contexts that are
// prefixes of the current context, so in that case,
// storing just a pointer to the context suffices
ctx_ptr: context::Ptr,
promises: Option<Vec<Promise<Task<'t, P, C>>>>,
promises_len: usize,
sub: SubPtr,
proof_len: usize,
}
#[derive(Clone)]
struct Promise<T> {
task: T,
ctx_ptr: context::Ptr,
alt_len: usize,
}
pub struct Opt {
pub lim: usize,
pub cuts: Cuts,
}
impl<'t, P, C> Search<'t, P, C> {
pub fn new(task: Task<'t, P, C>, db: &'t Db<P, C, usize>, opt: Opt) -> Self {
Self {
task,
ctx: Context::default(),
promises: Vec::new(),
sub: Sub::default(),
proof: Steps::new(),
alternatives: Vec::new(),
inferences: 0,
literals: 0,
db,
opt,
}
}
}
type State<'t, P, C> = Result<Action<'t, P, C>, bool>;
impl<'t, P, C> Search<'t, P, C>
where
P: Clone + Display + Eq + Hash + Neg<Output = P>,
C: Clone + Display + Eq,
{
pub fn prove(&mut self) -> Option<&Steps<'t, P, C>> {
let mut action: Action<'t, P, C> = Action::Prove;
loop {
let result = match action {
Action::Prove => match self.task.clone().next() {
Some(lit) => self.chk(lit),
None => self.fulfill_promise(),
},
Action::Reduce(lit, skip) => self.red(lit, skip),
Action::Extend(lit, contras, skip) => self.ext(lit, contras, skip),
};
match result {
Ok(next) => action = next,
Err(true) => return Some(&self.proof),
Err(false) => return None,
}
}
}
pub fn inferences(&self) -> usize {
self.inferences
}
fn chk(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("checks: {}", lit);
debug!("{} {}", self.literals, lit.head());
debug!("lemmas: {}", self.ctx.lemmas.len());
debug!("path: {}", self.ctx.path.len());
self.literals += 1;
let mut lits = self.task.clone();
let mut path = self.ctx.path.iter();
let mut lemmas = self.ctx.lemmas.iter();
if lits.any(|cl| path.any(|pl| pl.eq_mod(&self.sub, &cl))) {
debug!("regularity");
self.try_alternative()
} else if lemmas.any(|lem| lem.eq_mod(&self.sub, &lit)) {
debug!("lemma");
self.proof.push(Action::Prove);
// do not add lit to lemmas, unlike original leanCoP
// furthermore, do not try red/ext steps if we found a lemma,
// because it does not add anything to substitution
// note that Jens said that this might sometimes be counterproductive,
// because adding to the substitution is also beneficial to cut down search space
self.task.next();
Ok(Action::Prove)
} else {
Ok(Action::Reduce(lit, 0))
}
}
fn red(&mut self, lit: OLit<'t, P, C>, skip: usize) -> State<'t, P, C> {
debug!("reduce: {}", lit);
let alternative = Alternative::from(&*self);
for (pidx, pat) in self.ctx.path.iter().rev().enumerate().skip(skip) {
debug!("try reduce: {}", pat);
let sub_dom_len = self.sub.get_dom_len();
if pat.head() != &-lit.head().clone() {
continue;
}
if pat.args().unify(&mut self.sub, lit.args()) {
debug!("reduce succeeded");
self.proof.push(Action::Reduce(lit, pidx));
if !self.opt.cuts.reduction {
let action = Action::Reduce(lit, pidx + 1);
self.alternatives.push((alternative, action));
}
self.ctx.lemmas.push(lit);
self.task.next();
return Ok(Action::Prove);
} else {
self.sub.set_dom_len(sub_dom_len)
}
}
self.ext0(lit)
}
fn ext0(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("extend: {}", lit);
let neg = -lit.head().clone();
match self.db.get(&neg) {
Some(entries) => self.ext(lit, entries, 0),
None => self.try_alternative(),
}
}
fn ext(&mut self, lit: OLit<'t, P, C>, cs: Contras<'t, P, C>, skip: usize) -> State<'t, P, C> {
let alt = Alternative::from(&*self);
let prm = Promise::from(&*self);
let sub = SubPtr::from(&self.sub);
for (eidx, entry) in cs.iter().enumerate().skip(skip) {
debug!(
"try extend {}{} (lit = {}, |path| = {})",
lit.head(),
entry,
lit,
self.ctx.path.len()
);
if self.ctx.path.len() >= self.opt.lim && entry.vars.is_some() {
debug!("path limit reached");
continue;
};
let eargs = Offset::new(sub.dom_max(), &entry.args);
if let Some(vars) = entry.vars {
// we have to add 1 here because the lowest variable is 0
self.sub.set_dom_max(sub.dom_max() + vars + 1)
};
debug!("unify {} ~? {}, sub = {}", eargs, lit.args(), self.sub);
if eargs.unify(&mut self.sub, lit.args()) {
debug!("unify succeeded with {}, sub = {}", entry.rest, self.sub);
self.inferences += 1;
// promise to fulfill the current task
// (if the promise is kept and cut is enabled,
// then all alternatives that came after will be discarded)
self.promises.push(prm);
self.proof.push(Action::Extend(lit, cs, eidx));
let action = Action::Extend(lit, cs, eidx + 1);
// register an alternative (that will be discarded
// if the above promise is kept and cut is enabled)
self.alternatives.push((alt, action));
self.task = Task::new(Offset::new(sub.dom_max(), &entry.rest));
self.ctx.path.push(lit);
return Ok(Action::Prove);
} else {
debug!("unify failed");
self.sub.rewind(&sub)
}
}
self.try_alternative()
}
fn fulfill_promise(&mut self) -> State<'t, P, C> {
debug!("fulfill promise ({} left)", self.promises.len());
let prm = self.promises.pop().ok_or(true)?;
self.task = prm.task;
self.ctx.rewind(prm.ctx_ptr);
if let Some(prev) = self.task.next() {
self.ctx.lemmas.push(prev)
};
if let Some(cut) = self.opt.cuts.extension {
use super::cuts::Cut::*;
let alt_len = match cut {
Exclusive => prm.alt_len + 1,
Inclusive => prm.alt_len,
};
debug!("cut {} alternatives", self.alternatives.len() - alt_len);
assert!(alt_len <= self.alternatives.len());
self.alternatives.truncate(alt_len);
}
Ok(Action::Prove)
}
fn | (&mut self) -> State<'t, P, C> {
debug!("try alternative ({} left)", self.alternatives.len());
self.alternatives.pop().ok_or(false).map(|(alt, action)| {
self.rewind(alt);
action
})
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Alternative<'t, P, C> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx: if st.opt.cuts.extension.is_none() {
Some(st.ctx.clone())
} else {
None
},
ctx_ptr: context::Ptr::from(&st.ctx),
promises: if st.opt.cuts.extension.is_none() {
Some(st.promises.clone())
} else {
None
},
promises_len: st.promises.len(),
sub: SubPtr::from(&st.sub),
proof_len: st.proof.len(),
}
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Promise<Task<'t, P, C>> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx_ptr: context::Ptr::from(&st.ctx),
alt_len: st.alternatives.len(),
}
}
}
impl<'t, P, C> Rewind<Alternative<'t, P, C>> for Search<'t, P, C> {
fn rewind(&mut self, alt: Alternative<'t, P, C>) {
self.task = alt.task;
if let Some(ctx) = alt.ctx {
self.ctx = ctx;
} else {
self.ctx.rewind(alt.ctx_ptr);
}
if let Some(promises) = alt.promises {
self.promises = promises;
} else {
assert!(self.promises.len() >= alt.promises_len);
self.promises.truncate(alt.promises_len);
}
self.sub.rewind(&alt.sub);
self.proof.truncate(alt.proof_len);
}
}
| try_alternative | identifier_name |
search.rs | use super::context;
use super::Contrapositive;
use super::{Cuts, Db, Steps};
use crate::offset::{OLit, Offset, Sub};
use crate::subst::Ptr as SubPtr;
use crate::{Lit, Rewind};
use alloc::vec::Vec;
use core::{fmt::Display, hash::Hash, ops::Neg};
use log::debug;
pub struct Search<'t, P, C> {
task: Task<'t, P, C>,
ctx: Context<'t, P, C>,
promises: Vec<Promise<Task<'t, P, C>>>,
pub sub: Sub<'t, C>,
proof: Steps<'t, P, C>,
alternatives: Vec<(Alternative<'t, P, C>, Action<'t, P, C>)>,
inferences: usize,
literals: usize,
db: &'t Db<P, C, usize>,
opt: Opt,
}
#[derive(Clone)]
pub struct TaskIter<C: IntoIterator>(core::iter::Skip<C::IntoIter>);
impl<C: IntoIterator> TaskIter<C> {
pub fn new(cl: C) -> Self {
Self(cl.into_iter().skip(0))
}
}
impl<C: IntoIterator> Iterator for TaskIter<C> {
type Item = <C::IntoIter as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub type Task<'t, P, C> = TaskIter<super::clause::OClause<'t, Lit<P, C, usize>>>;
pub type Context<'t, P, C> = context::Context<Vec<OLit<'t, P, C>>>;
#[derive(Clone, Debug)]
pub enum Action<'t, P, C> {
Prove,
Reduce(OLit<'t, P, C>, Index),
Extend(OLit<'t, P, C>, Contras<'t, P, C>, Index),
}
impl<'t, P, C> Action<'t, P, C> {
pub fn max_children(&self) -> usize {
use Action::*;
match self {
Prove | Reduce(_, _) => 0,
Extend(_, cs, skip) => cs[*skip].rest.len(),
}
}
}
type Index = usize;
type Contras<'t, P, C> = &'t [Contrapositive<P, C, usize>];
struct Alternative<'t, P, C> {
task: Task<'t, P, C>,
// when we do *not* use cut, then we may need to backtrack to
// contexts that are larger than the current context,
// so we save the whole context here
ctx: Option<Context<'t, P, C>>,
// when we use cut, then we always backtrack to contexts that are
// prefixes of the current context, so in that case,
// storing just a pointer to the context suffices
ctx_ptr: context::Ptr,
promises: Option<Vec<Promise<Task<'t, P, C>>>>,
promises_len: usize,
sub: SubPtr,
proof_len: usize,
}
#[derive(Clone)]
struct Promise<T> {
task: T,
ctx_ptr: context::Ptr,
alt_len: usize,
}
pub struct Opt {
pub lim: usize,
pub cuts: Cuts,
}
impl<'t, P, C> Search<'t, P, C> {
pub fn new(task: Task<'t, P, C>, db: &'t Db<P, C, usize>, opt: Opt) -> Self {
Self {
task,
ctx: Context::default(),
promises: Vec::new(),
sub: Sub::default(),
proof: Steps::new(),
alternatives: Vec::new(),
inferences: 0,
literals: 0,
db,
opt,
}
}
}
type State<'t, P, C> = Result<Action<'t, P, C>, bool>;
impl<'t, P, C> Search<'t, P, C>
where
P: Clone + Display + Eq + Hash + Neg<Output = P>,
C: Clone + Display + Eq,
{
pub fn prove(&mut self) -> Option<&Steps<'t, P, C>> {
let mut action: Action<'t, P, C> = Action::Prove;
loop {
let result = match action {
Action::Prove => match self.task.clone().next() {
Some(lit) => self.chk(lit),
None => self.fulfill_promise(),
},
Action::Reduce(lit, skip) => self.red(lit, skip),
Action::Extend(lit, contras, skip) => self.ext(lit, contras, skip),
};
match result {
Ok(next) => action = next,
Err(true) => return Some(&self.proof),
Err(false) => return None,
}
}
}
pub fn inferences(&self) -> usize {
self.inferences
}
fn chk(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("checks: {}", lit);
debug!("{} {}", self.literals, lit.head());
debug!("lemmas: {}", self.ctx.lemmas.len());
debug!("path: {}", self.ctx.path.len());
self.literals += 1;
let mut lits = self.task.clone();
let mut path = self.ctx.path.iter();
let mut lemmas = self.ctx.lemmas.iter();
if lits.any(|cl| path.any(|pl| pl.eq_mod(&self.sub, &cl))) {
debug!("regularity");
self.try_alternative()
} else if lemmas.any(|lem| lem.eq_mod(&self.sub, &lit)) {
debug!("lemma");
self.proof.push(Action::Prove);
// do not add lit to lemmas, unlike original leanCoP
// furthermore, do not try red/ext steps if we found a lemma,
// because it does not add anything to substitution
// note that Jens said that this might sometimes be counterproductive,
// because adding to the substitution is also beneficial to cut down search space
self.task.next();
Ok(Action::Prove)
} else {
Ok(Action::Reduce(lit, 0))
}
}
fn red(&mut self, lit: OLit<'t, P, C>, skip: usize) -> State<'t, P, C> {
debug!("reduce: {}", lit);
let alternative = Alternative::from(&*self);
for (pidx, pat) in self.ctx.path.iter().rev().enumerate().skip(skip) {
debug!("try reduce: {}", pat);
let sub_dom_len = self.sub.get_dom_len();
if pat.head() != &-lit.head().clone() {
continue;
}
if pat.args().unify(&mut self.sub, lit.args()) {
debug!("reduce succeeded");
self.proof.push(Action::Reduce(lit, pidx));
if !self.opt.cuts.reduction {
let action = Action::Reduce(lit, pidx + 1);
self.alternatives.push((alternative, action));
}
self.ctx.lemmas.push(lit);
self.task.next();
return Ok(Action::Prove);
} else {
self.sub.set_dom_len(sub_dom_len)
}
}
self.ext0(lit)
}
fn ext0(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("extend: {}", lit);
let neg = -lit.head().clone();
match self.db.get(&neg) {
Some(entries) => self.ext(lit, entries, 0),
None => self.try_alternative(),
}
}
fn ext(&mut self, lit: OLit<'t, P, C>, cs: Contras<'t, P, C>, skip: usize) -> State<'t, P, C> {
let alt = Alternative::from(&*self);
let prm = Promise::from(&*self);
let sub = SubPtr::from(&self.sub);
for (eidx, entry) in cs.iter().enumerate().skip(skip) {
debug!(
"try extend {}{} (lit = {}, |path| = {})",
lit.head(),
entry,
lit,
self.ctx.path.len()
);
if self.ctx.path.len() >= self.opt.lim && entry.vars.is_some() {
debug!("path limit reached");
continue;
};
let eargs = Offset::new(sub.dom_max(), &entry.args);
if let Some(vars) = entry.vars {
// we have to add 1 here because the lowest variable is 0
self.sub.set_dom_max(sub.dom_max() + vars + 1)
};
debug!("unify {} ~? {}, sub = {}", eargs, lit.args(), self.sub);
if eargs.unify(&mut self.sub, lit.args()) {
debug!("unify succeeded with {}, sub = {}", entry.rest, self.sub);
self.inferences += 1;
// promise to fulfill the current task
// (if the promise is kept and cut is enabled,
// then all alternatives that came after will be discarded)
self.promises.push(prm);
self.proof.push(Action::Extend(lit, cs, eidx));
let action = Action::Extend(lit, cs, eidx + 1);
// register an alternative (that will be discarded
// if the above promise is kept and cut is enabled)
self.alternatives.push((alt, action));
self.task = Task::new(Offset::new(sub.dom_max(), &entry.rest));
self.ctx.path.push(lit);
return Ok(Action::Prove);
} else {
debug!("unify failed");
self.sub.rewind(&sub)
}
}
self.try_alternative()
}
fn fulfill_promise(&mut self) -> State<'t, P, C> {
debug!("fulfill promise ({} left)", self.promises.len());
let prm = self.promises.pop().ok_or(true)?;
self.task = prm.task;
self.ctx.rewind(prm.ctx_ptr);
if let Some(prev) = self.task.next() {
self.ctx.lemmas.push(prev)
};
if let Some(cut) = self.opt.cuts.extension {
use super::cuts::Cut::*;
let alt_len = match cut {
Exclusive => prm.alt_len + 1,
Inclusive => prm.alt_len,
};
debug!("cut {} alternatives", self.alternatives.len() - alt_len);
assert!(alt_len <= self.alternatives.len());
self.alternatives.truncate(alt_len);
}
Ok(Action::Prove)
}
fn try_alternative(&mut self) -> State<'t, P, C> {
debug!("try alternative ({} left)", self.alternatives.len());
self.alternatives.pop().ok_or(false).map(|(alt, action)| {
self.rewind(alt);
action
})
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Alternative<'t, P, C> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx: if st.opt.cuts.extension.is_none() {
Some(st.ctx.clone())
} else {
None
},
ctx_ptr: context::Ptr::from(&st.ctx),
promises: if st.opt.cuts.extension.is_none() {
Some(st.promises.clone())
} else {
None
},
promises_len: st.promises.len(),
sub: SubPtr::from(&st.sub),
proof_len: st.proof.len(),
}
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Promise<Task<'t, P, C>> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx_ptr: context::Ptr::from(&st.ctx),
alt_len: st.alternatives.len(),
}
}
}
impl<'t, P, C> Rewind<Alternative<'t, P, C>> for Search<'t, P, C> {
fn rewind(&mut self, alt: Alternative<'t, P, C>) {
self.task = alt.task;
if let Some(ctx) = alt.ctx | else {
self.ctx.rewind(alt.ctx_ptr);
}
if let Some(promises) = alt.promises {
self.promises = promises;
} else {
assert!(self.promises.len() >= alt.promises_len);
self.promises.truncate(alt.promises_len);
}
self.sub.rewind(&alt.sub);
self.proof.truncate(alt.proof_len);
}
}
| {
self.ctx = ctx;
} | conditional_block |
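// What fulfill_promise's cut does, in isolation: when a promise is kept,
// alternatives registered after it are discarded; Exclusive keeps the one
// alternative pushed together with the promise, Inclusive drops it too.
// Standalone sketch mirroring the truncate logic above (toy types, not the
// crate's):
#[derive(Clone, Copy)]
enum Cut {
    Exclusive,
    Inclusive,
}

fn apply_cut(alternatives: &mut Vec<&'static str>, alt_len_at_promise: usize, cut: Cut) {
    let keep = match cut {
        Cut::Exclusive => alt_len_at_promise + 1,
        Cut::Inclusive => alt_len_at_promise,
    };
    assert!(keep <= alternatives.len());
    alternatives.truncate(keep);
}

fn main() {
    let mut alts = vec!["before promise", "with promise", "after promise"];
    apply_cut(&mut alts, 1, Cut::Exclusive);
    assert_eq!(alts, vec!["before promise", "with promise"]);

    let mut alts = vec!["before promise", "with promise", "after promise"];
    apply_cut(&mut alts, 1, Cut::Inclusive);
    assert_eq!(alts, vec!["before promise"]);
}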
search.rs | use super::context;
use super::Contrapositive;
use super::{Cuts, Db, Steps};
use crate::offset::{OLit, Offset, Sub};
use crate::subst::Ptr as SubPtr;
use crate::{Lit, Rewind};
use alloc::vec::Vec;
use core::{fmt::Display, hash::Hash, ops::Neg};
use log::debug;
pub struct Search<'t, P, C> {
task: Task<'t, P, C>,
ctx: Context<'t, P, C>,
promises: Vec<Promise<Task<'t, P, C>>>,
pub sub: Sub<'t, C>,
proof: Steps<'t, P, C>,
alternatives: Vec<(Alternative<'t, P, C>, Action<'t, P, C>)>,
inferences: usize,
literals: usize,
db: &'t Db<P, C, usize>,
opt: Opt,
}
#[derive(Clone)]
pub struct TaskIter<C: IntoIterator>(core::iter::Skip<C::IntoIter>);
impl<C: IntoIterator> TaskIter<C> {
pub fn new(cl: C) -> Self {
Self(cl.into_iter().skip(0))
}
}
impl<C: IntoIterator> Iterator for TaskIter<C> {
type Item = <C::IntoIter as Iterator>::Item;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
pub type Task<'t, P, C> = TaskIter<super::clause::OClause<'t, Lit<P, C, usize>>>;
pub type Context<'t, P, C> = context::Context<Vec<OLit<'t, P, C>>>;
#[derive(Clone, Debug)]
pub enum Action<'t, P, C> {
Prove,
Reduce(OLit<'t, P, C>, Index),
Extend(OLit<'t, P, C>, Contras<'t, P, C>, Index),
}
impl<'t, P, C> Action<'t, P, C> {
pub fn max_children(&self) -> usize {
use Action::*;
match self {
Prove | Reduce(_, _) => 0,
Extend(_, cs, skip) => cs[*skip].rest.len(),
}
}
}
type Index = usize;
type Contras<'t, P, C> = &'t [Contrapositive<P, C, usize>];
struct Alternative<'t, P, C> {
task: Task<'t, P, C>,
// when we do *not* use cut, then we may need to backtrack to
// contexts that are larger than the current context,
// so we save the whole context here
ctx: Option<Context<'t, P, C>>,
// when we use cut, then we always backtrack to contexts that are
// prefixes of the current context, so in that case,
// storing just a pointer to the context suffices
ctx_ptr: context::Ptr,
promises: Option<Vec<Promise<Task<'t, P, C>>>>,
promises_len: usize,
sub: SubPtr,
proof_len: usize,
}
#[derive(Clone)]
struct Promise<T> {
task: T,
ctx_ptr: context::Ptr,
alt_len: usize,
}
pub struct Opt {
pub lim: usize,
pub cuts: Cuts,
}
impl<'t, P, C> Search<'t, P, C> {
pub fn new(task: Task<'t, P, C>, db: &'t Db<P, C, usize>, opt: Opt) -> Self {
Self {
task,
ctx: Context::default(),
promises: Vec::new(),
sub: Sub::default(),
proof: Steps::new(),
alternatives: Vec::new(),
inferences: 0,
literals: 0,
db,
opt,
}
}
}
type State<'t, P, C> = Result<Action<'t, P, C>, bool>;
impl<'t, P, C> Search<'t, P, C>
where
P: Clone + Display + Eq + Hash + Neg<Output = P>,
C: Clone + Display + Eq,
{
pub fn prove(&mut self) -> Option<&Steps<'t, P, C>> {
let mut action: Action<'t, P, C> = Action::Prove;
loop {
let result = match action {
Action::Prove => match self.task.clone().next() {
Some(lit) => self.chk(lit),
None => self.fulfill_promise(),
},
Action::Reduce(lit, skip) => self.red(lit, skip),
Action::Extend(lit, contras, skip) => self.ext(lit, contras, skip),
};
match result {
Ok(next) => action = next,
Err(true) => return Some(&self.proof),
Err(false) => return None,
}
}
}
pub fn inferences(&self) -> usize {
self.inferences
}
fn chk(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("checks: {}", lit);
debug!("{} {}", self.literals, lit.head());
debug!("lemmas: {}", self.ctx.lemmas.len());
debug!("path: {}", self.ctx.path.len());
self.literals += 1;
let mut lits = self.task.clone();
let mut path = self.ctx.path.iter();
let mut lemmas = self.ctx.lemmas.iter();
if lits.any(|cl| path.any(|pl| pl.eq_mod(&self.sub, &cl))) {
debug!("regularity");
self.try_alternative()
} else if lemmas.any(|lem| lem.eq_mod(&self.sub, &lit)) {
debug!("lemma");
self.proof.push(Action::Prove);
// do not add lit to lemmas, unlike original leanCoP
// furthermore, do not try red/ext steps if we found a lemma,
// because it does not add anything to substitution
// note that Jens said that this might sometimes be counterproductive,
// because adding to the substitution is also beneficial to cut down search space
self.task.next();
Ok(Action::Prove)
} else {
Ok(Action::Reduce(lit, 0))
}
}
fn red(&mut self, lit: OLit<'t, P, C>, skip: usize) -> State<'t, P, C> {
debug!("reduce: {}", lit);
let alternative = Alternative::from(&*self);
for (pidx, pat) in self.ctx.path.iter().rev().enumerate().skip(skip) {
debug!("try reduce: {}", pat);
let sub_dom_len = self.sub.get_dom_len();
if pat.head() != &-lit.head().clone() {
continue;
}
if pat.args().unify(&mut self.sub, lit.args()) {
debug!("reduce succeeded");
self.proof.push(Action::Reduce(lit, pidx));
if !self.opt.cuts.reduction {
let action = Action::Reduce(lit, pidx + 1);
self.alternatives.push((alternative, action));
}
self.ctx.lemmas.push(lit);
self.task.next();
return Ok(Action::Prove);
} else {
self.sub.set_dom_len(sub_dom_len)
}
}
self.ext0(lit)
}
fn ext0(&mut self, lit: OLit<'t, P, C>) -> State<'t, P, C> {
debug!("extend: {}", lit); | }
}
fn ext(&mut self, lit: OLit<'t, P, C>, cs: Contras<'t, P, C>, skip: usize) -> State<'t, P, C> {
let alt = Alternative::from(&*self);
let prm = Promise::from(&*self);
let sub = SubPtr::from(&self.sub);
for (eidx, entry) in cs.iter().enumerate().skip(skip) {
debug!(
"try extend {}{} (lit = {}, |path| = {})",
lit.head(),
entry,
lit,
self.ctx.path.len()
);
if self.ctx.path.len() >= self.opt.lim && entry.vars.is_some() {
debug!("path limit reached");
continue;
};
let eargs = Offset::new(sub.dom_max(), &entry.args);
if let Some(vars) = entry.vars {
// we have to add 1 here because the lowest variable is 0
self.sub.set_dom_max(sub.dom_max() + vars + 1)
};
debug!("unify {} ~? {}, sub = {}", eargs, lit.args(), self.sub);
if eargs.unify(&mut self.sub, lit.args()) {
debug!("unify succeeded with {}, sub = {}", entry.rest, self.sub);
self.inferences += 1;
// promise to fulfill the current task
// (if the promise is kept and cut is enabled,
// then all alternatives that came after will be discarded)
self.promises.push(prm);
self.proof.push(Action::Extend(lit, cs, eidx));
let action = Action::Extend(lit, cs, eidx + 1);
// register an alternative (that will be discarded
// if the above promise is kept and cut is enabled)
self.alternatives.push((alt, action));
self.task = Task::new(Offset::new(sub.dom_max(), &entry.rest));
self.ctx.path.push(lit);
return Ok(Action::Prove);
} else {
debug!("unify failed");
self.sub.rewind(&sub)
}
}
self.try_alternative()
}
fn fulfill_promise(&mut self) -> State<'t, P, C> {
debug!("fulfill promise ({} left)", self.promises.len());
let prm = self.promises.pop().ok_or(true)?;
self.task = prm.task;
self.ctx.rewind(prm.ctx_ptr);
if let Some(prev) = self.task.next() {
self.ctx.lemmas.push(prev)
};
if let Some(cut) = self.opt.cuts.extension {
use super::cuts::Cut::*;
let alt_len = match cut {
Exclusive => prm.alt_len + 1,
Inclusive => prm.alt_len,
};
debug!("cut {} alternatives", self.alternatives.len() - alt_len);
assert!(alt_len <= self.alternatives.len());
self.alternatives.truncate(alt_len);
}
Ok(Action::Prove)
}
fn try_alternative(&mut self) -> State<'t, P, C> {
debug!("try alternative ({} left)", self.alternatives.len());
self.alternatives.pop().ok_or(false).map(|(alt, action)| {
self.rewind(alt);
action
})
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Alternative<'t, P, C> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx: if st.opt.cuts.extension.is_none() {
Some(st.ctx.clone())
} else {
None
},
ctx_ptr: context::Ptr::from(&st.ctx),
promises: if st.opt.cuts.extension.is_none() {
Some(st.promises.clone())
} else {
None
},
promises_len: st.promises.len(),
sub: SubPtr::from(&st.sub),
proof_len: st.proof.len(),
}
}
}
impl<'t, P, C> From<&Search<'t, P, C>> for Promise<Task<'t, P, C>> {
fn from(st: &Search<'t, P, C>) -> Self {
Self {
task: st.task.clone(),
ctx_ptr: context::Ptr::from(&st.ctx),
alt_len: st.alternatives.len(),
}
}
}
impl<'t, P, C> Rewind<Alternative<'t, P, C>> for Search<'t, P, C> {
fn rewind(&mut self, alt: Alternative<'t, P, C>) {
self.task = alt.task;
if let Some(ctx) = alt.ctx {
self.ctx = ctx;
} else {
self.ctx.rewind(alt.ctx_ptr);
}
if let Some(promises) = alt.promises {
self.promises = promises;
} else {
assert!(self.promises.len() >= alt.promises_len);
self.promises.truncate(alt.promises_len);
}
self.sub.rewind(&alt.sub);
self.proof.truncate(alt.proof_len);
}
} | let neg = -lit.head().clone();
match self.db.get(&neg) {
Some(entries) => self.ext(lit, entries, 0),
None => self.try_alternative(), | random_line_split |
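// How a depth-limited search like Search::prove is typically driven in
// leanCoP-style provers: retry with a growing path limit (iterative
// deepening). Standalone sketch -- search_with_limit is a toy stand-in for
// constructing Search with Opt { lim, .. } and calling prove(); it is not
// the crate's API.
fn search_with_limit(lim: usize) -> Option<&'static str> {
    // pretend the shortest proof needs a path of length 3
    if lim >= 3 {
        Some("proof")
    } else {
        None
    }
}

fn main() {
    let proof = (1usize..).find_map(search_with_limit);
    assert_eq!(proof, Some("proof"));
}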
main.rs | #[macro_use]
extern crate log;
extern crate fern;
extern crate chrono;
extern crate libc;
mod configuration;
use configuration::ServerData;
use configuration::ClientData;
use std::sync::mpsc;
use std::thread;
use std::time;
use std::time::SystemTime;
use std::env;
/*==============================================================================
* Loggers
*------------------------------------------------------------------------------
*
*/
fn setup_terminal_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(std::io::stdout())
.apply()?;
Ok(())
}
fn setup_file_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(fern::log_file("output.log")?)
.apply()?;
Ok(())
}
/*==============================================================================
* Packet
*------------------------------------------------------------------------------
*
*/
struct Packet {
from: VirtualLink,
workload: u64,
origin: u32,
timestamp: SystemTime,
}
impl Packet {
fn from_iface(iface: &NetworkInterface, workload: u64, origin: u32) -> Packet {
Packet {
from: (*iface).get_virtual_link(),
workload: workload,
origin: origin,
timestamp: SystemTime::now(),
}
}
fn answer_me_at(tx: &mpsc::Sender<Packet>, workload: u64, origin: u32, timestamp: SystemTime) -> Packet {
Packet {
from: VirtualLink::linked_to(tx),
workload: workload,
origin: origin,
timestamp: timestamp,
}
}
}
/*==============================================================================
* VirtualLink
*------------------------------------------------------------------------------
*
*/
struct VirtualLink {
s: mpsc::Sender<Packet>
}
impl VirtualLink {
fn to_iface(interface: &NetworkInterface) -> VirtualLink {
VirtualLink {
s: (*interface).s.clone()
}
}
fn linked_to(tx: &mpsc::Sender<Packet>) -> VirtualLink {
VirtualLink {
s: (*tx).clone()
}
}
fn send_through(&self, packet: Packet) {
self.s.send(packet).unwrap()
}
}
/*==============================================================================
* Network Interface
*------------------------------------------------------------------------------
*
*/
struct NetworkInterface {
s: mpsc::Sender<Packet>,
r: mpsc::Receiver<Packet>
}
impl NetworkInterface {
fn new() -> NetworkInterface {
let (tx, rx) = mpsc::channel();
NetworkInterface {
s: tx,
r: rx
}
}
fn read(&self) -> Packet |
fn get_virtual_link(&self) -> VirtualLink {
VirtualLink::to_iface(self)
}
}
/*==============================================================================
* Host
*
*/
struct Host {
nic: NetworkInterface,
}
impl Host {
fn new() -> Host {
Host {
nic: NetworkInterface::new(),
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.nic.get_virtual_link()
}
}
/*==============================================================================
* Stats
*
*/
struct Stats {
samples: u64,
total: u64,
}
impl Stats {
fn new() -> Stats {
Stats {
samples: 0,
total: 0,
}
}
fn update_stats(&mut self, new_sample_time: u64) {
self.samples += 1;
self.total += new_sample_time;
}
fn get_average(&self) -> f64 {
if self.samples == 0 {
return 0.0;
}
(self.total as f64) / (self.samples as f64)
}
}
/*==============================================================================
* Server
*
*/
struct Server {
id: u32,
host: Host,
processing_power: u64
}
impl Server {
fn new(id: u32, server_data: ServerData) -> Server {
Server {
id: id,
host: Host::new(),
processing_power: server_data.get_processing_power()
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.host.get_virtual_link()
}
fn run(self) {
info!("[S{}] Ejecutando servidor {}", self.id, self.id);
let rx = self.host.nic.r;
let tx = self.host.nic.s;
for message in rx {
// Obtenemos la cantidad de cuadrantes a procesar.
let workload = message.workload;
info!("[S{}] Recibidas {} unidades de trabajo desde observatorio {}", self.id, workload, message.origin);
/*
* Procesamos los cuadrantes.
*
* El workload tiene unidades de trabajo. El poder de procesamiento
* tiene unidades de trabajo por segundo. El sleep time tiene unidades
* de milisegundos.
*
* Por ejemplo, un servidor recibe 5 unidades de trabajo desde el
* cliente. El servidor puede procesar dos unidades de trabajo por
* segundo. El hilo dormirá entonces 2500 milisegundos simulando
* el procesamiento de la carga. Para acelerar o relentizar
* la simulación, podemos ajustar el factor global de velocidad;
* por ejemplo, si el factor global es 2.0, en vez de dormir los 2500
* milisegundos dormiría 1250.
*
*/
let sleep_time = (1000*workload)/self.processing_power;
let sleep_time_scaled = ((sleep_time as f64)/GLOBAL_SPEED) as u64;
info!("[S{}] Tiempo estimado: {}ms (s: {}ms)", self.id, sleep_time, sleep_time_scaled);
thread::sleep(time::Duration::from_millis(sleep_time_scaled));
info!("[S{}] Procesamiento terminado; devolviendo ACK a observatorio {}", self.id, message.origin);
// Devolvemos el ACK.
let response = Packet::answer_me_at(&tx, 0, self.id, message.timestamp);
message.from.send_through(response);
}
}
}
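// A minimal sketch (not part of the original design) that lifts the
// sleep-time formula above into a testable helper with the same semantics:
// workload in work units, processing power in units per second, result in
// milliseconds, scaled by the global speed factor.
#[allow(dead_code)]
fn scaled_sleep_ms(workload: u64, processing_power: u64, speed: f64) -> u64 {
let sleep_time = (1000*workload)/processing_power;
((sleep_time as f64)/speed) as u64
}
// The example from the comment above: 5 units at 2 units/s -> 2500 ms,
// and with a global speed factor of 2.0 -> 1250 ms:
// assert_eq!(scaled_sleep_ms(5, 2, 1.0), 2500);
// assert_eq!(scaled_sleep_ms(5, 2, 2.0), 1250);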
/*==============================================================================
* Client
*
*/
struct Target {
virtual_link: VirtualLink,
weight: f64
}
struct Client {
id: u32,
host: Host,
distribution_scheme: Vec<Target>,
work_generation_rate: u64
}
impl Client {
fn new(id: u32, servers: &Vec<Server>, client_data: ClientData) -> Client {
let workshare: &Vec<f64> = client_data.get_workshare();
let mut distribution = Vec::new();
for i in 0..servers.len() {
distribution.push(Target {
virtual_link: servers[i].get_virtual_link(),
weight: workshare[i]
});
}
Client {
id: id,
host: Host::new(),
distribution_scheme: distribution,
work_generation_rate: client_data.get_work_generation_rate()
}
}
fn run(self) {
info!("[C{}] Ejecutando cliente {}", self.id, self.id);
/*
* Cada cierta cantidad de tiempo, el observatorio genera x cuadrantes.
* A partir de ahí itera por la lista de servidores distribuyendo los
* cuadrantes según los factores de distribución (e.g., si debe enviar
* una fracción p_k de los cuadrantes al servidor k, enviará p_k*x
* cuadrantes al servidor k).
*
* Habiendo enviado los mensajes, simplemente espera las respuestas.
* Suponiendo alternativamente que hay que seguir generando cuadrantes
* mientras se toman fotos, se pueden tener internamente dos threads,
* uno acumulando cuadrantes y otro tomando cuadrantes y distribuyendolos.
*
* Para medir el tiempo de respuesta del observatorio se puede ir
* calculando una media móvil, tomando el tiempo que tarda en responder
* cada servidor.
*/
let targets = &self.distribution_scheme;
let mut stats : Stats = Stats::new();
loop {
let x = self.work_generation_rate;
info!("[C{}] Generando {} unidades de trabajo", self.id, x);
// Distribuimos los x cuadrantes generados.
let mut sid = 0;
for target in targets {
sid += 1;
let workload = ((x as f64)*(target.weight)) as u64;
let packet = Packet::from_iface(&self.host.nic, workload, self.id);
info!("[C{}] Enviando {} unidades al servidor {}", self.id, workload, sid);
target.virtual_link.send_through(packet);
}
// Esperamos la respuesta de cada servidor.
info!("[C{}] Esperando respuestas", self.id);
for _d in targets {
let _response = self.host.nic.read();
// Cálculo de tiempo de respuesta
let response_time_duration = _response.timestamp.elapsed().unwrap();
let response_time_ms = response_time_duration.as_secs() + ((response_time_duration.subsec_millis() * 1000) as u64);
stats.update_stats(response_time_ms);
}
// Impresión de estadística hasta el momento
info!("[C{}] Promedio de respuesta parcial: {} ms", self.id, format!("{:.*}", 2, stats.get_average()));
info!("[C{}] Todos los servidores terminaron de procesar el bache", self.id);
let sleep_time = (3000.0/GLOBAL_SPEED) as u64;
thread::sleep(time::Duration::from_millis(sleep_time));
}
}
}
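// Hedged sketch isolating the millisecond conversion used in Client::run
// above. `Duration::as_millis` (std, Rust 1.33+) is the simplest correct
// form; the manual sum below is equivalent whenever the result fits in u64.
#[allow(dead_code)]
fn duration_to_ms(d: time::Duration) -> u64 {
d.as_secs() * 1000 + (d.subsec_millis() as u64) // == d.as_millis() as u64
}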
/*==============================================================================
* Main
*
*/
const GLOBAL_SPEED: f64 = 1.0;
fn main() {
let args : Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "--debug" {
setup_file_logging().expect("Couldn't set up logger");
} else {
setup_terminal_logging().expect("Couldn't set up logger");
}
/*
* Load the configuration. The configuration is a text file with
* key-value pairs. The configuration object can be used as
*
* configuration.get("key") // returns the value associated with "key".
*/
info!("[T0] Loading configuration");
let mut configuration = configuration::Configuration::new();
configuration.load();
let mut threads = Vec::new();
let mut servers: Vec<Server> = Vec::new();
let mut clients: Vec<Client> = Vec::new();
info!("[T0] Initializing servers");
let server_data: Vec<ServerData> = configuration.get_server_dataset();
let mut server_count = 0;
for d in server_data {
server_count += 1;
servers.push(Server::new(server_count, d));
}
info!("[T0] Initializing clients");
let client_data: Vec<ClientData> = configuration.get_client_dataset();
let mut client_count = 0;
for c in client_data {
client_count += 1;
clients.push(Client::new(client_count, &servers, c));
}
info!("[T0] Launching server threads");
for server in servers {
let th = thread::spawn(move || {
server.run();
});
threads.push(th);
}
info!("[T0] Launching client threads");
for client in clients {
let th = thread::spawn(move || {
client.run();
});
threads.push(th);
}
info!("[T0] Waiting for the program to finish");
for th in threads {
th.join().unwrap();
}
}
| {
self.r.recv().unwrap()
} | identifier_body |
main.rs | #[macro_use]
extern crate log;
extern crate fern;
extern crate chrono;
extern crate libc;
mod configuration;
use configuration::ServerData;
use configuration::ClientData;
use std::sync::mpsc;
use std::thread;
use std::time;
use std::time::SystemTime;
use std::env;
/*==============================================================================
* Loggers
*------------------------------------------------------------------------------
*
*/
fn setup_terminal_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(std::io::stdout())
.apply()?;
Ok(())
}
fn setup_file_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(fern::log_file("output.log")?)
.apply()?;
Ok(())
}
/*==============================================================================
* Packet
*------------------------------------------------------------------------------
*
*/
struct Packet {
from: VirtualLink,
workload: u64,
origin: u32,
timestamp: SystemTime,
}
impl Packet {
fn from_iface(iface: &NetworkInterface, workload: u64, origin: u32) -> Packet {
Packet {
from: (*iface).get_virtual_link(),
workload: workload,
origin: origin,
timestamp: SystemTime::now(),
}
}
fn answer_me_at(tx: &mpsc::Sender<Packet>, workload: u64, origin: u32, timestamp: SystemTime) -> Packet {
Packet {
from: VirtualLink::linked_to(tx),
workload: workload,
origin: origin,
timestamp: timestamp,
}
}
}
/*==============================================================================
* VirtualLink
*------------------------------------------------------------------------------
*
*/
struct VirtualLink {
s: mpsc::Sender<Packet>
}
impl VirtualLink {
fn to_iface(interface: &NetworkInterface) -> VirtualLink {
VirtualLink {
s: (*interface).s.clone()
}
}
fn linked_to(tx: &mpsc::Sender<Packet>) -> VirtualLink {
VirtualLink {
s: (*tx).clone()
}
}
fn send_through(&self, packet: Packet) {
self.s.send(packet).unwrap()
}
}
/*==============================================================================
* Network Interface
*------------------------------------------------------------------------------
*
*/
struct NetworkInterface {
s: mpsc::Sender<Packet>,
r: mpsc::Receiver<Packet>
}
impl NetworkInterface {
fn new() -> NetworkInterface {
let (tx, rx) = mpsc::channel();
NetworkInterface {
s: tx,
r: rx
}
}
fn read(&self) -> Packet {
self.r.recv().unwrap()
}
fn get_virtual_link(&self) -> VirtualLink {
VirtualLink::to_iface(self)
}
}
/*==============================================================================
* Host
*
*/
struct Host {
nic: NetworkInterface,
}
impl Host {
fn new() -> Host {
Host {
nic: NetworkInterface::new(),
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.nic.get_virtual_link()
}
}
/*==============================================================================
* Stats
*
*/
struct Stats {
samples: u64,
total: u64,
}
impl Stats {
fn new() -> Stats {
Stats {
samples: 0,
total: 0,
}
}
fn update_stats(&mut self, new_sample_time: u64) {
self.samples += 1;
self.total += new_sample_time;
}
fn get_average(&self) -> f64 {
if self.samples == 0 |
(self.total as f64) / (self.samples as f64)
}
}
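// Illustrative usage sketch only: Stats keeps a cumulative mean, so
// repeated updates converge on the arithmetic average of the samples.
#[allow(dead_code)]
fn stats_demo() {
let mut s = Stats::new();
for ms in vec![100u64, 200, 300] {
s.update_stats(ms);
}
assert_eq!(s.get_average(), 200.0);
}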
/*==============================================================================
* Server
*
*/
struct Server {
id: u32,
host: Host,
processing_power: u64
}
impl Server {
fn new(id: u32, server_data: ServerData) -> Server {
Server {
id: id,
host: Host::new(),
processing_power: server_data.get_processing_power()
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.host.get_virtual_link()
}
fn run(self) {
info!("[S{}] Ejecutando servidor {}", self.id, self.id);
let rx = self.host.nic.r;
let tx = self.host.nic.s;
for message in rx {
// Obtenemos la cantidad de cuadrantes a procesar.
let workload = message.workload;
info!("[S{}] Recibidas {} unidades de trabajo desde observatorio {}", self.id, workload, message.origin);
/*
* Procesamos los cuadrantes.
*
* El workload tiene unidades de trabajo. El poder de procesamiento
* tiene unidades de trabajo por segundo. El sleep time tiene unidades
* de milisegundos.
*
* Por ejemplo, un servidor recibe 5 unidades de trabajo desde el
* cliente. El servidor puede procesar dos unidades de trabajo por
* segundo. El hilo dormirá entonces 2500 milisegundos simulando
* el procesamiento de la carga. Para acelerar o relentizar
* la simulación, podemos ajustar el factor global de velocidad;
* por ejemplo, si el factor global es 2.0, en vez de dormir los 2500
* milisegundos dormiría 1250.
*
*/
let sleep_time = (1000*workload)/self.processing_power;
let sleep_time_scaled = ((sleep_time as f64)/GLOBAL_SPEED) as u64;
info!("[S{}] Tiempo estimado: {}ms (s: {}ms)", self.id, sleep_time, sleep_time_scaled);
thread::sleep(time::Duration::from_millis(sleep_time_scaled));
info!("[S{}] Procesamiento terminado; devolviendo ACK a observatorio {}", self.id, message.origin);
// Devolvemos el ACK.
let response = Packet::answer_me_at(&tx, 0, self.id, message.timestamp);
message.from.send_through(response);
}
}
}
/*==============================================================================
* Client
*
*/
struct Target {
virtual_link: VirtualLink,
weight: f64
}
struct Client {
id: u32,
host: Host,
distribution_scheme: Vec<Target>,
work_generation_rate: u64
}
impl Client {
fn new(id: u32, servers: &Vec<Server>, client_data: ClientData) -> Client {
let workshare: &Vec<f64> = client_data.get_workshare();
let mut distribution = Vec::new();
for i in 0..servers.len() {
distribution.push(Target {
virtual_link: servers[i].get_virtual_link(),
weight: workshare[i]
});
}
Client {
id: id,
host: Host::new(),
distribution_scheme: distribution,
work_generation_rate: client_data.get_work_generation_rate()
}
}
fn run(self) {
info!("[C{}] Ejecutando cliente {}", self.id, self.id);
/*
* Cada cierta cantidad de tiempo, el observatorio genera x cuadrantes.
* A partir de ahí itera por la lista de servidores distribuyendo los
* cuadrantes según los factores de distribución (e.g., si debe enviar
* una fracción p_k de los cuadrantes al servidor k, enviará p_k*x
* cuadrantes al servidor k).
*
* Habiendo enviado los mensajes, simplemente espera las respuestas.
* Suponiendo alternativamente que hay que seguir generando cuadrantes
* mientras se toman fotos, se pueden tener internamente dos threads,
* uno acumulando cuadrantes y otro tomando cuadrantes y distribuyendolos.
*
* Para medir el tiempo de respuesta del observatorio se puede ir
* calculando una media móvil, tomando el tiempo que tarda en responder
* cada servidor.
*/
let targets = &self.distribution_scheme;
let mut stats : Stats = Stats::new();
loop {
let x = self.work_generation_rate;
info!("[C{}] Generando {} unidades de trabajo", self.id, x);
// Distribuimos los x cuadrantes generados.
let mut sid = 0;
for target in targets {
sid += 1;
let workload = ((x as f64)*(target.weight)) as u64;
let packet = Packet::from_iface(&self.host.nic, workload, self.id);
info!("[C{}] Enviando {} unidades al servidor {}", self.id, workload, sid);
target.virtual_link.send_through(packet);
}
// Esperamos la respuesta de cada servidor.
info!("[C{}] Esperando respuestas", self.id);
for _d in targets {
let _response = self.host.nic.read();
// Cálculo de tiempo de respuesta
let response_time_duration = _response.timestamp.elapsed().unwrap();
let response_time_ms = response_time_duration.as_secs() + ((response_time_duration.subsec_millis() * 1000) as u64);
stats.update_stats(response_time_ms);
}
// Impresión de estadística hasta el momento
info!("[C{}] Promedio de respuesta parcial: {} ms", self.id, format!("{:.*}", 2, stats.get_average()));
info!("[C{}] Todos los servidores terminaron de procesar el bache", self.id);
let sleep_time = (3000.0/GLOBAL_SPEED) as u64;
thread::sleep(time::Duration::from_millis(sleep_time));
}
}
}
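// A hedged sketch of the p_k * x split described in Client::run above,
// reduced to plain numbers (no channels); the function name is invented.
// Note that the `as u64` cast truncates, so fractional units are dropped,
// e.g. split_workload(10, &[0.5, 0.3, 0.2]) == vec![5, 3, 2].
#[allow(dead_code)]
fn split_workload(x: u64, weights: &[f64]) -> Vec<u64> {
weights.iter().map(|w| ((x as f64)*w) as u64).collect()
}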
/*==============================================================================
* Main
*
*/
const GLOBAL_SPEED: f64 = 1.0;
fn main() {
let args : Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "--debug" {
setup_file_logging().expect("Couldn't set up logger");
} else {
setup_terminal_logging().expect("Couldn't set up logger");
}
/*
* Load the configuration. The configuration is a text file with
* key-value pairs. The configuration object can be used as
*
* configuration.get("key") // returns the value associated with "key".
*/
info!("[T0] Loading configuration");
let mut configuration = configuration::Configuration::new();
configuration.load();
let mut threads = Vec::new();
let mut servers: Vec<Server> = Vec::new();
let mut clients: Vec<Client> = Vec::new();
info!("[T0] Initializing servers");
let server_data: Vec<ServerData> = configuration.get_server_dataset();
let mut server_count = 0;
for d in server_data {
server_count += 1;
servers.push(Server::new(server_count, d));
}
info!("[T0] Initializing clients");
let client_data: Vec<ClientData> = configuration.get_client_dataset();
let mut client_count = 0;
for c in client_data {
client_count += 1;
clients.push(Client::new(client_count, &servers, c));
}
info!("[T0] Launching server threads");
for server in servers {
let th = thread::spawn(move || {
server.run();
});
threads.push(th);
}
info!("[T0] Launching client threads");
for client in clients {
let th = thread::spawn(move || {
client.run();
});
threads.push(th);
}
info!("[T0] Waiting for the program to finish");
for th in threads {
th.join().unwrap();
}
}
| {
return 0.0;
} | conditional_block |
main.rs | #[macro_use]
extern crate log;
extern crate fern;
extern crate chrono;
extern crate libc;
mod configuration;
use configuration::ServerData;
use configuration::ClientData;
use std::sync::mpsc;
use std::thread;
use std::time;
use std::time::SystemTime;
use std::env;
/*==============================================================================
* Loggers
*------------------------------------------------------------------------------
*
*/
fn setup_terminal_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(std::io::stdout())
.apply()?;
Ok(())
}
fn setup_file_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(fern::log_file("output.log")?)
.apply()?;
Ok(())
}
/*==============================================================================
* Packet
*------------------------------------------------------------------------------
*
*/
struct Packet {
from: VirtualLink,
workload: u64,
origin: u32,
timestamp: SystemTime,
}
impl Packet {
fn from_iface(iface: &NetworkInterface, workload: u64, origin: u32) -> Packet {
Packet {
from: (*iface).get_virtual_link(),
workload: workload,
origin: origin,
timestamp: SystemTime::now(),
}
}
fn answer_me_at(tx: &mpsc::Sender<Packet>, workload: u64, origin: u32, timestamp: SystemTime) -> Packet {
Packet {
from: VirtualLink::linked_to(tx),
workload: workload,
origin: origin,
timestamp: timestamp,
}
}
}
/*==============================================================================
* VirtualLink
*------------------------------------------------------------------------------
*
*/
struct VirtualLink {
s: mpsc::Sender<Packet>
}
impl VirtualLink {
fn to_iface(interface: &NetworkInterface) -> VirtualLink {
VirtualLink {
s: (*interface).s.clone()
}
}
fn linked_to(tx: &mpsc::Sender<Packet>) -> VirtualLink {
VirtualLink {
s: (*tx).clone()
}
}
fn send_through(&self, packet: Packet) {
self.s.send(packet).unwrap()
}
}
/*==============================================================================
* Network Interface
*------------------------------------------------------------------------------
*
*/
struct NetworkInterface {
s: mpsc::Sender<Packet>,
r: mpsc::Receiver<Packet>
}
impl NetworkInterface {
fn new() -> NetworkInterface {
let (tx, rx) = mpsc::channel();
NetworkInterface {
s: tx,
r: rx
}
}
fn read(&self) -> Packet {
self.r.recv().unwrap()
}
fn get_virtual_link(&self) -> VirtualLink {
VirtualLink::to_iface(self)
}
}
/*==============================================================================
* Host
*
*/
struct Host {
nic: NetworkInterface,
}
impl Host {
fn new() -> Host {
Host {
nic: NetworkInterface::new(),
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.nic.get_virtual_link()
}
}
/*==============================================================================
* Stats
*
*/
struct Stats {
samples: u64,
total: u64,
}
impl Stats {
fn new() -> Stats {
Stats {
samples: 0,
total: 0,
}
}
fn update_stats(&mut self, new_sample_time: u64) {
self.samples += 1;
self.total += new_sample_time;
}
fn get_average(&self) -> f64 {
if self.samples == 0 {
return 0.0;
}
(self.total as f64) / (self.samples as f64)
}
}
/*==============================================================================
* Server
*
*/
struct Server {
id: u32,
host: Host,
processing_power: u64
}
impl Server {
fn new(id: u32, server_data: ServerData) -> Server {
Server {
id: id,
host: Host::new(),
processing_power: server_data.get_processing_power()
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.host.get_virtual_link()
}
fn run(self) {
info!("[S{}] Ejecutando servidor {}", self.id, self.id);
let rx = self.host.nic.r;
let tx = self.host.nic.s;
for message in rx {
// Obtenemos la cantidad de cuadrantes a procesar.
let workload = message.workload;
info!("[S{}] Recibidas {} unidades de trabajo desde observatorio {}", self.id, workload, message.origin);
/*
* Procesamos los cuadrantes.
*
* El workload tiene unidades de trabajo. El poder de procesamiento
* tiene unidades de trabajo por segundo. El sleep time tiene unidades
* de milisegundos.
*
* Por ejemplo, un servidor recibe 5 unidades de trabajo desde el
* cliente. El servidor puede procesar dos unidades de trabajo por
* segundo. El hilo dormirá entonces 2500 milisegundos simulando
* el procesamiento de la carga. Para acelerar o relentizar
* la simulación, podemos ajustar el factor global de velocidad;
* por ejemplo, si el factor global es 2.0, en vez de dormir los 2500
* milisegundos dormiría 1250.
*
*/
let sleep_time = (1000*workload)/self.processing_power;
let sleep_time_scaled = ((sleep_time as f64)/GLOBAL_SPEED) as u64;
info!("[S{}] Tiempo estimado: {}ms (s: {}ms)", self.id, sleep_time, sleep_time_scaled);
thread::sleep(time::Duration::from_millis(sleep_time_scaled));
info!("[S{}] Procesamiento terminado; devolviendo ACK a observatorio {}", self.id, message.origin);
// Devolvemos el ACK.
let response = Packet::answer_me_at(&tx, 0, self.id, message.timestamp);
message.from.send_through(response);
}
}
}
/*==============================================================================
* Client
*
*/
struct Target {
virtual_link: VirtualLink,
weight: f64
}
struct Client {
id: u32,
host: Host,
distribution_scheme: Vec<Target>,
work_generation_rate: u64
}
impl Client {
fn new(id: u32, servers: &Vec<Server>, client_data: ClientData) -> Client {
let workshare: &Vec<f64> = client_data.get_workshare();
let mut distribution = Vec::new();
for i in 0..servers.len() {
distribution.push(Target {
virtual_link: servers[i].get_virtual_link(),
weight: workshare[i]
});
}
Client {
id: id,
host: Host::new(),
distribution_scheme: distribution,
work_generation_rate: client_data.get_work_generation_rate()
}
}
fn run(self) {
info!("[C{}] Ejecutando cliente {}", self.id, self.id);
/*
* Cada cierta cantidad de tiempo, el observatorio genera x cuadrantes.
* A partir de ahí itera por la lista de servidores distribuyendo los
* cuadrantes según los factores de distribución (e.g., si debe enviar
* una fracción p_k de los cuadrantes al servidor k, enviará p_k*x
* cuadrantes al servidor k).
*
* Habiendo enviado los mensajes, simplemente espera las respuestas.
* Suponiendo alternativamente que hay que seguir generando cuadrantes
* mientras se toman fotos, se pueden tener internamente dos threads,
* uno acumulando cuadrantes y otro tomando cuadrantes y distribuyendolos.
*
* Para medir el tiempo de respuesta del observatorio se puede ir
* calculando una media móvil, tomando el tiempo que tarda en responder
* cada servidor.
*/
let targets = &self.distribution_scheme;
let mut stats : Stats = Stats::new();
loop {
let x = self.work_generation_rate;
info!("[C{}] Generando {} unidades de trabajo", self.id, x);
// Distribuimos los x cuadrantes generados.
let mut sid = 0;
for target in targets {
sid += 1;
let workload = ((x as f64)*(target.weight)) as u64;
let packet = Packet::from_iface(&self.host.nic, workload, self.id);
info!("[C{}] Enviando {} unidades al servidor {}", self.id, workload, sid);
target.virtual_link.send_through(packet);
}
// Esperamos la respuesta de cada servidor.
info!("[C{}] Esperando respuestas", self.id);
for _d in targets {
let _response = self.host.nic.read();
// Cálculo de tiempo de respuesta
let response_time_duration = _response.timestamp.elapsed().unwrap();
let response_time_ms = response_time_duration.as_secs() + ((response_time_duration.subsec_millis() * 1000) as u64);
stats.update_stats(response_time_ms);
}
// Impresión de estadística hasta el momento
info!("[C{}] Promedio de respuesta parcial: {} ms", self.id, format!("{:.*}", 2, stats.get_average()));
info!("[C{}] Todos los servidores terminaron de procesar el bache", self.id);
let sleep_time = (3000.0/GLOBAL_SPEED) as u64;
thread::sleep(time::Duration::from_millis(sleep_time));
}
}
}
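// The comment in Client::run mentions a moving average, while Stats keeps
// a cumulative mean. A hedged sketch of one alternative, an exponentially
// weighted moving average; `alpha` is an illustrative smoothing factor.
#[allow(dead_code)]
struct Ewma { value: f64, alpha: f64 }
#[allow(dead_code)]
impl Ewma {
fn new(alpha: f64) -> Ewma { Ewma { value: 0.0, alpha: alpha } }
fn update(&mut self, sample: f64) {
self.value = self.alpha*sample + (1.0 - self.alpha)*self.value;
}
}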
/*==============================================================================
* Main
*
*/
const GLOBAL_SPEED: f64 = 1.0;
fn main() {
let args : Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "--debug" {
setup_file_logging().expect("Couldn't set up logger");
} else {
setup_terminal_logging().expect("Couldn't set up logger");
}
/*
* Load the configuration. The configuration is a text file with
* key-value pairs. The configuration object can be used as
*
* configuration.get("key") // returns the value associated with "key".
*/
info!("[T0] Loading configuration");
let mut configuration = configuration::Configuration::new();
configuration.load(); |
let mut threads = Vec::new();
let mut servers: Vec<Server> = Vec::new();
let mut clients: Vec<Client> = Vec::new();
info!("[T0] Inicializando servidores");
let server_data: Vec<ServerData> = configuration.get_server_dataset();
let mut server_count = 0;
for d in server_data {
server_count += 1;
servers.push(Server::new(server_count, d));
}
info!("[T0] Inicializando clientes");
let client_data: Vec<ClientData> = configuration.get_client_dataset();
let mut client_count = 0;
for c in client_data {
client_count += 1;
clients.push(Client::new(client_count, &servers, c));
}
info!("[T0] Lanzando hilos servidores");
for server in servers {
let th = thread::spawn(move || {
server.run();
});
threads.push(th);
}
info!("[T0] Lanzando hilos clientes");
for client in clients {
let th = thread::spawn(move || {
client.run();
});
threads.push(th);
}
info!("[T0] Esperando la finalización del programa");
for th in threads {
th.join().unwrap();
}
} | random_line_split |
|
main.rs | #[macro_use]
extern crate log;
extern crate fern;
extern crate chrono;
extern crate libc;
mod configuration;
use configuration::ServerData;
use configuration::ClientData;
use std::sync::mpsc;
use std::thread;
use std::time;
use std::time::SystemTime;
use std::env;
/*==============================================================================
* Loggers
*------------------------------------------------------------------------------
*
*/
fn setup_terminal_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(std::io::stdout())
.apply()?;
Ok(())
}
fn setup_file_logging() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, _record| unsafe {
out.finish(format_args!(
"{}[{}] {}",
chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
libc::pthread_self(),
message
))
})
.level(log::LevelFilter::Info)
.chain(fern::log_file("output.log")?)
.apply()?;
Ok(())
}
/*==============================================================================
* Packet
*------------------------------------------------------------------------------
*
*/
struct Packet {
from: VirtualLink,
workload: u64,
origin: u32,
timestamp: SystemTime,
}
impl Packet {
fn from_iface(iface: &NetworkInterface, workload: u64, origin: u32) -> Packet {
Packet {
from: (*iface).get_virtual_link(),
workload: workload,
origin: origin,
timestamp: SystemTime::now(),
}
}
fn answer_me_at(tx: &mpsc::Sender<Packet>, workload: u64, origin: u32, timestamp: SystemTime) -> Packet {
Packet {
from: VirtualLink::linked_to(tx),
workload: workload,
origin: origin,
timestamp: timestamp,
}
}
}
/*==============================================================================
* VirtualLink
*------------------------------------------------------------------------------
*
*/
struct VirtualLink {
s: mpsc::Sender<Packet>
}
impl VirtualLink {
fn to_iface(interface: &NetworkInterface) -> VirtualLink {
VirtualLink {
s: (*interface).s.clone()
}
}
fn linked_to(tx: &mpsc::Sender<Packet>) -> VirtualLink {
VirtualLink {
s: (*tx).clone()
}
}
fn send_through(&self, packet: Packet) {
self.s.send(packet).unwrap()
}
}
/*==============================================================================
* Network Interface
*------------------------------------------------------------------------------
*
*/
struct NetworkInterface {
s: mpsc::Sender<Packet>,
r: mpsc::Receiver<Packet>
}
impl NetworkInterface {
fn | () -> NetworkInterface {
let (tx, rx) = mpsc::channel();
NetworkInterface {
s: tx,
r: rx
}
}
fn read(&self) -> Packet {
self.r.recv().unwrap()
}
fn get_virtual_link(&self) -> VirtualLink {
VirtualLink::to_iface(self)
}
}
/*==============================================================================
* Host
*
*/
struct Host {
nic: NetworkInterface,
}
impl Host {
fn new() -> Host {
Host {
nic: NetworkInterface::new(),
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.nic.get_virtual_link()
}
}
/*==============================================================================
* Stats
*
*/
struct Stats {
samples: u64,
total: u64,
}
impl Stats {
fn new() -> Stats {
Stats {
samples: 0,
total: 0,
}
}
fn update_stats(&mut self, new_sample_time: u64) {
self.samples += 1;
self.total += new_sample_time;
}
fn get_average(&self) -> f64 {
if self.samples == 0 {
return 0.0;
}
(self.total as f64) / (self.samples as f64)
}
}
/*==============================================================================
* Server
*
*/
struct Server {
id: u32,
host: Host,
processing_power: u64
}
impl Server {
fn new(id: u32, server_data: ServerData) -> Server {
Server {
id: id,
host: Host::new(),
processing_power: server_data.get_processing_power()
}
}
fn get_virtual_link(&self) -> VirtualLink {
self.host.get_virtual_link()
}
fn run(self) {
info!("[S{}] Ejecutando servidor {}", self.id, self.id);
let rx = self.host.nic.r;
let tx = self.host.nic.s;
for message in rx {
// Obtenemos la cantidad de cuadrantes a procesar.
let workload = message.workload;
info!("[S{}] Recibidas {} unidades de trabajo desde observatorio {}", self.id, workload, message.origin);
/*
* Procesamos los cuadrantes.
*
* El workload tiene unidades de trabajo. El poder de procesamiento
* tiene unidades de trabajo por segundo. El sleep time tiene unidades
* de milisegundos.
*
* Por ejemplo, un servidor recibe 5 unidades de trabajo desde el
* cliente. El servidor puede procesar dos unidades de trabajo por
* segundo. El hilo dormirá entonces 2500 milisegundos simulando
* el procesamiento de la carga. Para acelerar o relentizar
* la simulación, podemos ajustar el factor global de velocidad;
* por ejemplo, si el factor global es 2.0, en vez de dormir los 2500
* milisegundos dormiría 1250.
*
*/
let sleep_time = (1000*workload)/self.processing_power;
let sleep_time_scaled = ((sleep_time as f64)/GLOBAL_SPEED) as u64;
info!("[S{}] Tiempo estimado: {}ms (s: {}ms)", self.id, sleep_time, sleep_time_scaled);
thread::sleep(time::Duration::from_millis(sleep_time_scaled));
info!("[S{}] Procesamiento terminado; devolviendo ACK a observatorio {}", self.id, message.origin);
// Devolvemos el ACK.
let response = Packet::answer_me_at(&tx, 0, self.id, message.timestamp);
message.from.send_through(response);
}
}
}
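// Note (sketch): the `for message in rx` loop above only ends once every
// Sender for that channel has been dropped; since each Client holds clones
// of the server senders and loops forever, the servers never exit here.
// Minimal illustration of that mpsc semantics:
#[allow(dead_code)]
fn channel_close_demo() {
let (tx2, rx2) = mpsc::channel::<u32>();
tx2.send(1).unwrap();
drop(tx2); // dropping the last Sender closes the channel
let received: Vec<u32> = rx2.iter().collect(); // iteration ends cleanly
assert_eq!(received, vec![1]);
}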
/*==============================================================================
* Client
*
*/
struct Target {
virtual_link: VirtualLink,
weight: f64
}
struct Client {
id: u32,
host: Host,
distribution_scheme: Vec<Target>,
work_generation_rate: u64
}
impl Client {
fn new(id: u32, servers: &Vec<Server>, client_data: ClientData) -> Client {
let workshare: &Vec<f64> = client_data.get_workshare();
let mut distribution = Vec::new();
for i in 0..servers.len() {
distribution.push(Target {
virtual_link: servers[i].get_virtual_link(),
weight: workshare[i]
});
}
Client {
id: id,
host: Host::new(),
distribution_scheme: distribution,
work_generation_rate: client_data.get_work_generation_rate()
}
}
fn run(self) {
info!("[C{}] Ejecutando cliente {}", self.id, self.id);
/*
* Cada cierta cantidad de tiempo, el observatorio genera x cuadrantes.
* A partir de ahí itera por la lista de servidores distribuyendo los
* cuadrantes según los factores de distribución (e.g., si debe enviar
* una fracción p_k de los cuadrantes al servidor k, enviará p_k*x
* cuadrantes al servidor k).
*
* Habiendo enviado los mensajes, simplemente espera las respuestas.
* Suponiendo alternativamente que hay que seguir generando cuadrantes
* mientras se toman fotos, se pueden tener internamente dos threads,
* uno acumulando cuadrantes y otro tomando cuadrantes y distribuyendolos.
*
* Para medir el tiempo de respuesta del observatorio se puede ir
* calculando una media móvil, tomando el tiempo que tarda en responder
* cada servidor.
*/
let targets = &self.distribution_scheme;
let mut stats : Stats = Stats::new();
loop {
let x = self.work_generation_rate;
info!("[C{}] Generando {} unidades de trabajo", self.id, x);
// Distribuimos los x cuadrantes generados.
let mut sid = 0;
for target in targets {
sid += 1;
let workload = ((x as f64)*(target.weight)) as u64;
let packet = Packet::from_iface(&self.host.nic, workload, self.id);
info!("[C{}] Enviando {} unidades al servidor {}", self.id, workload, sid);
target.virtual_link.send_through(packet);
}
// Esperamos la respuesta de cada servidor.
info!("[C{}] Esperando respuestas", self.id);
for _d in targets {
let _response = self.host.nic.read();
// Cálculo de tiempo de respuesta
let response_time_duration = _response.timestamp.elapsed().unwrap();
let response_time_ms = response_time_duration.as_secs() + ((response_time_duration.subsec_millis() * 1000) as u64);
stats.update_stats(response_time_ms);
}
// Impresión de estadística hasta el momento
info!("[C{}] Promedio de respuesta parcial: {} ms", self.id, format!("{:.*}", 2, stats.get_average()));
info!("[C{}] Todos los servidores terminaron de procesar el bache", self.id);
let sleep_time = (3000.0/GLOBAL_SPEED) as u64;
thread::sleep(time::Duration::from_millis(sleep_time));
}
}
}
/*==============================================================================
* Main
*
*/
const GLOBAL_SPEED: f64 = 1.0;
fn main() {
let args : Vec<String> = env::args().collect();
if args.len() > 1 && args[1] == "--debug" {
setup_file_logging().expect("Couldn't set up logger");
} else {
setup_terminal_logging().expect("Couldn't set up logger");
}
/*
* Load the configuration. The configuration is a text file with
* key-value pairs. The configuration object can be used as
*
* configuration.get("key") // returns the value associated with "key".
*/
info!("[T0] Loading configuration");
let mut configuration = configuration::Configuration::new();
configuration.load();
let mut threads = Vec::new();
let mut servers: Vec<Server> = Vec::new();
let mut clients: Vec<Client> = Vec::new();
info!("[T0] Initializing servers");
let server_data: Vec<ServerData> = configuration.get_server_dataset();
let mut server_count = 0;
for d in server_data {
server_count += 1;
servers.push(Server::new(server_count, d));
}
info!("[T0] Initializing clients");
let client_data: Vec<ClientData> = configuration.get_client_dataset();
let mut client_count = 0;
for c in client_data {
client_count += 1;
clients.push(Client::new(client_count, &servers, c));
}
info!("[T0] Launching server threads");
for server in servers {
let th = thread::spawn(move || {
server.run();
});
threads.push(th);
}
info!("[T0] Launching client threads");
for client in clients {
let th = thread::spawn(move || {
client.run();
});
threads.push(th);
}
info!("[T0] Waiting for the program to finish");
for th in threads {
th.join().unwrap();
}
}
| new | identifier_name |
main.rs | use std::string::ToString;
fn main() |
fn vectors() {
let v: Vec<i32> = Vec::new();
let mut v = vec![1, 2, 3];
match v.binary_search(&16) {
Ok(pos) => v.insert(pos, 16),
Err(_) => v.push(16)
}
match v.binary_search(&12) {
Ok(pos) => v.insert(pos, 12),
Err(pos) => v.insert(pos, 12)
}
println!("Binary Search -> {:?}", v);
let mut v = Vec::new();
v.push(5);
v.push(6);
v.push(7);
v.push(8);
let v = vec![1, 2, 3, 4, 5];
let third: &i32 = &v[2];
println!("The third element is {}", third);
match v.get(2) {
Some(third) => println!("The third element is {}", third),
None => println!("There is no third element."),
}
// When the program has a valid reference, the borrow checker enforces the ownership and
// borrowing rules (covered in Chapter 4) to ensure this reference and any other references to
// the contents of the vector remain valid. Recall the rule that states you can’t have mutable
// and immutable references in the same scope. That rule applies in Listing 8-7, where we hold
// an immutable reference to the first element in a vector and try to add an element to the end,
// which won’t work.
let mut v = vec![1, 2, 3, 4, 5];
let first = &v[0];
v.push(6);
//Below line causes Compilation Error
//println!("The first element is: {}", first);
// This error is due to the way vectors work: adding a new element onto the end of the vector
// might require allocating new memory and copying the old elements to the new space, if there
// isn’t enough room to put all the elements next to each other where the vector currently is.
// In that case, the reference to the first element would be pointing to deallocated memory.
// The borrowing rules prevent programs from ending up in that situation.
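// A small added sketch: the error above disappears if no reference is held
// across the push. Since i32 is Copy, indexing can copy the value out.
let mut v2 = vec![1, 2, 3, 4, 5];
let first2 = v2[0]; // copies the i32 instead of borrowing
v2.push(6); // fine: no outstanding borrow of v2
println!("The first element was {} and v2 is now {:?}", first2, v2);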
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
// To change the value that the mutable reference refers to, we have to use the dereference
// operator (*) to get to the value in i before we can use the += operator.
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
for i in &v {
println!("{}", i);
}
enum SpreadsheetCell {
Int(i32),
Float(f64),
Text(String),
}
let row = vec![
SpreadsheetCell::Int(3),
SpreadsheetCell::Text(String::from("blue")),
SpreadsheetCell::Float(10.12),
];
}
fn strings() {
let mut s = String::new();
let m = String::from("sdfsdf");
let data = "initial contents";
let s = data.to_string();
// the method also works on a literal directly:
let s = "initial contents".to_string();
let hello = String::from("السلام عليكم");
let hello = String::from("Dobrý den");
let hello = String::from("Hello");
let hello = String::from("שָׁלוֹם");
let hello = String::from("नमस्ते");
let hello = String::from("こんにちは");
let hello = String::from("안녕하세요");
let hello = String::from("你好");
let hello = String::from("Olá");
let hello = String::from("Здравствуйте");
let hello = String::from("Hola");
let mut s1 = String::from("foo");
let s2 = "bar";
s1.push_str(s2);
println!("s2 is {}", s2);
let mut s = String::from("lo");
s.push('l');
use std::ops::Add;
let s1 = String::from("Hello, ");
let s2 = String::from("world!");
// The reason we’re able to use &s2 in the call to add is that the compiler can coerce the
// &String argument into a &str. When we call the add method, Rust uses a deref coercion, which
// here turns &s2 into &s2[..]. We’ll discuss deref coercion in more depth in Chapter 15.
// Because add does not take ownership of the s parameter, s2 will still be a valid String after
// this operation.
// looks like it will copy both strings and create a new one, this statement actually takes
// ownership of s1, appends a copy of the contents of s2, and then returns ownership of the
// result. In other words, it looks like it’s making a lot of copies but isn’t; the
// implementation is more efficient than copying.
//let s3 = s1.add(&s2);
let s3 = s1 + &s2;
println!("{}", s3);
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
//let s = s1 + "-" + &s2 + "-" + &s3;
let s = format!("{}-{}-{}", s1, s2, s3);
println!("{}", s);
// The version of the code using format! is much easier to read and doesn’t take ownership of
// any of its parameters.
println!("{}", s1);
// A String is a wrapper over a Vec<u8>
let len = String::from("Hola").len();
// In this case, len will be 4, which means the vector storing the string “Hola” is 4 bytes long.
// Each of these letters takes 1 byte when encoded in UTF-8
println!("{}", len);
let len = String::from("Здравствуйте").len();
println!("{}", len);
// It takes 24 bytes to encode “Здравствуйте” in UTF-8, because each Unicode scalar value in that string
// takes 2 bytes of storage. Therefore, an index into the string’s bytes will not always
// correlate to a valid Unicode scalar value. To demonstrate, consider this invalid Rust code:
// let hello = "Здравствуйте";
// let answer = &hello[0];
// println!("{}", answer);
// error[E0277]: the type `str` cannot be indexed by `{integer}`
// Another point about UTF-8 is that there are actually three relevant ways to look at strings
// from Rust’s perspective: as bytes, scalar values, and grapheme clusters (the closest thing to
// what we would call letters).
// “नमस्ते”
// Bytes: [224, 164, 168, 224, 164, 174, 224, 164, 184, 224, 165, 141, 224, 164, 164, 224, 165, 135]
// Unicode scalar values (Rust's char type): ['न', 'म', 'स', '्', 'त', 'े']
// There are six char values here, but the fourth and sixth are not letters: they’re diacritics
// that don’t make sense on their own
// Grapheme clusters: ["न", "म", "स्", "ते"]
let namaste = "नमस्ते";
println!("{}", &namaste[0..12]);
let hello = "Здравствуйте";
let s = &hello[0..4];
println!("{}", s);
for c in "नमस्ते".chars() {
println!("{}", c);
}
for b in "नमस्ते".bytes() {
print!("{},", b);
}
// But be sure to remember that valid Unicode scalar values may be made up of more than 1 byte.
// Getting grapheme clusters from strings is complex, so this functionality is not provided by
// the standard library. Crates are available on crates.io if this is the functionality you need.
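// Added sketch: byte length versus scalar-value count for `namaste` above
// (18 UTF-8 bytes, but only 6 char values).
println!("bytes = {}, chars = {}", namaste.len(), namaste.chars().count());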
}
fn hashmaps() {
use std::collections::HashMap;
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// Note that we need to first use the HashMap from the collections portion of the standard
// library. Of our three common collections, this one is the least often used, so it’s not
// included in the features brought into scope automatically in the prelude.
// The type annotation HashMap<_, _> is needed here because it’s possible to collect into many
// different data structures and Rust doesn’t know which you want unless you specify. For the
// parameters for the key and value types, however, we use underscores, and Rust can infer the
// types that the hash map contains based on the types of the data in the vectors.
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
println!("");
let scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
for (k, v) in &scores {
println!("{},{}", k, v);
}
let score = scores.get(&String::from("Blue"));
match score {
Some(s) => println!("{}", s),
None => ()
}
// For types that implement the Copy trait, like i32, the values are copied into the hash map.
// For owned values like String, the values will be moved and the hash map will be the owner of
// those values
let field_name = String::from("Favorite color");
let field_value = String::from("Blue");
let mut map = HashMap::new();
map.insert(field_name, field_value);
//error[E0382]: borrow of moved value: `field_name`
//println!("{}", field_name);
// If we insert references to values into the hash map, the values won’t be moved into the hash
// map. The values that the references point to must be valid for at least as long as the hash
// map is valid.
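// Added sketch of the reference-based alternative just described: insert
// &str views and the owned Strings stay usable afterwards (the map must
// not outlive them).
let color_key = String::from("Favorite color");
let color_value = String::from("Blue");
let mut ref_map: HashMap<&str, &str> = HashMap::new();
ref_map.insert(&color_key, &color_value);
println!("{} -> {:?}", color_key, ref_map.get(color_key.as_str()));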
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// Here, score will have the value that’s associated with the Blue team, and the result will be
// Some(&10). The result is wrapped in Some because get returns an Option<&V>
let team_name = String::from("Blue");
// get borrows key so its passed using &
let score = scores.get(&team_name);
match score {
Some(num) => println!("{}", num),
None => ()
}
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
for (key, value) in &scores {
println!("{}: {}", key, value);
}
// Overwriting a Value
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Blue"), 25);
println!("{:?}", scores);
// Only Inserting a Value If the Key Has No Value
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.entry(String::from("Yellow")).or_insert(50);
scores.entry(String::from("Blue")).or_insert(50);
println!("{:?}", scores);
// Updating a Value Based on the Old Value
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(0);
*count += 1;
}
println!("{:?}", map);
// The or_insert method actually returns a mutable reference (&mut V) to the value for this key.
// Here we store that mutable reference in the count variable, so in order to assign to that
// value, we must first dereference count using the asterisk (*). The mutable reference goes out
// of scope at the end of the for loop, so all of these changes are safe and allowed by the
// borrowing rules.
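// Added sketch: the same word count written with the entry API's
// and_modify/or_insert pair, which makes the two cases explicit.
let mut map2 = HashMap::new();
for word in text.split_whitespace() {
map2.entry(word).and_modify(|c| *c += 1).or_insert(1);
}
assert_eq!(map, map2);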
// Hashing Functions
// By default, HashMap uses a “cryptographically strong”1 hashing function that can provide
// resistance to Denial of Service (DoS) attacks. This is not the fastest hashing algorithm
// available, but the trade-off for better security that comes with the drop in performance is
// worth it. If you profile your code and find that the default hash function is too slow for
// your purposes, you can switch to another function by specifying a different hasher. A hasher
// is a type that implements the BuildHasher trait. We’ll talk about traits and how to implement
// them in Chapter 10. You don’t necessarily have to implement your own hasher from scratch;
// crates.io has libraries shared by other Rust users that provide hashers implementing many
// common hashing algorithms.
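// Added sketch: the hasher is just a type parameter. RandomState is the
// default std hasher; a BuildHasher from a crates.io library would slot
// into with_hasher the same way.
use std::collections::hash_map::RandomState;
let mut keyed: HashMap<&str, i32, RandomState> = HashMap::with_hasher(RandomState::new());
keyed.insert("example", 1);
println!("{:?}", keyed);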
} | {
vectors();
strings();
hashmaps();
} | identifier_body |
main.rs | use std::string::ToString;
fn main() {
vectors();
strings();
hashmaps();
}
fn vectors() {
let v: Vec<i32> = Vec::new();
let mut v = vec![1, 2, 3];
match v.binary_search(&16) {
Ok(pos) => v.insert(pos, 16),
Err(_) => v.push(16)
}
match v.binary_search(&12) {
Ok(pos) => v.insert(pos, 12),
Err(pos) => v.insert(pos, 12)
}
println!("Binary Search -> {:?}", v);
let mut v = Vec::new();
v.push(5);
v.push(6);
v.push(7);
v.push(8);
let v = vec![1, 2, 3, 4, 5];
let third: &i32 = &v[2];
println!("The third element is {}", third);
match v.get(2) {
Some(third) => println!("The third element is {}", third),
None => println!("There is no third element."),
}
// When the program has a valid reference, the borrow checker enforces the ownership and
// borrowing rules (covered in Chapter 4) to ensure this reference and any other references to
// the contents of the vector remain valid. Recall the rule that states you can’t have mutable
// and immutable references in the same scope. That rule applies in Listing 8-7, where we hold
// an immutable reference to the first element in a vector and try to add an element to the end,
// which won’t work.
let mut v = vec![1, 2, 3, 4, 5];
let first = &v[0];
v.push(6);
//Below line causes Compilation Error
//println!("The first element is: {}", first);
// This error is due to the way vectors work: adding a new element onto the end of the vector
// might require allocating new memory and copying the old elements to the new space, if there
// isn’t enough room to put all the elements next to each other where the vector currently is.
// In that case, the reference to the first element would be pointing to deallocated memory.
// The borrowing rules prevent programs from ending up in that situation.
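// Added sketch: with non-lexical lifetimes, a borrow ends at its last use,
// so reading the reference before the push also compiles.
let mut v3 = vec![1, 2, 3, 4, 5];
let first3 = &v3[0];
println!("The first element is {}", first3); // last use of the borrow
v3.push(6); // fine: the immutable borrow is no longer live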
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
// To change the value that the mutable reference refers to, we have to use the dereference
// operator (*) to get to the value in i before we can use the += operator.
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
for i in &v {
println!("{}", i);
}
enum SpreadsheetCell {
Int(i32),
Float(f64),
Text(String),
}
let row = vec![
SpreadsheetCell::Int(3),
SpreadsheetCell::Text(String::from("blue")),
SpreadsheetCell::Float(10.12),
];
}
fn strings() {
let mut s = String::new();
let m = String::from("sdfsdf");
let data = "initial contents";
let s = data.to_string();
// the method also works on a literal directly:
let s = "initial contents".to_string();
let hello = String::from("السلام عليكم"); | let hello = String::from("שָׁלוֹם");
let hello = String::from("नमस्ते");
let hello = String::from("こんにちは");
let hello = String::from("안녕하세요");
let hello = String::from("你好");
let hello = String::from("Olá");
let hello = String::from("Здравствуйте");
let hello = String::from("Hola");
let mut s1 = String::from("foo");
let s2 = "bar";
s1.push_str(s2);
println!("s2 is {}", s2);
let mut s = String::from("lo");
s.push('l');
use std::ops::Add;
let s1 = String::from("Hello, ");
let s2 = String::from("world!");
// The reason we’re able to use &s2 in the call to add is that the compiler can coerce the
// &String argument into a &str. When we call the add method, Rust uses a deref coercion, which
// here turns &s2 into &s2[..]. We’ll discuss deref coercion in more depth in Chapter 15.
// Because add does not take ownership of the s parameter, s2 will still be a valid String after
// this operation.
// looks like it will copy both strings and create a new one, this statement actually takes
// ownership of s1, appends a copy of the contents of s2, and then returns ownership of the
// result. In other words, it looks like it’s making a lot of copies but isn’t; the
// implementation is more efficient than copying.
//let s3 = s1.add(&s2);
let s3 = s1 + &s2;
println!("{}", s3);
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
//let s = s1 + "-" + &s2 + "-" + &s3;
let s = format!("{}-{}-{}", s1, s2, s3);
println!("{}", s);
// The version of the code using format! is much easier to read and doesn’t take ownership of
// any of its parameters.
println!("{}", s1);
// A String is a wrapper over a Vec<u8>
let len = String::from("Hola").len();
// In this case, len will be 4, which means the vector storing the string “Hola” is 4 bytes long.
// Each of these letters takes 1 byte when encoded in UTF-8
println!("{}", len);
let len = String::from("Здравствуйте").len();
println!("{}", len);
// It takes 24 bytes to encode “Здравствуйте” in UTF-8, because each Unicode scalar value in that string
// takes 2 bytes of storage. Therefore, an index into the string’s bytes will not always
// correlate to a valid Unicode scalar value. To demonstrate, consider this invalid Rust code:
// let hello = "Здравствуйте";
// let answer = &hello[0];
// println!("{}", answer);
// error[E0277]: the type `str` cannot be indexed by `{integer}`
// Another point about UTF-8 is that there are actually three relevant ways to look at strings
// from Rust’s perspective: as bytes, scalar values, and grapheme clusters (the closest thing to
// what we would call letters).
// “नमस्ते”
// Bytes: [224, 164, 168, 224, 164, 174, 224, 164, 184, 224, 165, 141, 224, 164, 164, 224, 165, 135]
// Unicode scalar values (Rust's char type): ['न', 'म', 'स', '्', 'त', 'े']
// There are six char values here, but the fourth and sixth are not letters: they’re diacritics
// that don’t make sense on their own
// Grapheme clusters: ["न", "म", "स्", "ते"]
let namaste = "नमस्ते";
println!("{}", &namaste[0..12]);
let hello = "Здравствуйте";
let s = &hello[0..4];
println!("{}", s);
for c in "नमस्ते".chars() {
println!("{}", c);
}
for b in "नमस्ते".bytes() {
print!("{},", b);
}
// But be sure to remember that valid Unicode scalar values may be made up of more than 1 byte.
// Getting grapheme clusters from strings is complex, so this functionality is not provided by
// the standard library. Crates are available on crates.io if this is the functionality you need.
}
fn hashmaps() {
use std::collections::HashMap;
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// Note that we need to first use the HashMap from the collections portion of the standard
// library. Of our three common collections, this one is the least often used, so it’s not
// included in the features brought into scope automatically in the prelude.
// The type annotation HashMap<_, _> is needed here because it’s possible to collect into many
// different data structures and Rust doesn’t know which you want unless you specify. For the
// parameters for the key and value types, however, we use underscores, and Rust can infer the
// types that the hash map contains based on the types of the data in the vectors.
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
println!("");
let scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
for (k, v) in &scores {
println!("{},{}", k, v);
}
let score = scores.get(&String::from("Blue"));
match score {
Some(s) => println!("{}", s),
None => ()
}
// For types that implement the Copy trait, like i32, the values are copied into the hash map.
// For owned values like String, the values will be moved and the hash map will be the owner of
// those values
let field_name = String::from("Favorite color");
let field_value = String::from("Blue");
let mut map = HashMap::new();
map.insert(field_name, field_value);
//error[E0382]: borrow of moved value: `field_name`
//println!("{}", field_name);
// If we insert references to values into the hash map, the values won’t be moved into the hash
// map. The values that the references point to must be valid for at least as long as the hash
// map is valid.
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// Here, score will have the value that’s associated with the Blue team, and the result will be
// Some(&10). The result is wrapped in Some because get returns an Option<&V>
let team_name = String::from("Blue");
// get borrows the key, so it's passed using &
let score = scores.get(&team_name);
match score {
Some(num) => println!("{}", num),
None => ()
}
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
for (key, value) in &scores {
println!("{}: {}", key, value);
}
// Overwriting a Value
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Blue"), 25);
println!("{:?}", scores);
// Only Inserting a Value If the Key Has No Value
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.entry(String::from("Yellow")).or_insert(50);
scores.entry(String::from("Blue")).or_insert(50);
println!("{:?}", scores);
// Updating a Value Based on the Old Value
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(0);
*count += 1;
}
println!("{:?}", map);
// The or_insert method actually returns a mutable reference (&mut V) to the value for this key.
// Here we store that mutable reference in the count variable, so in order to assign to that
// value, we must first dereference count using the asterisk (*). The mutable reference goes out
// of scope at the end of the for loop, so all of these changes are safe and allowed by the
// borrowing rules.
// Hashing Functions
// By default, HashMap uses a “cryptographically strong” hashing function that can provide
// resistance to Denial of Service (DoS) attacks. This is not the fastest hashing algorithm
// available, but the trade-off for better security that comes with the drop in performance is
// worth it. If you profile your code and find that the default hash function is too slow for
// your purposes, you can switch to another function by specifying a different hasher. A hasher
// is a type that implements the BuildHasher trait. We’ll talk about traits and how to implement
// them in Chapter 10. You don’t necessarily have to implement your own hasher from scratch;
// crates.io has libraries shared by other Rust users that provide hashers implementing many
// common hashing algorithms.
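// A minimal sketch of supplying a hasher explicitly; RandomState is the
// default BuildHasher in std and only stands in here for a third-party hasher:
use std::collections::hash_map::RandomState;
let mut custom: HashMap<&str, i32, RandomState> = HashMap::with_hasher(RandomState::new());
custom.insert("answer", 42);
println!("{:?}", custom);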
} | let hello = String::from("Dobrý den");
let hello = String::from("Hello"); | random_line_split |
main.rs | use std::string::ToString;
fn main() {
vectors();
strings();
hashmaps();
}
fn vectors() {
let v: Vec<i32> = Vec::new();
let mut v = vec![1, 2, 3];
match v.binary_search(&16) {
Ok(pos) => v.insert(pos, 16),
Err(_) => v.push(16)
}
match v.binary_search(&12) {
Ok(pos) => v.insert(pos, 12),
Err(pos) => v.insert(pos, 12)
}
println!("Binary Search -> {:?}", v);
let mut v = Vec::new();
v.push(5);
v.push(6);
v.push(7);
v.push(8);
let v = vec![1, 2, 3, 4, 5];
let third: &i32 = &v[2];
println!("The third element is {}", third);
match v.get(2) {
Some(third) => println!("The third element is {}", third),
None => println!("There is no third element."),
}
// When the program has a valid reference, the borrow checker enforces the ownership and
// borrowing rules (covered in Chapter 4) to ensure this reference and any other references to
// the contents of the vector remain valid. Recall the rule that states you can’t have mutable
// and immutable references in the same scope. That rule applies in Listing 8-7, where we hold
// an immutable reference to the first element in a vector and try to add an element to the end,
// which won’t work.
let mut v = vec![1, 2, 3, 4, 5];
let first = &v[0];
v.push(6);
//Below line causes Compilation Error
//println!("The first element is: {}", first);
// This error is due to the way vectors work: adding a new element onto the end of the vector
// might require allocating new memory and copying the old elements to the new space, if there
// isn’t enough room to put all the elements next to each other where the vector currently is.
// In that case, the reference to the first element would be pointing to deallocated memory.
// The borrowing rules prevent programs from ending up in that situation.
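// A small demonstration of reallocation; the exact capacity values are an
// implementation detail and may vary between Rust versions:
let mut grow = Vec::with_capacity(1);
println!("capacity before: {}", grow.capacity());
grow.push(1);
grow.push(2); // exceeds the initial capacity, forcing a reallocation
println!("capacity after: {}", grow.capacity());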
let v = vec![100, 32, 57];
for i in &v {
println!("{}", i);
}
// To change the value that the mutable reference refers to, we have to use the dereference
// operator (*) to get to the value in i before we can use the += operator.
let mut v = vec![100, 32, 57];
for i in &mut v {
*i += 50;
}
for i in &v {
println!("{}", i);
}
enum SpreadsheetCell {
Int(i32),
Float(f64),
Text(String),
}
let row = vec![
SpreadsheetCell::Int(3),
SpreadsheetCell::Text(String::from("blue")),
SpreadsheetCell::Float(10.12),
];
}
fn string | let mut s = String::new();
let m = String::from("sdfsdf");
let data = "initial contents";
let s = data.to_string();
// the method also works on a literal directly:
let s = "initial contents".to_string();
let hello = String::from("السلام عليكم");
let hello = String::from("Dobrý den");
let hello = String::from("Hello");
let hello = String::from("שָׁלוֹם");
let hello = String::from("नमस्ते");
let hello = String::from("こんにちは");
let hello = String::from("안녕하세요");
let hello = String::from("你好");
let hello = String::from("Olá");
let hello = String::from("Здравствуйте");
let hello = String::from("Hola");
let mut s1 = String::from("foo");
let s2 = "bar";
s1.push_str(s2);
println!("s2 is {}", s2);
let mut s = String::from("lo");
s.push('l');
use std::ops::Add;
let s1 = String::from("Hello, ");
let s2 = String::from("world!");
// The reason we’re able to use &s2 in the call to add is that the compiler can coerce the
// &String argument into a &str. When we call the add method, Rust uses a deref coercion, which
// here turns &s2 into &s2[..]. We’ll discuss deref coercion in more depth in Chapter 15.
// Because add does not take ownership of the s parameter, s2 will still be a valid String after
// this operation.
// looks like it will copy both strings and create a new one, this statement actually takes
// ownership of s1, appends a copy of the contents of s2, and then returns ownership of the
// result. In other words, it looks like it’s making a lot of copies but isn’t; the
// implementation is more efficient than copying.
//let s3 = s1.add(&s2);
let s3 = s1 + &s2;
println!("{}", s3);
let s1 = String::from("tic");
let s2 = String::from("tac");
let s3 = String::from("toe");
//let s = s1 + "-" + &s2 + "-" + &s3;
let s = format!("{}-{}-{}", s1, s2, s3);
println!("{}", s);
// The version of the code using format! is much easier to read and doesn’t take ownership of
// any of its parameters.
println!("{}", s1);
// A String is a wrapper over a Vec<u8>
let len = String::from("Hola").len();
// In this case, len will be 4, which means the vector storing the string “Hola” is 4 bytes long.
// Each of these letters takes 1 byte when encoded in UTF-8
println!("{}", len);
let len = String::from("Здравствуйте").len();
println!("{}", len);
// It takes 24 bytes to encode “Здравствуйте” in UTF-8, because each Unicode scalar value in that string
// takes 2 bytes of storage. Therefore, an index into the string’s bytes will not always
// correlate to a valid Unicode scalar value. To demonstrate, consider this invalid Rust code:
// let hello = "Здравствуйте";
// let answer = &hello[0];
// println!("{}", answer);
// error[E0277]: the type `str` cannot be indexed by `{integer}`
// Another point about UTF-8 is that there are actually three relevant ways to look at strings
// from Rust’s perspective: as bytes, scalar values, and grapheme clusters (the closest thing to
// what we would call letters).
// “नमस्ते”
// Bytes: [224, 164, 168, 224, 164, 174, 224, 164, 184, 224, 165, 141, 224, 164, 164, 224, 165, 135]
// Unicode scalar values (Rust's char type): ['न', 'म', 'स', '्', 'त', 'े']
// There are six char values here, but the fourth and sixth are not letters: they’re diacritics
// that don’t make sense on their own
// Grapheme clusters: ["न", "म", "स्", "ते"]
let namaste = "नमस्ते";
println!("{}", &namaste[0..12]);
let hello = "Здравствуйте";
let s = &hello[0..4];
println!("{}", s);
for c in "नमस्ते".chars() {
println!("{}", c);
}
for b in "नमस्ते".bytes() {
print!("{},", b);
}
// But be sure to remember that valid Unicode scalar values may be made up of more than 1 byte.
// Getting grapheme clusters from strings is complex, so this functionality is not provided by
// the standard library. Crates are available on crates.io if this is the functionality you need.
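// Counting the two cheaper views with std alone (grapheme clusters still
// require an external crate):
let word = "नमस्ते";
println!("{} chars, {} bytes", word.chars().count(), word.len()); // 6 chars, 18 bytes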
}
fn hashmaps() {
use std::collections::HashMap;
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// Note that we need to first use the HashMap from the collections portion of the standard
// library. Of our three common collections, this one is the least often used, so it’s not
// included in the features brought into scope automatically in the prelude.
// The type annotation HashMap<_, _> is needed here because it’s possible to collect into many
// different data structures and Rust doesn’t know which you want unless you specify. For the
// parameters for the key and value types, however, we use underscores, and Rust can infer the
// types that the hash map contains based on the types of the data in the vectors.
let teams = vec![String::from("Blue"), String::from("Yellow")];
let initial_scores = vec![10, 50];
println!("");
let scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect();
for (k, v) in &scores {
println!("{},{}", k, v);
}
let score = scores.get(&String::from("Blue"));
match score {
Some(s) => println!("{}", s),
None => ()
}
// For types that implement the Copy trait, like i32, the values are copied into the hash map.
// For owned values like String, the values will be moved and the hash map will be the owner of
// those values
let field_name = String::from("Favorite color");
let field_value = String::from("Blue");
let mut map = HashMap::new();
map.insert(field_name, field_value);
//error[E0382]: borrow of moved value: `field_name`
//println!("{}", field_name);
// If we insert references to values into the hash map, the values won’t be moved into the hash
// map. The values that the references point to must be valid for at least as long as the hash
// map is valid.
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
// Here, score will have the value that’s associated with the Blue team, and the result will be
// Some(&10). The result is wrapped in Some because get returns an Option<&V>
let team_name = String::from("Blue");
// get borrows the key, so it's passed using &
let score = scores.get(&team_name);
match score {
Some(num) => println!("{}", num),
None => ()
}
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Yellow"), 50);
for (key, value) in &scores {
println!("{}: {}", key, value);
}
// Overwriting a Value
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.insert(String::from("Blue"), 25);
println!("{:?}", scores);
// Only Inserting a Value If the Key Has No Value
let mut scores = HashMap::new();
scores.insert(String::from("Blue"), 10);
scores.entry(String::from("Yellow")).or_insert(50);
scores.entry(String::from("Blue")).or_insert(50);
println!("{:?}", scores);
// Updating a Value Based on the Old Value
let text = "hello world wonderful world";
let mut map = HashMap::new();
for word in text.split_whitespace() {
let count = map.entry(word).or_insert(0);
*count += 1;
}
println!("{:?}", map);
// The or_insert method actually returns a mutable reference (&mut V) to the value for this key.
// Here we store that mutable reference in the count variable, so in order to assign to that
// value, we must first dereference count using the asterisk (*). The mutable reference goes out
// of scope at the end of the for loop, so all of these changes are safe and allowed by the
// borrowing rules.
// Hashing Functions
// By default, HashMap uses a “cryptographically strong” hashing function that can provide
// resistance to Denial of Service (DoS) attacks. This is not the fastest hashing algorithm
// available, but the trade-off for better security that comes with the drop in performance is
// worth it. If you profile your code and find that the default hash function is too slow for
// your purposes, you can switch to another function by specifying a different hasher. A hasher
// is a type that implements the BuildHasher trait. We’ll talk about traits and how to implement
// them in Chapter 10. You don’t necessarily have to implement your own hasher from scratch;
// crates.io has libraries shared by other Rust users that provide hashers implementing many
// common hashing algorithms.
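// A minimal sketch of the BuildHasher plumbing using only std types;
// DefaultHasher stands in for a custom hasher implementing Hasher + Default:
use std::hash::BuildHasherDefault;
use std::collections::hash_map::DefaultHasher;
let mut alt: HashMap<&str, i32, BuildHasherDefault<DefaultHasher>> = HashMap::default();
alt.insert("k", 1);
println!("{:?}", alt);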
} | s() {
| identifier_name |
main.rs | use std::fmt;
use std::collections::{VecDeque, HashSet};
use intcode::{Word, util::{parse_stdin_program, GameDisplay}, Program, Registers, ExecutionState};
fn main() {
let mut robot = Robot::new(parse_stdin_program());
let mut gd: GameDisplay<Tile> = GameDisplay::default();
gd.insert(robot.position(), Tile::Empty);
let mut oxygen_at = None;
{
let mut work = VecDeque::new();
work.push_back(*robot.position());
while let Some(root) = work.pop_front() {
// perhaps robot.travel(&root, &gd)?
while root != *robot.position() {
match path_to(&gd, robot.position(), &root) {
Some(directions) => {
for d in directions {
let prev = *robot.position();
let (moved_to, _) = robot.try_move(d);
assert_ne!(prev, moved_to);
}
},
None => panic!("Cannot get from {:?} to {:?}", robot.position(), root),
}
}
let unexplored = Direction::all()
.map(|d| (d, root.step_in_direction(&d)))
.filter_map(|(d, p)| match gd.get(&p) {
Some(Tile::Unexplored) | None => Some((d, p)),
Some(_) => None,
})
.collect::<Vec<_>>();
for (d, target) in unexplored {
let (ended_up, tile) = robot.try_move(d);
if tile == Tile::Oxygen {
assert!(oxygen_at.is_none());
oxygen_at = Some(ended_up);
}
if target == ended_up {
gd.insert(&target, tile);
// pushing to the same side as we pop decreases the amount of running around
// on the map, so prefer depth first
work.push_front(target);
let (back_at, _) = robot.try_move(d.reverse());
assert_eq!(back_at, root);
} else {
gd.insert(&target, tile);
}
}
}
}
println!("oxygen at: {:?}", oxygen_at);
println!("robot moves: {}", robot.moves);
println!("stage1: {}", path_to(&gd, &( 0, 0), oxygen_at.as_ref().unwrap()).unwrap().len());
{
// stage2 is a breadth-first fill from the oxygen: mark the coordinates and push all newly
// marked ones to the queue
let mut frontier = VecDeque::new();
let mut oxygen = HashSet::new();
oxygen.insert(oxygen_at.unwrap());
frontier.push_back((oxygen_at.unwrap(), 0));
let mut prev_time = 0;
let mut minutes = 0;
while let Some((p1, time)) = frontier.pop_front() {
oxygen.insert(p1);
if prev_time != time {
assert!(prev_time < time, "{} should be less than {}", prev_time, time);
prev_time = time;
minutes += 1;
println!("{:>3} minutes... {} slots oxygenated", minutes, oxygen.len());
}
let unoxinated = Direction::all()
.map(|d| p1.step_in_direction(&d))
.filter_map(|p| match gd.get(&p) {
Some(Tile::Empty) => Some(p),
Some(_) | None => None,
})
.filter(|p| !oxygen.contains(&p))
.collect::<Vec<_>>();
for p2 in unoxinated {
frontier.push_back((p2, time + 1));
}
}
println!("stage2: {}", minutes);
}
}
/// Wasteful Dijkstra... could share the hashmaps across queries maybe?
fn | (gd: &GameDisplay<Tile>, pos: &(Word, Word), target: &(Word, Word)) -> Option<Vec<Direction>> {
//println!("path_to: {:?} to {:?}", pos, target);
use std::collections::{HashMap, BinaryHeap};
use std::collections::hash_map::Entry;
use std::cmp;
let mut ret = Vec::new();
let mut work = BinaryHeap::new();
let mut dist = HashMap::new();
let mut prev = HashMap::new();
work.push(cmp::Reverse((0, *pos)));
while let Some(cmp::Reverse((steps_here, p))) = work.pop() {
//println!("path_to: popped {:?}", (p, steps_here));
if p == *target {
//println!("path_to: found target {:?}", p);
let mut backwards = p;
ret.push(p);
while backwards != *pos {
let previous = prev.remove(&backwards).unwrap();
ret.push(previous);
backwards = previous;
}
ret.reverse();
let dirs = ret.windows(2)
.map(|slice| {
let a = slice[0];
let b = slice[1];
let d = (b.0 - a.0, b.1 - a.1);
match d {
( 0,-1) => Direction::Down,
( 0, 1) => Direction::Up,
(-1, 0) => Direction::Left,
( 1, 0) => Direction::Right,
x => unreachable!("cannot have this {:?} between {:?} and {:?}", x, a, b),
}
}).collect();
return Some(dirs);
}
match dist.entry(p) {
Entry::Vacant(vcnt) => {
vcnt.insert(steps_here);
},
Entry::Occupied(mut o) => {
if *o.get() >= steps_here {
*o.get_mut() = steps_here;
} else {
println!("already visited {:?} with lower dist {} than {} from {:?}", p, o.get(), steps_here, prev[&p]);
continue;
}
}
}
for (p2, dir) in adjacent(gd, &p) {
let alt = steps_here + 1;
if alt < *dist.get(&p2).unwrap_or(&usize::max_value()) {
//println!(" {:?} --{:?}--> {:?}", p, dir, p2);
dist.insert(p2, alt);
prev.insert(p2, p);
work.push(cmp::Reverse((alt, p2)));
}
}
}
None
}
#[test]
fn test_path_to() {
use Direction::*;
let mut gd: GameDisplay<Tile> = GameDisplay::default();
gd.insert(&(-1, 0), Tile::Wall);
gd.insert(&(-1,-1), Tile::Wall);
gd.insert(&( 0,-1), Tile::Wall);
gd.insert(&( 2, 0), Tile::Wall);
gd.insert(&( 2,-1), Tile::Wall);
gd.insert(&( 0, 0), Tile::Empty); // right
gd.insert(&( 1, 0), Tile::Empty); // down
gd.insert(&( 1, 1), Tile::Empty); // down
gd.insert(&( 1, 2), Tile::Empty); // down
gd.insert(&( 1, 3), Tile::Empty); // down
gd.insert(&( 1, 4), Tile::Empty); // down
gd.insert(&( 2, 4), Tile::Empty); // down
gd.insert(&( 3, 4), Tile::Empty); // down
gd.insert(&( 4, 4), Tile::Empty); // down
gd.insert(&( 4, 3), Tile::Empty); // down
gd.insert(&( 4, 2), Tile::Empty); // down
gd.insert(&( 4, 1), Tile::Empty); // down
gd.insert(&( 1,-1), Tile::Empty); // down
gd.insert(&( 1,-2), Tile::Empty); // down
gd.insert(&( 2,-2), Tile::Empty); // right
gd.insert(&( 3,-2), Tile::Empty); // right
gd.insert(&( 3,-1), Tile::Empty);
gd.insert(&( 3, 0), Tile::Empty);
gd.insert(&( 4, 0), Tile::Empty);
println!("{}", gd);
assert_eq!(vec![Right, Down, Down, Right, Right, Up, Up, Right], path_to(&gd, &( 0, 0), &( 4, 0)).unwrap());
}
fn adjacent<'a>(gd: &'a GameDisplay<Tile>, pos: &'a (Word, Word)) -> impl Iterator<Item = ((Word, Word), Direction)> + 'a {
Direction::all()
.into_iter()
.map(move |d| (pos.step_in_direction(&d), d))
.filter_map(move |(p2, d)| gd.get(&p2).map(|t| (p2, d, t)))
//.inspect(|x| println!(" c: {:?}", x))
.filter_map(|(p2, d, t)| match t {
&Tile::Empty | &Tile::Robot | &Tile::Oxygen => Some((p2, d)),
_ => None,
})
//.inspect(|x| println!(" d: {:?}", x))
}
struct Robot {
program: Program<'static>,
regs: Option<Registers>,
pos: (Word, Word),
moves: usize,
}
impl Robot {
fn new(data: Vec<Word>) -> Self {
let mem = intcode::Memory::from(data).with_memory_expansion();
let program = Program::from(mem);
Robot {
program,
regs: Some(Registers::default()),
pos: (0, 0),
moves: 0,
}
}
fn position(&self) -> &(Word, Word) {
&self.pos
}
fn try_move(&mut self, dir: Direction) -> ((Word, Word), Tile) {
loop {
let mut ret = None;
self.regs = Some(match self.program.eval_from_instruction(self.regs.take().unwrap()).unwrap() {
ExecutionState::HaltedAt(regs) => unreachable!("Halted at: {:?}", regs),
ExecutionState::Paused(regs) => unreachable!("Paused? {:?}", regs),
ExecutionState::InputIO(io) => {
let val: i64 = dir.into();
//println!("robot <-- {}", val);
self.program.handle_input_completion(io, val).unwrap()
},
ExecutionState::OutputIO(io, value) => {
//println!("robot --> {}", value);
let moved = value != 0;
let found = value == 2;
let tile = if found { Tile::Oxygen } else if moved { Tile::Empty } else { Tile::Wall };
let prev = self.pos;
if moved {
self.pos = self.pos.step_in_direction(&dir);
self.moves += 1;
}
// println!("robot movement from {:?} to {:?} ended up {:?}", prev, dir, self.pos);
ret = Some((self.pos, tile));
self.program.handle_output_completion(io)
},
});
if let Some((pos, tile)) = ret {
return (pos, tile);
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Direction {
Up,
Right,
Down,
Left
}
impl Into<Word> for Direction {
fn into(self) -> Word {
match self {
Direction::Up => 1,
Direction::Right => 3,
Direction::Down => 2,
Direction::Left => 4,
}
}
}
impl Direction {
fn all() -> impl Iterator<Item = Direction> {
use Direction::*;
[Up, Right, Down, Left].iter().copied()
}
fn reverse(&self) -> Direction {
use Direction::*;
match *self {
Up => Down,
Right => Left,
Down => Up,
Left => Right,
}
}
}
trait Coordinates {
fn step_in_direction(&self, dir: &Direction) -> Self;
}
impl Coordinates for (Word, Word) {
fn step_in_direction(&self, dir: &Direction) -> Self {
match *dir {
Direction::Up => (self.0, self.1 + 1),
Direction::Right => (self.0 + 1, self.1),
Direction::Down => (self.0, self.1 - 1),
Direction::Left => (self.0 - 1, self.1),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Tile {
Wall,
Empty,
Oxygen,
Robot,
Unexplored
}
impl Default for Tile {
fn default() -> Self {
Tile::Unexplored
}
}
impl fmt::Display for Tile {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let ch = match *self {
Tile::Wall => '#',
Tile::Empty => ' ',
Tile::Oxygen => 'o',
Tile::Robot => 'R',
Tile::Unexplored => '?'
};
write!(fmt, "{}", ch)
}
}
| path_to | identifier_name |
main.rs | use std::fmt;
use std::collections::{VecDeque, HashSet};
use intcode::{Word, util::{parse_stdin_program, GameDisplay}, Program, Registers, ExecutionState};
fn main() {
let mut robot = Robot::new(parse_stdin_program());
let mut gd: GameDisplay<Tile> = GameDisplay::default();
gd.insert(robot.position(), Tile::Empty);
let mut oxygen_at = None;
{
let mut work = VecDeque::new();
work.push_back(*robot.position());
while let Some(root) = work.pop_front() {
// perhaps robot.travel(&root, &gd)?
while root != *robot.position() {
match path_to(&gd, robot.position(), &root) {
Some(directions) => {
for d in directions {
let prev = *robot.position();
let (moved_to, _) = robot.try_move(d);
assert_ne!(prev, moved_to);
}
},
None => panic!("Cannot get from {:?} to {:?}", robot.position(), root),
}
}
let unexplored = Direction::all()
.map(|d| (d, root.step_in_direction(&d)))
.filter_map(|(d, p)| match gd.get(&p) {
Some(Tile::Unexplored) | None => Some((d, p)),
Some(_) => None,
})
.collect::<Vec<_>>();
for (d, target) in unexplored {
let (ended_up, tile) = robot.try_move(d);
if tile == Tile::Oxygen {
assert!(oxygen_at.is_none());
oxygen_at = Some(ended_up);
}
if target == ended_up {
gd.insert(&target, tile);
// pushing to the same side as we pop decreases the amount of running around
// on the map, so prefer depth first
work.push_front(target);
let (back_at, _) = robot.try_move(d.reverse());
assert_eq!(back_at, root);
} else {
gd.insert(&target, tile);
}
}
}
}
println!("oxygen at: {:?}", oxygen_at);
println!("robot moves: {}", robot.moves);
println!("stage1: {}", path_to(&gd, &( 0, 0), oxygen_at.as_ref().unwrap()).unwrap().len());
{
// stage2 is a breadth-first fill from the oxygen: mark the coordinates and push all newly
// marked ones to the queue
let mut frontier = VecDeque::new();
let mut oxygen = HashSet::new();
oxygen.insert(oxygen_at.unwrap());
frontier.push_back((oxygen_at.unwrap(), 0));
let mut prev_time = 0;
let mut minutes = 0;
while let Some((p1, time)) = frontier.pop_front() {
oxygen.insert(p1);
if prev_time != time {
assert!(prev_time < time, "{} should be less than {}", prev_time, time);
prev_time = time;
minutes += 1;
println!("{:>3} minutes... {} slots oxygenated", minutes, oxygen.len());
}
let unoxinated = Direction::all()
.map(|d| p1.step_in_direction(&d))
.filter_map(|p| match gd.get(&p) {
Some(Tile::Empty) => Some(p),
Some(_) | None => None,
})
.filter(|p| !oxygen.contains(&p))
.collect::<Vec<_>>();
for p2 in unoxinated {
frontier.push_back((p2, time + 1));
}
}
println!("stage2: {}", minutes);
}
}
/// Wasteful Dijkstra... could share the hashmaps across queries maybe?
fn path_to(gd: &GameDisplay<Tile>, pos: &(Word, Word), target: &(Word, Word)) -> Option<Vec<Direction>> {
//println!("path_to: {:?} to {:?}", pos, target);
use std::collections::{HashMap, BinaryHeap};
use std::collections::hash_map::Entry;
use std::cmp;
let mut ret = Vec::new();
let mut work = BinaryHeap::new();
let mut dist = HashMap::new();
let mut prev = HashMap::new();
work.push(cmp::Reverse((0, *pos)));
while let Some(cmp::Reverse((steps_here, p))) = work.pop() {
//println!("path_to: popped {:?}", (p, steps_here));
if p == *target {
//println!("path_to: found target {:?}", p);
let mut backwards = p;
ret.push(p);
while backwards != *pos {
let previous = prev.remove(&backwards).unwrap();
ret.push(previous);
backwards = previous;
}
ret.reverse();
let dirs = ret.windows(2)
.map(|slice| {
let a = slice[0];
let b = slice[1];
let d = (b.0 - a.0, b.1 - a.1);
match d {
( 0,-1) => Direction::Down,
( 0, 1) => Direction::Up,
(-1, 0) => Direction::Left,
( 1, 0) => Direction::Right,
x => unreachable!("cannot have this {:?} between {:?} and {:?}", x, a, b),
}
}).collect();
return Some(dirs);
}
match dist.entry(p) {
Entry::Vacant(vcnt) => {
vcnt.insert(steps_here);
},
Entry::Occupied(mut o) => {
if *o.get() >= steps_here {
*o.get_mut() = steps_here;
} else {
println!("already visited {:?} with lower dist {} than {} from {:?}", p, o.get(), steps_here, prev[&p]);
continue;
} |
if alt < *dist.get(&p2).unwrap_or(&usize::max_value()) {
//println!(" {:?} --{:?}--> {:?}", p, dir, p2);
dist.insert(p2, alt);
prev.insert(p2, p);
work.push(cmp::Reverse((alt, p2)));
}
}
}
None
}
#[test]
fn test_path_to() {
use Direction::*;
let mut gd: GameDisplay<Tile> = GameDisplay::default();
gd.insert(&(-1, 0), Tile::Wall);
gd.insert(&(-1,-1), Tile::Wall);
gd.insert(&( 0,-1), Tile::Wall);
gd.insert(&( 2, 0), Tile::Wall);
gd.insert(&( 2,-1), Tile::Wall);
gd.insert(&( 0, 0), Tile::Empty); // right
gd.insert(&( 1, 0), Tile::Empty); // down
gd.insert(&( 1, 1), Tile::Empty); // down
gd.insert(&( 1, 2), Tile::Empty); // down
gd.insert(&( 1, 3), Tile::Empty); // down
gd.insert(&( 1, 4), Tile::Empty); // down
gd.insert(&( 2, 4), Tile::Empty); // down
gd.insert(&( 3, 4), Tile::Empty); // down
gd.insert(&( 4, 4), Tile::Empty); // down
gd.insert(&( 4, 3), Tile::Empty); // down
gd.insert(&( 4, 2), Tile::Empty); // down
gd.insert(&( 4, 1), Tile::Empty); // down
gd.insert(&( 1,-1), Tile::Empty); // down
gd.insert(&( 1,-2), Tile::Empty); // down
gd.insert(&( 2,-2), Tile::Empty); // right
gd.insert(&( 3,-2), Tile::Empty); // right
gd.insert(&( 3,-1), Tile::Empty);
gd.insert(&( 3, 0), Tile::Empty);
gd.insert(&( 4, 0), Tile::Empty);
println!("{}", gd);
assert_eq!(vec![Right, Down, Down, Right, Right, Up, Up, Right], path_to(&gd, &( 0, 0), &( 4, 0)).unwrap());
}
fn adjacent<'a>(gd: &'a GameDisplay<Tile>, pos: &'a (Word, Word)) -> impl Iterator<Item = ((Word, Word), Direction)> + 'a {
Direction::all()
.into_iter()
.map(move |d| (pos.step_in_direction(&d), d))
.filter_map(move |(p2, d)| gd.get(&p2).map(|t| (p2, d, t)))
//.inspect(|x| println!(" c: {:?}", x))
.filter_map(|(p2, d, t)| match t {
&Tile::Empty | &Tile::Robot | &Tile::Oxygen => Some((p2, d)),
_ => None,
})
//.inspect(|x| println!(" d: {:?}", x))
}
struct Robot {
program: Program<'static>,
regs: Option<Registers>,
pos: (Word, Word),
moves: usize,
}
impl Robot {
fn new(data: Vec<Word>) -> Self {
let mem = intcode::Memory::from(data).with_memory_expansion();
let program = Program::from(mem);
Robot {
program,
regs: Some(Registers::default()),
pos: (0, 0),
moves: 0,
}
}
fn position(&self) -> &(Word, Word) {
&self.pos
}
fn try_move(&mut self, dir: Direction) -> ((Word, Word), Tile) {
loop {
let mut ret = None;
self.regs = Some(match self.program.eval_from_instruction(self.regs.take().unwrap()).unwrap() {
ExecutionState::HaltedAt(regs) => unreachable!("Halted at: {:?}", regs),
ExecutionState::Paused(regs) => unreachable!("Paused? {:?}", regs),
ExecutionState::InputIO(io) => {
let val: i64 = dir.into();
//println!("robot <-- {}", val);
self.program.handle_input_completion(io, val).unwrap()
},
ExecutionState::OutputIO(io, value) => {
//println!("robot --> {}", value);
let moved = value != 0;
let found = value == 2;
let tile = if found { Tile::Oxygen } else if moved { Tile::Empty } else { Tile::Wall };
let prev = self.pos;
if moved {
self.pos = self.pos.step_in_direction(&dir);
self.moves += 1;
}
// println!("robot movement from {:?} to {:?} ended up {:?}", prev, dir, self.pos);
ret = Some((self.pos, tile));
self.program.handle_output_completion(io)
},
});
if let Some((pos, tile)) = ret {
return (pos, tile);
}
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Direction {
Up,
Right,
Down,
Left
}
impl Into<Word> for Direction {
fn into(self) -> Word {
match self {
Direction::Up => 1,
Direction::Right => 3,
Direction::Down => 2,
Direction::Left => 4,
}
}
}
impl Direction {
fn all() -> impl Iterator<Item = Direction> {
use Direction::*;
[Up, Right, Down, Left].iter().copied()
}
fn reverse(&self) -> Direction {
use Direction::*;
match *self {
Up => Down,
Right => Left,
Down => Up,
Left => Right,
}
}
}
trait Coordinates {
fn step_in_direction(&self, dir: &Direction) -> Self;
}
impl Coordinates for (Word, Word) {
fn step_in_direction(&self, dir: &Direction) -> Self {
match *dir {
Direction::Up => (self.0, self.1 + 1),
Direction::Right => (self.0 + 1, self.1),
Direction::Down => (self.0, self.1 - 1),
Direction::Left => (self.0 - 1, self.1),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Tile {
Wall,
Empty,
Oxygen,
Robot,
Unexplored
}
impl Default for Tile {
fn default() -> Self {
Tile::Unexplored
}
}
impl fmt::Display for Tile {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let ch = match *self {
Tile::Wall => '#',
Tile::Empty => ' ',
Tile::Oxygen => 'o',
Tile::Robot => 'R',
Tile::Unexplored => '?'
};
write!(fmt, "{}", ch)
}
} | }
}
for (p2, dir) in adjacent(gd, &p) {
let alt = steps_here + 1; | random_line_split |
connection.rs | pub(crate) keyboard_mapper: RefCell<Option<Keyboard>>,
pub(crate) keyboard_window_id: RefCell<Option<usize>>,
pub(crate) surface_to_window_id: RefCell<HashMap<u32, usize>>,
pub(crate) active_surface_id: RefCell<u32>,
/// Repeats per second
pub(crate) key_repeat_rate: RefCell<i32>,
pub(crate) mem_pool: RefCell<AutoMemPool>,
/// Delay before repeating, in milliseconds
pub(crate) key_repeat_delay: RefCell<i32>,
pub(crate) last_serial: RefCell<u32>,
seat_listener: SeatListener,
pub(crate) environment: Environment<MyEnvironment>,
event_q: RefCell<EventQueue>,
pub(crate) display: RefCell<Display>,
}
impl Drop for WaylandConnection {
fn drop(&mut self) {
self.environment
.with_inner(|env| env.input_handler.shutdown());
}
}
impl WaylandConnection {
pub fn create_new() -> anyhow::Result<Self> {
let (environment, display, event_q) = toolkit::new_default_environment!(
MyEnvironment,
desktop,
fields = [
output_handler: OutputHandler::new(),
input_handler: InputHandler::new(),
]
)?;
let mut pointer = None;
let mut seat_keyboards = HashMap::new();
for seat in environment.get_all_seats() {
if let Some((has_kbd, has_ptr, name)) =
toolkit::seat::with_seat_data(&seat, |seat_data| {
(
seat_data.has_keyboard && !seat_data.defunct,
seat_data.has_pointer && !seat_data.defunct,
seat_data.name.clone(),
)
})
{
if has_kbd {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
environment.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(name, keyboard);
}
if has_ptr {
pointer.replace(PointerDispatcher::register(
&seat,
environment.require_global(),
environment.require_global(),
environment.require_global(),
environment.get_primary_selection_manager(),
)?);
}
}
}
let pointer =
pointer.ok_or_else(|| anyhow::anyhow!("no seats have an available pointer"))?;
let seat_listener;
{
let env = environment.clone();
seat_listener = environment.listen_for_seats(move |seat, seat_data, _| {
if seat_data.has_keyboard {
if !seat_data.defunct {
// We only want to assign a new keyboard object if we don't already have
// one for this seat. When a seat is being created or updated, the listener
// can receive the same seat multiple times: for example, when switching
// back from another virtual console, the same seat is usually seen four
// times with different data flags:
//
// has_pointer: true; has_keyboard: false
// has_pointer: false; has_keyboard: false
// has_pointer: false; has_keyboard: true
// has_pointer: true; has_keyboard: true
//
// This is essentially telling the client to re-assign its keyboard and
// pointer, but that means that this listener will fire twice with
// has_keyboard set to true. If we assign a handler both times, then we end
// up handling key events twice.
if !seat_keyboards.contains_key(&seat_data.name) {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
env.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(seat_data.name.clone(), keyboard);
}
} else {
env.with_inner(|env| env.input_handler.seat_defunct(&seat));
}
} else {
// If we previously had a keyboard object on this seat, it's no longer valid if
// has_keyboard is false, so we remove the keyboard object we knew about and
// thereby ensure that we assign a new keyboard object next time the listener
// fires for this seat with has_keyboard = true.
seat_keyboards.remove(&seat_data.name);
}
if seat_data.has_pointer && !seat_data.defunct {
let conn = Connection::get().unwrap().wayland();
conn.pointer.borrow_mut().seat_changed(&seat);
}
});
}
let mem_pool = environment.create_auto_pool()?;
Ok(Self {
display: RefCell::new(display),
environment,
should_terminate: RefCell::new(false),
next_window_id: AtomicUsize::new(1),
windows: RefCell::new(HashMap::new()),
event_q: RefCell::new(event_q),
pointer: RefCell::new(pointer),
seat_listener,
mem_pool: RefCell::new(mem_pool),
gl_connection: RefCell::new(None),
keyboard_mapper: RefCell::new(None),
key_repeat_rate: RefCell::new(25),
key_repeat_delay: RefCell::new(400),
keyboard_window_id: RefCell::new(None),
last_serial: RefCell::new(0),
surface_to_window_id: RefCell::new(HashMap::new()),
active_surface_id: RefCell::new(0),
})
}
fn keyboard_event(
&self,
keyboard: Main<WlKeyboard>,
event: WlKeyboardEvent,
) -> anyhow::Result<()> {
match &event {
WlKeyboardEvent::Enter {
serial, surface, ..
} => {
// update global active surface id
*self.active_surface_id.borrow_mut() = surface.as_ref().id();
*self.last_serial.borrow_mut() = *serial;
if let Some(&window_id) = self
.surface_to_window_id
.borrow()
.get(&surface.as_ref().id())
{
self.keyboard_window_id.borrow_mut().replace(window_id);
self.environment.with_inner(|env| {
if let Some(input) =
env.input_handler.get_text_input_for_keyboard(&keyboard)
{
input.enable();
input.commit();
}
env.input_handler.advise_surface(&surface, &keyboard);
});
} else {
log::warn!("{:?}, no known surface", event);
}
}
WlKeyboardEvent::Leave { serial, .. } => {
if let Some(input) = self
.environment
.with_inner(|env| env.input_handler.get_text_input_for_keyboard(&keyboard))
{
input.disable();
input.commit();
}
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::Key { serial, .. } | WlKeyboardEvent::Modifiers { serial, .. } => {
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::RepeatInfo { rate, delay } => {
*self.key_repeat_rate.borrow_mut() = *rate;
*self.key_repeat_delay.borrow_mut() = *delay;
}
WlKeyboardEvent::Keymap { format, fd, size } => | }
_ => {}
}
}
_ => {}
}
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.keyboard_event(event);
}
}
Ok(())
}
pub(crate) fn dispatch_to_focused_window(&self, event: WindowEvent) {
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.events.dispatch(event);
}
}
}
pub(crate) fn next_window_id(&self) -> usize {
self.next_window_id
.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed)
}
fn flush(&self) -> anyhow::Result<()> {
if let Err(e) = self.display.borrow_mut().flush() {
if e.kind() != ::std::io::ErrorKind::WouldBlock {
bail!("Error while flushing display: {}", e);
}
}
Ok(())
}
pub(crate) fn window_by_id(&self, window_id: usize) -> Option<Rc<RefCell<WaylandWindowInner>>> {
self.windows.borrow().get(&window_id).map(Rc::clone)
}
pub(crate) fn with_window_inner<
R,
F: FnOnce(&mut WaylandWindowInner) -> anyhow::Result<R> + Send + 'static,
>(
window: usize,
f: F,
) -> promise::Future<R>
where
R: Send + 'static,
{
let mut prom = promise::Promise::new();
let future = prom.get_future().unwrap();
promise::spawn::spawn_into_main_thread(async move {
if let Some(handle) = Connection::get().unwrap().wayland().window_by_id(window) {
let mut inner = handle.borrow_mut();
prom.result(f(&mut inner));
}
})
.detach();
future
}
fn run_message_loop_impl(&self) -> anyhow::Result<()> {
const TOK_WL: usize = 0xffff_fffc;
const TOK_SPAWN: usize = 0xffff_fffd;
let tok_wl = Token(TOK_WL);
let tok_spawn = Token(TOK_SPAWN);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(8);
poll.registry().register(
&mut SourceFd(&self.display.borrow().get_connection_fd()),
tok_wl,
Interest::READABLE,
)?;
poll.registry().register(
&mut SourceFd(&SPAWN_QUEUE.raw_fd()),
tok_spawn,
Interest::READABLE,
)?;
while !*self.should_terminate.borrow() {
// Check the spawn queue before we try to sleep; there may
// be work pending and we don't guarantee that there is a
// 1:1 mapping of wakeups to queued functions, so we need to be assertive
// in order to avoid missing wakeups
let timeout = if SPAWN_QUEUE.run() {
// if we processed one, we don't want to sleep because
// there may be others to deal with
Some(std::time::Duration::from_secs(0))
} else {
None
};
{
let mut event_q = self.event_q.borrow_mut();
if let Err(err) = event_q.dispatch_pending(&mut (), |_, _, _| {}) {
return Err(err).with_context(|| {
format!(
"error during event_q.dispatch protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
self.flush()?;
if let Err(err) = poll.poll(&mut events, timeout) {
if err.kind() == std::io::ErrorKind::Interrupted {
continue;
}
bail!("polling for events: {:?}", err);
}
for event in &events {
if event.token() == tok_wl {
let event_q = self.event_q.borrow();
if let Some(guard) = event_q.prepare_read() {
if let Err(err) = guard.read_events() {
if err.kind() != std::io::ErrorKind::WouldBlock
&& err.kind() != std::io::ErrorKind::Interrupted
{
return Err(err).with_context(|| {
format!(
"error during event_q.read_events protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
}
}
}
}
Ok(())
}
pub(crate) fn advise_of_appearance_change(&self, appearance: crate::Appearance) {
for win in self.windows.borrow().values() {
win.borrow_mut().appearance_changed(appearance);
}
}
}
impl ConnectionOps for WaylandConnection {
fn name(&self) -> String {
"Wayland".to_string()
}
fn terminate_message_loop(&self) {
*self.should_terminate.borrow_mut() = true;
}
fn get_appearance(&self) -> Appearance {
match promise::spawn::block_on(crate::os::xdg_desktop_portal::get_appearance()) {
Ok(Some(appearance)) => return appearance,
| {
let file = unsafe { std::fs::File::from_raw_fd(*fd) };
match format {
KeymapFormat::XkbV1 => {
let mut data = vec![0u8; *size as usize];
file.read_exact_at(&mut data, 0)?;
// Dance around CString panicking on the NUL terminator
// in the xkbcommon crate
while let Some(0) = data.last() {
data.pop();
}
let s = String::from_utf8(data)?;
match Keyboard::new_from_string(s) {
Ok(k) => {
self.keyboard_mapper.replace(Some(k));
}
Err(err) => {
log::error!("Error processing keymap change: {:#}", err);
}
} | conditional_block |
connection.rs | pub(crate) keyboard_mapper: RefCell<Option<Keyboard>>,
pub(crate) keyboard_window_id: RefCell<Option<usize>>,
pub(crate) surface_to_window_id: RefCell<HashMap<u32, usize>>,
pub(crate) active_surface_id: RefCell<u32>,
/// Repeats per second
pub(crate) key_repeat_rate: RefCell<i32>,
pub(crate) mem_pool: RefCell<AutoMemPool>,
/// Delay before repeating, in milliseconds
pub(crate) key_repeat_delay: RefCell<i32>,
pub(crate) last_serial: RefCell<u32>,
seat_listener: SeatListener,
pub(crate) environment: Environment<MyEnvironment>,
event_q: RefCell<EventQueue>,
pub(crate) display: RefCell<Display>,
}
impl Drop for WaylandConnection {
fn drop(&mut self) {
self.environment
.with_inner(|env| env.input_handler.shutdown());
}
}
impl WaylandConnection {
pub fn create_new() -> anyhow::Result<Self> {
let (environment, display, event_q) = toolkit::new_default_environment!(
MyEnvironment,
desktop,
fields = [
output_handler: OutputHandler::new(),
input_handler: InputHandler::new(),
]
)?;
let mut pointer = None;
let mut seat_keyboards = HashMap::new();
for seat in environment.get_all_seats() {
if let Some((has_kbd, has_ptr, name)) =
toolkit::seat::with_seat_data(&seat, |seat_data| {
(
seat_data.has_keyboard &&!seat_data.defunct,
seat_data.has_pointer &&!seat_data.defunct,
seat_data.name.clone(),
)
})
{
if has_kbd {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
environment.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(name, keyboard);
}
if has_ptr {
pointer.replace(PointerDispatcher::register(
&seat,
environment.require_global(),
environment.require_global(),
environment.require_global(),
environment.get_primary_selection_manager(),
)?);
}
}
}
let pointer =
pointer.ok_or_else(|| anyhow::anyhow!("no seats have an available pointer"))?;
let seat_listener;
{
let env = environment.clone();
seat_listener = environment.listen_for_seats(move |seat, seat_data, _| {
if seat_data.has_keyboard {
if !seat_data.defunct {
// We only want to assign a new keyboard object if we don't already have
// one for this seat. When a seat is being created or updated, the listener
// can receive the same seat multiple times: for example, when switching
// back from another virtual console, the same seat is usually seen four
// times with different data flags:
//
// has_pointer: true; has_keyboard: false
// has_pointer: false; has_keyboard: false
// has_pointer: false; has_keyboard: true
// has_pointer: true; has_keyboard: true
//
// This is essentially telling the client to re-assign its keyboard and
// pointer, but that means that this listener will fire twice with
// has_keyboard set to true. If we assign a handler both times, then we end
// up handling key events twice.
if !seat_keyboards.contains_key(&seat_data.name) {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
env.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(seat_data.name.clone(), keyboard);
}
} else {
env.with_inner(|env| env.input_handler.seat_defunct(&seat));
}
} else {
// If we previously had a keyboard object on this seat, it's no longer valid if
// has_keyboard is false, so we remove the keyboard object we knew about and
// thereby ensure that we assign a new keyboard object next time the listener
// fires for this seat with has_keyboard = true.
seat_keyboards.remove(&seat_data.name);
}
if seat_data.has_pointer && !seat_data.defunct {
let conn = Connection::get().unwrap().wayland();
conn.pointer.borrow_mut().seat_changed(&seat);
}
});
}
let mem_pool = environment.create_auto_pool()?;
Ok(Self {
display: RefCell::new(display),
environment,
should_terminate: RefCell::new(false),
next_window_id: AtomicUsize::new(1),
windows: RefCell::new(HashMap::new()),
event_q: RefCell::new(event_q),
pointer: RefCell::new(pointer),
seat_listener,
mem_pool: RefCell::new(mem_pool),
gl_connection: RefCell::new(None),
keyboard_mapper: RefCell::new(None),
key_repeat_rate: RefCell::new(25),
key_repeat_delay: RefCell::new(400),
keyboard_window_id: RefCell::new(None),
last_serial: RefCell::new(0),
surface_to_window_id: RefCell::new(HashMap::new()),
active_surface_id: RefCell::new(0),
})
}
fn | (
&self,
keyboard: Main<WlKeyboard>,
event: WlKeyboardEvent,
) -> anyhow::Result<()> {
match &event {
WlKeyboardEvent::Enter {
serial, surface, ..
} => {
// update global active surface id
*self.active_surface_id.borrow_mut() = surface.as_ref().id();
*self.last_serial.borrow_mut() = *serial;
if let Some(&window_id) = self
.surface_to_window_id
.borrow()
.get(&surface.as_ref().id())
{
self.keyboard_window_id.borrow_mut().replace(window_id);
self.environment.with_inner(|env| {
if let Some(input) =
env.input_handler.get_text_input_for_keyboard(&keyboard)
{
input.enable();
input.commit();
}
env.input_handler.advise_surface(&surface, &keyboard);
});
} else {
log::warn!("{:?}, no known surface", event);
}
}
WlKeyboardEvent::Leave { serial, .. } => {
if let Some(input) = self
.environment
.with_inner(|env| env.input_handler.get_text_input_for_keyboard(&keyboard))
{
input.disable();
input.commit();
}
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::Key { serial, .. } | WlKeyboardEvent::Modifiers { serial, .. } => {
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::RepeatInfo { rate, delay } => {
*self.key_repeat_rate.borrow_mut() = *rate;
*self.key_repeat_delay.borrow_mut() = *delay;
}
WlKeyboardEvent::Keymap { format, fd, size } => {
let file = unsafe { std::fs::File::from_raw_fd(*fd) };
match format {
KeymapFormat::XkbV1 => {
let mut data = vec![0u8; *size as usize];
file.read_exact_at(&mut data, 0)?;
// Dance around CString panicking on the NUL terminator
// in the xkbcommon crate
while let Some(0) = data.last() {
data.pop();
}
let s = String::from_utf8(data)?;
match Keyboard::new_from_string(s) {
Ok(k) => {
self.keyboard_mapper.replace(Some(k));
}
Err(err) => {
log::error!("Error processing keymap change: {:#}", err);
}
}
}
_ => {}
}
}
_ => {}
}
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.keyboard_event(event);
}
}
Ok(())
}
pub(crate) fn dispatch_to_focused_window(&self, event: WindowEvent) {
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.events.dispatch(event);
}
}
}
pub(crate) fn next_window_id(&self) -> usize {
self.next_window_id
.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed)
}
fn flush(&self) -> anyhow::Result<()> {
if let Err(e) = self.display.borrow_mut().flush() {
if e.kind() != ::std::io::ErrorKind::WouldBlock {
bail!("Error while flushing display: {}", e);
}
}
Ok(())
}
pub(crate) fn window_by_id(&self, window_id: usize) -> Option<Rc<RefCell<WaylandWindowInner>>> {
self.windows.borrow().get(&window_id).map(Rc::clone)
}
pub(crate) fn with_window_inner<
R,
F: FnOnce(&mut WaylandWindowInner) -> anyhow::Result<R> + Send + 'static,
>(
window: usize,
f: F,
) -> promise::Future<R>
where
R: Send + 'static,
{
let mut prom = promise::Promise::new();
let future = prom.get_future().unwrap();
promise::spawn::spawn_into_main_thread(async move {
if let Some(handle) = Connection::get().unwrap().wayland().window_by_id(window) {
let mut inner = handle.borrow_mut();
prom.result(f(&mut inner));
}
})
.detach();
future
}
fn run_message_loop_impl(&self) -> anyhow::Result<()> {
const TOK_WL: usize = 0xffff_fffc;
const TOK_SPAWN: usize = 0xffff_fffd;
let tok_wl = Token(TOK_WL);
let tok_spawn = Token(TOK_SPAWN);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(8);
poll.registry().register(
&mut SourceFd(&self.display.borrow().get_connection_fd()),
tok_wl,
Interest::READABLE,
)?;
poll.registry().register(
&mut SourceFd(&SPAWN_QUEUE.raw_fd()),
tok_spawn,
Interest::READABLE,
)?;
while !*self.should_terminate.borrow() {
// Check the spawn queue before we try to sleep; there may
// be work pending and we don't guarantee that there is a
// 1:1 mapping of wakeups to queued functions, so we need to be assertive
// in order to avoid missing wakeups
let timeout = if SPAWN_QUEUE.run() {
// if we processed one, we don't want to sleep because
// there may be others to deal with
Some(std::time::Duration::from_secs(0))
} else {
None
};
{
let mut event_q = self.event_q.borrow_mut();
if let Err(err) = event_q.dispatch_pending(&mut (), |_, _, _| {}) {
return Err(err).with_context(|| {
format!(
"error during event_q.dispatch protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
self.flush()?;
if let Err(err) = poll.poll(&mut events, timeout) {
if err.kind() == std::io::ErrorKind::Interrupted {
continue;
}
bail!("polling for events: {:?}", err);
}
for event in &events {
if event.token() == tok_wl {
let event_q = self.event_q.borrow();
if let Some(guard) = event_q.prepare_read() {
if let Err(err) = guard.read_events() {
if err.kind() != std::io::ErrorKind::WouldBlock
&& err.kind() != std::io::ErrorKind::Interrupted
{
return Err(err).with_context(|| {
format!(
"error during event_q.read_events protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
}
}
}
}
Ok(())
}
pub(crate) fn advise_of_appearance_change(&self, appearance: crate::Appearance) {
for win in self.windows.borrow().values() {
win.borrow_mut().appearance_changed(appearance);
}
}
}
impl ConnectionOps for WaylandConnection {
fn name(&self) -> String {
"Wayland".to_string()
}
fn terminate_message_loop(&self) {
*self.should_terminate.borrow_mut() = true;
}
fn get_appearance(&self) -> Appearance {
match promise::spawn::block_on(crate::os::xdg_desktop_portal::get_appearance()) {
Ok(Some(appearance)) => return appearance,
| keyboard_event | identifier_name |
connection.rs |
pub(crate) keyboard_mapper: RefCell<Option<Keyboard>>,
pub(crate) keyboard_window_id: RefCell<Option<usize>>,
pub(crate) surface_to_window_id: RefCell<HashMap<u32, usize>>,
pub(crate) active_surface_id: RefCell<u32>,
/// Repeats per second
pub(crate) key_repeat_rate: RefCell<i32>,
pub(crate) mem_pool: RefCell<AutoMemPool>,
/// Delay before repeating, in milliseconds
pub(crate) key_repeat_delay: RefCell<i32>,
pub(crate) last_serial: RefCell<u32>,
seat_listener: SeatListener,
pub(crate) environment: Environment<MyEnvironment>,
event_q: RefCell<EventQueue>,
pub(crate) display: RefCell<Display>,
}
impl Drop for WaylandConnection {
fn drop(&mut self) {
self.environment
.with_inner(|env| env.input_handler.shutdown());
}
}
impl WaylandConnection {
pub fn create_new() -> anyhow::Result<Self> {
let (environment, display, event_q) = toolkit::new_default_environment!(
MyEnvironment,
desktop,
fields = [
output_handler: OutputHandler::new(),
input_handler: InputHandler::new(),
]
)?;
let mut pointer = None;
let mut seat_keyboards = HashMap::new();
for seat in environment.get_all_seats() {
if let Some((has_kbd, has_ptr, name)) =
toolkit::seat::with_seat_data(&seat, |seat_data| {
(
seat_data.has_keyboard && !seat_data.defunct,
seat_data.has_pointer && !seat_data.defunct,
seat_data.name.clone(),
)
})
{
if has_kbd {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
environment.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(name, keyboard);
}
if has_ptr {
pointer.replace(PointerDispatcher::register(
&seat,
environment.require_global(),
environment.require_global(),
environment.require_global(),
environment.get_primary_selection_manager(),
)?);
}
}
}
let pointer =
pointer.ok_or_else(|| anyhow::anyhow!("no seats have an available pointer"))?;
let seat_listener;
{
let env = environment.clone();
seat_listener = environment.listen_for_seats(move |seat, seat_data, _| {
if seat_data.has_keyboard {
if !seat_data.defunct {
// We only want to assign a new keyboard object if we don't already have
// one for this seat. When a seat is being created or updated, the listener
// can receive the same seat multiple times: for example, when switching
// back from another virtual console, the same seat is usually seen four
// times with different data flags:
//
// has_pointer: true; has_keyboard: false
// has_pointer: false; has_keyboard: false
// has_pointer: false; has_keyboard: true
// has_pointer: true; has_keyboard: true
//
// This is essentially telling the client to re-assign its keyboard and
// pointer, but that means that this listener will fire twice with
// has_keyboard set to true. If we assign a handler both times, then we end
// up handling key events twice.
if !seat_keyboards.contains_key(&seat_data.name) {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
env.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(seat_data.name.clone(), keyboard);
}
} else {
env.with_inner(|env| env.input_handler.seat_defunct(&seat));
}
} else {
// If we previously had a keyboard object on this seat, it's no longer valid if
// has_keyboard is false, so we remove the keyboard object we knew about and
// thereby ensure that we assign a new keyboard object next time the listener
// fires for this seat with has_keyboard = true.
seat_keyboards.remove(&seat_data.name);
}
if seat_data.has_pointer && !seat_data.defunct {
let conn = Connection::get().unwrap().wayland();
conn.pointer.borrow_mut().seat_changed(&seat);
}
});
}
let mem_pool = environment.create_auto_pool()?;
Ok(Self {
display: RefCell::new(display),
environment,
should_terminate: RefCell::new(false),
next_window_id: AtomicUsize::new(1), | windows: RefCell::new(HashMap::new()),
event_q: RefCell::new(event_q),
pointer: RefCell::new(pointer),
seat_listener,
mem_pool: RefCell::new(mem_pool),
gl_connection: RefCell::new(None),
keyboard_mapper: RefCell::new(None),
key_repeat_rate: RefCell::new(25),
key_repeat_delay: RefCell::new(400),
keyboard_window_id: RefCell::new(None),
last_serial: RefCell::new(0),
surface_to_window_id: RefCell::new(HashMap::new()),
active_surface_id: RefCell::new(0),
})
}
fn keyboard_event(
&self,
keyboard: Main<WlKeyboard>,
event: WlKeyboardEvent,
) -> anyhow::Result<()> {
match &event {
WlKeyboardEvent::Enter {
serial, surface, ..
} => {
// update global active surface id
*self.active_surface_id.borrow_mut() = surface.as_ref().id();
*self.last_serial.borrow_mut() = *serial;
if let Some(&window_id) = self
.surface_to_window_id
.borrow()
.get(&surface.as_ref().id())
{
self.keyboard_window_id.borrow_mut().replace(window_id);
self.environment.with_inner(|env| {
if let Some(input) =
env.input_handler.get_text_input_for_keyboard(&keyboard)
{
input.enable();
input.commit();
}
env.input_handler.advise_surface(&surface, &keyboard);
});
} else {
log::warn!("{:?}, no known surface", event);
}
}
WlKeyboardEvent::Leave { serial, .. } => {
if let Some(input) = self
.environment
.with_inner(|env| env.input_handler.get_text_input_for_keyboard(&keyboard))
{
input.disable();
input.commit();
}
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::Key { serial, .. } | WlKeyboardEvent::Modifiers { serial, .. } => {
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::RepeatInfo { rate, delay } => {
*self.key_repeat_rate.borrow_mut() = *rate;
*self.key_repeat_delay.borrow_mut() = *delay;
}
WlKeyboardEvent::Keymap { format, fd, size } => {
let file = unsafe { std::fs::File::from_raw_fd(*fd) };
match format {
KeymapFormat::XkbV1 => {
let mut data = vec![0u8; *size as usize];
file.read_exact_at(&mut data, 0)?;
// Dance around CString panicking on the NUL terminator
// in the xkbcommon crate
while let Some(0) = data.last() {
data.pop();
}
let s = String::from_utf8(data)?;
match Keyboard::new_from_string(s) {
Ok(k) => {
self.keyboard_mapper.replace(Some(k));
}
Err(err) => {
log::error!("Error processing keymap change: {:#}", err);
}
}
}
_ => {}
}
}
_ => {}
}
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.keyboard_event(event);
}
}
Ok(())
}
pub(crate) fn dispatch_to_focused_window(&self, event: WindowEvent) {
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.events.dispatch(event);
}
}
}
pub(crate) fn next_window_id(&self) -> usize {
self.next_window_id
.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed)
}
fn flush(&self) -> anyhow::Result<()> {
if let Err(e) = self.display.borrow_mut().flush() {
if e.kind() != ::std::io::ErrorKind::WouldBlock {
bail!("Error while flushing display: {}", e);
}
}
Ok(())
}
pub(crate) fn window_by_id(&self, window_id: usize) -> Option<Rc<RefCell<WaylandWindowInner>>> {
self.windows.borrow().get(&window_id).map(Rc::clone)
}
pub(crate) fn with_window_inner<
R,
F: FnOnce(&mut WaylandWindowInner) -> anyhow::Result<R> + Send + 'static,
>(
window: usize,
f: F,
) -> promise::Future<R>
where
R: Send + 'static,
{
let mut prom = promise::Promise::new();
let future = prom.get_future().unwrap();
promise::spawn::spawn_into_main_thread(async move {
if let Some(handle) = Connection::get().unwrap().wayland().window_by_id(window) {
let mut inner = handle.borrow_mut();
prom.result(f(&mut inner));
}
})
.detach();
future
}
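// Illustrative usage sketch (added commentary; `inner.title` is a
// hypothetical field used only for the example): because the closure is
// marshalled onto the main thread via the spawn queue, callers on any thread
// can act on a window's inner state and await the result:
//
//     let title_future = WaylandConnection::with_window_inner(window_id, |inner| {
//         Ok(inner.title.clone())
//     });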
fn run_message_loop_impl(&self) -> anyhow::Result<()> {
const TOK_WL: usize = 0xffff_fffc;
const TOK_SPAWN: usize = 0xffff_fffd;
let tok_wl = Token(TOK_WL);
let tok_spawn = Token(TOK_SPAWN);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(8);
poll.registry().register(
&mut SourceFd(&self.display.borrow().get_connection_fd()),
tok_wl,
Interest::READABLE,
)?;
poll.registry().register(
&mut SourceFd(&SPAWN_QUEUE.raw_fd()),
tok_spawn,
Interest::READABLE,
)?;
while !*self.should_terminate.borrow() {
// Check the spawn queue before we try to sleep; there may
// be work pending and there is no guarantee of a 1:1 wakeup per queued
// function, so we need to be assertive in order to avoid missing wakeups
// in order to avoid missing wakeups
let timeout = if SPAWN_QUEUE.run() {
// if we processed one, we don't want to sleep because
// there may be others to deal with
Some(std::time::Duration::from_secs(0))
} else {
None
};
{
let mut event_q = self.event_q.borrow_mut();
if let Err(err) = event_q.dispatch_pending(&mut (), |_, _, _| {}) {
return Err(err).with_context(|| {
format!(
"error during event_q.dispatch protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
self.flush()?;
if let Err(err) = poll.poll(&mut events, timeout) {
if err.kind() == std::io::ErrorKind::Interrupted {
continue;
}
bail!("polling for events: {:?}", err);
}
for event in &events {
if event.token() == tok_wl {
let event_q = self.event_q.borrow();
if let Some(guard) = event_q.prepare_read() {
if let Err(err) = guard.read_events() {
if err.kind() != std::io::ErrorKind::WouldBlock
&& err.kind() != std::io::ErrorKind::Interrupted
{
return Err(err).with_context(|| {
format!(
"error during event_q.read_events protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
}
}
}
}
Ok(())
}
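// Note (added commentary): `prepare_read` / `read_events` follow the
// wayland-client buffered-read protocol -- only one reader may pull events
// off the display fd at a time, while `dispatch_pending` at the top of the
// loop drains anything already queued before we block in `poll`, so no
// wakeup is lost between the two steps.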
pub(crate) fn advise_of_appearance_change(&self, appearance: crate::Appearance) {
for win in self.windows.borrow().values() {
win.borrow_mut().appearance_changed(appearance);
}
}
}
impl ConnectionOps for WaylandConnection {
fn name(&self) -> String {
"Wayland".to_string()
}
fn terminate_message_loop(&self) {
*self.should_terminate.borrow_mut() = true;
}
fn get_appearance(&self) -> Appearance {
match promise::spawn::block_on(crate::os::xdg_desktop_portal::get_appearance()) {
Ok(Some(appearance)) => return appearance,
Ok | random_line_split |
|
connection.rs | pub(crate) keyboard_mapper: RefCell<Option<Keyboard>>,
pub(crate) keyboard_window_id: RefCell<Option<usize>>,
pub(crate) surface_to_window_id: RefCell<HashMap<u32, usize>>,
pub(crate) active_surface_id: RefCell<u32>,
/// Repeats per second
pub(crate) key_repeat_rate: RefCell<i32>,
pub(crate) mem_pool: RefCell<AutoMemPool>,
/// Delay before repeating, in milliseconds
pub(crate) key_repeat_delay: RefCell<i32>,
pub(crate) last_serial: RefCell<u32>,
seat_listener: SeatListener,
pub(crate) environment: Environment<MyEnvironment>,
event_q: RefCell<EventQueue>,
pub(crate) display: RefCell<Display>,
}
impl Drop for WaylandConnection {
fn drop(&mut self) {
self.environment
.with_inner(|env| env.input_handler.shutdown());
}
}
impl WaylandConnection {
pub fn create_new() -> anyhow::Result<Self> {
let (environment, display, event_q) = toolkit::new_default_environment!(
MyEnvironment,
desktop,
fields = [
output_handler: OutputHandler::new(),
input_handler: InputHandler::new(),
]
)?;
let mut pointer = None;
let mut seat_keyboards = HashMap::new();
for seat in environment.get_all_seats() {
if let Some((has_kbd, has_ptr, name)) =
toolkit::seat::with_seat_data(&seat, |seat_data| {
(
seat_data.has_keyboard && !seat_data.defunct,
seat_data.has_pointer && !seat_data.defunct,
seat_data.name.clone(),
)
})
{
if has_kbd {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
environment.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(name, keyboard);
}
if has_ptr {
pointer.replace(PointerDispatcher::register(
&seat,
environment.require_global(),
environment.require_global(),
environment.require_global(),
environment.get_primary_selection_manager(),
)?);
}
}
}
let pointer =
pointer.ok_or_else(|| anyhow::anyhow!("no seats have an available pointer"))?;
let seat_listener;
{
let env = environment.clone();
seat_listener = environment.listen_for_seats(move |seat, seat_data, _| {
if seat_data.has_keyboard {
if !seat_data.defunct {
// We only want to assign a new keyboard object if we don't already have
// one for this seat. When a seat is being created or updated, the listener
// can receive the same seat multiple times: for example, when switching
// back from another virtual console, the same seat is usually seen four
// times with different data flags:
//
// has_pointer: true; has_keyboard: false
// has_pointer: false; has_keyboard: false
// has_pointer: false; has_keyboard: true
// has_pointer: true; has_keyboard: true
//
// This is essentially telling the client to re-assign its keyboard and
// pointer, but that means that this listener will fire twice with
// has_keyboard set to true. If we assign a handler both times, then we end
// up handling key events twice.
if !seat_keyboards.contains_key(&seat_data.name) {
let keyboard = seat.get_keyboard();
keyboard.quick_assign(|keyboard, event, _| {
let conn = Connection::get().unwrap().wayland();
if let Err(err) = conn.keyboard_event(keyboard, event) {
log::error!("keyboard_event: {:#}", err);
}
});
env.with_inner(|env| env.input_handler.advise_seat(&seat, &keyboard));
seat_keyboards.insert(seat_data.name.clone(), keyboard);
}
} else {
env.with_inner(|env| env.input_handler.seat_defunct(&seat));
}
} else {
// If we previously had a keyboard object on this seat, it's no longer valid if
// has_keyboard is false, so we remove the keyboard object we knew about and
// thereby ensure that we assign a new keyboard object next time the listener
// fires for this seat with has_keyboard = true.
seat_keyboards.remove(&seat_data.name);
}
if seat_data.has_pointer && !seat_data.defunct {
let conn = Connection::get().unwrap().wayland();
conn.pointer.borrow_mut().seat_changed(&seat);
}
});
}
let mem_pool = environment.create_auto_pool()?;
Ok(Self {
display: RefCell::new(display),
environment,
should_terminate: RefCell::new(false),
next_window_id: AtomicUsize::new(1),
windows: RefCell::new(HashMap::new()),
event_q: RefCell::new(event_q),
pointer: RefCell::new(pointer),
seat_listener,
mem_pool: RefCell::new(mem_pool),
gl_connection: RefCell::new(None),
keyboard_mapper: RefCell::new(None),
key_repeat_rate: RefCell::new(25),
key_repeat_delay: RefCell::new(400),
keyboard_window_id: RefCell::new(None),
last_serial: RefCell::new(0),
surface_to_window_id: RefCell::new(HashMap::new()),
active_surface_id: RefCell::new(0),
})
}
fn keyboard_event(
&self,
keyboard: Main<WlKeyboard>,
event: WlKeyboardEvent,
) -> anyhow::Result<()> {
match &event {
WlKeyboardEvent::Enter {
serial, surface, ..
} => {
// update global active surface id
*self.active_surface_id.borrow_mut() = surface.as_ref().id();
*self.last_serial.borrow_mut() = *serial;
if let Some(&window_id) = self
.surface_to_window_id
.borrow()
.get(&surface.as_ref().id())
{
self.keyboard_window_id.borrow_mut().replace(window_id);
self.environment.with_inner(|env| {
if let Some(input) =
env.input_handler.get_text_input_for_keyboard(&keyboard)
{
input.enable();
input.commit();
}
env.input_handler.advise_surface(&surface, &keyboard);
});
} else {
log::warn!("{:?}, no known surface", event);
}
}
WlKeyboardEvent::Leave { serial, .. } => {
if let Some(input) = self
.environment
.with_inner(|env| env.input_handler.get_text_input_for_keyboard(&keyboard))
{
input.disable();
input.commit();
}
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::Key { serial, .. } | WlKeyboardEvent::Modifiers { serial, .. } => {
*self.last_serial.borrow_mut() = *serial;
}
WlKeyboardEvent::RepeatInfo { rate, delay } => {
*self.key_repeat_rate.borrow_mut() = *rate;
*self.key_repeat_delay.borrow_mut() = *delay;
}
WlKeyboardEvent::Keymap { format, fd, size } => {
let file = unsafe { std::fs::File::from_raw_fd(*fd) };
match format {
KeymapFormat::XkbV1 => {
let mut data = vec![0u8; *size as usize];
file.read_exact_at(&mut data, 0)?;
// Dance around CString panicking on the NUL terminator
// in the xkbcommon crate
while let Some(0) = data.last() {
data.pop();
}
let s = String::from_utf8(data)?;
match Keyboard::new_from_string(s) {
Ok(k) => {
self.keyboard_mapper.replace(Some(k));
}
Err(err) => {
log::error!("Error processing keymap change: {:#}", err);
}
}
}
_ => {}
}
}
_ => {}
}
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.keyboard_event(event);
}
}
Ok(())
}
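// Note (added commentary): Wayland requires clients to echo the serial of a
// recent input event when issuing requests such as interactive move/resize
// or clipboard ownership; that is why the Enter/Leave/Key/Modifiers arms
// above all record `last_serial` before the event is forwarded on.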
pub(crate) fn dispatch_to_focused_window(&self, event: WindowEvent) |
pub(crate) fn next_window_id(&self) -> usize {
self.next_window_id
.fetch_add(1, ::std::sync::atomic::Ordering::Relaxed)
}
fn flush(&self) -> anyhow::Result<()> {
if let Err(e) = self.display.borrow_mut().flush() {
if e.kind() != ::std::io::ErrorKind::WouldBlock {
bail!("Error while flushing display: {}", e);
}
}
Ok(())
}
pub(crate) fn window_by_id(&self, window_id: usize) -> Option<Rc<RefCell<WaylandWindowInner>>> {
self.windows.borrow().get(&window_id).map(Rc::clone)
}
pub(crate) fn with_window_inner<
R,
F: FnOnce(&mut WaylandWindowInner) -> anyhow::Result<R> + Send + 'static,
>(
window: usize,
f: F,
) -> promise::Future<R>
where
R: Send + 'static,
{
let mut prom = promise::Promise::new();
let future = prom.get_future().unwrap();
promise::spawn::spawn_into_main_thread(async move {
if let Some(handle) = Connection::get().unwrap().wayland().window_by_id(window) {
let mut inner = handle.borrow_mut();
prom.result(f(&mut inner));
}
})
.detach();
future
}
fn run_message_loop_impl(&self) -> anyhow::Result<()> {
const TOK_WL: usize = 0xffff_fffc;
const TOK_SPAWN: usize = 0xffff_fffd;
let tok_wl = Token(TOK_WL);
let tok_spawn = Token(TOK_SPAWN);
let mut poll = Poll::new()?;
let mut events = Events::with_capacity(8);
poll.registry().register(
&mut SourceFd(&self.display.borrow().get_connection_fd()),
tok_wl,
Interest::READABLE,
)?;
poll.registry().register(
&mut SourceFd(&SPAWN_QUEUE.raw_fd()),
tok_spawn,
Interest::READABLE,
)?;
while !*self.should_terminate.borrow() {
// Check the spawn queue before we try to sleep; there may
// be work pending and there is no guarantee of a 1:1 wakeup per queued
// function, so we need to be assertive in order to avoid missing wakeups
// in order to avoid missing wakeups
let timeout = if SPAWN_QUEUE.run() {
// if we processed one, we don't want to sleep because
// there may be others to deal with
Some(std::time::Duration::from_secs(0))
} else {
None
};
{
let mut event_q = self.event_q.borrow_mut();
if let Err(err) = event_q.dispatch_pending(&mut (), |_, _, _| {}) {
return Err(err).with_context(|| {
format!(
"error during event_q.dispatch protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
self.flush()?;
if let Err(err) = poll.poll(&mut events, timeout) {
if err.kind() == std::io::ErrorKind::Interrupted {
continue;
}
bail!("polling for events: {:?}", err);
}
for event in &events {
if event.token() == tok_wl {
let event_q = self.event_q.borrow();
if let Some(guard) = event_q.prepare_read() {
if let Err(err) = guard.read_events() {
if err.kind() != std::io::ErrorKind::WouldBlock
&& err.kind() != std::io::ErrorKind::Interrupted
{
return Err(err).with_context(|| {
format!(
"error during event_q.read_events protocol_error={:?}",
self.display.borrow().protocol_error()
)
});
}
}
}
}
}
}
Ok(())
}
pub(crate) fn advise_of_appearance_change(&self, appearance: crate::Appearance) {
for win in self.windows.borrow().values() {
win.borrow_mut().appearance_changed(appearance);
}
}
}
impl ConnectionOps for WaylandConnection {
fn name(&self) -> String {
"Wayland".to_string()
}
fn terminate_message_loop(&self) {
*self.should_terminate.borrow_mut() = true;
}
fn get_appearance(&self) -> Appearance {
match promise::spawn::block_on(crate::os::xdg_desktop_portal::get_appearance()) {
Ok(Some(appearance)) => return appearance,
| {
if let Some(&window_id) = self.keyboard_window_id.borrow().as_ref() {
if let Some(win) = self.window_by_id(window_id) {
let mut inner = win.borrow_mut();
inner.events.dispatch(event);
}
}
} | identifier_body |
substitution.rs | use std::{cell::RefCell, default::Default, fmt};
use union_find::{QuickFindUf, Union, UnionByRank, UnionFind, UnionResult};
use crate::base::{
fixed::{FixedVec, FixedVecMap},
kind::ArcKind,
symbol::Symbol,
types::{self, ArcType, Flags, FlagsVisitor, Skolem, Type, TypeContext, Walker},
};
use crate::typ::RcType;
#[derive(Debug, PartialEq, Functor)]
pub enum Error<T> {
Occurs(T, T),
}
impl<T> fmt::Display for Error<T>
where
T: fmt::Display,
T: for<'a> types::ToDoc<'a, ::pretty::Arena<'a, ()>, (), ()>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match *self {
Occurs(ref var, ref typ) => write!(f, "Variable `{}` occurs in `{}`.", var, typ),
}
}
}
pub struct Substitution<T>
where
T: Substitutable,
{
/// Union-find data structure used to store the relationships of all variables in the
/// substitution
union: RefCell<QuickFindUf<UnionByLevel>>,
/// Vector containing all created variables for this substitution. Needed for the `real` method
/// which needs to always be able to return a `&T` reference
variables: FixedVec<T>,
/// For variables which have been inferred to have a real type (not a variable), their types are
/// stored here. As the stored type will never change, using a `FixedVecMap` lets `real` return
/// `&T` from this map safely.
types: FixedVecMap<T>,
factory: T::Factory,
interner: T::Interner,
variable_cache: RefCell<Vec<T>>,
}
impl<T> TypeContext<Symbol, T> for Substitution<T>
where
T: Substitutable + From<Type<Symbol, T>>,
for<'a> &'a T::Interner: TypeContext<Symbol, T>,
{
gluon_base::forward_type_interner_methods!(Symbol, T, self_, &self_.interner);
}
impl<'a, T> TypeContext<Symbol, T> for &'a Substitution<T>
where
T: Substitutable + From<Type<Symbol, T>>,
&'a T::Interner: TypeContext<Symbol, T>,
{
gluon_base::forward_type_interner_methods!(Symbol, T, self_, &self_.interner);
}
impl<'a> types::Substitution<Symbol, RcType> for &'a Substitution<RcType> {
fn new_var(&mut self) -> RcType {
Substitution::new_var(*self)
}
fn new_skolem(&mut self, name: Symbol, kind: ArcKind) -> RcType {
Substitution::new_skolem(*self, name, kind)
}
}
impl<T> Default for Substitution<T>
where
T: Substitutable,
T::Factory: Default,
T::Interner: Default,
{
fn default() -> Substitution<T> {
Substitution::new(Default::default(), Default::default())
}
}
/// Trait which variables need to implement to allow the substitution to get to the u32 identifying
/// the variable
pub trait Variable {
fn get_id(&self) -> u32;
}
impl Variable for u32 {
fn get_id(&self) -> u32 {
*self
}
}
pub trait VariableFactory {
type Variable: Variable;
fn new(&self, x: u32) -> Self::Variable;
}
impl VariableFactory for () {
type Variable = u32;
fn new(&self, x: u32) -> Self::Variable {
x
}
}
/// Trait implemented on types which may contain substitutable variables
pub trait Substitutable: Sized {
type Variable: Variable;
type Factory: VariableFactory<Variable = Self::Variable>;
type Interner: Default;
/// Constructs a new object from its variable type
fn from_variable(subs: &Substitution<Self>, x: Self::Variable) -> Self;
fn into_variable(&mut self, x: Self::Variable);
fn is_unique(self_: &Self) -> bool;
/// Retrieves the variable if `self` is a variable otherwise returns `None`
fn get_var(&self) -> Option<&Self::Variable>;
fn get_id(&self) -> Option<u32> {
self.get_var().map(|var| var.get_id())
}
fn traverse<'a, F>(&'a self, f: &mut F)
where
F: Walker<'a, Self>;
fn instantiate(&self, subs: &Substitution<Self>) -> Self;
// Allowed to return true even if the type does not contain variables, but must not return
// false if it does contain them
fn contains_variables(&self) -> bool {
true
}
fn on_union(&self) -> Option<&Self> {
None
}
}
pub fn occurs<T>(typ: &T, subs: &Substitution<T>, var: u32) -> bool
where
T: Substitutable,
{
struct Occurs<'a, T: Substitutable + 'a> {
occurs: bool,
var: u32,
subs: &'a Substitution<T>,
}
impl<'a, 't, T> Walker<'t, T> for Occurs<'a, T>
where
T: Substitutable,
{
fn walk(&mut self, typ: &'t T) {
if !typ.contains_variables() || self.occurs {
return;
}
let typ = self.subs.real(typ);
if let Some(other) = typ.get_var() {
if self.var.get_id() == other.get_id() {
self.occurs = true;
typ.traverse(self);
return;
}
self.subs.update_level(self.var, other.get_id());
}
typ.traverse(self);
}
}
let mut occurs = Occurs {
occurs: false,
var,
subs,
};
occurs.walk(typ);
occurs.occurs
}
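// Illustrative note (added commentary): the occurs check is what rejects a
// unification such as binding `t0` to `List t0`; without it the substitution
// would contain a cycle and resolving `t0` would expand forever into an
// infinite type.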
/// Specialized union implementation which makes sure that variables with a higher level always
/// point to the lower level variable.
///
/// map.union(1, 2);
/// map.find(2) -> 1
/// map.find(1) -> 1
#[derive(Debug)]
struct UnionByLevel {
rank: UnionByRank,
level: u32,
}
impl Default for UnionByLevel {
fn default() -> UnionByLevel {
UnionByLevel {
rank: UnionByRank::default(),
level: ::std::u32::MAX,
}
}
}
impl Union for UnionByLevel {
#[inline]
fn union(left: UnionByLevel, right: UnionByLevel) -> UnionResult<UnionByLevel> {
use std::cmp::Ordering;
let (rank_result, rank) = match Union::union(left.rank, right.rank) {
UnionResult::Left(l) => (
UnionResult::Left(UnionByLevel {
rank: l,
level: left.level,
}),
l,
),
UnionResult::Right(r) => (
UnionResult::Right(UnionByLevel {
rank: r,
level: left.level,
}),
r,
),
};
match left.level.cmp(&right.level) {
Ordering::Less => UnionResult::Left(UnionByLevel {
rank: rank,
level: left.level,
}),
Ordering::Greater => UnionResult::Right(UnionByLevel {
rank: rank,
level: right.level,
}),
Ordering::Equal => rank_result,
}
}
}
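// Worked sketch (added commentary, mirroring the doc example above): if
// variable 1 has level 1 and variable 2 has level 2, `union(1, 2)` keeps the
// level-1 side as the representative, so `find(2)` resolves to 1 and both
// variables subsequently report the lower level.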
impl<T> fmt::Debug for Substitution<T>
where
T: fmt::Debug + Substitutable,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Substitution {{ map: {:?}, var_id: {:?} }}",
self.union.borrow(),
self.var_id()
)
}
}
impl<T> Substitution<T>
where
T: Substitutable,
{
pub fn new(factory: T::Factory, interner: T::Interner) -> Substitution<T> {
Substitution {
union: RefCell::new(QuickFindUf::new(0)),
variables: FixedVec::new(),
types: FixedVecMap::new(),
factory: factory,
interner,
variable_cache: Default::default(),
}
}
pub fn var_id(&self) -> u32 {
self.variables.len() as u32
}
pub fn insert(&self, var: u32, t: T) {
match t.get_var() {
Some(_) => ice!(
"Tried to insert variable which is not allowed as that would cause memory \
unsafety"
),
None => match self.types.try_insert(var as usize, t.into()) {
Ok(()) => (),
Err(_) => ice!("Expected variable to not have a type associated with it"),
},
}
}
pub fn replace(&mut self, var: u32, t: T) {
debug_assert!(t.get_id() != Some(var));
self.types.insert(var as usize, t.into());
}
pub fn reset(&mut self, var: u32) {
self.types.remove(var as usize);
}
/// Assumes that no variables are unified with anything (but variables < level may exist)
pub fn clear_from(&mut self, level: u32) {
self.union = RefCell::new(QuickFindUf::new(0));
let mut u = self.union.borrow_mut();
for _ in 0..level {
u.insert(UnionByLevel {
..UnionByLevel::default()
});
}
let mut variable_cache = self.variable_cache.borrow_mut();
// Since no types should be unified with anything we can remove all of this and reuse the
// unique values
variable_cache.extend(self.types.drain().filter(T::is_unique));
while self.variables.len() > level as usize {
variable_cache.push(self.variables.pop().unwrap());
}
}
/// Creates a new variable
pub fn new_var(&self) -> T
where
T: Clone,
{
self.new_var_fn(|var| match self.variable_cache.borrow_mut().pop() {
Some(mut typ) => {
T::into_variable(&mut typ, self.factory.new(var));
typ
}
None => T::from_variable(self, self.factory.new(var)),
})
}
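// Note (added commentary): popping from `variable_cache` recycles type nodes
// instead of allocating a fresh one per variable; `clear_from` above is what
// refills the cache from types that are no longer shared elsewhere.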
pub fn new_var_fn<F>(&self, f: F) -> T
where
T: Clone,
F: FnOnce(u32) -> T,
{
let var_id = self.variables.len() as u32;
let id = self.union.borrow_mut().insert(UnionByLevel {
level: var_id,
..UnionByLevel::default()
});
assert!(id == self.variables.len());
debug!("New var {}", self.variables.len());
let var = f(var_id);
self.variables.push(var.clone().into());
var
}
/// If `typ` is a variable this returns the real unified value of that variable. Otherwise it
/// just returns the type itself. Note that the returned type may contain terms which also need
/// to have `real` called on them.
pub fn real<'r>(&'r self, typ: &'r T) -> &'r T {
match typ.get_id() {
Some(id) => match self.find_type_for_var(id) {
Some(t) => t,
None => typ,
},
_ => typ,
}
}
pub fn | (&self, var: u32) -> Option<&T> {
self.variables.get(var as usize)
}
pub fn find_type_for_var(&self, var: u32) -> Option<&T> {
let mut union = self.union.borrow_mut();
if var as usize >= union.size() {
return None;
}
let index = union.find(var as usize);
self.types.get(index).or_else(|| {
if var == index as u32 {
None
} else {
Some(&self.variables[index as usize])
}
})
}
/// Updates the level of `other` to be the minimum level value of `var` and `other`
pub fn update_level(&self, var: u32, other: u32) {
let level = ::std::cmp::min(self.get_level(var), self.get_level(other));
let mut union = self.union.borrow_mut();
union.get_mut(other as usize).level = level;
}
pub fn set_level(&self, var: u32, level: u32) {
let mut union = self.union.borrow_mut();
union.get_mut(var as usize).level = level;
}
pub fn get_level(&self, mut var: u32) -> u32 {
if let Some(v) = self.find_type_for_var(var) {
var = v.get_var().map_or(var, |v| v.get_id());
}
let mut union = self.union.borrow_mut();
let level = &mut union.get_mut(var as usize).level;
*level = ::std::cmp::min(*level, var);
*level
}
pub fn replace_variable(&self, typ: &T) -> Option<T>
where
T: Clone,
{
match typ.get_id() {
Some(id) => self.find_type_for_var(id).cloned(),
None => None,
}
}
}
pub fn is_variable_unified(subs: &Substitution<RcType>, var: &RcType) -> bool {
match **var {
Type::Variable(ref var) => subs.find_type_for_var(var.id).is_some(),
_ => false,
}
}
impl<T: Substitutable + Clone> Substitution<T> {
pub fn make_real(&self, typ: &mut T) {
*typ = self.real(typ).clone();
}
}
impl<T: Substitutable + PartialEq + Clone> Substitution<T> {
/// Takes `id` and updates the substitution to say that it should have the same type as `typ`
pub fn union(&self, variable: &T, typ: &T) -> Result<Option<T>, Error<T>>
where
T::Variable: Clone,
T: fmt::Display,
{
assert!(variable.get_id().is_some(), "Expected a variable");
let id = variable.get_id().unwrap();
let resolved_type = typ.on_union();
let typ = resolved_type.unwrap_or(typ);
// Nothing needs to be done if both are the same variable already (also prevents the occurs
// check from failing)
if typ.get_var().map_or(false, |other| other.get_id() == id) {
return Ok(None);
}
if occurs(typ, self, id) {
return Err(Error::Occurs(variable.clone(), typ.clone()));
}
{
let id_type = self.find_type_for_var(id);
let other_type = self.real(typ);
if id_type.map_or(false, |x| x == other_type)
|| other_type.get_var().map(|y| y.get_id()) == Some(id)
{
return Ok(None);
}
}
{
let typ = resolved_type.unwrap_or(typ);
match typ.get_var().map(|v| v.get_id()) {
Some(other_id) if variable.get_var().is_some() => {
self.union
.borrow_mut()
.union(id as usize, other_id as usize);
self.update_level(id.get_id(), other_id);
self.update_level(other_id, id.get_id());
}
_ => {
if let Some(other_id) = typ.get_id() {
self.update_level(id.get_id(), other_id);
}
self.insert(id.get_id(), typ.clone());
}
}
}
Ok(resolved_type.cloned())
}
}
impl Substitution<RcType> {
pub fn new_skolem(&self, name: Symbol, kind: ArcKind) -> RcType {
self.new_var_fn(|id| {
let skolem = Skolem { name, id, kind };
match self.variable_cache.borrow_mut().pop() {
Some(mut typ) => {
RcType::set(&mut typ, Type::Skolem(skolem));
typ
}
None => (&*self).skolem(skolem),
}
})
}
pub fn zonk(&self, typ: &RcType) -> RcType {
types::walk_move_type(
typ.clone(),
&mut FlagsVisitor(Flags::HAS_VARIABLES, |typ: &RcType| match typ.get_id() {
Some(id) => match self.find_type_for_var(id) {
Some(t) => Some(self.zonk(t)),
None => None,
},
None => None,
}),
)
}
// Stub kept in case multiple types are attempted again
pub fn bind_arc(&self, typ: &RcType) -> ArcType {
typ.clone()
}
}
| get_var | identifier_name |
substitution.rs | use std::{cell::RefCell, default::Default, fmt};
use union_find::{QuickFindUf, Union, UnionByRank, UnionFind, UnionResult};
use crate::base::{
fixed::{FixedVec, FixedVecMap},
kind::ArcKind,
symbol::Symbol,
types::{self, ArcType, Flags, FlagsVisitor, Skolem, Type, TypeContext, Walker},
};
use crate::typ::RcType;
#[derive(Debug, PartialEq, Functor)]
pub enum Error<T> {
Occurs(T, T),
}
impl<T> fmt::Display for Error<T>
where
T: fmt::Display,
T: for<'a> types::ToDoc<'a, ::pretty::Arena<'a, ()>, (), ()>,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match *self {
Occurs(ref var, ref typ) => write!(f, "Variable `{}` occurs in `{}`.", var, typ),
}
}
}
pub struct Substitution<T>
where
T: Substitutable,
{
/// Union-find data structure used to store the relationships of all variables in the
/// substitution
union: RefCell<QuickFindUf<UnionByLevel>>,
/// Vector containing all created variables for this substitution. Needed for the `real` method
/// which needs to always be able to return a `&T` reference
variables: FixedVec<T>,
/// For variables which have been inferred to have a real type (not a variable), their types are
/// stored here. As the stored type will never change, using a `FixedVecMap` lets `real` return
/// `&T` from this map safely.
types: FixedVecMap<T>,
factory: T::Factory,
interner: T::Interner,
variable_cache: RefCell<Vec<T>>,
}
impl<T> TypeContext<Symbol, T> for Substitution<T>
where
T: Substitutable + From<Type<Symbol, T>>,
for<'a> &'a T::Interner: TypeContext<Symbol, T>,
{
gluon_base::forward_type_interner_methods!(Symbol, T, self_, &self_.interner);
}
impl<'a, T> TypeContext<Symbol, T> for &'a Substitution<T>
where
T: Substitutable + From<Type<Symbol, T>>,
&'a T::Interner: TypeContext<Symbol, T>,
{
gluon_base::forward_type_interner_methods!(Symbol, T, self_, &self_.interner);
}
impl<'a> types::Substitution<Symbol, RcType> for &'a Substitution<RcType> {
fn new_var(&mut self) -> RcType {
Substitution::new_var(*self)
}
fn new_skolem(&mut self, name: Symbol, kind: ArcKind) -> RcType {
Substitution::new_skolem(*self, name, kind)
}
}
impl<T> Default for Substitution<T>
where
T: Substitutable,
T::Factory: Default,
T::Interner: Default,
{
fn default() -> Substitution<T> {
Substitution::new(Default::default(), Default::default())
}
}
/// Trait which variables need to implement to allow the substitution to get to the u32 identifying
/// the variable
pub trait Variable {
fn get_id(&self) -> u32;
}
impl Variable for u32 {
fn get_id(&self) -> u32 {
*self
}
}
pub trait VariableFactory {
type Variable: Variable;
fn new(&self, x: u32) -> Self::Variable;
}
impl VariableFactory for () {
type Variable = u32;
fn new(&self, x: u32) -> Self::Variable {
x
}
}
/// Trait implemented on types which may contain substitutable variables
pub trait Substitutable: Sized {
type Variable: Variable;
type Factory: VariableFactory<Variable = Self::Variable>;
type Interner: Default;
/// Constructs a new object from its variable type
fn from_variable(subs: &Substitution<Self>, x: Self::Variable) -> Self;
fn into_variable(&mut self, x: Self::Variable);
fn is_unique(self_: &Self) -> bool;
/// Retrieves the variable if `self` is a variable otherwise returns `None`
fn get_var(&self) -> Option<&Self::Variable>;
fn get_id(&self) -> Option<u32> {
self.get_var().map(|var| var.get_id())
}
fn traverse<'a, F>(&'a self, f: &mut F)
where
F: Walker<'a, Self>;
fn instantiate(&self, subs: &Substitution<Self>) -> Self;
// Allowed to return true even if the type does not contain variables, but must not return
// false if it does contain them
fn contains_variables(&self) -> bool {
true
}
fn on_union(&self) -> Option<&Self> {
None
}
}
pub fn occurs<T>(typ: &T, subs: &Substitution<T>, var: u32) -> bool
where
T: Substitutable,
{
struct Occurs<'a, T: Substitutable + 'a> {
occurs: bool,
var: u32,
subs: &'a Substitution<T>,
}
impl<'a, 't, T> Walker<'t, T> for Occurs<'a, T>
where
T: Substitutable,
{
fn walk(&mut self, typ: &'t T) {
if !typ.contains_variables() || self.occurs {
return;
}
let typ = self.subs.real(typ);
if let Some(other) = typ.get_var() {
if self.var.get_id() == other.get_id() {
self.occurs = true;
typ.traverse(self);
return;
}
self.subs.update_level(self.var, other.get_id());
}
typ.traverse(self);
}
}
let mut occurs = Occurs {
occurs: false,
var,
subs,
};
occurs.walk(typ);
occurs.occurs
}
/// Specialized union implementation which makes sure that variables with a higher level always
/// point to the lower level variable.
///
/// map.union(1, 2);
/// map.find(2) -> 1
/// map.find(1) -> 1
#[derive(Debug)]
struct UnionByLevel {
rank: UnionByRank,
level: u32,
}
impl Default for UnionByLevel {
fn default() -> UnionByLevel {
UnionByLevel {
rank: UnionByRank::default(),
level: ::std::u32::MAX,
}
}
}
impl Union for UnionByLevel {
#[inline]
fn union(left: UnionByLevel, right: UnionByLevel) -> UnionResult<UnionByLevel> {
use std::cmp::Ordering;
let (rank_result, rank) = match Union::union(left.rank, right.rank) {
UnionResult::Left(l) => (
UnionResult::Left(UnionByLevel {
rank: l,
level: left.level,
}),
l,
),
UnionResult::Right(r) => (
UnionResult::Right(UnionByLevel {
rank: r,
level: left.level,
}),
r,
),
};
match left.level.cmp(&right.level) {
Ordering::Less => UnionResult::Left(UnionByLevel {
rank: rank,
level: left.level,
}),
Ordering::Greater => UnionResult::Right(UnionByLevel {
rank: rank,
level: right.level,
}),
Ordering::Equal => rank_result,
}
}
}
impl<T> fmt::Debug for Substitution<T>
where
T: fmt::Debug + Substitutable,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Substitution {{ map: {:?}, var_id: {:?} }}",
self.union.borrow(),
self.var_id()
)
}
}
impl<T> Substitution<T>
where
T: Substitutable,
{
pub fn new(factory: T::Factory, interner: T::Interner) -> Substitution<T> {
Substitution {
union: RefCell::new(QuickFindUf::new(0)),
variables: FixedVec::new(),
types: FixedVecMap::new(),
factory: factory,
interner,
variable_cache: Default::default(),
}
}
pub fn var_id(&self) -> u32 {
self.variables.len() as u32
}
pub fn insert(&self, var: u32, t: T) {
match t.get_var() {
Some(_) => ice!(
"Tried to insert variable which is not allowed as that would cause memory \
unsafety"
),
None => match self.types.try_insert(var as usize, t.into()) {
Ok(()) => (),
Err(_) => ice!("Expected variable to not have a type associated with it"),
},
}
}
pub fn replace(&mut self, var: u32, t: T) {
debug_assert!(t.get_id() != Some(var));
self.types.insert(var as usize, t.into());
}
pub fn reset(&mut self, var: u32) {
self.types.remove(var as usize);
}
/// Assumes that no variables are unified with anything (but variables < level may exist)
pub fn clear_from(&mut self, level: u32) {
self.union = RefCell::new(QuickFindUf::new(0));
let mut u = self.union.borrow_mut();
for _ in 0..level {
u.insert(UnionByLevel {
..UnionByLevel::default()
});
}
let mut variable_cache = self.variable_cache.borrow_mut();
// Since no types should be unified with anything we can remove all of this and reuse the
// unique values
variable_cache.extend(self.types.drain().filter(T::is_unique));
while self.variables.len() > level as usize {
variable_cache.push(self.variables.pop().unwrap());
}
}
/// Creates a new variable
pub fn new_var(&self) -> T
where
T: Clone,
{
self.new_var_fn(|var| match self.variable_cache.borrow_mut().pop() {
Some(mut typ) => {
T::into_variable(&mut typ, self.factory.new(var));
typ
}
None => T::from_variable(self, self.factory.new(var)),
})
}
pub fn new_var_fn<F>(&self, f: F) -> T
where
T: Clone,
F: FnOnce(u32) -> T,
{
let var_id = self.variables.len() as u32;
let id = self.union.borrow_mut().insert(UnionByLevel {
level: var_id,
..UnionByLevel::default()
});
assert!(id == self.variables.len());
debug!("New var {}", self.variables.len());
let var = f(var_id);
self.variables.push(var.clone().into());
var
}
/// If `typ` is a variable this returns the real unified value of that variable. Otherwise it
/// just returns the type itself. Note that the returned type may contain terms which also need
/// to have `real` called on them.
pub fn real<'r>(&'r self, typ: &'r T) -> &'r T {
match typ.get_id() {
Some(id) => match self.find_type_for_var(id) {
Some(t) => t,
None => typ,
},
_ => typ,
}
}
pub fn get_var(&self, var: u32) -> Option<&T> {
self.variables.get(var as usize)
}
pub fn find_type_for_var(&self, var: u32) -> Option<&T> {
let mut union = self.union.borrow_mut();
if var as usize >= union.size() {
return None;
}
let index = union.find(var as usize);
self.types.get(index).or_else(|| {
if var == index as u32 {
None
} else {
Some(&self.variables[index as usize])
}
})
} | let mut union = self.union.borrow_mut();
union.get_mut(other as usize).level = level;
}
pub fn set_level(&self, var: u32, level: u32) {
let mut union = self.union.borrow_mut();
union.get_mut(var as usize).level = level;
}
pub fn get_level(&self, mut var: u32) -> u32 {
if let Some(v) = self.find_type_for_var(var) {
var = v.get_var().map_or(var, |v| v.get_id());
}
let mut union = self.union.borrow_mut();
let level = &mut union.get_mut(var as usize).level;
*level = ::std::cmp::min(*level, var);
*level
}
pub fn replace_variable(&self, typ: &T) -> Option<T>
where
T: Clone,
{
match typ.get_id() {
Some(id) => self.find_type_for_var(id).cloned(),
None => None,
}
}
}
pub fn is_variable_unified(subs: &Substitution<RcType>, var: &RcType) -> bool {
match **var {
Type::Variable(ref var) => subs.find_type_for_var(var.id).is_some(),
_ => false,
}
}
impl<T: Substitutable + Clone> Substitution<T> {
pub fn make_real(&self, typ: &mut T) {
*typ = self.real(typ).clone();
}
}
impl<T: Substitutable + PartialEq + Clone> Substitution<T> {
/// Takes `id` and updates the substitution to say that it should have the same type as `typ`
pub fn union(&self, variable: &T, typ: &T) -> Result<Option<T>, Error<T>>
where
T::Variable: Clone,
T: fmt::Display,
{
assert!(variable.get_id().is_some(), "Expected a variable");
let id = variable.get_id().unwrap();
let resolved_type = typ.on_union();
let typ = resolved_type.unwrap_or(typ);
// Nothing needs to be done if both are the same variable already (also prevents the occurs
// check from failing)
if typ.get_var().map_or(false, |other| other.get_id() == id) {
return Ok(None);
}
if occurs(typ, self, id) {
return Err(Error::Occurs(variable.clone(), typ.clone()));
}
{
let id_type = self.find_type_for_var(id);
let other_type = self.real(typ);
if id_type.map_or(false, |x| x == other_type)
|| other_type.get_var().map(|y| y.get_id()) == Some(id)
{
return Ok(None);
}
}
{
let typ = resolved_type.unwrap_or(typ);
match typ.get_var().map(|v| v.get_id()) {
Some(other_id) if variable.get_var().is_some() => {
self.union
.borrow_mut()
.union(id as usize, other_id as usize);
self.update_level(id.get_id(), other_id);
self.update_level(other_id, id.get_id());
}
_ => {
if let Some(other_id) = typ.get_id() {
self.update_level(id.get_id(), other_id);
}
self.insert(id.get_id(), typ.clone());
}
}
}
Ok(resolved_type.cloned())
}
}
impl Substitution<RcType> {
pub fn new_skolem(&self, name: Symbol, kind: ArcKind) -> RcType {
self.new_var_fn(|id| {
let skolem = Skolem { name, id, kind };
match self.variable_cache.borrow_mut().pop() {
Some(mut typ) => {
RcType::set(&mut typ, Type::Skolem(skolem));
typ
}
None => (&*self).skolem(skolem),
}
})
}
pub fn zonk(&self, typ: &RcType) -> RcType {
types::walk_move_type(
typ.clone(),
&mut FlagsVisitor(Flags::HAS_VARIABLES, |typ: &RcType| match typ.get_id() {
Some(id) => match self.find_type_for_var(id) {
Some(t) => Some(self.zonk(t)),
None => None,
},
None => None,
}),
)
}
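// Note (added commentary): "zonking" is the conventional name for deeply
// replacing every solved type variable with its bound type; after `zonk`
// the returned type no longer mentions any variable that this substitution
// already has an answer for.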
// Stub kept in case multiple types are attempted again
pub fn bind_arc(&self, typ: &RcType) -> ArcType {
typ.clone()
}
} |
/// Updates the level of `other` to be the minimum level value of `var` and `other`
pub fn update_level(&self, var: u32, other: u32) {
let level = ::std::cmp::min(self.get_level(var), self.get_level(other)); | random_line_split |
day_14.rs | //! This is my solution for [Advent of Code - Day 14](https://adventofcode.com/2020/day/14) -
//! _Docking Data_
//!
//! This was themed around bitwise operations. The challenge was mostly parsing the puzzle
//! description into the bitwise operations needed. This was the first time I needed an Either
//! implementation rather than just using an enum, as I needed to be able to store the current Mask
//! in a variable that is explicitly a Mask rather than an Instruction that could be either a Mask
//! or a Mem.
use std::fs;
use regex::Regex;
use im::{HashMap, HashSet};
use either::Either;
use either::Either::*;
/// The entry point for running the solutions with the'real' puzzle input.
///
/// - The puzzle input is expected to be at `<project_root>/res/day-14-input`
/// - It is expected this will be called by [`super::main()`] when the user elects to run day 14.
pub fn run() {
let contents = fs::read_to_string("res/day-14-input").expect("Failed to read file");
let memory = run_program_v1(contents.as_str());
let sum = sum_memory(memory);
println!("The sum of memory values after running the program v1 is: {}", sum);
let memory = run_program_v2(contents.as_str());
let sum = sum_memory(memory);
println!("The sum of memory values after running the program v2 is: {}", sum);
}
/// Representing an input line that overwrites the current bitmask, see [`parse_line`].
#[derive(Debug, Eq, PartialEq)]
struct Mask { mask: usize, data: usize }
/// Represents an input line that updates the current memory values, see [`parse_line`].
#[derive(Debug, Eq, PartialEq)]
struct Mem { address: usize, value: usize }
/// Parse a line from the puzzle input into structured data
///
/// A line will be of one of the two following formats:
/// * `mask = 000000000000000000000000000000X1001X`
/// * `mem[8] = 11`
///
/// ## Masks
/// For both parts of the puzzle the mask has two uses: where the character is a `0` or `1` it
/// should be treated as raw data that will in some way override other input, and `X` will be used as
/// the mask. It is easier to store this as two bitmaps, one for the data and one for the mask, as
/// these are used separately.
///
/// ## Memory Updates
/// Whilst the two parts use the mask to modify where/what actually gets written `mem[8] = 11`
/// should be interpreted as address = 8, value = 11.
///
/// # Examples from Tests
/// ```
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111111111111,
/// data: 0b000000000000000000000000000000000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
/// );
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111110111101,
/// data: 0b000000000000000000000000000001000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
/// );
///
/// assert_eq!(
/// Right(Mem { address: 8, value: 11 }),
/// parse_line("mem[8] = 11")
/// );
/// assert_eq!(
/// Right(Mem { address: 7, value: 101 }),
/// parse_line("mem[7] = 101")
/// );
/// assert_eq!(
/// Right(Mem { address: 8, value: 0 }),
/// parse_line("mem[8] = 0")
/// );
/// ```
fn parse_line(line: &str) -> Either<Mask, Mem> {
let mut parts = line.split(" = ");
let inst = parts.next().expect("Invalid line");
let value = parts.next().expect("Invalid line");
if inst == "mask" {
let (mask, data) =
value.chars().fold(
(0usize, 0usize),
|(mask, data), char| (
mask << 1 | if char == 'X' { 1 } else { 0 },
data << 1 | if char == '1' { 1 } else { 0 }
),
);
Left(Mask { mask, data })
} else {
let re = Regex::new(r"^mem\[(\d+)]$").unwrap();
match re.captures(inst) {
Some(cap) => Right(Mem {
address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(),
value: value.parse::<usize>().unwrap(),
}),
None => panic!("Invalid line")
}
}
}
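// Worked example (added commentary): for a hypothetical 4-character mask
// string "X1X0" the fold visits characters left to right and produces
// mask = 0b1010 (a 1 wherever the character was 'X') and
// data = 0b0100 (a 1 wherever the character was '1').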
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 1 protocol
///
/// > The current bitmask is applied to values immediately before they are written to memory: a 0 or
/// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
return memory;
}
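// Worked example (added commentary): with the mask from the doc comment
// above, writing 11 (0b001011) computes 11 & mask = 0b001001 (the 0 in the
// mask string forces bit 1 low), then | data = 0b1001001 = 73 (the 1 forces
// bit 6 high), which is exactly the value the tests expect at address 8.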
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`]
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
for address in explode_addresses(¤t_mask, address) {
memory.insert(address, value);
},
}
}
return memory;
}
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally,
/// bits where the mask data is 1 should be 1 for all addresses in the final output, i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. This needs some boilerplate, as the existing
/// set can't be appended to while it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
addresses.insert((input | mask.data) & !mask.mask);
for i in 0..36 {
if (1 << i) & mask.mask!= 0 { | for &address in addresses.iter() {
new_addresses.insert(address | (1 << i));
}
for &new_address in new_addresses.iter() {
addresses.insert(new_address);
};
}
}
addresses
}
/// Sum a memory snapshot
///
/// Both puzzle parts finally sum all the memory registers into a single number as the expected
/// answer. Extracted into a function to avoid repetition.
fn sum_memory(memory: HashMap<usize, usize>) -> usize {
memory.iter().map(|(_, v)| *v).sum()
}
#[cfg(test)]
mod tests {
use day_14::{parse_line, Mask, Mem, run_program_v1, sum_memory, explode_addresses, run_program_v2};
use either::Either::*;
use im::{HashMap, HashSet};
//noinspection SpellCheckingInspection
#[test]
fn can_parse() {
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111111111111,
data: 0b000000000000000000000000000000000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
);
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111110111101,
data: 0b000000000000000000000000000001000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
);
assert_eq!(
Right(Mem { address: 8, value: 11 }),
parse_line("mem[8] = 11")
);
assert_eq!(
Right(Mem { address: 7, value: 101 }),
parse_line("mem[7] = 101")
);
assert_eq!(
Right(Mem { address: 8, value: 0 }),
parse_line("mem[8] = 0")
);
}
//noinspection SpellCheckingInspection
#[test]
fn can_run_program_v1() {
let mut expected: HashMap<usize, usize> = HashMap::new();
let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
expected.insert(8, 73);
assert_eq!(expected, run_program_v1(program_1));
let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0";
expected.insert(7, 101);
expected.insert(8, 64);
let memory = run_program_v1(program_2);
assert_eq!(expected, memory);
assert_eq!(165usize, sum_memory(memory));
}
#[test]
fn can_explode_addresses() {
let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
assert_eq!(
expected,
explode_addresses(
&Mask {
mask: 0b000000000000000000000000000000100001,
data: 0b000000000000000000000000000000010010,
},
42,
)
);
let expected: HashSet<usize> =
vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
.into_iter().collect();
assert_eq!(
expected,
explode_addresses(
&parse_line("mask = 00000000000000000000000000000000X0XX")
.expect_left("Failed to parse as mask"),
26,
)
);
}
#[test]
fn can_run_program_v2() {
let program = "mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1";
let memory = run_program_v2(program);
assert_eq!(208usize, sum_memory(memory));
}
} | let mut new_addresses = HashSet::new();
| random_line_split |
day_14.rs | //! This is my solution for [Advent of Code - Day 14](https://adventofcode.com/2020/day/14) -
//! _Docking Data_
//!
//! This was themed around bitwise operations. The challenge was mostly parsing the puzzle
//! description into the bitwise operations needed. This was the first time I needed an Either
//! implementation rather than just using an enum, as I needed to be able to store the current Mask
//! in a variable that is explicitly a Mask rather than an Instruction that could be either a Mask
//! or a Mem.
use std::fs;
use regex::Regex;
use im::{HashMap, HashSet};
use either::Either;
use either::Either::*;
/// The entry point for running the solutions with the 'real' puzzle input.
///
/// - The puzzle input is expected to be at `<project_root>/res/day-14-input`
/// - It is expected this will be called by [`super::main()`] when the user elects to run day 14.
pub fn run() {
let contents = fs::read_to_string("res/day-14-input").expect("Failed to read file");
let memory = run_program_v1(contents.as_str());
let sum = sum_memory(memory);
println!("The sum of memory values after running the program v1 is: {}", sum);
let memory = run_program_v2(contents.as_str());
let sum = sum_memory(memory);
println!("The sum of memory values after running the program v2 is: {}", sum);
}
/// Representing an input line that overwrites the current bitmask, see [`parse_line`].
#[derive(Debug, Eq, PartialEq)]
struct Mask { mask: usize, data: usize }
/// Represents an input line that updates the current memory values, see [`parse_line`].
#[derive(Debug, Eq, PartialEq)]
struct Mem { address: usize, value: usize }
/// Parse a line from the puzzle input into structured data
///
/// A line will be of one of the two following formats:
/// * `mask = 000000000000000000000000000000X1001X`
/// * `mem[8] = 11`
///
/// ## Masks
/// For both parts of the puzzle the mask has two uses: where the character is a `0` or `1` it
/// should be treated as raw data that will in some way override other input, and `X` will be used as
/// the mask. It is easier to store this as two bitmaps, one for the data and one for the mask, as
/// these are used separately.
///
/// ## Memory Updates
/// Whilst the two parts use the mask to modify where/what actually gets written `mem[8] = 11`
/// should be interpreted as address = 8, value = 11.
///
/// # Examples from Tests
/// ```
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111111111111,
/// data: 0b000000000000000000000000000000000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
/// );
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111110111101,
/// data: 0b000000000000000000000000000001000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
/// );
///
/// assert_eq!(
/// Right(Mem { address: 8, value: 11 }),
/// parse_line("mem[8] = 11")
/// );
/// assert_eq!(
/// Right(Mem { address: 7, value: 101 }),
/// parse_line("mem[7] = 101")
/// );
/// assert_eq!(
/// Right(Mem { address: 8, value: 0 }),
/// parse_line("mem[8] = 0")
/// );
/// ```
fn parse_line(line: &str) -> Either<Mask, Mem> {
let mut parts = line.split(" = ");
let inst = parts.next().expect("Invalid line");
let value = parts.next().expect("Invalid line");
if inst == "mask" {
let (mask, data) =
value.chars().fold(
(0usize, 0usize),
|(mask, data), char| (
mask << 1 | if char == 'X' { 1 } else { 0 },
data << 1 | if char == '1' { 1 } else { 0 }
),
);
Left(Mask { mask, data })
} else {
let re = Regex::new(r"^mem\[(\d+)]$").unwrap();
match re.captures(inst) {
Some(cap) => Right(Mem {
address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(),
value: value.parse::<usize>().unwrap(),
}),
None => panic!("Invalid line")
}
}
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 1 protocol
///
/// > The current bitmask is applied to values immediately before they are written to memory: a 0 or
/// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
return memory;
}
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`]
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
for address in explode_addresses(¤t_mask, address) {
memory.insert(address, value);
},
}
}
return memory;
}
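// Worked example (added commentary): with mask 000000000000000000000000000000X1001X
// applied to address 42 (0b101010), the data bits force the value to
// 0b111010 while bits 0 and 5 float, so the write lands on the four
// addresses 26, 27, 58 and 59 -- the same set shown in the
// `explode_addresses` examples below.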
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally,
/// bits where the mask data is 1 should be 1 for all addresses in the final output, i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. This needs some boilerplate, as the existing
/// set can't be appended to while it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
addresses.insert((input | mask.data) & !mask.mask);
for i in 0..36 {
if (1 << i) & mask.mask != 0 {
let mut new_addresses = HashSet::new();
for &address in addresses.iter() {
new_addresses.insert(address | (1 << i));
}
for &new_address in new_addresses.iter() {
addresses.insert(new_address);
};
}
}
addresses
}
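// Note (added commentary): the result grows by a factor of two per floating
// bit, so a mask with k 'X' characters yields 2^k addresses; the puzzle
// inputs keep k small enough for this enumeration to stay cheap.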
/// Sum a memory snapshot
///
/// Both puzzle parts finally sum all the memory registers into a single number as the expected
/// answer. Extracted into a function to avoid repetition.
fn sum_memory(memory: HashMap<usize, usize>) -> usize {
memory.iter().map(|(_, v)| *v).sum()
}
#[cfg(test)]
mod tests {
use day_14::{parse_line, Mask, Mem, run_program_v1, sum_memory, explode_addresses, run_program_v2};
use either::Either::*;
use im::{HashMap, HashSet};
//noinspection SpellCheckingInspection
#[test]
fn can_parse() {
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111111111111,
data: 0b000000000000000000000000000000000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
);
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111110111101,
data: 0b000000000000000000000000000001000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
);
assert_eq!(
Right(Mem { address: 8, value: 11 }),
parse_line("mem[8] = 11")
);
assert_eq!(
Right(Mem { address: 7, value: 101 }),
parse_line("mem[7] = 101")
);
assert_eq!(
Right(Mem { address: 8, value: 0 }),
parse_line("mem[8] = 0")
);
}
//noinspection SpellCheckingInspection
#[test]
fn can_run_program_v1() |
#[test]
fn can_explode_addresses() {
let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
assert_eq!(
expected,
explode_addresses(
&Mask {
mask: 0b000000000000000000000000000000100001,
data: 0b000000000000000000000000000000010010,
},
42,
)
);
let expected: HashSet<usize> =
vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
.into_iter().collect();
assert_eq!(
expected,
explode_addresses(
&parse_line("mask = 00000000000000000000000000000000X0XX")
.expect_left("Failed to parse as mask"),
26,
)
);
}
#[test]
fn can_run_program_v2() {
let program = "mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1";
let memory = run_program_v2(program);
assert_eq!(208usize, sum_memory(memory));
}
}
| {
let mut expected: HashMap<usize, usize> = HashMap::new();
let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
expected.insert(8, 73);
assert_eq!(expected, run_program_v1(program_1));
let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0";
expected.insert(7, 101);
expected.insert(8, 64);
let memory = run_program_v1(program_2);
assert_eq!(expected, memory);
assert_eq!(165usize, sum_memory(memory));
} | identifier_body |
day_14.rs | //! This is my solution for [Advent of Code - Day 14](https://adventofcode.com/2020/day/14) -
//! _Docking Data_
//!
//! This was themed around bitwise operations. The challenge was mostly parsing the puzzle
//! description into the bitwise operations needed. This was the first time I needed an Either
//! implementation rather than just using an enum as I needed to be able to store the current Mask
//! in a variable that is explicitly a Mask rather than an Instruction that could be either a Mask
//! or a Mem.
use std::fs;
use regex::Regex;
use im::{HashMap, HashSet};
use either::Either;
use either::Either::*;
/// The entry point for running the solutions with the 'real' puzzle input.
///
/// - The puzzle input is expected to be at `<project_root>/res/day-14-input`
/// - It is expected this will be called by [`super::main()`] when the user elects to run day 14.
pub fn run() {
let contents = fs::read_to_string("res/day-14-input").expect("Failed to read file");
let memory = run_program_v1(contents.as_str());
let sum = sum_memory(memory);
println!("The sum of memory values after running the program v1 is: {}", sum);
let memory = run_program_v2(contents.as_str());
let sum = sum_memory(memory);
println!("The sum of memory values after running the program v2 is: {}", sum);
}
/// Represents an input line that overwrites the current bitmask, see [`parse_line`].
#[derive(Debug, Eq, PartialEq)]
struct Mask { mask: usize, data: usize }
/// Represents an input line that updates the current memory values, see [`parse_line`].
#[derive(Debug, Eq, PartialEq)]
struct Mem { address: usize, value: usize }
/// Parse a line from the puzzle input into structured data
///
/// A line will be of one of the two following formats:
/// * `mask = 000000000000000000000000000000X1001X`
/// * `mem[8] = 11`
///
/// ## Masks
/// For both parts of the puzzle the mask string has two uses: where the character is a `0` or
/// `1` it should be treated as raw data that will in some way override other input, and `X`
/// marks the bits that are masked. It is easier to store this as two bitmaps, one for the data
/// and one for the mask, as these are used separately.
///
/// ## Memory Updates
/// Whilst the two parts use the mask to modify where/what actually gets written, `mem[8] = 11`
/// should be interpreted as address = 8, value = 11.
///
/// # Examples from Tests
/// ```
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111111111111,
/// data: 0b000000000000000000000000000000000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
/// );
/// assert_eq!(
/// Left(Mask {
/// mask: 0b111111111111111111111111111110111101,
/// data: 0b000000000000000000000000000001000000,
/// }),
/// parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
/// );
///
/// assert_eq!(
/// Right(Mem { address: 8, value: 11 }),
/// parse_line("mem[8] = 11")
/// );
/// assert_eq!(
/// Right(Mem { address: 7, value: 101 }),
/// parse_line("mem[7] = 101")
/// );
/// assert_eq!(
/// Right(Mem { address: 8, value: 0 }),
/// parse_line("mem[8] = 0")
/// );
/// ```
fn | (line: &str) -> Either<Mask, Mem> {
let mut parts = line.split(" = ");
let inst = parts.next().expect("Invalid line");
let value = parts.next().expect("Invalid line");
if inst == "mask" {
let (mask, data) =
value.chars().fold(
(0usize, 0usize),
|(mask, data), char| (
mask << 1 | if char == 'X' { 1 } else { 0 },
data << 1 | if char == '1' { 1 } else { 0 }
),
);
Left(Mask { mask, data })
} else {
let re = Regex::new(r"^mem\[(\d+)]$").unwrap();
match re.captures(inst) {
Some(cap) => Right(Mem {
address: cap.get(1).unwrap().as_str().parse::<usize>().unwrap(),
value: value.parse::<usize>().unwrap(),
}),
None => panic!("Invalid line")
}
}
}
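// Illustrative sketch, not part of the original solution: the fold above can be
// exercised in isolation to show how a mask string becomes the two bitmaps:
// `X` positions set bits in `mask`, `1` positions set bits in `data`.
#[test]
fn mask_fold_demo() {
    let (mask, data) = "X1001X".chars().fold(
        (0usize, 0usize),
        |(mask, data), char| (
            mask << 1 | if char == 'X' { 1 } else { 0 },
            data << 1 | if char == '1' { 1 } else { 0 },
        ),
    );
    assert_eq!(0b100001, mask);
    assert_eq!(0b010010, data);
}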
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 1 protocol
///
/// > The current bitmask is applied to values immediately before they are written to memory: a 0 or
/// > 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value
/// > unchanged.
///
/// # Example from Tests
/// ```
/// let mut expected: HashMap<usize, usize> = HashMap::new();
///
/// let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
///
/// expected.insert(8, 73);
/// assert_eq!(expected, run_program_v1(program_1));
///
/// let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
/// mem[8] = 11
/// mem[7] = 101
/// mem[8] = 0";
///
/// expected.insert(7, 101);
/// expected.insert(8, 64);
/// let memory = run_program_v1(program_2);
///
/// assert_eq!(expected, memory);
///
/// assert_eq!(165usize, sum_memory(memory));
/// ```
fn run_program_v1(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) => {
memory.insert(
address,
value & current_mask.mask | current_mask.data,
);
}
}
}
    memory
}
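// Worked example, added for clarity: with the mask
// "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X", `mask` is all ones except bits 1 and
// 6, and `data` is 0b1000000 (64). Writing 11 (0b0001011) therefore clears bit
// 1 and sets bit 6: 11 & mask | data = 0b0001001 | 0b1000000 = 73.
#[test]
fn v1_masking_arithmetic_demo() {
    let m = parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
        .expect_left("Failed to parse as mask");
    assert_eq!(73, 11 & m.mask | m.data);
}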
/// Takes the string input and returns the memory state after that has been interpreted using the
/// part 2 protocol.
///
/// > Immediately before a value is written to memory, each bit in the bitmask modifies the
/// > corresponding bit of the destination memory address in the following way:
/// > - If the bitmask bit is 0, the corresponding memory address bit is unchanged.
/// > - If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
/// > - If the bitmask bit is X, the corresponding memory address bit is floating.
/// >
/// > A floating bit is not connected to anything and instead fluctuates unpredictably. In practice,
/// > this means the floating bits will take on all possible values, potentially causing many memory
/// > addresses to be written all at once!
///
/// The set of addresses a mask will write to is given by [`explode_addresses`].
///
/// # Example from Tests
/// ```
/// let program = "mask = 000000000000000000000000000000X1001X
/// mem[42] = 100
/// mask = 00000000000000000000000000000000X0XX
/// mem[26] = 1";
///
/// let memory = run_program_v2(program);
/// assert_eq!(208usize, sum_memory(memory));
/// ```
fn run_program_v2(program: &str) -> HashMap<usize, usize> {
let mut memory = HashMap::new();
let mut current_mask = Mask { mask: 0, data: 0 };
for line in program.lines() {
match parse_line(line) {
Left(Mask { mask, data }) => current_mask = Mask { mask, data },
Right(Mem { address, value }) =>
                for address in explode_addresses(&current_mask, address) {
memory.insert(address, value);
},
}
}
    memory
}
/// Because floating bits can take on any value, this returns all the addresses that a given mask
/// applied to the input address refers to.
///
/// 1. The base address is the address where all the `X` values in the mask are `0`. Additionally
/// bits where the mask data is 1 all should be 1 for all addresses in the final output i.e.
/// `(input | mask.data) & !mask.mask`
/// 2. Iterate through the bits, and where the mask is `X` add an additional address to each of the
/// existing combinations for the address where that bit is `1` rather than `0`, so the set
/// doubles in size each time we encounter an `X`. Some boilerplate is needed because the
/// existing set can't be appended to while it's being iterated.
///
/// # Examples from Tests
/// ```
/// let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &Mask {
/// mask: 0b000000000000000000000000000000100001,
/// data: 0b000000000000000000000000000000010010,
/// },
/// 42,
/// )
/// );
///
/// let expected: HashSet<usize> =
/// vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
/// .into_iter().collect();
/// assert_eq!(
/// expected,
/// explode_addresses(
/// &parse_line("mask = 00000000000000000000000000000000X0XX")
/// .expect_left("Failed to parse as mask"),
/// 26,
/// )
/// );
/// ```
fn explode_addresses(mask: &Mask, input: usize) -> HashSet<usize> {
let mut addresses = HashSet::new();
    addresses.insert((input | mask.data) & !mask.mask);
    for i in 0..36 {
        if (1 << i) & mask.mask != 0 {
            let mut new_addresses = HashSet::new();
            for &address in addresses.iter() {
                new_addresses.insert(address | (1 << i));
            }
            for &new_address in new_addresses.iter() {
                addresses.insert(new_address);
            }
}
}
addresses
}
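// Added sketch: an equivalent fold-based formulation, not the original
// implementation, that makes the doubling explicit. Each floating bit maps
// every address to both itself and itself with that bit set; it should agree
// with `explode_addresses` on the example mask from the tests.
#[test]
fn explode_addresses_fold_agrees() {
    let m = Mask { mask: 0b100001, data: 0b010010 };
    let folded = (0..36)
        .filter(|&i| (1usize << i) & m.mask != 0)
        .fold(HashSet::unit((42 | m.data) & !m.mask), |addresses, i| {
            addresses.iter().flat_map(|&a| vec![a, a | (1usize << i)]).collect()
        });
    assert_eq!(explode_addresses(&m, 42), folded);
}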
/// Sum a memory snapshot
///
/// Both puzzle parts finally sum all the memory registers into a single number as the expected
/// answer. Extracted into a function to avoid repetition.
fn sum_memory(memory: HashMap<usize, usize>) -> usize {
memory.iter().map(|(_, v)| *v).sum()
}
#[cfg(test)]
mod tests {
use day_14::{parse_line, Mask, Mem, run_program_v1, sum_memory, explode_addresses, run_program_v2};
use either::Either::*;
use im::{HashMap, HashSet};
//noinspection SpellCheckingInspection
#[test]
fn can_parse() {
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111111111111,
data: 0b000000000000000000000000000000000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
);
assert_eq!(
Left(Mask {
mask: 0b111111111111111111111111111110111101,
data: 0b000000000000000000000000000001000000,
}),
parse_line("mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X")
);
assert_eq!(
Right(Mem { address: 8, value: 11 }),
parse_line("mem[8] = 11")
);
assert_eq!(
Right(Mem { address: 7, value: 101 }),
parse_line("mem[7] = 101")
);
assert_eq!(
Right(Mem { address: 8, value: 0 }),
parse_line("mem[8] = 0")
);
}
//noinspection SpellCheckingInspection
#[test]
fn can_run_program_v1() {
let mut expected: HashMap<usize, usize> = HashMap::new();
let program_1 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X\nmem[8] = 11";
expected.insert(8, 73);
assert_eq!(expected, run_program_v1(program_1));
let program_2 = "mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0";
expected.insert(7, 101);
expected.insert(8, 64);
let memory = run_program_v1(program_2);
assert_eq!(expected, memory);
assert_eq!(165usize, sum_memory(memory));
}
#[test]
fn can_explode_addresses() {
let expected: HashSet<usize> = vec!(26usize, 27usize, 58usize, 59usize).into_iter().collect();
assert_eq!(
expected,
explode_addresses(
&Mask {
mask: 0b000000000000000000000000000000100001,
data: 0b000000000000000000000000000000010010,
},
42,
)
);
let expected: HashSet<usize> =
vec!(16usize, 17usize, 18usize, 19usize, 24usize, 25usize, 26usize, 27usize)
.into_iter().collect();
assert_eq!(
expected,
explode_addresses(
&parse_line("mask = 00000000000000000000000000000000X0XX")
.expect_left("Failed to parse as mask"),
26,
)
);
}
#[test]
fn can_run_program_v2() {
let program = "mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1";
let memory = run_program_v2(program);
assert_eq!(208usize, sum_memory(memory));
}
}
| parse_line | identifier_name |