file_name (large_string, 4-69 chars) | prefix (large_string, 0-26.7k chars) | suffix (large_string, 0-24.8k chars) | middle (large_string, 0-2.12k chars) | fim_type (large_string, 4 classes)
---|---|---|---|---
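Each row below pairs a source file with one fill-in-the-middle (FIM) split: the file text is cut into a prefix, a masked middle, and a suffix, and fim_type records how the cut was chosen (the rows shown here use random_line_split, identifier_name, and identifier_body). As a minimal sketch of the schema, not part of any dataset tooling, one row can be modeled as:

```rust
/// Illustrative model of one dataset row. The invariant is that
/// prefix + middle + suffix reproduces the original file text exactly.
struct FimRow {
    file_name: String, // e.g. "runtime.rs"
    prefix: String,    // code before the masked span
    suffix: String,    // code after the masked span
    middle: String,    // the masked span itself (the prediction target)
    fim_type: String,  // how the span was chosen, e.g. "identifier_body"
}
```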
runtime.rs | //#![allow(dead_code)]
use std::sync::{Arc};
use std::path::{PathBuf};
use cgmath::{Vector2, Point2};
use input::{Input, Button, Key, ButtonState, ButtonArgs};
use window::{Window, WindowSettings};
use slog::{Logger};
use calcium_flowy::FlowyRenderer;
use flowy::{Ui, Element};
use flowy::style::{Style, Position, Size, SideH, SideV};
use palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl <R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
FriendlyUnit {name: name, tex: tex, selecttex: selecttex, position: position, size: size, speed: speed, selected: false, tabrel: 0.0}
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
} | );
normaltexture.push_rectangle_full_texture(
// the rectangle is centered on the unit's position
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
}
}
pub fn get_position(&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
fpso.set_text(format!("test text"));
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer.render(&map, &mut world_batches, camera_size);
// Render the player units
for unit in &mut players_units {
unit.render(&mut world_batches);
}
// Submit the world render data
//let camera = Camera::new(32.0, Point2::new(0.0, 0.0));
//Projection::Camera(camera)
render_data.render_sets.push(RenderSet::new(Projection::Pixels, world_batches));
// Render the UI
let mut ui_batches = Vec::new();
ui_renderer.render(&mut ui, &mut ui_batches, camera_size, &mut renderer)?;
render_data.render_sets.push(RenderSet::new(Projection::Pixels, ui_batches));
// Finally do the 2D rendering itself
let mut frame = renderer.start_frame();
simple2d_renderer.render(
&render_data, &mut frame, &mut simple2d_render_target, &mut renderer
);
renderer.finish_frame(frame);
window.swap_buffers();
}
Ok(())
}
} | }
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown | random_line_split |
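The row above is a random_line_split example: runtime.rs is cut at an arbitrary line boundary, and a model is trained to produce the missing span given the code on both sides. A hedged sketch of how such a row is typically assembled into a training string follows; the sentinel tokens are an assumption (names like <fim_prefix> follow the StarCoder convention, and the exact vocabulary depends on the model's tokenizer, not on this dataset):

```rust
// PSM ("prefix-suffix-middle") assembly: the model sees both context halves,
// then learns to emit the middle after the final sentinel.
fn to_psm_prompt(prefix: &str, suffix: &str, middle: &str) -> String {
    format!("<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}")
}
```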
runtime.rs | //#![allow(dead_code)]
use std::sync::{Arc};
use std::path::{PathBuf};
use cgmath::{Vector2, Point2};
use input::{Input, Button, Key, ButtonState, ButtonArgs};
use window::{Window, WindowSettings};
use slog::{Logger};
use calcium_flowy::FlowyRenderer;
use flowy::{Ui, Element};
use flowy::style::{Style, Position, Size, SideH, SideV};
use palette::pixel::{Srgb};
use rusttype::{FontCollection};
use tiled;
use calcium_game::{LoopTimer};
use calcium_rendering::{Error};
use calcium_rendering::texture::{Texture};
use calcium_rendering_2d::render_data::{RenderBatch, ShaderMode, Rectangle, Projection, RenderData, RenderSet, UvMode};
use calcium_rendering_2d::{Renderer2DTarget};
use calcium_rendering_context::{Runtime, Context};
use calcium_rendering::raw::{RendererRaw};
use model::{Map};
use view::{MapRenderer};
struct FriendlyUnit<R: RendererRaw> {
name: String,
tex: Arc<Texture<R>>,
selecttex: Arc<Texture<R>>,
position: Point2<f32>,
size: Vector2<f32>,
speed: f32,
selected: bool,
tabrel: f32,
}
impl <R: RendererRaw> FriendlyUnit<R> {
pub fn new(name: String, tex: Arc<Texture<R>>, selecttex: Arc<Texture<R>>, position: Point2<f32>, size: Vector2<f32>, speed: f32) -> FriendlyUnit<R> {
FriendlyUnit {name: name, tex: tex, selecttex: selecttex, position: position, size: size, speed: speed, selected: false, tabrel: 0.0}
}
pub fn update(&mut self, delta: f32, selected: bool, pinput: &PlayerInput) {
/* do update-y things */
self.tabrel -= delta;
if self.tabrel <= 0.0 && pinput.tab {
//println!("I am {}, Selection Status: {}.", self.name, selected);
self.tabrel = 0.1;
self.selected = selected;
}
if self.selected {
if pinput.w {self.position.y -= self.speed * delta;}
if pinput.a {self.position.x -= self.speed * delta;}
if pinput.s {self.position.y += self.speed * delta;}
if pinput.d {self.position.x += self.speed * delta;}
}
}
pub fn render(&mut self, batches: &mut Vec<RenderBatch<R>>) | }
pub fn get_position(&mut self) -> Point2<f32> {
self.position
}
pub fn get_name(&mut self) -> &String {
&self.name
}
}
struct PlayerInput {
pub w: bool,
pub a: bool,
pub s: bool,
pub d: bool,
pub tab: bool,
}
pub struct StaticRuntime {
pub log: Logger,
}
impl Runtime for StaticRuntime {
fn run<C: Context>(self, context: C) -> Result<(), Error> {
info!(self.log, "Loading program");
// Set up everything we need to render
let window_settings = WindowSettings::new("RPG Game", [1280, 720]);
let (mut renderer, mut window) =
context.renderer(Some(self.log.clone()), &window_settings)?;
let mut simple2d_renderer = context.simple2d_renderer(&mut renderer)?;
let mut simple2d_render_target = Renderer2DTarget::new(
true, &renderer, &simple2d_renderer
);
let mut ui_renderer = FlowyRenderer::new(&mut renderer)?;
let mut ui = Ui::new();
let root_id = ui.elements.root_id();
let font = FontCollection::from_bytes(
::ttf_noto_sans::REGULAR
).into_font().unwrap();
ui.fonts.push(font);
let fps = Element::new(Style {
position: Position::Relative(Point2::new(0.0, 0.0), SideH::Right, SideV::Top),
size: Size::units(120.0, 14.0),
text_color: Srgb::new(1.0, 1.0, 1.0).into(),
text_size: 14.0,
.. Style::new()
});
let fps_id = ui.elements.add_child(fps, root_id);
{
let fpso = &mut ui.elements[fps_id];
fpso.set_text(format!("test text"));
}
// Units data
let friendly_texture = Texture::new()
.from_file("./assets/friendly.png")
.with_nearest_sampling()
.build(&mut renderer)?;
let selection_texture = Texture::new()
.from_file("./assets/selection.png")
.with_nearest_sampling()
.build(&mut renderer)?;
// Set up the game map's tiles
let map_path = PathBuf::from("./assets/test_map.tmx");
let tmap = tiled::parse_file(&map_path).unwrap();
let map = Map::new(&tmap, &self.log);
let map_renderer = MapRenderer::new(&tmap, &map_path, &mut renderer)?;
let mut players_units = Vec::new();
let alfred = FriendlyUnit::new(String::from("Alfred"), friendly_texture.clone(), selection_texture.clone(), Point2::new(200.0,200.0), Vector2::new(32.0,32.0), 256.0 );
let bertil = FriendlyUnit::new(String::from("Bertil"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,300.0), Vector2::new(32.0,32.0), 256.0 );
let carl = FriendlyUnit::new(String::from("Carl"), friendly_texture.clone(), selection_texture.clone(), Point2::new(400.0,400.0), Vector2::new(32.0,32.0), 256.0 );
let dagobert = FriendlyUnit::new(String::from("Dagobert"), friendly_texture.clone(), selection_texture.clone(), Point2::new(300.0,500.0), Vector2::new(32.0,32.0), 256.0 );
players_units.push(alfred);
players_units.push(bertil);
players_units.push(carl);
players_units.push(dagobert);
let (mut selected_unit, mut tabrelease) = (3,0.1);
let (mut left_pressed, mut right_pressed, mut up_pressed, mut down_pressed, mut tab_pressed) =
(false, false, false, false, false);
// Run the actual game loop
let mut timer = LoopTimer::start();
info!(self.log, "Finished loading, starting main loop");
while !window.should_close() {
let delta = timer.tick();
// Handle input
while let Some(event) = window.poll_event() {
// Let the context handle anything needed
context.handle_event(&event, &mut renderer, &mut window);
match event {
Input::Button(ButtonArgs {state, button, scancode: _scancode}) => {
let press = state == ButtonState::Press;
match button {
Button::Keyboard(Key::A) =>
left_pressed = press,
Button::Keyboard(Key::D) =>
right_pressed = press,
Button::Keyboard(Key::W) =>
up_pressed = press,
Button::Keyboard(Key::S) =>
down_pressed = press,
Button::Keyboard(Key::Tab) =>
tab_pressed = press,
_ => {},
}
},
_ => {},
}
}
let pinput = PlayerInput {w: up_pressed, a: left_pressed, s: down_pressed, d: right_pressed, tab: tab_pressed};
{
let fpso = &mut ui.elements[fps_id];
fpso.style_mut().position = Position::Relative(players_units[selected_unit].get_position(), SideH::Left, SideV::Top);
fpso.set_text(players_units[selected_unit].get_name().clone());
}
// TODO: kill this
tabrelease -= delta;
if tabrelease <= 0.0 && tab_pressed {
if selected_unit == 3 {
selected_unit = 0;
}
else {
selected_unit+=1;
}
tabrelease = 0.1;
println!("selected unit is now {}", selected_unit);
}
// Update the player units
for (i, unit) in players_units.iter_mut().enumerate() {
unit.update(delta, i == selected_unit, &pinput);
}
// Set up the rendering data we'll need
let mut render_data = RenderData::new();
let mut world_batches = Vec::new();
let camera_size = renderer.size().cast();
// Render the tiles
map_renderer.render(&map, &mut world_batches, camera_size);
// Render the player units
for unit in &mut players_units {
unit.render(&mut world_batches);
}
// Submit the world render data
//let camera = Camera::new(32.0, Point2::new(0.0, 0.0));
//Projection::Camera(camera)
render_data.render_sets.push(RenderSet::new(Projection::Pixels, world_batches));
// Render the UI
let mut ui_batches = Vec::new();
ui_renderer.render(&mut ui, &mut ui_batches, camera_size, &mut renderer)?;
render_data.render_sets.push(RenderSet::new(Projection::Pixels, ui_batches));
// Finally do the 2D rendering itself
let mut frame = renderer.start_frame();
simple2d_renderer.render(
&render_data, &mut frame, &mut simple2d_render_target, &mut renderer
);
renderer.finish_frame(frame);
window.swap_buffers();
}
Ok(())
}
}
| {
//let mut batches = Vec::new();
let mut normaltexture = RenderBatch::new(
ShaderMode::Texture(self.tex.clone()), UvMode::YDown
);
normaltexture.push_rectangle_full_texture(
// the rectangle is centered on the unit's position
Rectangle::new(self.position + -self.size/2.0, self.position + self.size/2.0)
);
batches.push(normaltexture);
if self.selected {
let mut selectiontexture = RenderBatch::new(
ShaderMode::Texture(self.selecttex.clone()), UvMode::YDown
);
selectiontexture.push_rectangle_full_texture(
Rectangle::new(self.position + -self.size, self.position + self.size)
);
batches.push(selectiontexture);
} | identifier_body |
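This second row splits the same runtime.rs file syntax-aware: its fim_type is identifier_body, so the prefix ends right after the `pub fn render(...)` signature, the middle is the entire function body, and the suffix resumes with the code after it. Whatever the split type, each row should satisfy the same reconstruction invariant, sketched here with an illustrative helper:

```rust
// Check that the three cells of a row concatenate back to the source file.
fn row_reconstructs(prefix: &str, middle: &str, suffix: &str, original: &str) -> bool {
    let mut rebuilt = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    rebuilt.push_str(prefix);
    rebuilt.push_str(middle);
    rebuilt.push_str(suffix);
    rebuilt == original
}
```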
array.rs | /*
#![feature(collections_range)]
#![feature(drain_filter)]
#![feature(slice_rsplit)]
#![feature(slice_get_slice)]
#![feature(vec_resize_default)]
#![feature(vec_remove_item)]
#![feature(collections_range)]
#![feature(slice_rotate)]
#![feature(swap_with_slice)]
*/
use collections::range::RangeArgument;
use std::cmp::Ordering;
use std::borrow::Borrow;
use std::vec::{Drain,Splice,DrainFilter};
use std::ops::{Deref,DerefMut,Index,IndexMut};
use std::slice::{Iter,IterMut,Windows,Chunks,ChunksMut,Split,SplitMut,RSplit,RSplitMut,RSplitN,RSplitNMut,SplitN,SplitNMut,SliceIndex};
use std::marker::PhantomData; // this sucks!
//todo, how to handle 'enumerate'.
// would we have to impl 'my_enumerate' or something?
// wrapper for Vec<T> with indexing defaulting to i32
// todo, real vector impl, with smallvec stuff
pub trait IndexTrait { // TODO - would be better to use official from/into, but it doesn't let us impl
fn my_from(x:usize)->Self;
fn my_into(self)->usize;
}
impl IndexTrait for i32{
fn my_from(x:usize)->Self{x as Self}
fn my_into(self)->usize{self as usize}
}
// grrr. can't impl theirs this way round?!
//trait MyInto {
//}
//TODO - wrapper or macro to roll a 'strongly typed index'
// e.g. I32<Polygon>
/*
impl Into<usize> for i32{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u32{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for i16{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u16{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for i8{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u8{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for isize{
fn into(self)->usize{ self as usize }
}
*/
#[derive(Debug)]
pub struct Array<T,I=i32>(pub Vec<T>,PhantomData<I>);
// my array helper fn's
impl<T:Clone,I:IndexTrait+Clone> Array<T,I>{
/// TODO - better name. preserves ordering of vec![v;count].
pub fn from_val_n(val:T, n:i32)->Self{
let v=vec![val; n as usize];
Array(v,PhantomData)
}
pub fn from_fn<F:Fn(I)->T>(count:I,f:F)->Self{
let mut v=Vec::new();
v.reserve(count.clone().my_into());
for x in 0..count.my_into() {v.push(f(I::my_from(x)))}
Array(v,PhantomData)
}
pub fn map<B,F:Fn(&T)->B>(&self,f:F)->Array<B,I>{
let mut out=Array::<B,I>::new();
out.reserve(self.len());
for x in self.iter(){
out.push(f(x))
}
out
}
}
impl<T,I:IndexTrait+Clone> Array<T,I>{
pub fn num_elems(&self)->i32{ self.0.len() as i32} // TODO - figure out generic int
pub fn new()->Self{ Array(Vec::new(),PhantomData) }
pub fn reserve(&mut self, additional: I){
self.0.reserve(additional.my_into());
}
pub fn push(&mut self,val:T){self.0.push(val)}
pub fn shrink_to_fit(&mut self){self.0.shrink_to_fit()}
pub fn truncate(&mut self, len: I){
self.0.truncate(len.my_into());
}
pub fn as_slice(&self) -> &[T]{
self.0.as_slice()
}
pub fn as_mut_slice(&mut self) -> &mut [T]{
self.0.as_mut_slice()
}
pub fn swap_remove(&mut self, index: I) -> T{
self.0.swap_remove(index.my_into())
}
pub fn insert(&mut self, index: I, element: T){
self.0.insert(index.my_into(),element)
}
pub fn remove(&mut self, index: I) -> T{
self.0.remove(index.my_into())
}
// aka filter in place
pub fn retain<F:FnMut(&T)->bool>(&mut self, f: F) {
self.0.retain(f)
}
pub fn dedup_by_key<F:FnMut(&mut T)->K, K:PartialEq<K>>(&mut self, key: F) {
self.0.dedup_by_key(key)
}
pub fn dedup_by<F:FnMut(&mut T,&mut T)->bool>(&mut self, same_bucket: F) {
self.0.dedup_by(same_bucket)
}
#[cfg(nightly_vector)]
pub fn place_back(&mut self) -> PlaceBack<T>{
self.0.place_back()
}
pub fn pop(&mut self) -> Option<T>{
self.0.pop()
}
pub fn append(&mut self, other: &mut Vec<T>){
self.0.append(other)
}
#[cfg(UseRangeArgument)]
pub fn drain<R:RangeArgument<I>>(&mut self, range: R) -> Drain<T>
{
self.0.drain(range)
}
pub fn clear(&mut self){
self.0.clear()
}
// pub fn len(&self)->I{
// self.0.len() as Index
// }
// pub fn is_empty(&self)->bool{ self.0.is_empty()}
pub fn split_off(&mut self,at:I)->Array<T>{
Array(self.0.split_off(at.my_into()),PhantomData)
}
}
impl<T:Clone,I:IndexTrait> Array<T,I>{
pub fn resize(&mut self, new_len:I, value:T){
self.0.resize(new_len.my_into(),value)
}
pub fn extend_from_slice(&mut self, other:&[T]){
self.0.extend_from_slice(other)
}
}
impl<T:Default,I:IndexTrait> Array<T,I>{
pub fn resize_default(&mut self, new_len:I){
self.0.resize_default(new_len.my_into())
}
}
impl<T:PartialEq<T>,I:IndexTrait> Array<T,I>{
pub fn dedup(&mut self){
self.0.dedup()
}
pub fn remove_item(&mut self, item:&T)->Option<T>{
self.0.remove_item(item)
}
} | {
self.0.splice(range,replace_with)
}
pub fn drain_filter<F:FnMut(&mut T)->bool>(&mut self, filter: F) -> DrainFilter<T, F> {
self.0.drain_filter(filter)
}
}
impl<T,INDEX:IndexTrait> Deref for Array<T,INDEX>{
type Target=[T];
fn deref(&self)->&Self::Target { self.0.deref() }
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
fn len(&self)->INDEX{INDEX::my_from(self.0.len())}
fn is_empty(&self)->bool{self.0.is_empty()}
fn first(&self)->Option<&T>{self.0.first()}
fn first_mut(&mut self)->Option<&mut T>{self.0.first_mut()}
fn split_first(&self)->Option<(&T,&[T])>{self.0.split_first()}
fn split_first_mut(&mut self)->Option<(&mut T, &mut [T])>{ self.0.split_first_mut() }
fn split_last(&self)->Option<(&T,&[T])>{self.0.split_last()}
fn split_last_mut(&mut self)->Option<(&mut T, &mut[T])>{self.0.split_last_mut()}
fn last(&self)->Option<&T>{self.0.last()}
fn last_mut(&mut self)->Option<&mut T>{self.0.last_mut()}
fn get<I>(&self, index:I)->Option<&<I as SliceIndex<[T]> >::Output>
where I:SliceIndex<[T]>
{
self.0.get(index)
}
fn get_mut<I>(&mut self, index:I)->Option<&mut <I as SliceIndex<[T]>>::Output>
where I:SliceIndex<[T]>
{
self.0.get_mut(index)
}
unsafe fn get_unchecked<I>(&self, index: I) -> &<I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]> {self.0.get_unchecked(index)}
unsafe fn get_unchecked_mut<I>(
&mut self,
index: I
) -> &mut <I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]>{
self.0.get_unchecked_mut(index)
}
fn as_ptr(&self)->*const T{self.0.as_ptr()}
fn as_mut_ptr(&mut self)->*mut T{self.0.as_mut_ptr()}
fn swap(&mut self, a:INDEX,b:INDEX){
self.0.swap(a.my_into(),b.my_into())
}
fn reverse(&mut self){self.0.reverse()}
fn iter(&self)->Iter<T>{self.0.iter()}
fn iter_mut(&mut self)->IterMut<T>{self.0.iter_mut()}
fn windows(&self,size:INDEX)->Windows<T>{self.0.windows(size.my_into())}
fn chunks(&self,chunk_size:INDEX)->Chunks<T>{self.0.chunks(chunk_size.my_into())}
fn chunks_mut(&mut self,chunk_size:INDEX)->ChunksMut<T>{self.0.chunks_mut(chunk_size.my_into())}
fn split_at(&self, mid: INDEX) -> (&[T], &[T]){
self.0.split_at(mid.my_into())
}
fn split_at_mut(&mut self, mid: INDEX) -> (&mut [T], &mut [T]){
self.0.split_at_mut(mid.my_into())
}
fn split<F>(&self, pred: F) -> Split<T, F>
where F:FnMut(&T)->bool
{
self.0.split(pred)
}
fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.split_mut(pred)
}
fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool,
{
self.0.rsplit(pred)
}
fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.rsplit_mut(pred)
}
fn splitn<F>(&self, n: INDEX, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn(n.my_into(),pred)
}
fn splitn_mut<F>(&mut self, n: INDEX, pred: F) -> SplitNMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn_mut(n.my_into(),pred)
}
fn rsplitn<F>(&self, n: INDEX, pred: F) -> RSplitN<T, F>
where F: FnMut(&T) -> bool{
self.0.rsplitn(n.my_into(),pred)
}
fn rsplitn_mut<F>(&mut self, n: INDEX, pred: F) -> RSplitNMut<T, F>
where
F: FnMut(&T) -> bool{
self.0.rsplitn_mut(n.my_into(),pred)
}
fn contains(&self, x: &T) -> bool
where
T: PartialEq<T>{
self.0.contains(x)
}
fn starts_with(&self, needle: &[T]) -> bool
where
T: PartialEq<T>{
self.0.starts_with(needle)
}
fn ends_with(&self, needle: &[T]) -> bool
where
T: PartialEq<T>{
self.0.ends_with(needle)
}
fn binary_search(&self, a: &T) -> Result<INDEX, INDEX>
where
T: Ord{
match self.0.binary_search(a){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn binary_search_by<'a, F>(&'a self, f: F) -> Result<INDEX, INDEX>
where F: FnMut(&'a T) -> Ordering{
match self.0.binary_search_by(f){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result<INDEX, INDEX>
where
B: Ord,
F: FnMut(&'a T) -> B,
T: Ord
{
match self.0.binary_search_by_key(b,f){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn sort(&mut self) where T:Ord{
self.0.sort()
}
fn sort_by<F>(&mut self,f:F) where F:FnMut(&T,&T)->Ordering{
self.0.sort_by(f)
}
fn sort_by_key<F,B>(&mut self,f:F) where B:Ord,F:FnMut(&T)->B{
self.0.sort_by_key(f)
}
fn sort_unstable(&mut self)where T:Ord{self.0.sort_unstable()}
fn sort_unstable_by<F>(&mut self,f:F)where T:Ord,F:FnMut(&T,&T)->Ordering{self.0.sort_unstable_by(f)}
fn sort_unstable_by_key<B:Ord,F>(&mut self,f:F)where T:Ord,F:FnMut(&T)->B{self.0.sort_unstable_by_key(f)}
fn rotate(&mut self,mid:INDEX){
self.0.rotate(mid.my_into())
}
fn clone_from_slice(&mut self, src:&[T]) where T:Clone{
self.0.clone_from_slice(src)
}
fn copy_from_slice(&mut self, src:&[T]) where T:Copy{
self.0.copy_from_slice(src)
}
fn swap_with_slice(&mut self, src:&mut[T]){
self.0.swap_with_slice(src)
}
fn to_vec(&self)->Array<T> where T:Clone{
Array(self.0.to_vec(),PhantomData)
}
}
impl<T,INDEX:IndexTrait> Index<INDEX> for Array<T,INDEX>{
type Output=T;
fn index(&self,i:INDEX)->&T{
&self.0.index(i.my_into())
}
}
impl<T,INDEX:IndexTrait> IndexMut<INDEX> for Array<T,INDEX>{
fn index_mut(&mut self,i:INDEX)->&mut T{
self.0.index_mut(i.my_into())
}
}
impl<T:Clone,INDEX:IndexTrait> Clone for Array<T,INDEX>{
fn clone(&self)->Self{
Array(self.0.clone(),PhantomData)
}
fn clone_from(&mut self, other:&Self){
self.0.clone_from(&other.0);
self.1.clone_from(&other.1);
}
}
impl<T,INDEX:IndexTrait> Default for Array<T,INDEX>{
fn default()->Self{
Array(Vec::<T>::default(),PhantomData)
}
}
impl<T,INDEX:IndexTrait> Borrow<[T]> for Array<T,INDEX>{
fn borrow(&self) -> &[T]{
self.0.borrow()
}
}
impl<T,INDEX:IndexTrait> AsRef<[T]> for Array<T,INDEX>{
fn as_ref(&self)->&[T]{
self.0.as_ref()
}
}
impl<T,INDEX:IndexTrait> AsRef<Array<T,INDEX>> for Array<T,INDEX>{
fn as_ref(&self)->&Self{
self
}
} |
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
/// TODO - figure out how to convert RangeArgument indices
pub fn splice<I:IntoIterator<Item=T>,R:RangeArgument<usize>>(&mut self, range:R, replace_with:I)-> Splice<<I as IntoIterator>::IntoIter> | random_line_split |
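The array.rs source in this row leaves a TODO about rolling a 'strongly typed index' such as I32<Polygon>. Below is a minimal sketch of that idea, shaped like the file's own IndexTrait; the TypedIndex name and the marker types are hypothetical, not from the source:

```rust
use std::marker::PhantomData;

// Hypothetical strongly typed index: an i32 tagged with what it indexes,
// so a Polygon index cannot be passed where a Vertex index is expected.
struct TypedIndex<Tag>(i32, PhantomData<Tag>);

struct Polygon; // marker type only
struct Vertex;  // marker type only

impl<Tag> TypedIndex<Tag> {
    fn my_from(x: usize) -> Self { TypedIndex(x as i32, PhantomData) }
    fn my_into(self) -> usize { self.0 as usize }
}
// TypedIndex<Polygon> and TypedIndex<Vertex> are now distinct types,
// even though both are an i32 underneath.
```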
array.rs | /*
#![feature(collections_range)]
#![feature(drain_filter)]
#![feature(slice_rsplit)]
#![feature(slice_get_slice)]
#![feature(vec_resize_default)]
#![feature(vec_remove_item)]
#![feature(collections_range)]
#![feature(slice_rotate)]
#![feature(swap_with_slice)]
*/
use collections::range::RangeArgument;
use std::cmp::Ordering;
use std::borrow::Borrow;
use std::vec::{Drain,Splice,DrainFilter};
use std::ops::{Deref,DerefMut,Index,IndexMut};
use std::slice::{Iter,IterMut,Windows,Chunks,ChunksMut,Split,SplitMut,RSplit,RSplitMut,RSplitN,RSplitNMut,SplitN,SplitNMut,SliceIndex};
use std::marker::PhantomData; // this sucks!
//todo, how to handle 'enumerate'.
// would we have to impl 'my_enumerate' or something?
// wrapper for Vec<T> with indexing defaulting to i32
// todo, real vector impl, with smallvec stuff
pub trait IndexTrait { // TODO - would be better to use official from/into, but it doesn't let us impl
fn my_from(x:usize)->Self;
fn my_into(self)->usize;
}
impl IndexTrait for i32{
fn my_from(x:usize)->Self{x as Self}
fn my_into(self)->usize{self as usize}
}
// grrr. can't impl theirs this way round?!
//trait MyInto {
//}
//TODO - wrapper or macro to roll a 'strongly typed index'
// e.g. I32<Polygon>
/*
impl Into<usize> for i32{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u32{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for i16{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u16{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for i8{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u8{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for isize{
fn into(self)->usize{ self as usize }
}
*/
#[derive(Debug)]
pub struct Array<T,I=i32>(pub Vec<T>,PhantomData<I>);
// my array helper fn's
impl<T:Clone,I:IndexTrait+Clone> Array<T,I>{
/// TODO - better name. preserves ordering of vec![v;count].
pub fn from_val_n(val:T, n:i32)->Self{
let v=vec![val; n as usize];
Array(v,PhantomData)
}
pub fn from_fn<F:Fn(I)->T>(count:I,f:F)->Self{
let mut v=Vec::new();
v.reserve(count.clone().my_into());
for x in 0..count.my_into() {v.push(f(I::my_from(x)))}
Array(v,PhantomData)
}
pub fn map<B,F:Fn(&T)->B>(&self,f:F)->Array<B,I>{
let mut out=Array::<B,I>::new();
out.reserve(self.len());
for x in self.iter(){
out.push(f(x))
}
out
}
}
impl<T,I:IndexTrait+Clone> Array<T,I>{
pub fn num_elems(&self)->i32{ self.0.len() as i32} // TODO - figure out generic int
pub fn new()->Self{ Array(Vec::new(),PhantomData) }
pub fn reserve(&mut self, additional: I){
self.0.reserve(additional.my_into());
}
pub fn push(&mut self,val:T){self.0.push(val)}
pub fn shrink_to_fit(&mut self){self.0.shrink_to_fit()}
pub fn truncate(&mut self, len: I){
self.0.truncate(len.my_into());
}
pub fn as_slice(&self) -> &[T]{
self.0.as_slice()
}
pub fn as_mut_slice(&mut self) -> &mut [T]{
self.0.as_mut_slice()
}
pub fn swap_remove(&mut self, index: I) -> T{
self.0.swap_remove(index.my_into())
}
pub fn insert(&mut self, index: I, element: T){
self.0.insert(index.my_into(),element)
}
pub fn remove(&mut self, index: I) -> T{
self.0.remove(index.my_into())
}
// aka filter in place
pub fn retain<F:FnMut(&T)->bool>(&mut self, f: F) {
self.0.retain(f)
}
pub fn dedup_by_key<F:FnMut(&mut T)->K, K:PartialEq<K>>(&mut self, key: F) {
self.0.dedup_by_key(key)
}
pub fn dedup_by<F:FnMut(&mut T,&mut T)->bool>(&mut self, same_bucket: F) {
self.0.dedup_by(same_bucket)
}
#[cfg(nightly_vector)]
pub fn place_back(&mut self) -> PlaceBack<T>{
self.0.place_back()
}
pub fn pop(&mut self) -> Option<T>{
self.0.pop()
}
pub fn append(&mut self, other: &mut Vec<T>){
self.0.append(other)
}
#[cfg(UseRangeArgument)]
pub fn drain<R:RangeArgument<I>>(&mut self, range: R) -> Drain<T>
{
self.0.drain(range)
}
pub fn clear(&mut self){
self.0.clear()
}
// pub fn len(&self)->I{
// self.0.len() as Index
// }
// pub fn is_empty(&self)->bool{ self.0.is_empty()}
pub fn split_off(&mut self,at:I)->Array<T>{
Array(self.0.split_off(at.my_into()),PhantomData)
}
}
impl<T:Clone,I:IndexTrait> Array<T,I>{
pub fn resize(&mut self, new_len:I, value:T){
self.0.resize(new_len.my_into(),value)
}
pub fn extend_from_slice(&mut self, other:&[T]){
self.0.extend_from_slice(other)
}
}
impl<T:Default,I:IndexTrait> Array<T,I>{
pub fn resize_default(&mut self, new_len:I){
self.0.resize_default(new_len.my_into())
}
}
impl<T:PartialEq<T>,I:IndexTrait> Array<T,I>{
pub fn dedup(&mut self){
self.0.dedup()
}
pub fn remove_item(&mut self, item:&T)->Option<T>{
self.0.remove_item(item)
}
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
/// TODO - figure out how to convert RangeArgument indices
pub fn splice<I:IntoIterator<Item=T>,R:RangeArgument<usize>>(&mut self, range:R, replace_with:I)-> Splice<<I as IntoIterator>::IntoIter>
{
self.0.splice(range,replace_with)
}
pub fn drain_filter<F:FnMut(&mut T)->bool>(&mut self, filter: F) -> DrainFilter<T, F> {
self.0.drain_filter(filter)
}
}
impl<T,INDEX:IndexTrait> Deref for Array<T,INDEX>{
type Target=[T];
fn deref(&self)->&Self::Target { self.0.deref() }
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
fn len(&self)->INDEX{INDEX::my_from(self.0.len())}
fn is_empty(&self)->bool{self.0.is_empty()}
fn first(&self)->Option<&T>{self.0.first()}
fn first_mut(&mut self)->Option<&mut T>{self.0.first_mut()}
fn split_first(&self)->Option<(&T,&[T])>{self.0.split_first()}
fn split_first_mut(&mut self)->Option<(&mut T, &mut [T])>{ self.0.split_first_mut() }
fn split_last(&self)->Option<(&T,&[T])>{self.0.split_last()}
fn split_last_mut(&mut self)->Option<(&mut T, &mut[T])>{self.0.split_last_mut()}
fn last(&self)->Option<&T>{self.0.last()}
fn last_mut(&mut self)->Option<&mut T>{self.0.last_mut()}
fn get<I>(&self, index:I)->Option<&<I as SliceIndex<[T]> >::Output>
where I:SliceIndex<[T]>
{
self.0.get(index)
}
fn get_mut<I>(&mut self, index:I)->Option<&mut <I as SliceIndex<[T]>>::Output>
where I:SliceIndex<[T]>
{
self.0.get_mut(index)
}
unsafe fn get_unchecked<I>(&self, index: I) -> &<I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]> {self.0.get_unchecked(index)}
unsafe fn get_unchecked_mut<I>(
&mut self,
index: I
) -> &mut <I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]>{
self.0.get_unchecked_mut(index)
}
fn | (&self)->*const T{self.0.as_ptr()}
fn as_mut_ptr(&mut self)->*mut T{self.0.as_mut_ptr()}
fn swap(&mut self, a:INDEX,b:INDEX){
self.0.swap(a.my_into(),b.my_into())
}
fn reverse(&mut self){self.0.reverse()}
fn iter(&self)->Iter<T>{self.0.iter()}
fn iter_mut(&mut self)->IterMut<T>{self.0.iter_mut()}
fn windows(&self,size:INDEX)->Windows<T>{self.0.windows(size.my_into())}
fn chunks(&self,chunk_size:INDEX)->Chunks<T>{self.0.chunks(chunk_size.my_into())}
fn chunks_mut(&mut self,chunk_size:INDEX)->ChunksMut<T>{self.0.chunks_mut(chunk_size.my_into())}
fn split_at(&self, mid: INDEX) -> (&[T], &[T]){
self.0.split_at(mid.my_into())
}
fn split_at_mut(&mut self, mid: INDEX) -> (&mut [T], &mut [T]){
self.0.split_at_mut(mid.my_into())
}
fn split<F>(&self, pred: F) -> Split<T, F>
where F:FnMut(&T)->bool
{
self.0.split(pred)
}
fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.split_mut(pred)
}
fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool,
{
self.0.rsplit(pred)
}
fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.rsplit_mut(pred)
}
fn splitn<F>(&self, n: INDEX, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn(n.my_into(),pred)
}
fn splitn_mut<F>(&mut self, n: INDEX, pred: F) -> SplitNMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn_mut(n.my_into(),pred)
}
fn rsplitn<F>(&self, n: INDEX, pred: F) -> RSplitN<T, F>
where F: FnMut(&T) -> bool{
self.0.rsplitn(n.my_into(),pred)
}
fn rsplitn_mut<F>(&mut self, n: INDEX, pred: F) -> RSplitNMut<T, F>
where
F: FnMut(&T) -> bool{
self.0.rsplitn_mut(n.my_into(),pred)
}
fn contains(&self, x: &T) -> bool
where
T: PartialEq<T>{
self.0.contains(x)
}
fn starts_with(&self, needle: &[T]) -> bool
where
T: PartialEq<T>{
self.0.starts_with(needle)
}
fn ends_with(&self, needle: &[T]) -> bool
where
T: PartialEq<T>{
self.0.ends_with(needle)
}
fn binary_search(&self, a: &T) -> Result<INDEX, INDEX>
where
T: Ord{
match self.0.binary_search(a){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn binary_search_by<'a, F>(&'a self, f: F) -> Result<INDEX, INDEX>
where F: FnMut(&'a T) -> Ordering{
match self.0.binary_search_by(f){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result<INDEX, INDEX>
where
B: Ord,
F: FnMut(&'a T) -> B,
T: Ord
{
match self.0.binary_search_by_key(b,f){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn sort(&mut self) where T:Ord{
self.0.sort()
}
fn sort_by<F>(&mut self,f:F) where F:FnMut(&T,&T)->Ordering{
self.0.sort_by(f)
}
fn sort_by_key<F,B>(&mut self,f:F) where B:Ord,F:FnMut(&T)->B{
self.0.sort_by_key(f)
}
fn sort_unstable(&mut self)where T:Ord{self.0.sort_unstable()}
fn sort_unstable_by<F>(&mut self,f:F)where T:Ord,F:FnMut(&T,&T)->Ordering{self.0.sort_unstable_by(f)}
fn sort_unstable_by_key<B:Ord,F>(&mut self,f:F)where T:Ord,F:FnMut(&T)->B{self.0.sort_unstable_by_key(f)}
fn rotate(&mut self,mid:INDEX){
self.0.rotate(mid.my_into())
}
fn clone_from_slice(&mut self, src:&[T]) where T:Clone{
self.0.clone_from_slice(src)
}
fn copy_from_slice(&mut self, src:&[T]) where T:Copy{
self.0.copy_from_slice(src)
}
fn swap_with_slice(&mut self, src:&mut[T]){
self.0.swap_with_slice(src)
}
fn to_vec(&self)->Array<T> where T:Clone{
Array(self.0.to_vec(),PhantomData)
}
}
impl<T,INDEX:IndexTrait> Index<INDEX> for Array<T,INDEX>{
type Output=T;
fn index(&self,i:INDEX)->&T{
&self.0.index(i.my_into())
}
}
impl<T,INDEX:IndexTrait> IndexMut<INDEX> for Array<T,INDEX>{
fn index_mut(&mut self,i:INDEX)->&mut T{
self.0.index_mut(i.my_into())
}
}
impl<T:Clone,INDEX:IndexTrait> Clone for Array<T,INDEX>{
fn clone(&self)->Self{
Array(self.0.clone(),PhantomData)
}
fn clone_from(&mut self, other:&Self){
self.0.clone_from(&other.0);
self.1.clone_from(&other.1);
}
}
impl<T,INDEX:IndexTrait> Default for Array<T,INDEX>{
fn default()->Self{
Array(Vec::<T>::default(),PhantomData)
}
}
impl<T,INDEX:IndexTrait> Borrow<[T]> for Array<T,INDEX>{
fn borrow(&self) -> &[T]{
self.0.borrow()
}
}
impl<T,INDEX:IndexTrait> AsRef<[T]> for Array<T,INDEX>{
fn as_ref(&self)->&[T]{
self.0.as_ref()
}
}
impl<T,INDEX:IndexTrait> AsRef<Array<T,INDEX>> for Array<T,INDEX>{
fn as_ref(&self)->&Self{
self
}
}
| as_ptr | identifier_name |
array.rs | /*
#![feature(collections_range)]
#![feature(drain_filter)]
#![feature(slice_rsplit)]
#![feature(slice_get_slice)]
#![feature(vec_resize_default)]
#![feature(vec_remove_item)]
#![feature(collections_range)]
#![feature(slice_rotate)]
#![feature(swap_with_slice)]
*/
use collections::range::RangeArgument;
use std::cmp::Ordering;
use std::borrow::Borrow;
use std::vec::{Drain,Splice,DrainFilter};
use std::ops::{Deref,DerefMut,Index,IndexMut};
use std::slice::{Iter,IterMut,Windows,Chunks,ChunksMut,Split,SplitMut,RSplit,RSplitMut,RSplitN,RSplitNMut,SplitN,SplitNMut,SliceIndex};
use std::marker::PhantomData; // this sucks!
//todo, how to handle 'enumerate'.
// would we have to impl 'my_enumerate' or something?
// wrapper for Vec<T> with indexing defaulting to i32
// todo, real vector impl, with smallvec stuff
pub trait IndexTrait { // TODO - would be better to use official from/into, but it doesn't let us impl
fn my_from(x:usize)->Self;
fn my_into(self)->usize;
}
impl IndexTrait for i32{
fn my_from(x:usize)->Self{x as Self}
fn my_into(self)->usize{self as usize}
}
// grrr. can't impl theirs this way round?!
//trait MyInto {
//}
//TODO - wrapper or macro to roll a 'strongly typed index'
// e.g. I32<Polygon>
/*
impl Into<usize> for i32{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u32{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for i16{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u16{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for i8{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for u8{
fn into(self)->usize{ self as usize }
}
impl Into<usize> for isize{
fn into(self)->usize{ self as usize }
}
*/
#[derive(Debug)]
pub struct Array<T,I=i32>(pub Vec<T>,PhantomData<I>);
// my array helper fn's
impl<T:Clone,I:IndexTrait+Clone> Array<T,I>{
/// TODO - better name. preserves ordering of vec![v;count].
pub fn from_val_n(val:T, n:i32)->Self{
let v=vec![val; n as usize];
Array(v,PhantomData)
}
pub fn from_fn<F:Fn(I)->T>(count:I,f:F)->Self{
let mut v=Vec::new();
v.reserve(count.clone().my_into());
for x in 0..count.my_into() {v.push(f(I::my_from(x)))}
Array(v,PhantomData)
}
pub fn map<B,F:Fn(&T)->B>(&self,f:F)->Array<B,I>{
let mut out=Array::<B,I>::new();
out.reserve(self.len());
for x in self.iter(){
out.push(f(x))
}
out
}
}
impl<T,I:IndexTrait+Clone> Array<T,I>{
pub fn num_elems(&self)->i32{ self.0.len() as i32} // TODO - figure out generic int
pub fn new()->Self{ Array(Vec::new(),PhantomData) }
pub fn reserve(&mut self, additional: I){
self.0.reserve(additional.my_into());
}
pub fn push(&mut self,val:T){self.0.push(val)}
pub fn shrink_to_fit(&mut self){self.0.shrink_to_fit()}
pub fn truncate(&mut self, len: I) |
pub fn as_slice(&self) -> &[T]{
self.0.as_slice()
}
pub fn as_mut_slice(&mut self) -> &mut [T]{
self.0.as_mut_slice()
}
pub fn swap_remove(&mut self, index: I) -> T{
self.0.swap_remove(index.my_into())
}
pub fn insert(&mut self, index: I, element: T){
self.0.insert(index.my_into(),element)
}
pub fn remove(&mut self, index: I) -> T{
self.0.remove(index.my_into())
}
// aka filter in place
pub fn retain<F:FnMut(&T)->bool>(&mut self, f: F) {
self.0.retain(f)
}
pub fn dedup_by_key<F:FnMut(&mut T)->K, K:PartialEq<K>>(&mut self, key: F) {
self.0.dedup_by_key(key)
}
pub fn dedup_by<F:FnMut(&mut T,&mut T)->bool>(&mut self, same_bucket: F) {
self.0.dedup_by(same_bucket)
}
#[cfg(nightly_vector)]
pub fn place_back(&mut self) -> PlaceBack<T>{
self.0.place_back()
}
pub fn pop(&mut self) -> Option<T>{
self.0.pop()
}
pub fn append(&mut self, other: &mut Vec<T>){
self.0.append(other)
}
#[cfg(UseRangeArgument)]
pub fn drain<R:RangeArgument<I>>(&mut self, range: R) -> Drain<T>
{
self.0.drain(range)
}
pub fn clear(&mut self){
self.0.clear()
}
// pub fn len(&self)->I{
// self.0.len() as Index
// }
// pub fn is_empty(&self)->bool{ self.0.is_empty()}
pub fn split_off(&mut self,at:I)->Array<T>{
Array(self.0.split_off(at.my_into()),PhantomData)
}
}
impl<T:Clone,I:IndexTrait> Array<T,I>{
pub fn resize(&mut self, new_len:I, value:T){
self.0.resize(new_len.my_into(),value)
}
pub fn extend_from_slice(&mut self, other:&[T]){
self.0.extend_from_slice(other)
}
}
impl<T:Default,I:IndexTrait> Array<T,I>{
pub fn resize_default(&mut self, new_len:I){
self.0.resize_default(new_len.my_into())
}
}
impl<T:PartialEq<T>,I:IndexTrait> Array<T,I>{
pub fn dedup(&mut self){
self.0.dedup()
}
pub fn remove_item(&mut self, item:&T)->Option<T>{
self.0.remove_item(item)
}
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
/// TODO - figure out how to convert RangeArgument indices
pub fn splice<I:IntoIterator<Item=T>,R:RangeArgument<usize>>(&mut self, range:R, replace_with:I)-> Splice<<I as IntoIterator>::IntoIter>
{
self.0.splice(range,replace_with)
}
pub fn drain_filter<F:FnMut(&mut T)->bool>(&mut self, filter: F) -> DrainFilter<T, F> {
self.0.drain_filter(filter)
}
}
impl<T,INDEX:IndexTrait> Deref for Array<T,INDEX>{
type Target=[T];
fn deref(&self)->&Self::Target { self.0.deref() }
}
impl<T,INDEX:IndexTrait> Array<T,INDEX>{
fn len(&self)->INDEX{INDEX::my_from(self.0.len())}
fn is_empty(&self)->bool{self.0.is_empty()}
fn first(&self)->Option<&T>{self.0.first()}
fn first_mut(&mut self)->Option<&mut T>{self.0.first_mut()}
fn split_first(&self)->Option<(&T,&[T])>{self.0.split_first()}
fn split_first_mut(&mut self)->Option<(&mut T, &mut [T])>{ self.0.split_first_mut() }
fn split_last(&self)->Option<(&T,&[T])>{self.0.split_last()}
fn split_last_mut(&mut self)->Option<(&mut T, &mut[T])>{self.0.split_last_mut()}
fn last(&self)->Option<&T>{self.0.last()}
fn last_mut(&mut self)->Option<&mut T>{self.0.last_mut()}
fn get<I>(&self, index:I)->Option<&<I as SliceIndex<[T]> >::Output>
where I:SliceIndex<[T]>
{
self.0.get(index)
}
fn get_mut<I>(&mut self, index:I)->Option<&mut <I as SliceIndex<[T]>>::Output>
where I:SliceIndex<[T]>
{
self.0.get_mut(index)
}
unsafe fn get_unchecked<I>(&self, index: I) -> &<I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]> {self.0.get_unchecked(index)}
unsafe fn get_unchecked_mut<I>(
&mut self,
index: I
) -> &mut <I as SliceIndex<[T]>>::Output
where
I: SliceIndex<[T]>{
self.0.get_unchecked_mut(index)
}
fn as_ptr(&self)->*const T{self.0.as_ptr()}
fn as_mut_ptr(&mut self)->*mut T{self.0.as_mut_ptr()}
fn swap(&mut self, a:INDEX,b:INDEX){
self.0.swap(a.my_into(),b.my_into())
}
fn reverse(&mut self){self.0.reverse()}
fn iter(&self)->Iter<T>{self.0.iter()}
fn iter_mut(&mut self)->IterMut<T>{self.0.iter_mut()}
fn windows(&self,size:INDEX)->Windows<T>{self.0.windows(size.my_into())}
fn chunks(&self,chunk_size:INDEX)->Chunks<T>{self.0.chunks(chunk_size.my_into())}
fn chunks_mut(&mut self,chunk_size:INDEX)->ChunksMut<T>{self.0.chunks_mut(chunk_size.my_into())}
fn split_at(&self, mid: INDEX) -> (&[T], &[T]){
self.0.split_at(mid.my_into())
}
fn split_at_mut(&mut self, mid: INDEX) -> (&mut [T], &mut [T]){
self.0.split_at_mut(mid.my_into())
}
fn split<F>(&self, pred: F) -> Split<T, F>
where F:FnMut(&T)->bool
{
self.0.split(pred)
}
fn split_mut<F>(&mut self, pred: F) -> SplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.split_mut(pred)
}
fn rsplit<F>(&self, pred: F) -> RSplit<T, F>
where F: FnMut(&T) -> bool,
{
self.0.rsplit(pred)
}
fn rsplit_mut<F>(&mut self, pred: F) -> RSplitMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.rsplit_mut(pred)
}
fn splitn<F>(&self, n: INDEX, pred: F) -> SplitN<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn(n.my_into(),pred)
}
fn splitn_mut<F>(&mut self, n: INDEX, pred: F) -> SplitNMut<T, F>
where F: FnMut(&T) -> bool
{
self.0.splitn_mut(n.my_into(),pred)
}
fn rsplitn<F>(&self, n: INDEX, pred: F) -> RSplitN<T, F>
where F: FnMut(&T) -> bool{
self.0.rsplitn(n.my_into(),pred)
}
fn rsplitn_mut<F>(&mut self, n: INDEX, pred: F) -> RSplitNMut<T, F>
where
F: FnMut(&T) -> bool{
self.0.rsplitn_mut(n.my_into(),pred)
}
fn contains(&self, x: &T) -> bool
where
T: PartialEq<T>{
self.0.contains(x)
}
fn starts_with(&self, needle: &[T]) -> bool
where
T: PartialEq<T>{
self.0.starts_with(needle)
}
fn ends_with(&self, needle: &[T]) -> bool
where
T: PartialEq<T>{
self.0.ends_with(needle)
}
fn binary_search(&self, a: &T) -> Result<INDEX, INDEX>
where
T: Ord{
match self.0.binary_search(a){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn binary_search_by<'a, F>(&'a self, f: F) -> Result<INDEX, INDEX>
where F: FnMut(&'a T) -> Ordering{
match self.0.binary_search_by(f){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result<INDEX, INDEX>
where
B: Ord,
F: FnMut(&'a T) -> B,
T: Ord
{
match self.0.binary_search_by_key(b,f){
Ok(x)=>Ok(INDEX::my_from(x)),
Err(x)=>Err(INDEX::my_from(x))
}
}
fn sort(&mut self) where T:Ord{
self.0.sort()
}
fn sort_by<F>(&mut self,f:F) where F:FnMut(&T,&T)->Ordering{
self.0.sort_by(f)
}
fn sort_by_key<F,B>(&mut self,f:F) where B:Ord,F:FnMut(&T)->B{
self.0.sort_by_key(f)
}
fn sort_unstable(&mut self)where T:Ord{self.0.sort_unstable()}
fn sort_unstable_by<F>(&mut self,f:F)where T:Ord,F:FnMut(&T,&T)->Ordering{self.0.sort_unstable_by(f)}
fn sort_unstable_by_key<B:Ord,F>(&mut self,f:F)where T:Ord,F:FnMut(&T)->B{self.0.sort_unstable_by_key(f)}
fn rotate(&mut self,mid:INDEX){
self.0.rotate(mid.my_into())
}
fn clone_from_slice(&mut self, src:&[T]) where T:Clone{
self.0.clone_from_slice(src)
}
fn copy_from_slice(&mut self, src:&[T]) where T:Copy{
self.0.copy_from_slice(src)
}
fn swap_with_slice(&mut self, src:&mut[T]){
self.0.swap_with_slice(src)
}
fn to_vec(&self)->Array<T> where T:Clone{
Array(self.0.to_vec(),PhantomData)
}
}
impl<T,INDEX:IndexTrait> Index<INDEX> for Array<T,INDEX>{
type Output=T;
fn index(&self,i:INDEX)->&T{
&self.0.index(i.my_into())
}
}
impl<T,INDEX:IndexTrait> IndexMut<INDEX> for Array<T,INDEX>{
fn index_mut(&mut self,i:INDEX)->&mut T{
self.0.index_mut(i.my_into())
}
}
impl<T:Clone,INDEX:IndexTrait> Clone for Array<T,INDEX>{
fn clone(&self)->Self{
Array(self.0.clone(),PhantomData)
}
fn clone_from(&mut self, other:&Self){
self.0.clone_from(&other.0);
self.1.clone_from(&other.1);
}
}
impl<T,INDEX:IndexTrait> Default for Array<T,INDEX>{
fn default()->Self{
Array(Vec::<T>::default(),PhantomData)
}
}
impl<T,INDEX:IndexTrait> Borrow<[T]> for Array<T,INDEX>{
fn borrow(&self) -> &[T]{
self.0.borrow()
}
}
impl<T,INDEX:IndexTrait> AsRef<[T]> for Array<T,INDEX>{
fn as_ref(&self)->&[T]{
self.0.as_ref()
}
}
impl<T,INDEX:IndexTrait> AsRef<Array<T,INDEX>> for Array<T,INDEX>{
fn as_ref(&self)->&Self{
self
}
}
| {
self.0.truncate(len.my_into());
} | identifier_body |
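Rows three through five all slice this same array.rs file; the one just above masks the body of truncate. For context, here is a short usage sketch of the Array<T, I = i32> wrapper the file defines, assuming the nightly toolchain its feature gates require (the call site is hypothetical):

```rust
// Hypothetical call site: indexing and lengths use plain i32,
// converted through IndexTrait, instead of usize.
fn demo() {
    let squares = Array::from_fn(5, |i: i32| i * i); // [0, 1, 4, 9, 16]
    let doubled = squares.map(|&x| x * 2);           // [0, 2, 8, 18, 32]
    assert_eq!(doubled[2], 8);                       // via the Index<i32> impl

    let mut v = Array::from_val_n(0u8, 10);
    v.truncate(4); // i32 length, converted by IndexTrait::my_into
    assert_eq!(v.num_elems(), 4);
}
```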
codemap.rs | use std::cell::RefCell;
use std::cmp;
use std::env;
use std::{fmt, fs};
use std::io::{self, Read};
use std::ops::{Add, Sub};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use ast::Name;
pub trait Pos {
fn from_usize(n: usize) -> Self;
fn to_usize(&self) -> usize;
}
/// A byte offset. Keep this small (currently 32-bits), as AST contains
/// a lot of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub struct BytePos(pub u32);
/// A character offset. Because of multibyte utf8 characters, a byte offset
/// is not equivalent to a character offset. The CodeMap will convert BytePos
/// values to CharPos values as necessary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Debug)]
pub struct CharPos(pub usize);
impl Pos for BytePos {
fn from_usize(n: usize) -> BytePos { BytePos(n as u32) }
fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize }
}
impl Add for BytePos {
type Output = BytePos;
fn add(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() + rhs.to_usize()) as u32)
}
}
impl Sub for BytePos {
type Output = BytePos;
fn sub(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() - rhs.to_usize()) as u32)
}
}
impl Pos for CharPos {
fn from_usize(n: usize) -> CharPos { CharPos(n) }
fn to_usize(&self) -> usize { let CharPos(n) = *self; n }
}
impl Add for CharPos {
type Output = CharPos;
fn add(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() + rhs.to_usize())
}
}
impl Sub for CharPos {
type Output = CharPos;
fn sub(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() - rhs.to_usize())
}
}
/// Spans represent a region of code, used for error reporting. Positions in spans
/// are *absolute* positions from the beginning of the codemap.
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
pub struct Span {
pub lo: BytePos,
pub hi: BytePos
}
pub const DUMMY_SPAN: Span = Span { lo: BytePos(0), hi: BytePos(0) };
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_span(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
/* assuming that we're not in macro expansion */
pub fn mk_span(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi}
}
impl Span {
/// Returns a new span representing just the end-point of this span
pub fn end_point(self) -> Span {
let lo = cmp::max(self.hi.0 - 1, self.lo.0);
Span { lo: BytePos(lo), hi: self.hi }
}
/// Returns `self` if `self` is not the dummy span, and `other` otherwise.
pub fn substitute_dummy(self, other: Span) -> Span {
if self.source_equal(&DUMMY_SPAN) { other } else { self }
}
pub fn contains(self, other: Span) -> bool {
self.lo <= other.lo && other.hi <= self.hi
}
/// Return true if the spans are equal with regards to the source text.
///
/// Use this instead of `==` when either span could be generated code,
/// and you only care that they point to the same bytes of source text.
pub fn source_equal(&self, other: &Span) -> bool {
self.lo == other.lo && self.hi == other.hi
}
/// Returns `Some(span)`, a union of `self` and `other`, on overlap.
pub fn merge(self, other: Span) -> Option<Span> {
if (self.lo <= other.lo && self.hi > other.lo) ||
(self.lo >= other.lo && self.lo < other.hi) {
Some(Span {
lo: cmp::min(self.lo, other.lo),
hi: cmp::max(self.hi, other.hi),
})
} else {
None
}
}
/// Returns `Some(span)`, where the start is trimmed by the end of `other`
pub fn trim_start(self, other: Span) -> Option<Span> {
if self.hi > other.hi {
Some(Span { lo: cmp::max(self.lo, other.hi), ..self })
} else {
None
}
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
/// A collection of spans. Spans have two orthogonal attributes:
///
/// - they can be *primary spans*. In this case they are the locus of
/// the error, and would be rendered with `^^^`.
/// - they can have a *label*. In this case, the label is written next
/// to the mark in the snippet when we render.
#[derive(Clone)]
pub struct MultiSpan {
primary_spans: Vec<Span>,
span_labels: Vec<(Span, String)>,
}
#[derive(Clone, Debug)]
pub struct SpanLabel {
/// The span we are going to include in the final snippet.
pub span: Span,
/// Is this a primary span? This is the "locus" of the message,
/// and is indicated with a `^^^^` underline, versus `----`.
pub is_primary: bool,
/// What label should we attach to this span (if any)?
pub label: Option<String>,
}
impl MultiSpan {
pub fn new() -> MultiSpan {
MultiSpan {
primary_spans: vec![],
span_labels: vec![]
}
}
pub fn from_span(primary_span: Span) -> MultiSpan {
MultiSpan {
primary_spans: vec![primary_span],
span_labels: vec![]
}
}
pub fn from_spans(vec: Vec<Span>) -> MultiSpan {
MultiSpan {
primary_spans: vec,
span_labels: vec![]
}
}
pub fn push_span_label(&mut self, span: Span, label: String) {
self.span_labels.push((span, label));
}
/// Selects the first primary span (if any)
pub fn primary_span(&self) -> Option<Span> {
self.primary_spans.first().cloned()
}
/// Returns all primary spans.
pub fn primary_spans(&self) -> &[Span] {
&self.primary_spans
}
/// Returns the strings to highlight. We always ensure that there
/// is an entry for each of the primary spans -- for each primary
/// span P, if there is at least one label with span P, we return
/// those labels (marked as primary). But otherwise we return
/// `SpanLabel` instances with empty labels.
pub fn span_labels(&self) -> Vec<SpanLabel> {
let is_primary = |span| self.primary_spans.contains(&span);
let mut span_labels = vec![];
for &(span, ref label) in &self.span_labels {
span_labels.push(SpanLabel {
span: span,
is_primary: is_primary(span),
label: Some(label.clone())
});
}
for &span in &self.primary_spans {
if !span_labels.iter().any(|sl| sl.span == span) {
span_labels.push(SpanLabel {
span: span,
is_primary: true,
label: None
});
}
}
span_labels
}
}
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
MultiSpan::from_span(span)
}
}
// _____________________________________________________________________________
// Loc, LocWithOpt, FileMapAndLine, FileMapAndBytePos
//
/// A source code location used for error reporting
#[derive(Debug)]
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
#[derive(Debug)]
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
#[derive(Debug)]
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
// _____________________________________________________________________________
// ExpnFormat, NameAndSpan, ExpnInfo, ExpnId
//
/// The source of expansion.
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum ExpnFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute(Name),
/// e.g. `format!()`
MacroBang(Name),
}
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
pub format: ExpnFormat,
/// Whether the macro is allowed to use #[unstable]/feature-gated
/// features internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: bool,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
impl NameAndSpan {
pub fn name(&self) -> Name {
match self.format {
ExpnFormat::MacroAttribute(s) => s,
ExpnFormat::MacroBang(s) => s,
}
}
}
/// Extra information for tracking spans of macro and syntax sugar expansion
#[derive(Hash, Debug)]
pub struct ExpnInfo {
/// The location of the actual macro invocation or syntax sugar, e.g.
/// `let x = foo!();` or `if let Some(y) = x {}`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the expansion.
pub callee: NameAndSpan
}
// _____________________________________________________________________________
// FileMap, MultiByteChar, FileName, FileLines
//
pub type FileName = String;
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The absolute path of the file that the source came from.
pub abs_path: Option<FileName>,
/// The complete source code
pub src: Option<Rc<String>>,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// The end position of this source in the CodeMap
pub end_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos>>,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar>>,
}
impl fmt::Debug for FileMap {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "FileMap({})", self.name)
}
}
/// An abstraction over the fs operations used by the Parser.
pub trait FileLoader {
/// Query the existence of a file.
fn file_exists(&self, path: &Path) -> bool;
/// Return an absolute path to a file, if possible.
fn abs_path(&self, path: &Path) -> Option<PathBuf>;
/// Read the contents of an UTF-8 file into memory.
fn read_file(&self, path: &Path) -> io::Result<String>;
}
/// A FileLoader that uses std::fs to load real files.
pub struct RealFileLoader;
impl FileLoader for RealFileLoader {
fn file_exists(&self, path: &Path) -> bool {
fs::metadata(path).is_ok()
}
fn abs_path(&self, path: &Path) -> Option<PathBuf> {
if path.is_absolute() {
Some(path.to_path_buf())
} else {
env::current_dir()
.ok()
.map(|cwd| cwd.join(path))
}
}
fn read_file(&self, path: &Path) -> io::Result<String> {
let mut src = String::new();
fs::File::open(path)?.read_to_string(&mut src)?;
Ok(src)
}
}
// _____________________________________________________________________________
// CodeMap
//
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>,
file_loader: Box<FileLoader>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: Box::new(RealFileLoader)
}
}
pub fn with_file_loader(file_loader: Box<FileLoader>) -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: file_loader
}
}
pub fn file_exists(&self, path: &Path) -> bool {
self.file_loader.file_exists(path)
}
pub fn load_file(&self, path: &Path) -> io::Result<Rc<FileMap>> {
let src = self.file_loader.read_file(path)?;
let abs_path = self.file_loader.abs_path(path).map(|p| p.to_str().unwrap().to_string());
Ok(self.new_filemap(path.to_str().unwrap().to_string(), abs_path, src))
}
fn next_start_pos(&self) -> usize {
let files = self.files.borrow();
match files.last() {
None => 0,
// Add one so there is some space between files. This lets us distinguish
// positions in the codemap, even in the presence of zero-length files.
Some(last) => last.end_pos.to_usize() + 1,
}
}
/// Creates a new filemap without setting its line information. If you don't
/// intend to set the line information yourself, you should use new_filemap_and_lines.
pub fn new_filemap(&self, filename: FileName, abs_path: Option<FileName>,
mut src: String) -> Rc<FileMap> {
let start_pos = self.next_start_pos();
let mut files = self.files.borrow_mut();
// Remove utf-8 BOM if any.
if src.starts_with("\u{feff}") |
let end_pos = start_pos + src.len();
let filemap = Rc::new(FileMap {
name: filename,
abs_path: abs_path,
src: Some(Rc::new(src)),
start_pos: Pos::from_usize(start_pos),
end_pos: Pos::from_usize(end_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
let chpos = self.bytepos_to_file_charpos(pos);
match self.lookup_line(pos) {
Ok(FileMapAndLine { fm: f, line: a }) => {
let line = a + 1; // Line numbers start at 1
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos,
}
}
Err(f) => {
Loc {
file: f,
line: 0,
col: chpos,
}
}
}
}
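// Worked example (illustrative, not part of the original file): if the line
// at 0-based index 2 starts at BytePos(40) and `pos` is BytePos(47) with no
// multibyte characters before it, the result is line 3 with col CharPos(7).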
// If the relevant filemap is empty, we don't return a line number.
fn lookup_line(&self, pos: BytePos) -> Result<FileMapAndLine, Rc<FileMap>> {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let len = f.lines.borrow().len();
if len == 0 {
return Err(f);
}
let mut a = 0;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1 {
let m = (a + b) / 2;
if (*lines)[m] > pos {
b = m;
} else {
a = m;
}
}
assert!(a <= lines.len());
}
Ok(FileMapAndLine { fm: f, line: a })
}
/// Converts an absolute BytePos to a CharPos relative to the filemap.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
// Return the index of the filemap (in self.files) which contains pos.
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let count = files.len();
// Binary search for the filemap.
let mut a = 0;
let mut b = count;
while b - a > 1 {
let m = (a + b) / 2;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
assert!(a < count, "position {} does not resolve to a source location", pos.to_usize());
return a;
}
} | {
src.drain(..3);
} | conditional_block |
codemap.rs | use std::cell::RefCell;
use std::cmp;
use std::env;
use std::{fmt, fs};
use std::io::{self, Read};
use std::ops::{Add, Sub};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use ast::Name;
pub trait Pos {
fn from_usize(n: usize) -> Self;
fn to_usize(&self) -> usize;
}
/// A byte offset. Keep this small (currently 32-bits), as AST contains
/// a lot of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub struct BytePos(pub u32);
/// A character offset. Because of multibyte utf8 characters, a byte offset
/// is not equivalent to a character offset. The CodeMap will convert BytePos
/// values to CharPos values as necessary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Debug)]
pub struct CharPos(pub usize);
impl Pos for BytePos {
fn from_usize(n: usize) -> BytePos { BytePos(n as u32) }
fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize }
}
impl Add for BytePos {
type Output = BytePos;
fn add(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() + rhs.to_usize()) as u32)
}
}
impl Sub for BytePos {
type Output = BytePos;
fn sub(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() - rhs.to_usize()) as u32)
}
}
impl Pos for CharPos {
fn from_usize(n: usize) -> CharPos { CharPos(n) }
fn to_usize(&self) -> usize { let CharPos(n) = *self; n }
}
impl Add for CharPos {
type Output = CharPos;
fn add(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() + rhs.to_usize())
}
}
impl Sub for CharPos {
type Output = CharPos;
fn sub(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() - rhs.to_usize())
}
}
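// Illustrative sketch (not part of the original file): BytePos and CharPos
// support the same arithmetic but must not be mixed, since byte and character
// offsets diverge after any multibyte character.
#[cfg(test)]
mod pos_examples {
    use super::*;

    #[test]
    fn pos_arithmetic() {
        assert_eq!(BytePos(3) + BytePos(4), BytePos(7));
        assert_eq!(CharPos(10) - CharPos(4), CharPos(6));
        // Round-tripping through the Pos trait preserves the value.
        assert_eq!(BytePos::from_usize(42).to_usize(), 42);
    }
}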
/// Spans represent a region of code, used for error reporting. Positions in spans
/// are *absolute* positions from the beginning of the codemap.
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
pub struct Span {
pub lo: BytePos,
pub hi: BytePos
}
pub const DUMMY_SPAN: Span = Span { lo: BytePos(0), hi: BytePos(0) };
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_span(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
/* assuming that we're not in macro expansion */
pub fn mk_span(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi}
}
impl Span {
/// Returns a new span representing just the end-point of this span
pub fn end_point(self) -> Span {
let lo = cmp::max(self.hi.0 - 1, self.lo.0);
Span { lo: BytePos(lo), hi: self.hi }
}
/// Returns `self` if `self` is not the dummy span, and `other` otherwise.
pub fn substitute_dummy(self, other: Span) -> Span {
if self.source_equal(&DUMMY_SPAN) { other } else { self }
}
pub fn contains(self, other: Span) -> bool {
self.lo <= other.lo && other.hi <= self.hi
}
/// Return true if the spans are equal with regards to the source text.
///
/// Use this instead of `==` when either span could be generated code,
/// and you only care that they point to the same bytes of source text.
pub fn source_equal(&self, other: &Span) -> bool {
self.lo == other.lo && self.hi == other.hi
}
/// Returns `Some(span)`, a union of `self` and `other`, on overlap.
pub fn merge(self, other: Span) -> Option<Span> {
if (self.lo <= other.lo && self.hi > other.lo) ||
(self.lo >= other.lo && self.lo < other.hi) {
Some(Span {
lo: cmp::min(self.lo, other.lo),
hi: cmp::max(self.hi, other.hi),
})
} else {
None
}
}
/// Returns `Some(span)`, where the start is trimmed by the end of `other`
pub fn trim_start(self, other: Span) -> Option<Span> {
if self.hi > other.hi {
Some(Span { lo: cmp::max(self.lo, other.hi), ..self })
} else {
None
}
}
}
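// Hedged sketch (not part of the original file): how `merge` and `trim_start`
// combine spans. The positions are chosen for illustration only.
#[cfg(test)]
mod span_examples {
    use super::*;

    #[test]
    fn merge_and_trim() {
        let a = mk_span(BytePos(0), BytePos(5));
        let b = mk_span(BytePos(3), BytePos(8));
        // Overlapping spans merge into their union.
        assert_eq!(a.merge(b), Some(mk_span(BytePos(0), BytePos(8))));
        // Disjoint spans do not merge.
        assert_eq!(a.merge(mk_span(BytePos(9), BytePos(12))), None);
        // trim_start keeps only the part of `b` that lies after `a` ends.
        assert_eq!(b.trim_start(a), Some(mk_span(BytePos(5), BytePos(8))));
    }
}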
#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
/// A collection of spans. Spans have two orthogonal attributes:
///
/// - they can be *primary spans*. In this case they are the locus of
/// the error, and would be rendered with `^^^`.
/// - they can have a *label*. In this case, the label is written next
/// to the mark in the snippet when we render.
#[derive(Clone)]
pub struct MultiSpan {
primary_spans: Vec<Span>,
span_labels: Vec<(Span, String)>,
}
#[derive(Clone, Debug)]
pub struct SpanLabel {
/// The span we are going to include in the final snippet.
pub span: Span,
/// Is this a primary span? This is the "locus" of the message,
/// and is indicated with a `^^^^` underline, versus `----`.
pub is_primary: bool,
/// What label should we attach to this span (if any)?
pub label: Option<String>,
}
impl MultiSpan {
pub fn new() -> MultiSpan {
MultiSpan {
primary_spans: vec![],
span_labels: vec![]
}
}
pub fn from_span(primary_span: Span) -> MultiSpan {
MultiSpan {
primary_spans: vec![primary_span],
span_labels: vec![]
}
}
pub fn from_spans(vec: Vec<Span>) -> MultiSpan {
MultiSpan {
primary_spans: vec,
span_labels: vec![]
}
}
pub fn push_span_label(&mut self, span: Span, label: String) {
self.span_labels.push((span, label));
}
/// Selects the first primary span (if any)
pub fn primary_span(&self) -> Option<Span> {
self.primary_spans.first().cloned()
}
/// Returns all primary spans.
pub fn primary_spans(&self) -> &[Span] {
&self.primary_spans
}
/// Returns the strings to highlight. We always ensure that there
/// is an entry for each of the primary spans -- for each primary
/// span P, if there is at least one label with span P, we return
/// those labels (marked as primary). But otherwise we return
/// `SpanLabel` instances with empty labels.
pub fn span_labels(&self) -> Vec<SpanLabel> {
let is_primary = |span| self.primary_spans.contains(&span);
let mut span_labels = vec![];
for &(span, ref label) in &self.span_labels {
span_labels.push(SpanLabel {
span: span,
is_primary: is_primary(span),
label: Some(label.clone())
});
}
for &span in &self.primary_spans {
if !span_labels.iter().any(|sl| sl.span == span) {
span_labels.push(SpanLabel {
span: span,
is_primary: true,
label: None
});
}
}
span_labels
}
}
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
MultiSpan::from_span(span)
}
}
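// Hedged usage sketch (not part of the original file): building a MultiSpan
// and checking that an unlabeled primary span still yields a SpanLabel entry,
// as documented on `span_labels` above.
#[cfg(test)]
mod multispan_examples {
    use super::*;

    #[test]
    fn primary_spans_always_get_labels() {
        let primary = mk_span(BytePos(0), BytePos(4));
        let secondary = mk_span(BytePos(10), BytePos(12));
        let mut msp = MultiSpan::from_span(primary);
        msp.push_span_label(secondary, "defined here".to_string());
        let labels = msp.span_labels();
        // One entry for the labeled secondary span, plus one synthesized
        // (empty-label) entry for the unlabeled primary span.
        assert_eq!(labels.len(), 2);
        assert!(labels.iter().any(|l| l.span == primary && l.is_primary && l.label.is_none()));
        assert!(labels.iter().any(|l| l.span == secondary && !l.is_primary));
    }
}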
// _____________________________________________________________________________
// Loc, LocWithOpt, FileMapAndLine, FileMapAndBytePos
//
/// A source code location used for error reporting
#[derive(Debug)]
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
#[derive(Debug)]
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
#[derive(Debug)]
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
// _____________________________________________________________________________
// ExpnFormat, NameAndSpan, ExpnInfo, ExpnId
//
/// The source of expansion.
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum ExpnFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute(Name),
/// e.g. `format!()`
MacroBang(Name),
}
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
pub format: ExpnFormat,
/// Whether the macro is allowed to use #[unstable]/feature-gated
/// features internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: bool,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
impl NameAndSpan {
pub fn name(&self) -> Name {
match self.format {
ExpnFormat::MacroAttribute(s) => s,
ExpnFormat::MacroBang(s) => s,
}
}
}
/// Extra information for tracking spans of macro and syntax sugar expansion
#[derive(Hash, Debug)]
pub struct ExpnInfo {
/// The location of the actual macro invocation or syntax sugar, e.g.
/// `let x = foo!();` or `if let Some(y) = x {}`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the expansion.
pub callee: NameAndSpan
}
// _____________________________________________________________________________
// FileMap, MultiByteChar, FileName, FileLines
//
pub type FileName = String;
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't | /// The absolute path of the file that the source came from.
pub abs_path: Option<FileName>,
/// The complete source code
pub src: Option<Rc<String>>,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// The end position of this source in the CodeMap
pub end_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos>>,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar>>,
}
impl fmt::Debug for FileMap {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "FileMap({})", self.name)
}
}
/// An abstraction over the fs operations used by the Parser.
pub trait FileLoader {
/// Query the existence of a file.
fn file_exists(&self, path: &Path) -> bool;
/// Return an absolute path to a file, if possible.
fn abs_path(&self, path: &Path) -> Option<PathBuf>;
/// Read the contents of an UTF-8 file into memory.
fn read_file(&self, path: &Path) -> io::Result<String>;
}
/// A FileLoader that uses std::fs to load real files.
pub struct RealFileLoader;
impl FileLoader for RealFileLoader {
fn file_exists(&self, path: &Path) -> bool {
fs::metadata(path).is_ok()
}
fn abs_path(&self, path: &Path) -> Option<PathBuf> {
if path.is_absolute() {
Some(path.to_path_buf())
} else {
env::current_dir()
.ok()
.map(|cwd| cwd.join(path))
}
}
fn read_file(&self, path: &Path) -> io::Result<String> {
let mut src = String::new();
fs::File::open(path)?.read_to_string(&mut src)?;
Ok(src)
}
}
// _____________________________________________________________________________
// CodeMap
//
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>,
file_loader: Box<FileLoader>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: Box::new(RealFileLoader)
}
}
pub fn with_file_loader(file_loader: Box<FileLoader>) -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: file_loader
}
}
pub fn file_exists(&self, path: &Path) -> bool {
self.file_loader.file_exists(path)
}
pub fn load_file(&self, path: &Path) -> io::Result<Rc<FileMap>> {
let src = self.file_loader.read_file(path)?;
let abs_path = self.file_loader.abs_path(path).map(|p| p.to_str().unwrap().to_string());
Ok(self.new_filemap(path.to_str().unwrap().to_string(), abs_path, src))
}
fn next_start_pos(&self) -> usize {
let files = self.files.borrow();
match files.last() {
None => 0,
// Add one so there is some space between files. This lets us distinguish
// positions in the codemap, even in the presence of zero-length files.
Some(last) => last.end_pos.to_usize() + 1,
}
}
/// Creates a new filemap without setting its line information. If you don't
/// intend to set the line information yourself, you should use new_filemap_and_lines.
pub fn new_filemap(&self, filename: FileName, abs_path: Option<FileName>,
mut src: String) -> Rc<FileMap> {
let start_pos = self.next_start_pos();
let mut files = self.files.borrow_mut();
// Remove utf-8 BOM if any.
if src.starts_with("\u{feff}") {
src.drain(..3);
}
let end_pos = start_pos + src.len();
let filemap = Rc::new(FileMap {
name: filename,
abs_path: abs_path,
src: Some(Rc::new(src)),
start_pos: Pos::from_usize(start_pos),
end_pos: Pos::from_usize(end_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
let chpos = self.bytepos_to_file_charpos(pos);
match self.lookup_line(pos) {
Ok(FileMapAndLine { fm: f, line: a }) => {
let line = a + 1; // Line numbers start at 1
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos,
}
}
Err(f) => {
Loc {
file: f,
line: 0,
col: chpos,
}
}
}
}
// If the relevant filemap is empty, we don't return a line number.
fn lookup_line(&self, pos: BytePos) -> Result<FileMapAndLine, Rc<FileMap>> {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let len = f.lines.borrow().len();
if len == 0 {
return Err(f);
}
let mut a = 0;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1 {
let m = (a + b) / 2;
if (*lines)[m] > pos {
b = m;
} else {
a = m;
}
}
assert!(a <= lines.len());
}
Ok(FileMapAndLine { fm: f, line: a })
}
/// Converts an absolute BytePos to a CharPos relative to the filemap.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
// Return the index of the filemap (in self.files) which contains pos.
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let count = files.len();
// Binary search for the filemap.
let mut a = 0;
let mut b = count;
while b - a > 1 {
let m = (a + b) / 2;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
assert!(a < count, "position {} does not resolve to a source location", pos.to_usize());
return a;
}
} | /// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName, | random_line_split |
codemap.rs | use std::cell::RefCell;
use std::cmp;
use std::env;
use std::{fmt, fs};
use std::io::{self, Read};
use std::ops::{Add, Sub};
use std::path::{Path, PathBuf};
use std::rc::Rc;
use ast::Name;
pub trait Pos {
fn from_usize(n: usize) -> Self;
fn to_usize(&self) -> usize;
}
/// A byte offset. Keep this small (currently 32-bits), as AST contains
/// a lot of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
pub struct BytePos(pub u32);
/// A character offset. Because of multibyte utf8 characters, a byte offset
/// is not equivalent to a character offset. The CodeMap will convert BytePos
/// values to CharPos values as necessary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Debug)]
pub struct CharPos(pub usize);
impl Pos for BytePos {
fn from_usize(n: usize) -> BytePos { BytePos(n as u32) }
fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize }
}
impl Add for BytePos {
type Output = BytePos;
fn add(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() + rhs.to_usize()) as u32)
}
}
impl Sub for BytePos {
type Output = BytePos;
fn sub(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() - rhs.to_usize()) as u32)
}
}
impl Pos for CharPos {
fn from_usize(n: usize) -> CharPos { CharPos(n) }
fn to_usize(&self) -> usize { let CharPos(n) = *self; n }
}
impl Add for CharPos {
type Output = CharPos;
fn add(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() + rhs.to_usize())
}
}
impl Sub for CharPos {
type Output = CharPos;
fn sub(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() - rhs.to_usize())
}
}
/// Spans represent a region of code, used for error reporting. Positions in spans
/// are *absolute* positions from the beginning of the codemap.
#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)]
pub struct Span {
pub lo: BytePos,
pub hi: BytePos
}
pub const DUMMY_SPAN: Span = Span { lo: BytePos(0), hi: BytePos(0) };
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_span(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
/* assuming that we're not in macro expansion */
pub fn mk_span(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi}
}
impl Span {
/// Returns a new span representing just the end-point of this span
pub fn end_point(self) -> Span {
let lo = cmp::max(self.hi.0 - 1, self.lo.0);
Span { lo: BytePos(lo), hi: self.hi }
}
/// Returns `self` if `self` is not the dummy span, and `other` otherwise.
pub fn substitute_dummy(self, other: Span) -> Span {
if self.source_equal(&DUMMY_SPAN) { other } else { self }
}
pub fn contains(self, other: Span) -> bool {
self.lo <= other.lo && other.hi <= self.hi
}
/// Return true if the spans are equal with regards to the source text.
///
/// Use this instead of `==` when either span could be generated code,
/// and you only care that they point to the same bytes of source text.
pub fn source_equal(&self, other: &Span) -> bool {
self.lo == other.lo && self.hi == other.hi
}
/// Returns `Some(span)`, a union of `self` and `other`, on overlap.
pub fn merge(self, other: Span) -> Option<Span> {
if (self.lo <= other.lo && self.hi > other.lo) ||
(self.lo >= other.lo && self.lo < other.hi) {
Some(Span {
lo: cmp::min(self.lo, other.lo),
hi: cmp::max(self.hi, other.hi),
})
} else {
None
}
}
/// Returns `Some(span)`, where the start is trimmed by the end of `other`
pub fn | (self, other: Span) -> Option<Span> {
if self.hi > other.hi {
Some(Span { lo: cmp::max(self.lo, other.hi), ..self })
} else {
None
}
}
}
#[derive(Clone, PartialEq, Eq, Hash, Debug, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
/// A collection of spans. Spans have two orthogonal attributes:
///
/// - they can be *primary spans*. In this case they are the locus of
/// the error, and would be rendered with `^^^`.
/// - they can have a *label*. In this case, the label is written next
/// to the mark in the snippet when we render.
#[derive(Clone)]
pub struct MultiSpan {
primary_spans: Vec<Span>,
span_labels: Vec<(Span, String)>,
}
#[derive(Clone, Debug)]
pub struct SpanLabel {
/// The span we are going to include in the final snippet.
pub span: Span,
/// Is this a primary span? This is the "locus" of the message,
/// and is indicated with a `^^^^` underline, versus `----`.
pub is_primary: bool,
/// What label should we attach to this span (if any)?
pub label: Option<String>,
}
impl MultiSpan {
pub fn new() -> MultiSpan {
MultiSpan {
primary_spans: vec![],
span_labels: vec![]
}
}
pub fn from_span(primary_span: Span) -> MultiSpan {
MultiSpan {
primary_spans: vec![primary_span],
span_labels: vec![]
}
}
pub fn from_spans(vec: Vec<Span>) -> MultiSpan {
MultiSpan {
primary_spans: vec,
span_labels: vec![]
}
}
pub fn push_span_label(&mut self, span: Span, label: String) {
self.span_labels.push((span, label));
}
/// Selects the first primary span (if any)
pub fn primary_span(&self) -> Option<Span> {
self.primary_spans.first().cloned()
}
/// Returns all primary spans.
pub fn primary_spans(&self) -> &[Span] {
&self.primary_spans
}
/// Returns the strings to highlight. We always ensure that there
/// is an entry for each of the primary spans -- for each primary
/// span P, if there is at least one label with span P, we return
/// those labels (marked as primary). But otherwise we return
/// `SpanLabel` instances with empty labels.
pub fn span_labels(&self) -> Vec<SpanLabel> {
let is_primary = |span| self.primary_spans.contains(&span);
let mut span_labels = vec![];
for &(span, ref label) in &self.span_labels {
span_labels.push(SpanLabel {
span: span,
is_primary: is_primary(span),
label: Some(label.clone())
});
}
for &span in &self.primary_spans {
if !span_labels.iter().any(|sl| sl.span == span) {
span_labels.push(SpanLabel {
span: span,
is_primary: true,
label: None
});
}
}
span_labels
}
}
impl From<Span> for MultiSpan {
fn from(span: Span) -> MultiSpan {
MultiSpan::from_span(span)
}
}
// _____________________________________________________________________________
// Loc, LocWithOpt, FileMapAndLine, FileMapAndBytePos
//
/// A source code location used for error reporting
#[derive(Debug)]
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
#[derive(Debug)]
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
#[derive(Debug)]
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
#[derive(Debug)]
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
// _____________________________________________________________________________
// ExpnFormat, NameAndSpan, ExpnInfo, ExpnId
//
/// The source of expansion.
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum ExpnFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute(Name),
/// e.g. `format!()`
MacroBang(Name),
}
#[derive(Clone, Hash, Debug)]
pub struct NameAndSpan {
/// The format with which the macro was invoked.
pub format: ExpnFormat,
/// Whether the macro is allowed to use #[unstable]/feature-gated
/// features internally without forcing the whole crate to opt-in
/// to them.
pub allow_internal_unstable: bool,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
impl NameAndSpan {
pub fn name(&self) -> Name {
match self.format {
ExpnFormat::MacroAttribute(s) => s,
ExpnFormat::MacroBang(s) => s,
}
}
}
/// Extra information for tracking spans of macro and syntax sugar expansion
#[derive(Hash, Debug)]
pub struct ExpnInfo {
/// The location of the actual macro invocation or syntax sugar, e.g.
/// `let x = foo!();` or `if let Some(y) = x {}`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the expansion.
pub callee: NameAndSpan
}
// _____________________________________________________________________________
// FileMap, MultiByteChar, FileName, FileLines
//
pub type FileName = String;
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap.
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The absolute path of the file that the source came from.
pub abs_path: Option<FileName>,
/// The complete source code
pub src: Option<Rc<String>>,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// The end position of this source in the CodeMap
pub end_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos>>,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar>>,
}
impl fmt::Debug for FileMap {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "FileMap({})", self.name)
}
}
/// An abstraction over the fs operations used by the Parser.
pub trait FileLoader {
/// Query the existence of a file.
fn file_exists(&self, path: &Path) -> bool;
/// Return an absolute path to a file, if possible.
fn abs_path(&self, path: &Path) -> Option<PathBuf>;
/// Read the contents of an UTF-8 file into memory.
fn read_file(&self, path: &Path) -> io::Result<String>;
}
/// A FileLoader that uses std::fs to load real files.
pub struct RealFileLoader;
impl FileLoader for RealFileLoader {
fn file_exists(&self, path: &Path) -> bool {
fs::metadata(path).is_ok()
}
fn abs_path(&self, path: &Path) -> Option<PathBuf> {
if path.is_absolute() {
Some(path.to_path_buf())
} else {
env::current_dir()
.ok()
.map(|cwd| cwd.join(path))
}
}
fn read_file(&self, path: &Path) -> io::Result<String> {
let mut src = String::new();
fs::File::open(path)?.read_to_string(&mut src)?;
Ok(src)
}
}
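// Hedged sketch (not part of the original file): an in-memory FileLoader,
// the kind of stub one could hand to CodeMap::with_file_loader in tests.
// The names StaticLoader and "lib.rs" are illustrative assumptions.
#[cfg(test)]
mod file_loader_examples {
    use super::*;
    use std::collections::HashMap;

    struct StaticLoader {
        files: HashMap<PathBuf, String>,
    }

    impl FileLoader for StaticLoader {
        fn file_exists(&self, path: &Path) -> bool {
            self.files.contains_key(path)
        }
        fn abs_path(&self, path: &Path) -> Option<PathBuf> {
            // Pretend every path is already absolute.
            Some(path.to_path_buf())
        }
        fn read_file(&self, path: &Path) -> io::Result<String> {
            self.files.get(path).cloned().ok_or_else(|| {
                io::Error::new(io::ErrorKind::NotFound, "file not in loader")
            })
        }
    }

    #[test]
    fn load_from_memory() {
        let mut files = HashMap::new();
        files.insert(PathBuf::from("lib.rs"), "fn main() {}".to_string());
        let cm = CodeMap::with_file_loader(Box::new(StaticLoader { files }));
        let fm = cm.load_file(Path::new("lib.rs")).unwrap();
        assert_eq!(fm.name, "lib.rs");
    }
}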
// _____________________________________________________________________________
// CodeMap
//
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>,
file_loader: Box<FileLoader>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: Box::new(RealFileLoader)
}
}
pub fn with_file_loader(file_loader: Box<FileLoader>) -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
file_loader: file_loader
}
}
pub fn file_exists(&self, path: &Path) -> bool {
self.file_loader.file_exists(path)
}
pub fn load_file(&self, path: &Path) -> io::Result<Rc<FileMap>> {
let src = self.file_loader.read_file(path)?;
let abs_path = self.file_loader.abs_path(path).map(|p| p.to_str().unwrap().to_string());
Ok(self.new_filemap(path.to_str().unwrap().to_string(), abs_path, src))
}
fn next_start_pos(&self) -> usize {
let files = self.files.borrow();
match files.last() {
None => 0,
// Add one so there is some space between files. This lets us distinguish
// positions in the codemap, even in the presence of zero-length files.
Some(last) => last.end_pos.to_usize() + 1,
}
}
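// Worked example (illustrative): if the first file occupies [0, 10], a
// following zero-length file starts at 11, so even an empty file owns a
// distinct BytePos range.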
/// Creates a new filemap without setting its line information. If you don't
/// intend to set the line information yourself, you should use new_filemap_and_lines.
pub fn new_filemap(&self, filename: FileName, abs_path: Option<FileName>,
mut src: String) -> Rc<FileMap> {
let start_pos = self.next_start_pos();
let mut files = self.files.borrow_mut();
// Remove utf-8 BOM if any.
if src.starts_with("\u{feff}") {
src.drain(..3);
}
let end_pos = start_pos + src.len();
let filemap = Rc::new(FileMap {
name: filename,
abs_path: abs_path,
src: Some(Rc::new(src)),
start_pos: Pos::from_usize(start_pos),
end_pos: Pos::from_usize(end_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
let chpos = self.bytepos_to_file_charpos(pos);
match self.lookup_line(pos) {
Ok(FileMapAndLine { fm: f, line: a }) => {
let line = a + 1; // Line numbers start at 1
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos,
}
}
Err(f) => {
Loc {
file: f,
line: 0,
col: chpos,
}
}
}
}
// If the relevant filemap is empty, we don't return a line number.
fn lookup_line(&self, pos: BytePos) -> Result<FileMapAndLine, Rc<FileMap>> {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let len = f.lines.borrow().len();
if len == 0 {
return Err(f);
}
let mut a = 0;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1 {
let m = (a + b) / 2;
if (*lines)[m] > pos {
b = m;
} else {
a = m;
}
}
assert!(a <= lines.len());
}
Ok(FileMapAndLine { fm: f, line: a })
}
/// Converts an absolute BytePos to a CharPos relative to the filemap.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
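// Worked example (illustrative): for a filemap starting at BytePos(100)
// with one 3-byte character at BytePos(105), the absolute byte offset 110
// maps to CharPos(110 - 100 - 2) = CharPos(8).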
// Return the index of the filemap (in self.files) which contains pos.
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let count = files.len();
// Binary search for the filemap.
let mut a = 0;
let mut b = count;
while b - a > 1 {
let m = (a + b) / 2;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
assert!(a < count, "position {} does not resolve to a source location", pos.to_usize());
return a;
}
} | trim_start | identifier_name |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// TODO: out-of-bounds write; should return a failure result instead of silently ignoring it
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
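// Examples (illustrative): scale_color(0.5) == 127 (the cast truncates
// 127.5), scale_color(1.5) clamps to 255, and scale_color(-0.2) clamps to 0.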
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn write_rgb_separator(&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line
if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_index, line) in line_iter {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
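// Hedged sketch (not part of the original file): clean_line drops comment and
// blank lines while preserving the original indices of the lines it keeps.
#[cfg(test)]
mod clean_line_examples {
    use super::*;

    #[test]
    fn comments_and_blanks_are_filtered() {
        let input = "P3\n# a comment\n\n255\n";
        let kept: Vec<usize> = input
            .as_bytes()
            .lines()
            .enumerate()
            .filter_map(clean_line)
            .map(|(i, _)| i)
            .collect();
        // Lines 1 (comment) and 2 (blank) are dropped; 0 and 3 survive.
        assert_eq!(kept, vec![0, 3]);
    }
}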
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but Rust's float-to-int cast truncates, so 0.5 * 255 = 127.5 becomes 127
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() | lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => assert!(false, "Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
}
| {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!( | identifier_body |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// TODO: out-of-bounds write; should return a failure result instead of silently ignoring it
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn | (&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line
if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
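// Illustrative output (not part of the original file): a 2x1 all-black canvas
// serializes to the header lines "P3", "2 1", "255", followed by one data
// row: "0 0 0 0 0 0".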
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_index, line) in line_iter {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
}
Ok(canvas)
}
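// Hedged usage sketch (not part of the original file): round-tripping a
// canvas through PPM text; canvas_from_ppm reads from any io::Read source,
// here a byte slice.
#[cfg(test)]
mod roundtrip_example {
    use super::*;

    #[test]
    fn ppm_roundtrip_preserves_pixels() {
        let mut canvas = Canvas::new(2, 2);
        canvas.write_pixel(1, 0, color!(1.0, 0.0, 0.0));
        let ppm = canvas.to_ppm();
        let parsed = canvas_from_ppm(ppm.as_bytes()).unwrap();
        assert_eq!(parsed.pixel_at(1, 0), color!(1.0, 0.0, 0.0));
    }
}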
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but Rust's float-to-int cast truncates, so 0.5 * 255 = 127.5 becomes 127
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => assert!(false, "Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
}
| write_rgb_separator | identifier_name |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// TODO: out-of-bounds write; should return a failure result instead of silently ignoring it
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn write_rgb_separator(&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
| if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_index, line) in line_iter {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width {
x = 0;
y += 1;
}
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
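// Added sketch: scale_color is private but reachable from this child
// module; it should clamp out-of-range components before the byte cast.
#[test]
fn test_scale_color_clamps_to_byte_range() {
let c = Canvas::new(1, 1);
assert_eq!(c.scale_color(1.5), 255); // clamped high
assert_eq!(c.scale_color(-0.2), 0); // clamped low
assert_eq!(c.scale_color(0.5), 127); // truncating cast, not rounding
}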
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but I'll trust Rust's rounding for now
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
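// Added sketch: writing a canvas with to_ppm and reading it back with
// canvas_from_ppm should preserve colors whose components are exact
// multiples of 1/255 (here 0.2, 0.6, 0.8 = 51, 153, 204).
#[test]
fn test_ppm_round_trip() {
let mut canvas = Canvas::new(2, 2);
canvas.write_pixel(1, 0, color!(0.2, 0.6, 0.8));
let restored = canvas_from_ppm(canvas.to_ppm().as_bytes()).unwrap();
assert_abs_diff_eq!(restored.pixel_at(1, 0), color!(0.2, 0.6, 0.8));
}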
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => panic!("Should return IncorrectFormat error"),
}
}
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
} | current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line | random_line_split |
canvas.rs | use crate::color::Color;
use std::collections::VecDeque;
use std::io::{self, BufRead, BufReader, Read};
#[derive(Clone, Debug)]
pub struct Canvas {
pub width: usize,
pub height: usize,
data: Vec<Vec<Color>>,
}
const MAX_COLOR_VAL: u16 = 255;
const MAX_PPM_LINE_LENGTH: usize = 70;
// length of "255" is 3
// TODO: this should be evaluated programmatically, but "no matching in consts allowed" error prevented this
const MAX_COLOR_VAL_STR_LEN: usize = 3;
impl Canvas {
// Create a canvas initialized to all black
pub fn new(width: usize, height: usize) -> Canvas {
Canvas {
width,
height,
data: vec![vec![color!(0, 0, 0); width]; height],
}
}
pub fn write_pixel(&mut self, x: usize, y: usize, color: Color) {
if x < self.width && y < self.height {
self.data[y][x] = color;
} else {
// TODO: return a failure result instead of silently ignoring out-of-bounds writes
}
}
pub fn pixel_at(&self, x: usize, y: usize) -> Color {
self.data[y][x]
}
// scale/clamp color values from 0-1 to 0-255
fn scale_color(&self, rgb: f32) -> u8 {
(rgb * MAX_COLOR_VAL as f32)
.min(MAX_COLOR_VAL as f32)
.max(0.0) as u8
}
// If current line has no more room for more RGB values, add it to the PPM string and clear it;
// otherwise, add a space separator in preparation for the next RGB value
fn write_rgb_separator(&self, line: &mut String, ppm: &mut String) {
if line.len() < MAX_PPM_LINE_LENGTH - MAX_COLOR_VAL_STR_LEN {
(*line).push(' ');
} else {
ppm.push_str(&line);
ppm.push('\n');
line.clear();
}
}
// Return string containing PPM (portable pixel map) data representing current canvas
pub fn to_ppm(&self) -> String {
let mut ppm = String::new();
// write header
ppm.push_str("P3\n");
ppm.push_str(&(format!("{} {}\n", self.width, self.height)));
ppm.push_str(&(format!("{}\n", MAX_COLOR_VAL)));
// Write pixel data. Each pixel RGB value is written with a separating space or newline;
// new rows are written on new lines for human reading convenience, but lines longer than
// MAX_PPM_LINE_LENGTH must also be split.
let mut current_line = String::new();
for row in 0..self.height {
current_line.clear();
for (i, column) in (0..self.width).enumerate() {
let color = self.pixel_at(column, row);
let r = self.scale_color(color.r);
let g = self.scale_color(color.g);
let b = self.scale_color(color.b);
current_line.push_str(&r.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&g.to_string());
self.write_rgb_separator(&mut current_line, &mut ppm);
current_line.push_str(&b.to_string());
// if not at end of row yet, write a space or newline if the next point will be on this line
if i != self.width - 1 {
self.write_rgb_separator(&mut current_line, &mut ppm);
}
}
if !current_line.is_empty() {
ppm.push_str(&current_line);
ppm.push('\n');
}
}
ppm
}
}
// TODO: proper parsing errors should also contain the line and column number
#[derive(Debug)]
pub enum ParseError {
IoError(io::Error),
IncorrectFormat(String),
ParseIntError(std::num::ParseIntError),
MalformedDimensionHeader(String),
}
impl From<io::Error> for ParseError {
fn from(err: io::Error) -> ParseError {
ParseError::IoError(err)
}
}
impl From<std::num::ParseIntError> for ParseError {
fn from(err: std::num::ParseIntError) -> ParseError {
ParseError::ParseIntError(err)
}
}
type RgbElement = u32;
pub fn canvas_from_ppm<T: Read>(reader: T) -> Result<Canvas, ParseError> {
let buf_reader = BufReader::new(reader);
let mut line_iter = buf_reader.lines().enumerate().filter_map(clean_line);
// TODO: these unwrap()'s are not great; should really fail properly if the file doesn't
// contain this many lines
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
if line != "P3" {
return Err(ParseError::IncorrectFormat(format!(
"Incorrect magic number at line 1: expected P3, found {}",
line
)));
}
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let elements: Vec<&str> = line.split_whitespace().collect();
if elements.len() != 2 {
return Err(ParseError::MalformedDimensionHeader(format!(
"Expected width and height at line 2; found {}",
line
)));
}
let width = elements[0].parse::<usize>()?;
let height = elements[1].parse::<usize>()?;
let (_, line) = line_iter.next().unwrap();
let line = line?;
let line = line.trim();
let scale = line.parse::<RgbElement>()? as f32;
let mut canvas = Canvas::new(width, height);
let mut raw_rgb: VecDeque<RgbElement> = VecDeque::new();
let mut x = 0;
let mut y = 0;
for (_index, line) in line_iter {
let line = line?;
let line = line.trim();
let line_rgb = line
.split_whitespace()
.map(|s| s.parse::<RgbElement>())
.collect::<Result<Vec<RgbElement>, std::num::ParseIntError>>()?;
raw_rgb.extend(line_rgb);
while raw_rgb.len() >= 3 {
let r = raw_rgb.pop_front().unwrap() as f32 / scale;
let g = raw_rgb.pop_front().unwrap() as f32 / scale;
let b = raw_rgb.pop_front().unwrap() as f32 / scale;
canvas.write_pixel(x, y, color!(r, g, b));
// move to next canvas pixel
x += 1;
if x >= width |
}
}
Ok(canvas)
}
fn clean_line(
(index, line): (usize, Result<String, std::io::Error>),
) -> Option<(usize, Result<String, std::io::Error>)> {
match line {
Ok(s) => {
let s = s.trim();
if s.starts_with("#") || s.is_empty() {
None
} else {
Some((index, Ok(s.to_string())))
}
}
Err(_) => Some((index, line)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_height_and_width() {
let c = Canvas::new(15, 10);
assert_eq!(c.width, 15);
assert_eq!(c.height, 10);
}
#[test]
fn test_write_and_read_pixels() {
let mut canvas = Canvas::new(10, 5);
let color = color!(0.1, 0.2, 0.3);
canvas.write_pixel(7, 4, color);
assert_eq!(canvas.pixel_at(7, 4), color);
}
#[test]
fn test_ppm_header() {
let c = Canvas::new(20, 5);
let ppm = c.to_ppm();
let mut lines = ppm.lines();
assert_eq!(lines.next().unwrap(), "P3");
assert_eq!(lines.next().unwrap(), "20 5");
assert_eq!(lines.next().unwrap(), "255");
}
#[test]
fn test_ppm_pixel_data() {
let mut c = Canvas::new(5, 3);
c.write_pixel(0, 0, color!(1.5, 0, 0));
c.write_pixel(2, 1, color!(0, 0.5, 0));
c.write_pixel(4, 2, color!(-0.5, 0, 1));
let ppm = c.to_ppm();
let mut lines = ppm.lines();
// ignore header
lines.next();
lines.next();
lines.next();
assert_eq!(lines.next().unwrap(), "255 0 0 0 0 0 0 0 0 0 0 0 0 0 0");
// book says 128, but I'll trust Rust's rounding for now
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 127 0 0 0 0 0 0 0");
assert_eq!(lines.next().unwrap(), "0 0 0 0 0 0 0 0 0 0 0 0 0 0 255");
}
#[test]
fn test_splitting_long_ppm_lines() {
let mut canvas = Canvas::new(10, 2);
let color = color!(1, 0.8, 0.6);
// TODO: maybe turn this into a function on canvas?
for row in 0..canvas.height {
for column in 0..canvas.width {
canvas.write_pixel(column, row, color);
}
}
let ppm = canvas.to_ppm();
let mut lines = ppm.lines();
// skip header
lines.next();
lines.next();
lines.next();
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
assert_eq!(
lines.next().unwrap(),
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204"
);
assert_eq!(
lines.next().unwrap(),
"153 255 204 153 255 204 153 255 204 153 255 204 153"
);
}
#[test]
fn reading_file_with_wrong_magic_number() {
let ppm = "P32
1 1
255
0 0 0";
let result = canvas_from_ppm(ppm.as_bytes());
match result {
Err(ParseError::IncorrectFormat(msg)) => {
assert!(msg.contains("Incorrect magic number"))
}
_ => panic!("Should return IncorrectFormat error"),
}
}
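// Added sketch: clean_line is the filter behind the comment/blank-line
// handling tested below; comments and empty lines map to None, everything
// else is passed through trimmed.
#[test]
fn clean_line_filters_comments_and_blanks() {
assert!(clean_line((0, Ok("# a comment".to_string()))).is_none());
assert!(clean_line((1, Ok(" ".to_string()))).is_none());
let (index, line) = clean_line((2, Ok(" 255 ".to_string()))).unwrap();
assert_eq!(index, 2);
assert_eq!(line.unwrap(), "255");
}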
#[test]
fn reading_ppm_returns_canvas_with_correct_size() {
let ppm = "P3
10 2
255
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.width, 10);
assert_eq!(canvas.height, 2);
}
#[test]
fn reading_pixel_data_from_ppm_file() {
let ppm = "P3
4 3
255
255 127 0 0 127 255 127 255 0 255 255 255
0 0 0 255 0 0 0 255 0 0 0 255
255 255 0 0 255 255 255 0 255 127 127 127";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
let test_data = vec![
("1", 0, 0, color!(1, 0.49803922, 0)),
("2", 1, 0, color!(0, 0.49803922, 1)),
("3", 2, 0, color!(0.49803922, 1, 0)),
("4", 3, 0, color!(1, 1, 1)),
("5", 0, 1, color!(0, 0, 0)),
("6", 1, 1, color!(1, 0, 0)),
("7", 2, 1, color!(0, 1, 0)),
("8", 3, 1, color!(0, 0, 1)),
("9", 0, 2, color!(1, 1, 0)),
("10", 1, 2, color!(0, 1, 1)),
("11", 2, 2, color!(1, 0, 1)),
("12", 3, 2, color!(0.49803922, 0.49803922, 0.49803922)),
];
for (name, x, y, expected_color) in test_data {
println!("Case {}", name);
assert_abs_diff_eq!(canvas.pixel_at(x, y), expected_color);
}
}
#[test]
fn ppm_parsing_ignores_comment_lines() {
let ppm = "P3
# this is a comment
2 1
# this, too
255
# another comment
255 255 255
# oh, no, comments in the pixel data!
255 0 255
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(1, 1, 1));
assert_eq!(canvas.pixel_at(1, 0), color!(1, 0, 1));
}
#[test]
fn ppm_parsing_allows_rgb_triplet_to_span_lines() {
let ppm = "P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_skips_empty_lines() {
let ppm = "
P3
1 1
255
51
153
204
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 0), color!(0.2, 0.6, 0.8));
}
#[test]
fn ppm_parsing_respects_scale_setting() {
let ppm = "P3
2 2
100
100 100 100 50 50 50
75 50 25 0 0 0
";
let canvas = canvas_from_ppm(ppm.as_bytes()).unwrap();
assert_eq!(canvas.pixel_at(0, 1), color!(0.75, 0.5, 0.25));
}
}
| {
x = 0;
y += 1;
} | conditional_block |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in the
//! [stream] module.
//!
//! [stream]: network::noise::stream
use crate::noise::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
&timestamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
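// Added sketch (not part of the original file): exercises the strictly-
// increasing timestamp rule documented above. x25519::PublicKey is Copy
// (the handshake code below passes it by value twice), so the key can be
// reused across calls.
#[cfg(test)]
mod anti_replay_timestamps_test {
use super::*;
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use rand::SeedableRng as _;
#[test]
fn timestamps_must_strictly_increase() {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let pubkey = x25519::PrivateKey::generate(&mut rng).public_key();
let mut timestamps = AntiReplayTimestamps::default();
// an unseen key is never a replay
assert!(!timestamps.is_replay(pubkey, 42));
timestamps.store_timestamp(pubkey, 42);
// equal and older timestamps are replays
assert!(timestamps.is_replay(pubkey, 42));
assert!(timestamps.is_replay(pubkey, 41));
// a strictly newer timestamp is accepted
assert!(!timestamps.is_replay(pubkey, 43));
}
}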
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond duration (a `u128` from `as_millis`) is a 16-byte value in Rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
/// Noise handshake authentication mode.
pub enum HandshakeAuthMode {
/// In `Mutual` mode, both sides will authenticate each other with their
/// `trusted_peers` set. We also include replay attack mitigation in this mode.
///
/// For example, in the Libra validator network, validator peers will only
/// allow connections from other validator peers. They will use this mode to
/// check that inbound connections authenticate to a network public key
/// actually contained in the current validator set.
Mutual {
// Only use anti replay protection in mutual-auth scenarios. In theory,
// this is applicable everywhere; however, we would need to spend some
// time making this more sophisticated so it garbage collects old
// timestamps and doesn't use unbounded space. These are not problems in
// mutual-auth scenarios because we have a bounded set of trusted peers
// that rarely changes.
anti_replay_timestamps: RwLock<AntiReplayTimestamps>,
trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>,
},
/// In `ServerOnly` mode, the dialer authenticates the server. However, the
/// server does not care who connects to them and will allow inbound connections
/// from any peer.
ServerOnly,
}
impl HandshakeAuthMode {
pub fn mutual(trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>) -> Self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
}
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
fn trusted_peers(&self) -> Option<&RwLock<HashMap<PeerId, NetworkPeerInfo>>> {
match &self {
HandshakeAuthMode::Mutual { trusted_peers, .. } => Some(&trusted_peers),
HandshakeAuthMode::ServerOnly => None,
}
}
}
// Noise Upgrader
// --------------
// Noise by default is not aware of the protocol layers above or below it,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementation
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseUpgrader {
/// Config for executing Noise handshakes. Includes our static private key.
noise_config: noise::NoiseConfig,
/// Handshake authentication can be either mutual or server-only authentication.
auth_mode: HandshakeAuthMode,
}
impl NoiseUpgrader {
/// Create a new NoiseConfig with the provided keypair and authentication mode.
pub fn new(key: x25519::PrivateKey, auth_mode: HandshakeAuthMode) -> Self {
Self {
noise_config: noise::NoiseConfig::new(key),
auth_mode,
}
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(philiphayes): rework socket-bench-server so we can remove this function
#[allow(dead_code)]
pub async fn upgrade<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.upgrade_outbound(socket, remote_public_key).await?
}
ConnectionOrigin::Inbound => self.upgrade_inbound(socket).await?,
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
/// Perform an outbound protocol upgrade on this connection.
///
/// This runs the "client" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. In mutual auth
/// scenarios, we will also include an anti replay attack counter in the
/// Noise handshake payload. Currently this counter is always a millisecond-
/// granularity unix epoch timestamp.
pub async fn upgrade_outbound<TSocket>(
&self,
mut socket: TSocket,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutually authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = match self.auth_mode {
HandshakeAuthMode::Mutual { .. } => {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
}
HandshakeAuthMode::ServerOnly => None,
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.noise_config
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.noise_config
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
/// Perform an inbound protocol upgrade on this connection.
///
/// This runs the "server" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. If the configuration
/// requires mutual authentication, we will only allow connections from peers
/// that successfully authenticate to a public key in our `trusted_peers` set.
/// In addition, we will expect the client to include an anti replay attack
/// counter in the Noise handshake payload in mutual auth scenarios.
pub async fn upgrade_inbound<TSocket>(
&self,
mut socket: TSocket,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.noise_config
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// if mutual auth mode, verify the remote pubkey is in our set of trusted peers
if let Some(trusted_peers) = self.auth_mode.trusted_peers() {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if mutual auth mode, verify this handshake is not a replay
if let Some(anti_replay_timestamps) = self.auth_mode.anti_replay_timestamps() {
// check that the payload received is the client timestamp (in milliseconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.noise_config
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use crate::common::NetworkPublicKeys;
use futures::{executor::block_on, future::join};
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use memsocket::MemorySocket;
use rand::SeedableRng as _;
use std::{
io,
sync::{Arc, RwLock},
};
/// helper to setup two testing peers
fn build_peers(
is_mutual_auth: bool,
) -> (
(NoiseUpgrader, x25519::PublicKey),
(NoiseUpgrader, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let (client_auth, server_auth) = if is_mutual_auth {
let client_id = PeerId::random();
let client_keys = NetworkPublicKeys {
identity_public_key: client_public,
};
let server_id = PeerId::random();
let server_keys = NetworkPublicKeys {
identity_public_key: server_public,
};
let trusted_peers = Arc::new(RwLock::new( | let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader::new(server_private, server_auth);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseUpgrader,
server: NoiseUpgrader,
server_public_key: x25519::PublicKey,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
// perform the handshake
let (client_session, server_session) = block_on(join(
client.upgrade_outbound(dialer_socket, server_public_key),
server.upgrade_inbound(listener_socket),
));
Ok((client_session?, server_session?))
}
fn test_handshake_success(is_mutual_auth: bool) {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers(is_mutual_auth);
let (client, server) = perform_handshake(client, server, server_public).unwrap();
assert_eq!(client.get_remote_static(), server_public);
assert_eq!(server.get_remote_static(), client_public);
}
#[test]
fn test_handshake_server_only_auth() {
test_handshake_success(false /* is_mutual_auth */);
}
#[test]
fn test_handshake_mutual_auth() {
test_handshake_success(true /* is_mutual_auth */);
}
} | vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone()); | random_line_split |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in the
//! [stream] module.
//!
//! [stream]: network::noise::stream
use crate::noise::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
&timestamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond duration (a `u128` from `as_millis`) is a 16-byte value in Rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
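// Added sketch: the u64 millisecond timestamp described above round-trips
// through exactly PAYLOAD_SIZE little-endian bytes, which is why both
// sides can size their handshake buffers with a constant.
#[cfg(test)]
mod payload_size_test {
use super::PAYLOAD_SIZE;
#[test]
fn timestamp_fits_payload_exactly() {
let now: u64 = 1_588_888_888_888; // an arbitrary millisecond count
let bytes = now.to_le_bytes();
assert_eq!(bytes.len(), PAYLOAD_SIZE);
assert_eq!(u64::from_le_bytes(bytes), now);
}
}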
/// Noise handshake authentication mode.
pub enum HandshakeAuthMode {
/// In `Mutual` mode, both sides will authenticate each other with their
/// `trusted_peers` set. We also include replay attack mitigation in this mode.
///
/// For example, in the Libra validator network, validator peers will only
/// allow connections from other validator peers. They will use this mode to
/// check that inbound connections authenticate to a network public key
/// actually contained in the current validator set.
Mutual {
// Only use anti replay protection in mutual-auth scenarios. In theory,
// this is applicable everywhere; however, we would need to spend some
// time making this more sophisticated so it garbage collects old
// timestamps and doesn't use unbounded space. These are not problems in
// mutual-auth scenarios because we have a bounded set of trusted peers
// that rarely changes.
anti_replay_timestamps: RwLock<AntiReplayTimestamps>,
trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>,
},
/// In `ServerOnly` mode, the dialer authenticates the server. However, the
/// server does not care who connects to them and will allow inbound connections
/// from any peer.
ServerOnly,
}
impl HandshakeAuthMode {
pub fn mutual(trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>) -> Self |
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
fn trusted_peers(&self) -> Option<&RwLock<HashMap<PeerId, NetworkPeerInfo>>> {
match &self {
HandshakeAuthMode::Mutual { trusted_peers, .. } => Some(&trusted_peers),
HandshakeAuthMode::ServerOnly => None,
}
}
}
// Noise Upgrader
// --------------
// Noise by default is not aware of the protocol layers above or below it,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementation
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseUpgrader {
/// Config for executing Noise handshakes. Includes our static private key.
noise_config: noise::NoiseConfig,
/// Handshake authentication can be either mutual or server-only authentication.
auth_mode: HandshakeAuthMode,
}
impl NoiseUpgrader {
/// Create a new NoiseConfig with the provided keypair and authentication mode.
pub fn new(key: x25519::PrivateKey, auth_mode: HandshakeAuthMode) -> Self {
Self {
noise_config: noise::NoiseConfig::new(key),
auth_mode,
}
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(philiphayes): rework socket-bench-server so we can remove this function
#[allow(dead_code)]
pub async fn upgrade<TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.upgrade_outbound(socket, remote_public_key).await?
}
ConnectionOrigin::Inbound => self.upgrade_inbound(socket).await?,
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
/// Perform an outbound protocol upgrade on this connection.
///
/// This runs the "client" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. In mutual auth
/// scenarios, we will also include an anti replay attack counter in the
/// Noise handshake payload. Currently this counter is always a millisecond-
/// granularity unix epoch timestamp.
pub async fn upgrade_outbound<TSocket>(
&self,
mut socket: TSocket,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutually authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = match self.auth_mode {
HandshakeAuthMode::Mutual { .. } => {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
}
HandshakeAuthMode::ServerOnly => None,
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.noise_config
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.noise_config
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
/// Perform an inbound protocol upgrade on this connection.
///
/// This runs the "server" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. If the configuration
/// requires mutual authentication, we will only allow connections from peers
/// that successfully authenticate to a public key in our `trusted_peers` set.
/// In addition, we will expect the client to include an anti replay attack
/// counter in the Noise handshake payload in mutual auth scenarios.
pub async fn upgrade_inbound<TSocket>(
&self,
mut socket: TSocket,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.noise_config
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// if mutual auth mode, verify the remote pubkey is in our set of trusted peers
if let Some(trusted_peers) = self.auth_mode.trusted_peers() {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if mutual auth mode, verify this handshake is not a replay
if let Some(anti_replay_timestamps) = self.auth_mode.anti_replay_timestamps() {
// check that the payload received is the client timestamp (in milliseconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.noise_config
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
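// Added sketch: both peers size their read/write buffers with the noise
// crate's const fns (used throughout the impl above), so dialer and
// listener always agree on how many bytes to read; the assertion below is
// illustrative of the IK pattern's asymmetry.
#[cfg(test)]
mod message_len_test {
use super::PAYLOAD_SIZE;
use libra_crypto::noise;
#[test]
fn init_message_is_larger_than_response() {
// the init message carries an encrypted static key plus the
// 8-byte timestamp payload; the response carries no payload
let init_len = noise::handshake_init_msg_len(PAYLOAD_SIZE);
let resp_len = noise::handshake_resp_msg_len(0);
assert!(init_len > resp_len);
}
}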
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use crate::common::NetworkPublicKeys;
use futures::{executor::block_on, future::join};
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use memsocket::MemorySocket;
use rand::SeedableRng as _;
use std::{
io,
sync::{Arc, RwLock},
};
/// helper to setup two testing peers
fn build_peers(
is_mutual_auth: bool,
) -> (
(NoiseUpgrader, x25519::PublicKey),
(NoiseUpgrader, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let (client_auth, server_auth) = if is_mutual_auth {
let client_id = PeerId::random();
let client_keys = NetworkPublicKeys {
identity_public_key: client_public,
};
let server_id = PeerId::random();
let server_keys = NetworkPublicKeys {
identity_public_key: server_public,
};
let trusted_peers = Arc::new(RwLock::new(
vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone());
let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader::new(server_private, server_auth);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseUpgrader,
server: NoiseUpgrader,
server_public_key: x25519::PublicKey,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
// perform the handshake
let (client_session, server_session) = block_on(join(
client.upgrade_outbound(dialer_socket, server_public_key),
server.upgrade_inbound(listener_socket),
));
Ok((client_session?, server_session?))
}
fn test_handshake_success(is_mutual_auth: bool) {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers(is_mutual_auth);
let (client, server) = perform_handshake(client, server, server_public).unwrap();
assert_eq!(client.get_remote_static(), server_public);
assert_eq!(server.get_remote_static(), client_public);
}
#[test]
fn test_handshake_server_only_auth() {
test_handshake_success(false /* is_mutual_auth */);
}
#[test]
fn test_handshake_mutual_auth() {
test_handshake_success(true /* is_mutual_auth */);
}
}
| {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
} | identifier_body |
handshake.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The handshake module implements the handshake part of the protocol.
//! This module also implements additional anti-DoS mitigation,
//! by including a timestamp in each handshake initialization message.
//! Refer to the module's documentation for more information.
//! A successful handshake returns a `NoiseStream` which is defined in the
//! [stream] module.
//!
//! [stream]: network::noise::stream
use crate::noise::stream::NoiseStream;
use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libra_config::config::NetworkPeerInfo;
use libra_crypto::{noise, x25519};
use libra_types::PeerId;
use netcore::transport::ConnectionOrigin;
use std::{
collections::HashMap,
io,
sync::{Arc, RwLock},
time,
};
/// In a mutually authenticated network, a client message is accompanied with a timestamp.
/// This is in order to prevent replay attacks, where the attacker does not know the client's static key,
/// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations.
///
/// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing,
/// effectively considering it as a stateful counter.
///
/// If the client timestamp has been seen before, or is not strictly increasing,
/// we can abort the handshake early and avoid heavy Diffie-Hellman computations.
/// If the client timestamp is valid, we store it.
#[derive(Default)]
pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>);
impl AntiReplayTimestamps {
/// Returns true if the timestamp has already been observed for this peer
/// or if it's an old timestamp
pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool {
if let Some(last_timestamp) = self.0.get(&pubkey) {
&timestamp <= last_timestamp
} else {
false
}
}
/// Stores the timestamp
pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) {
self.0
.entry(pubkey)
.and_modify(|last_timestamp| *last_timestamp = timestamp)
.or_insert(timestamp);
}
}
/// The timestamp is sent as a payload, so that it is encrypted.
/// Note that a millisecond duration (a `u128` from `as_millis`) is a 16-byte value in Rust,
/// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes.
const PAYLOAD_SIZE: usize = 8;
/// Noise handshake authentication mode.
pub enum HandshakeAuthMode {
/// In `Mutual` mode, both sides will authenticate each other with their
/// `trusted_peers` set. We also include replay attack mitigation in this mode.
///
/// For example, in the Libra validator network, validator peers will only
/// allow connections from other validator peers. They will use this mode to
/// check that inbound connections authenticate to a network public key
/// actually contained in the current validator set.
Mutual {
// Only use anti replay protection in mutual-auth scenarios. In theory,
// this is applicable everywhere; however, we would need to spend some
// time making this more sophisticated so it garbage collects old
// timestamps and doesn't use unbounded space. These are not problems in
// mutual-auth scenarios because we have a bounded set of trusted peers
// that rarely changes.
anti_replay_timestamps: RwLock<AntiReplayTimestamps>,
trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>,
},
/// In `ServerOnly` mode, the dialer authenticates the server. However, the
/// server does not care who connects to them and will allow inbound connections
/// from any peer.
ServerOnly,
}
impl HandshakeAuthMode {
pub fn mutual(trusted_peers: Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>) -> Self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps: RwLock::new(AntiReplayTimestamps::default()),
trusted_peers,
}
}
fn anti_replay_timestamps(&self) -> Option<&RwLock<AntiReplayTimestamps>> {
match &self {
HandshakeAuthMode::Mutual {
anti_replay_timestamps,
..
} => Some(&anti_replay_timestamps),
HandshakeAuthMode::ServerOnly => None,
}
}
fn trusted_peers(&self) -> Option<&RwLock<HashMap<PeerId, NetworkPeerInfo>>> {
match &self {
HandshakeAuthMode::Mutual { trusted_peers, .. } => Some(&trusted_peers),
HandshakeAuthMode::ServerOnly => None,
}
}
}
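// Added sketch: choosing an auth mode. Mutual mode carries the trusted
// peers set and anti-replay state; ServerOnly carries neither. The empty
// map here is illustrative only.
#[cfg(test)]
mod auth_mode_test {
use super::*;
use std::{collections::HashMap, sync::{Arc, RwLock}};
#[test]
fn mutual_mode_carries_state_server_only_does_not() {
let trusted_peers = Arc::new(RwLock::new(HashMap::new()));
let mutual = HandshakeAuthMode::mutual(trusted_peers);
assert!(mutual.trusted_peers().is_some());
assert!(mutual.anti_replay_timestamps().is_some());
let server_only = HandshakeAuthMode::ServerOnly;
assert!(server_only.trusted_peers().is_none());
assert!(server_only.anti_replay_timestamps().is_none());
}
}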
// Noise Upgrader
// --------------
// Noise by default is not aware of the protocol layers above or below it,
// We thus need to build this wrapper around Noise to both:
//
// - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages)
// - understand how long noise messages we send and receive are,
// in order to pass them to the noise implementation
//
/// The Noise configuration to be used to perform a protocol upgrade on an underlying socket.
pub struct NoiseUpgrader {
/// Config for executing Noise handshakes. Includes our static private key.
noise_config: noise::NoiseConfig,
/// Handshake authentication can be either mutual or server-only authentication.
auth_mode: HandshakeAuthMode,
}
impl NoiseUpgrader {
/// Create a new NoiseConfig with the provided keypair and authentication mode.
pub fn new(key: x25519::PrivateKey, auth_mode: HandshakeAuthMode) -> Self {
Self {
noise_config: noise::NoiseConfig::new(key),
auth_mode,
}
}
/// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX
/// handshake to establish a noise stream and exchange static public keys. Upon success,
/// returns the static public key of the remote as well as a NoiseStream.
// TODO(philiphayes): rework socket-bench-server so we can remove this function
#[allow(dead_code)]
pub async fn | <TSocket>(
&self,
socket: TSocket,
origin: ConnectionOrigin,
remote_public_key: Option<x25519::PublicKey>,
) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// perform the noise handshake
let socket = match origin {
ConnectionOrigin::Outbound => {
let remote_public_key = match remote_public_key {
Some(key) => key,
None if cfg!(any(test, feature = "fuzzing")) => unreachable!(),
None => {
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
"noise: SHOULD NOT HAPPEN: missing server's key when dialing",
));
}
};
self.upgrade_outbound(socket, remote_public_key).await?
}
ConnectionOrigin::Inbound => self.upgrade_inbound(socket).await?,
};
// return remote public key with a socket including the noise stream
let remote_public_key = socket.get_remote_static();
Ok((remote_public_key, socket))
}
/// Perform an outbound protocol upgrade on this connection.
///
/// This runs the "client" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. In mutual auth
/// scenarios, we will also include an anti replay attack counter in the
/// Noise handshake payload. Currently this counter is always a millisecond-
/// granularity unix epoch timestamp.
pub async fn upgrade_outbound<TSocket>(
&self,
mut socket: TSocket,
remote_public_key: x25519::PublicKey,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// in mutually authenticated networks, send a payload of the current timestamp (in milliseconds)
let payload = match self.auth_mode {
HandshakeAuthMode::Mutual { .. } => {
let now: u64 = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("system clock should work")
.as_millis() as u64;
// e.g. [157, 126, 253, 97, 114, 1, 0, 0]
let now = now.to_le_bytes().to_vec();
Some(now)
}
HandshakeAuthMode::ServerOnly => None,
};
// create first handshake message (-> e, es, s, ss)
let mut rng = rand::rngs::OsRng;
let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
let initiator_state = self
.noise_config
.initiate_connection(
&mut rng,
&[],
remote_public_key,
payload.as_ref().map(|x| &x[..]),
&mut first_message,
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// write the first handshake message
socket.write_all(&first_message).await?;
// flush
socket.flush().await?;
// receive the server's response (<- e, ee, se)
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
socket.read_exact(&mut server_response).await?;
// parse the server's response
// TODO: security logging here? (mimoo)
let (_, session) = self
.noise_config
.finalize_connection(initiator_state, &server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
/// Perform an inbound protocol upgrade on this connection.
///
/// This runs the "server" side of the Noise IK handshake to establish a
/// secure Noise stream and exchange static public keys. If the configuration
/// requires mutual authentication, we will only allow connections from peers
/// that successfully authenticate to a public key in our `trusted_peers` set.
/// In addition, we will expect the client to include an anti replay attack
/// counter in the Noise handshake payload in mutual auth scenarios.
pub async fn upgrade_inbound<TSocket>(
&self,
mut socket: TSocket,
) -> io::Result<NoiseStream<TSocket>>
where
TSocket: AsyncRead + AsyncWrite + Unpin,
{
// receive the initiation message
let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)];
socket.read_exact(&mut client_init_message).await?;
// parse it
let (their_public_key, handshake_state, payload) = self
.noise_config
.parse_client_init_message(&[], &client_init_message)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// if mutual auth mode, verify the remote pubkey is in our set of trusted peers
if let Some(trusted_peers) = self.auth_mode.trusted_peers() {
let found = trusted_peers
.read()
.map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read trusted_peers lock",
)
})?
.iter()
.any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key);
if !found {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client connecting to us with an unknown public key: {}",
their_public_key
),
));
}
}
// if mutual auth mode, verify this handshake is not a replay
if let Some(anti_replay_timestamps) = self.auth_mode.anti_replay_timestamps() {
// check that the payload received is the client timestamp (in milliseconds)
if payload.len() != PAYLOAD_SIZE {
// TODO: security logging (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"noise: client initiated connection without an 8-byte timestamp",
));
}
let mut client_timestamp = [0u8; PAYLOAD_SIZE];
client_timestamp.copy_from_slice(&payload);
let client_timestamp = u64::from_le_bytes(client_timestamp);
// check the timestamp is not a replay
let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| {
io::Error::new(
io::ErrorKind::Other,
"noise: unable to read anti_replay_timestamps lock",
)
})?;
if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) {
// TODO: security logging the ip + blocking the ip? (mimoo)
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"noise: client initiated connection with a timestamp already seen before: {}",
client_timestamp
),
));
}
// store the timestamp
anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp);
}
// construct the response
let mut rng = rand::rngs::OsRng;
let mut server_response = [0u8; noise::handshake_resp_msg_len(0)];
let session = self
.noise_config
.respond_to_client(&mut rng, handshake_state, None, &mut server_response)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
// send the response
socket.write_all(&server_response).await?;
// finalize the connection
Ok(NoiseStream::new(socket, session))
}
}
//
// Tests
// -----
//
#[cfg(test)]
mod test {
use super::*;
use crate::common::NetworkPublicKeys;
use futures::{executor::block_on, future::join};
use libra_crypto::{test_utils::TEST_SEED, traits::Uniform as _};
use memsocket::MemorySocket;
use rand::SeedableRng as _;
use std::{
io,
sync::{Arc, RwLock},
};
/// helper to setup two testing peers
fn build_peers(
is_mutual_auth: bool,
) -> (
(NoiseUpgrader, x25519::PublicKey),
(NoiseUpgrader, x25519::PublicKey),
) {
let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED);
let client_private = x25519::PrivateKey::generate(&mut rng);
let client_public = client_private.public_key();
let server_private = x25519::PrivateKey::generate(&mut rng);
let server_public = server_private.public_key();
let (client_auth, server_auth) = if is_mutual_auth {
let client_id = PeerId::random();
let client_keys = NetworkPublicKeys {
identity_public_key: client_public,
};
let server_id = PeerId::random();
let server_keys = NetworkPublicKeys {
identity_public_key: server_public,
};
let trusted_peers = Arc::new(RwLock::new(
vec![(client_id, client_keys), (server_id, server_keys)]
.into_iter()
.collect(),
));
let client_auth = HandshakeAuthMode::mutual(trusted_peers.clone());
let server_auth = HandshakeAuthMode::mutual(trusted_peers);
(client_auth, server_auth)
} else {
(HandshakeAuthMode::ServerOnly, HandshakeAuthMode::ServerOnly)
};
let client = NoiseUpgrader::new(client_private, client_auth);
let server = NoiseUpgrader::new(server_private, server_auth);
((client, client_public), (server, server_public))
}
/// helper to perform a noise handshake with two peers
fn perform_handshake(
client: NoiseUpgrader,
server: NoiseUpgrader,
server_public_key: x25519::PublicKey,
) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> {
// create an in-memory socket for testing
let (dialer_socket, listener_socket) = MemorySocket::new_pair();
// perform the handshake
let (client_session, server_session) = block_on(join(
client.upgrade_outbound(dialer_socket, server_public_key),
server.upgrade_inbound(listener_socket),
));
Ok((client_session?, server_session?))
}
fn test_handshake_success(is_mutual_auth: bool) {
// perform handshake with two testing peers
let ((client, client_public), (server, server_public)) = build_peers(is_mutual_auth);
let (client, server) = perform_handshake(client, server, server_public).unwrap();
assert_eq!(client.get_remote_static(), server_public);
assert_eq!(server.get_remote_static(), client_public);
}
#[test]
fn test_handshake_server_only_auth() {
test_handshake_success(false /* is_mutual_auth */);
}
#[test]
fn test_handshake_mutual_auth() {
test_handshake_success(true /* is_mutual_auth */);
}
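// Negative-path sketch, not part of the original suite: a client whose
// public key is absent from the server's `trusted_peers` must be rejected
// by a mutual-auth server. The stranger runs in mutual mode with its own
// empty trusted set (assumed to be constructible via `Default`) purely so
// it sends the 8-byte timestamp payload the server expects to read.
#[test]
fn test_handshake_unknown_client_rejected() {
    let ((_client, _client_public), (server, server_public)) =
        build_peers(true /* is_mutual_auth */);
    // A fresh key pair that was never inserted into the server's trusted set.
    let mut rng = ::rand::rngs::StdRng::from_seed([42u8; 32]);
    let stranger_private = x25519::PrivateKey::generate(&mut rng);
    let stranger = NoiseUpgrader::new(
        stranger_private,
        HandshakeAuthMode::mutual(Arc::new(RwLock::new(Default::default()))),
    );
    assert!(perform_handshake(stranger, server, server_public).is_err());
}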
}
lib.rs
// This crate is a library
#![crate_type = "lib"]
// This crate is named "pixel"
#![crate_name = "pixel"]
// Use +nightly to overpass this
#![feature(test)]
#[cfg(test)]
mod tests;
extern crate rand;
use std::ops::Not;
use std::path::Path;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use rand::Rng;
#[derive(Debug, Copy, Clone, PartialEq)]
/// A Color is represented here by three color channels; each of them is a u8.
pub struct Color {
/// Red
r: u8,
/// Green
g: u8,
/// Blue
b: u8
}
/// Implements some functions for the struct Color
impl Color {
/// Constructor
pub fn new(red: u8, green: u8, blue: u8) -> Color {
return Color {r : red, g : green, b : blue};
}
/// Constructor with random values for each color
pub fn new_random() -> Color {
let mut r = rand::thread_rng();
return Color {
r : r.gen::<u8>(),
g : r.gen::<u8>(),
b : r.gen::<u8>()
}
}
/// Default constructor
pub fn empty_color() -> Color {
return Color {r : 0, g : 0, b : 0};
}
/// Red's getter
pub fn red(&self) -> u8 {
return self.r;
}
/// Green's getter
pub fn green(&self) -> u8 {
return self.g;
}
/// Blue's getter
pub fn blue(&self) -> u8 {
return self.b;
}
/// toString() to display a Color
pub fn display(&self) {
println!("r : {}, g : {}, b : {}", self.r, self.g, self.b);
}
/// Equals to determine if the two Color in parameters are equals.
/// Return true if self and other and equals
/// (the r, g and b of self are equals to the r, g and b of other)
///
/// # Arguments
/// * `self` - a Color to be compared
/// * `other` - a second Color to compare the first one
///
/// # Return
/// * `bool` - corresponding to the equality (or not) of the two arguments
pub fn eq(&self, other: Color) -> bool {
if (self.red() == other.red())
&& (self.green() == other.green())
&& (self.blue() == other.blue()) {
return true;
}
return false;
}
/// Transform an RGB pixel (Color) to a grayscale pixel (between 0 and 255).
/// Use an intermediate u32 var to calculate the average without u8 overflow.
///
/// # Arguments
/// * `self` - a Color to be converted
///
/// # Return
/// * `u8` - an integer corresponding to the converted Color
///
/// # Example
/// If a Color(30, 28, 255) is passed as a parameter
/// the function will return 104.
pub fn grayscale(&self) -> u8 {
let average: u32 = (self.r as u32 + self.g as u32 + self.b as u32)/3;
return average as u8;
}
}
/// Impl block to implement the not() function
impl Not for Color {
type Output = Color;
/// Invert a pixel's color with `!color`
///
/// # Arguments
/// * `self` - a Color to be inverted
///
/// # Return
/// * `Self` - the inverted Color
///
/// # Example
/// If a Color(100, 50, 75) is passed as a parameter
/// the function will return a Color(155, 205, 180).
fn not(self) -> Self::Output {
let mut c = self;
c.r = 255 - c.r;
c.g = 255 - c.g;
c.b = 255 - c.b;
return c;
}
}
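// A small usage sketch (illustrative addition, not part of the original
// file): the constructor, the `Not` impl and `grayscale` working together
// on one pixel.
#[cfg(test)]
mod color_examples {
    use super::Color;

    #[test]
    fn construct_invert_grayscale() {
        let c = Color::new(30, 28, 255);
        // (30 + 28 + 255) / 3 == 104, matching the doc example above
        assert_eq!(c.grayscale(), 104);
        // `!c` flips each channel to 255 - value
        assert_eq!(!c, Color::new(225, 227, 0));
    }
}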
#[derive(Debug)]
/// An image is defined with a width, a height and a pixels.
pub struct Image {
/// A width is an u32
width: u32,
/// A height is an u32
height: u32,
/// A pixels is a Vec<Color>
pixels: Vec<Color> // 2D array dynamic
}
/// Used to call every Image's functions
impl Image {
/// Constructor
pub fn new(width:u32, height:u32, pixels:Vec<Color>) -> Image {
return Image {width : width, height : height, pixels : pixels};
}
/// Width's getter
pub fn width(&self) -> u32 {
return self.width;
}
/// Height's getter
pub fn height(&self) -> u32 {
return self.height;
}
/// Pixels getter
pub fn pixels(&self) -> &Vec<Color> {
return &self.pixels;
}
/// Equals()
pub fn eq(&self, other: Image) -> bool {
if self.height != other.height {
return false;
}
if self.width != other.width {
return false;
}
if self.pixels != other.pixels {
return false;
}
return true;
}
/// Create a new Image from a .ppm file
/// # Arguments
/// * filename: &Path - The path corresponding to the file to be read.
///
/// # Return
/// * Option<Image> - The Image created from the file read. It is an `Option`
/// so we can handle the case where a problem occurs during the reading of the file.
pub fn new_with_file(filename: &Path) -> Option<Image> {
let mut width: u32 = 0;
let mut height: u32 = 0;
let mut pixels: Vec<Color> = Vec::new();
let file = File::open(filename).expect("Unable to open the File");
let buf_reader = BufReader::new(file);
for (i, line) in buf_reader.lines().enumerate().by_ref() {
// Treatment for the first line, if not P3 it's not a RGB picture => exit.
if i == 0 {
if &line.unwrap() != "P3" {
return None;
}
// The second line is the dimensions of the picture.
} else if i == 1 {
let list_num: Vec<u32> = get_number32_from_string(&line.unwrap());
width = list_num[0];
height = list_num[1];
} else {
// If the line begins with '#' it's a comment,
// or it is line 2 (the max value of a color); we ignore both.
let s: &String = &line.unwrap();
if (s.chars().next().unwrap() != '#') && (i != 2) {
let colors = get_number8_from_string(&s);
if colors.len() == 3 {
let c = Color::new(colors[0], colors[1], colors[2]);
pixels.push(c);
}
}
}
}
return Some(Image::new(width, height, pixels));
}
/// Invert the Colors of an Image using c.not()
/// to invert each color of a pixel
///
/// # Arguments
/// * image: Image - the image to be inverted
/// # Return
/// * Image - the image inverted
pub fn invert(image: &Image) -> Image {
let mut inv: Vec<Color> = Vec::new();
for c in &image.pixels {
inv.push(c.not());
}
return Image::new(image.width, image.height, inv);
}
/// Write the image passed as parameter in a file.
/// # Arguments
/// * image:Image - the image to write in the file
/// * path:&Path - the path where the file will be saved plus it's name.
///
/// # Contents
/// * The first line is the picture type: P3 is for the RGB color system
/// * The second line is the size of the picture (in pixels).
/// Two integers define the file's width and height.
/// * The third line is the max value of each color (255).
/// * The rest of the file is the colors. There are (width * height) lines
/// of three values (RGB) for each pixel.
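/// # Example (sketch)
/// A hypothetical 2x1 red-and-blue image would serialize to:
/// ```text
/// P3
/// 2 1
/// 255
/// 255 0 0
/// 0 0 255
/// ```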
pub fn save_file_from_image(image: &Image, path: &Path) -> std::io::Result<()> {
let mut file = File::create(path).expect("Unable to create the file");
file.write_all(b"P3\n").expect("Unable to write P3.");
file.write_fmt(format_args!("{} {}\n", image.width(), image.height()))
.expect("Unable to write width and height.");
file.write_all(b"255\n").expect("Unable to write max value for Colors.");
for c in &image.pixels {
file.write_fmt(format_args!("{} {} {} \n", c.red(), c.green(), c.blue()))
.expect("Unable to write colors.");
}
Ok(())
}
/// Return a grayscale Image from an RGB Image.
/// Each pixel of the grayscale Image is the average of the RGB channels.
///
/// # Arguments
/// * image:Image - The RGB Image to be converted
/// # Return
/// * Image - The grayscale Image converted
pub fn grayscale(image: &Image) -> Image {
let mut gray: Vec<Color> = Vec::new();
for i in &image.pixels {
let c: u8 = Color::grayscale(i);
gray.push(Color::new(c, c, c));
}
return Image::new(image.width, image.height, gray);
}
}
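// Illustrative test sketch (not part of the original file): inverting an
// image twice yields the original image again, since !(!x) == x per channel.
#[cfg(test)]
mod image_examples {
    use super::{Color, Image};

    #[test]
    fn invert_twice_is_identity() {
        let img = Image::new(2, 1, vec![Color::new(10, 20, 30), Color::new(0, 0, 0)]);
        let double_inverted = Image::invert(&Image::invert(&img));
        assert!(double_inverted.eq(img));
    }
}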
/// Transform a String with numbers in it into
/// a Vector<u32> for the picture size.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &Γ©45" passed as parameters will return Vec{1, 23, 45}
fn get_number32_from_string(line: &String) -> Vec<u32> {
let mut list_num: Vec<u32> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10) {
n.push(c);
}
}
// Add if a number is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
/// Transform a String with numbers in it into
/// a Vector<u8> for the colors.
/// # Example :
/// * "1 23 45" passed as parameters will return Vec{1, 23, 45}.
/// * "1 23 azer //& &Γ©45" passed as parameters will return Vec{1, 23, 45}
fn get_number8_from_string(line: &String) -> Vec<u8> {
let mut list_num: Vec<u8> = Vec::new();
let mut n = String::new();
for c in line.chars() {
if c == ' ' || c == '\0' {
if !n.is_empty() {
list_num.push(n.parse().unwrap());
n.clear();
}
} else if c.is_digit(10) {
n.push(c);
}
}
// Add if a number is at the end of the line
if !n.is_empty() {
list_num.push(n.parse().unwrap());
}
return list_num;
}
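// Usage sketch for the two parsers (illustrative addition, not part of the
// original file): digits are accumulated, whitespace delimits numbers, and
// any other characters are simply skipped.
#[cfg(test)]
mod parsing_examples {
    use super::{get_number32_from_string, get_number8_from_string};

    #[test]
    fn extracts_numbers_and_skips_noise() {
        assert_eq!(get_number32_from_string(&"12 34".to_string()), vec![12, 34]);
        assert_eq!(get_number8_from_string(&"1 23 azer //& 45".to_string()), vec![1, 23, 45]);
    }
}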
timer.rs
//! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
/// Use `CLOCK_REALTIME` for the timer.
Realtime,
/// Use `CLOCK_MONOTONIC` for the timer.
Monotonic,
}
/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);
/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}
/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);
impl Into<c_int> for Signal {
fn into(self) -> c_int {
self.0
}
}
impl From<c_int> for Signal {
fn from(v: c_int) -> Signal {
Signal(v)
}
}
/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
/// This will act like passing `NULL` to `timer_create()`, which maps to
/// using the same as `Signal(SIGALRM)`.
None,
/// When the timer expires, send a specific signal to the current process.
Signal(Signal),
/// When the timer expires, send a specific signal to a specific thread.
ThreadSignal(Tid, Signal),
/// Convenience value to send a signal to the current thread. This is
/// equivalent to using `ThreadSignal(gettid(), signal)`.
ThisThreadSignal(Signal),
}
// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;
// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
fn timer_delete(timer: TimerT) -> c_int;
fn timer_settime(
timerid: TimerT,
flags: c_int,
new_value: *const libc::itimerspec,
old_value: *mut libc::itimerspec,
) -> c_int;
}
/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
timer: TimerT,
}
/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
/// The timeout to the next timer event.
pub value: Option<Duration>,
/// When a timer expires, it may be automatically rearmed with another
/// timeout. This will keep happening until this is explicitly disabled
/// or the timer deleted.
pub interval: Option<Duration>,
}
// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
match v {
None => libc::timespec {
tv_sec: 0,
tv_nsec: 0,
},
Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64,
},
}
}
fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
libc::itimerspec {
it_value: opt_duration_to_timespec(self.value),
it_interval: opt_duration_to_timespec(self.interval),
}
}
fn from_itimerspec(ts: libc::itimerspec) -> Self {
TimerSpec {
value: timespec_to_opt_duration(ts.it_value),
interval: timespec_to_opt_duration(ts.it_interval),
}
}
/// Create an empty timer specification representing a disabled timer.
pub fn new() -> Self {
TimerSpec {
value: None,
interval: None,
}
}
/// Change the specification to have a specific value.
pub fn value(self, value: Option<Duration>) -> Self {
TimerSpec {
value,
interval: self.interval,
}
}
/// Change the specification to have a specific interval.
pub fn interval(self, interval: Option<Duration>) -> Self {
TimerSpec {
value: self.value,
interval,
}
}
}
impl Timer {
/// Create a Timer object governing a POSIX timer.
pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
// Map from our clock type to the libc id
let clkid = match clock {
Clock::Realtime => libc::CLOCK_REALTIME,
Clock::Monotonic => libc::CLOCK_MONOTONIC,
} as clockid_t;
// Map the TimerEvent to libc::sigevent
let mut ev: libc::sigevent = unsafe { mem::zeroed() };
match event {
TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
TimerEvent::Signal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
}
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = gettid().0;
}
}
// Create the timer
let mut timer: TimerT = unsafe { mem::zeroed() };
let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
if rc != 0 {
Err(io::Error::last_os_error())
} else {
Ok(Timer { timer })
}
}
/// Arm a timer. This returns the previous timer specification.
pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
let newspec = spec.to_itimerspec();
let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();
let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
if rc != 0 {
return Err(io::Error::last_os_error());
}
Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
}
}
impl Drop for Timer {
fn drop(&mut self) {
unsafe {
timer_delete(self.timer);
}
}
}
/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);
// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}
// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
// Unfortunately nix::sys::signal::Signal cannot represent real time
// signals, so we need to use libc instead...
//
// This WOULD be a nicer impl though:
//nix::sys::signal::sigaction(
// SIGTIMEOUT,
// nix::sys::signal::SigAction::new(
// nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
// nix::sys::signal::SaFlags::empty(),
// nix::sys::signal::SigSet::all()))
// .map(|_|())
unsafe {
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *sa_mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
{
return Err(io::Error::last_os_error());
}
let sa = libc::sigaction {
sa_sigaction:
// libc::sigaction uses `usize` for the function pointer...
sig_timeout_handler as *const extern "C" fn(i32) as usize,
sa_mask: sa_mask.assume_init(),
sa_flags: 0,
sa_restorer: None,
};
if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut()) != 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
// The first time we unblock SIGTIMEOUT should cause appropriate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();
/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
SETUP_TIMEOUT_HANDLER.call_once(|| {
// We unwrap here.
// If setting up this handler fails you have other problems already,
// plus, if setting up fails you can't *use* it either, so everything
// goes to die.
do_setup_timeout_handler().unwrap();
});
}
/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
fn drop(&mut self) {
if self.0 {
block_timeout_signal();
} else {
unblock_timeout_signal().forget();
}
}
}
impl TimeoutBlockGuard {
/// Convenience helper to "forget" to restore the signal block mask.
#[inline(always)]
pub fn forget(self) {
std::mem::forget(self);
}
/// Convenience helper to trigger the guard behavior immediately.
#[inline(always)]
pub fn trigger(self) {
std::mem::drop(self); // be explicit here...
}
}
/// Unblock the timeout signal for the current thread. By default we block the
/// signal; this behavior should be restored when done using timeouts, therefore
/// this returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
// This calls std::sync::Once:
setup_timeout_handler();
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT.0);
//set.thread_unblock()?;
//Ok(TimeoutBlockGuard{})
// Again, nix crate and its signal limitations...
// NOTE:
// sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
// passed to the kernel, or signal numbers are "invalid", since we know
// neither is the case we will panic on error...
let was_blocked = unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(
libc::SIG_UNBLOCK,
&mask.assume_init(),
&mut *oldset.as_mut_ptr(),
) != 0
{
panic!("Impossibly failed to unblock SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
};
TimeoutBlockGuard(was_blocked)
}
/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT);
//set.thread_block()
unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
!= 0
{
panic!("Impossibly failed to block SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
}
}
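// Usage sketch (illustrative addition, not part of the original file): arm
// a one-shot 5 second timer that signals the current thread, keeping the
// timeout signal unblocked while the timer exists.
#[cfg(test)]
mod usage_example {
    use super::*;
    use std::time::Duration;

    #[test]
    fn arm_one_shot_timer() -> std::io::Result<()> {
        // Unblock SIGTIMEOUT for this thread; the guard restores the mask.
        let _guard = unblock_timeout_signal();
        let mut timer = Timer::create(Clock::Monotonic, TimerEvent::ThisThreadSignal(SIGTIMEOUT))?;
        // One-shot: a value but no interval; `arm` returns the previous spec,
        // which for a freshly created timer is disarmed.
        let previous = timer.arm(TimerSpec::new().value(Some(Duration::from_secs(5))))?;
        assert!(previous.value.is_none());
        // Disarm again before the timer and the guard are dropped.
        timer.arm(TimerSpec::new())?;
        Ok(())
    }
}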
| timespec_to_opt_duration | identifier_name |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
/// Use `CLOCK_REALTIME` for the timer.
Realtime,
/// Use `CLOCK_MONOTONIC` for the timer.
Monotonic,
}
/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);
/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}
/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);
impl Into<c_int> for Signal {
fn into(self) -> c_int {
self.0
}
}
impl From<c_int> for Signal {
fn from(v: c_int) -> Signal {
Signal(v)
}
}
/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
/// This will act like passing `NULL` to `timer_create()`, which maps to
/// using the same as `Signal(SIGALRM)`.
None,
/// When the timer expires, send a specific signal to the current process.
Signal(Signal),
/// When the timer expires, send a specific signal to a specific thread.
ThreadSignal(Tid, Signal),
/// Convenience value to send a signal to the current thread. This is
/// equivalent to using `ThreadSignal(gettid(), signal)`.
ThisThreadSignal(Signal),
}
// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;
// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
fn timer_delete(timer: TimerT) -> c_int;
fn timer_settime(
timerid: TimerT,
flags: c_int,
new_value: *const libc::itimerspec,
old_value: *mut libc::itimerspec,
) -> c_int;
}
/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
timer: TimerT,
}
/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
/// The timeout to the next timer event.
pub value: Option<Duration>,
/// When a timer expires, it may be automatically rearmed with another
/// timeout. This will keep happening until this is explicitly disabled
/// or the timer deleted.
pub interval: Option<Duration>,
}
// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
match v {
None => libc::timespec {
tv_sec: 0,
tv_nsec: 0,
},
Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64,
},
}
}
fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
libc::itimerspec {
it_value: opt_duration_to_timespec(self.value),
it_interval: opt_duration_to_timespec(self.interval),
}
}
fn from_itimerspec(ts: libc::itimerspec) -> Self {
TimerSpec {
value: timespec_to_opt_duration(ts.it_value),
interval: timespec_to_opt_duration(ts.it_interval),
}
}
/// Create an empty timer specification representing a disabled timer.
pub fn new() -> Self {
TimerSpec {
value: None,
interval: None,
}
}
/// Change the specification to have a specific value.
pub fn value(self, value: Option<Duration>) -> Self {
TimerSpec {
value,
interval: self.interval,
}
}
/// Change the specification to have a specific interval.
pub fn interval(self, interval: Option<Duration>) -> Self {
TimerSpec {
value: self.value,
interval,
}
}
}
impl Timer {
/// Create a Timer object governing a POSIX timer.
pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
// Map from our clock type to the libc id
let clkid = match clock {
Clock::Realtime => libc::CLOCK_REALTIME,
Clock::Monotonic => libc::CLOCK_MONOTONIC,
} as clockid_t;
// Map the TimerEvent to libc::sigevent
let mut ev: libc::sigevent = unsafe { mem::zeroed() };
match event {
TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
TimerEvent::Signal(signo) => |
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = gettid().0;
}
}
// Create the timer
let mut timer: TimerT = unsafe { mem::zeroed() };
let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
if rc!= 0 {
Err(io::Error::last_os_error())
} else {
Ok(Timer { timer })
}
}
/// Arm a timer. This returns the previous timer specification.
pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
let newspec = spec.to_itimerspec();
let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();
let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
if rc!= 0 {
return Err(io::Error::last_os_error());
}
Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
}
}
impl Drop for Timer {
fn drop(&mut self) {
unsafe {
timer_delete(self.timer);
}
}
}
/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);
// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}
// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
// Unfortunately nix::sys::signal::Signal cannot represent real time
// signals, so we need to use libc instead...
//
// This WOULD be a nicer impl though:
//nix::sys::signal::sigaction(
// SIGTIMEOUT,
// nix::sys::signal::SigAction::new(
// nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
// nix::sys::signal::SaFlags::empty(),
// nix::sys::signal::SigSet::all()))
// .map(|_|())
unsafe {
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *sa_mask.as_mut_ptr())!= 0
|| libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0)!= 0
{
return Err(io::Error::last_os_error());
}
let sa = libc::sigaction {
sa_sigaction:
// libc::sigaction uses `usize` for the function pointer...
sig_timeout_handler as *const extern "C" fn(i32) as usize,
sa_mask: sa_mask.assume_init(),
sa_flags: 0,
sa_restorer: None,
};
if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut())!= 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
// The first time we unblock SIGTIMEOUT should cause approprate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();
/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
SETUP_TIMEOUT_HANDLER.call_once(|| {
// We unwrap here.
// If setting up this handler fails you have other problems already,
// plus, if setting up fails you can't *use* it either, so everything
// goes to die.
do_setup_timeout_handler().unwrap();
});
}
/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
fn drop(&mut self) {
if self.0 {
block_timeout_signal();
} else {
unblock_timeout_signal().forget();
}
}
}
impl TimeoutBlockGuard {
/// Convenience helper to "forget" to restore the signal block mask.
#[inline(always)]
pub fn forget(self) {
std::mem::forget(self);
}
/// Convenience helper to trigger the guard behavior immediately.
#[inline(always)]
pub fn trigger(self) {
std::mem::drop(self); // be explicit here...
}
}
/// Unblock the timeout signal for the current thread. By default we block the
/// signal this behavior should be restored when done using timeouts, therefor this
/// returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
// This calls std::sync::Once:
setup_timeout_handler();
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT.0);
//set.thread_unblock()?;
//Ok(TimeoutBlockGuard{})
// Again, nix crate and its signal limitations...
// NOTE:
// sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
// passed to the kernel, or signal numbers are "invalid", since we know
// neither is the case we will panic on error...
let was_blocked = unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr())!= 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0)!= 0
|| libc::pthread_sigmask(
libc::SIG_UNBLOCK,
&mask.assume_init(),
&mut *oldset.as_mut_ptr(),
)!= 0
{
panic!("Impossibly failed to unblock SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
};
TimeoutBlockGuard(was_blocked)
}
/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT);
//set.thread_block()
unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr())!= 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0)!= 0
|| libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
!= 0
{
panic!("Impossibly failed to block SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
}
}
| {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
} | conditional_block |
timer.rs | //! POSIX per-process timer interface.
//!
//! This module provides a wrapper around POSIX timers (see `timer_create(2)`) and utilities to
//! setup thread-targeted signaling and signal masks.
use std::mem::MaybeUninit;
use std::time::Duration;
use std::{io, mem};
use libc::{c_int, clockid_t, pid_t};
/// Timers can use various clocks. See `timer_create(2)`.
pub enum Clock {
/// Use `CLOCK_REALTIME` for the timer.
Realtime,
/// Use `CLOCK_MONOTONIC` for the timer.
Monotonic,
}
/// Strong thread-id type to prevent accidental conversion of pid_t.
pub struct Tid(pid_t);
/// Convenience helper to get the current thread ID suitable to pass to a
/// `TimerEvent::ThreadSignal` entry.
pub fn gettid() -> Tid {
Tid(unsafe { libc::syscall(libc::SYS_gettid) } as pid_t)
}
/// Strong signal type which is more advanced than nix::sys::signal::Signal as
/// it doesn't prevent you from using signals that the nix crate is unaware
/// of...!
pub struct Signal(c_int);
impl Into<c_int> for Signal {
fn into(self) -> c_int {
self.0
}
}
impl From<c_int> for Signal {
fn from(v: c_int) -> Signal {
Signal(v)
}
}
/// When instantiating a Timer, it needs to have an event type associated with
/// it to be fired whenever the timer expires. Most of the time this will be a
/// `Signal`. Sometimes we need to be able to send signals to specific threads.
pub enum TimerEvent {
/// This will act like passing `NULL` to `timer_create()`, which maps to
/// using the same as `Signal(SIGALRM)`.
None,
/// When the timer expires, send a specific signal to the current process.
Signal(Signal),
/// When the timer expires, send a specific signal to a specific thread.
ThreadSignal(Tid, Signal),
/// Convenience value to send a signal to the current thread. This is
/// equivalent to using `ThreadSignal(gettid(), signal)`.
ThisThreadSignal(Signal),
}
// timer_t is a pointer type, so we create a strongly typed internal handle
// type for it
#[repr(C)]
struct InternalTimerT(u32);
type TimerT = *mut InternalTimerT;
// These wrappers are defined in -lrt.
#[link(name = "rt")]
extern "C" {
fn timer_create(clockid: clockid_t, evp: *mut libc::sigevent, timer: *mut TimerT) -> c_int;
fn timer_delete(timer: TimerT) -> c_int;
fn timer_settime(
timerid: TimerT,
flags: c_int,
new_value: *const libc::itimerspec,
old_value: *mut libc::itimerspec,
) -> c_int;
}
/// Represents a POSIX per-process timer as created via `timer_create(2)`.
pub struct Timer {
timer: TimerT,
}
/// Timer specification used to arm a `Timer`.
#[derive(Default)]
pub struct TimerSpec {
/// The timeout to the next timer event.
pub value: Option<Duration>,
/// When a timer expires, it may be automatically rearmed with another
/// timeout. This will keep happening until this is explicitly disabled
/// or the timer deleted.
pub interval: Option<Duration>,
}
// Helpers to convert between libc::timespec and Option<Duration>
fn opt_duration_to_timespec(v: Option<Duration>) -> libc::timespec {
match v {
None => libc::timespec {
tv_sec: 0,
tv_nsec: 0,
}, | },
}
}
fn timespec_to_opt_duration(v: libc::timespec) -> Option<Duration> {
if v.tv_sec == 0 && v.tv_nsec == 0 {
None
} else {
Some(Duration::new(v.tv_sec as u64, v.tv_nsec as u32))
}
}
impl TimerSpec {
// Helpers to convert between TimerSpec and libc::itimerspec
fn to_itimerspec(&self) -> libc::itimerspec {
libc::itimerspec {
it_value: opt_duration_to_timespec(self.value),
it_interval: opt_duration_to_timespec(self.interval),
}
}
fn from_itimerspec(ts: libc::itimerspec) -> Self {
TimerSpec {
value: timespec_to_opt_duration(ts.it_value),
interval: timespec_to_opt_duration(ts.it_interval),
}
}
/// Create an empty timer specification representing a disabled timer.
pub fn new() -> Self {
TimerSpec {
value: None,
interval: None,
}
}
/// Change the specification to have a specific value.
pub fn value(self, value: Option<Duration>) -> Self {
TimerSpec {
value,
interval: self.interval,
}
}
/// Change the specification to have a specific interval.
pub fn interval(self, interval: Option<Duration>) -> Self {
TimerSpec {
value: self.value,
interval,
}
}
}
impl Timer {
/// Create a Timer object governing a POSIX timer.
pub fn create(clock: Clock, event: TimerEvent) -> io::Result<Timer> {
// Map from our clock type to the libc id
let clkid = match clock {
Clock::Realtime => libc::CLOCK_REALTIME,
Clock::Monotonic => libc::CLOCK_MONOTONIC,
} as clockid_t;
// Map the TimerEvent to libc::sigevent
let mut ev: libc::sigevent = unsafe { mem::zeroed() };
match event {
TimerEvent::None => ev.sigev_notify = libc::SIGEV_NONE,
TimerEvent::Signal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_SIGNAL;
}
TimerEvent::ThreadSignal(tid, signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = tid.0;
}
TimerEvent::ThisThreadSignal(signo) => {
ev.sigev_signo = signo.0;
ev.sigev_notify = libc::SIGEV_THREAD_ID;
ev.sigev_notify_thread_id = gettid().0;
}
}
// Create the timer
let mut timer: TimerT = unsafe { mem::zeroed() };
let rc = unsafe { timer_create(clkid, &mut ev, &mut timer) };
if rc!= 0 {
Err(io::Error::last_os_error())
} else {
Ok(Timer { timer })
}
}
/// Arm a timer. This returns the previous timer specification.
pub fn arm(&mut self, spec: TimerSpec) -> io::Result<TimerSpec> {
let newspec = spec.to_itimerspec();
let mut oldspec = MaybeUninit::<libc::itimerspec>::uninit();
let rc = unsafe { timer_settime(self.timer, 0, &newspec, &mut *oldspec.as_mut_ptr()) };
if rc!= 0 {
return Err(io::Error::last_os_error());
}
Ok(TimerSpec::from_itimerspec(unsafe { oldspec.assume_init() }))
}
}
impl Drop for Timer {
fn drop(&mut self) {
unsafe {
timer_delete(self.timer);
}
}
}
/// This is the signal number we use in our timeout implementations. We expect
/// the signal handler for this signal to never be replaced by some other
/// library. If this does happen, we need to find another signal. There should
/// be plenty.
/// Currently this is SIGRTMIN+4, the 5th real-time signal. glibc reserves the
/// first two for pthread internals.
pub const SIGTIMEOUT: Signal = Signal(32 + 4);
// Our timeout handler does exactly nothing. We only need it to interrupt
// system calls.
extern "C" fn sig_timeout_handler(_: c_int) {}
// See setup_timeout_handler().
fn do_setup_timeout_handler() -> io::Result<()> {
// Unfortunately nix::sys::signal::Signal cannot represent real time
// signals, so we need to use libc instead...
//
// This WOULD be a nicer impl though:
//nix::sys::signal::sigaction(
// SIGTIMEOUT,
// nix::sys::signal::SigAction::new(
// nix::sys::signal::SigHandler::Handler(sig_timeout_handler),
// nix::sys::signal::SaFlags::empty(),
// nix::sys::signal::SigSet::all()))
// .map(|_|())
unsafe {
let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *sa_mask.as_mut_ptr())!= 0
|| libc::sigaddset(&mut *sa_mask.as_mut_ptr(), SIGTIMEOUT.0)!= 0
{
return Err(io::Error::last_os_error());
}
let sa = libc::sigaction {
sa_sigaction:
// libc::sigaction uses `usize` for the function pointer...
sig_timeout_handler as *const extern "C" fn(i32) as usize,
sa_mask: sa_mask.assume_init(),
sa_flags: 0,
sa_restorer: None,
};
if libc::sigaction(SIGTIMEOUT.0, &sa, std::ptr::null_mut())!= 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
// The first time we unblock SIGTIMEOUT should cause appropriate initialization:
static SETUP_TIMEOUT_HANDLER: std::sync::Once = std::sync::Once::new();
/// Setup our timeout-signal workflow. This establishes the signal handler for
/// our `SIGTIMEOUT` and should be called once during initialization.
#[inline]
pub fn setup_timeout_handler() {
SETUP_TIMEOUT_HANDLER.call_once(|| {
// We unwrap here.
// If setting up this handler fails you have other problems already,
// plus, if setting up fails you can't *use* it either, so everything
// goes to die.
do_setup_timeout_handler().unwrap();
});
}
/// This guards the state of the timeout signal: We want it blocked usually.
pub struct TimeoutBlockGuard(bool);
impl Drop for TimeoutBlockGuard {
fn drop(&mut self) {
if self.0 {
block_timeout_signal();
} else {
unblock_timeout_signal().forget();
}
}
}
impl TimeoutBlockGuard {
/// Convenience helper to "forget" to restore the signal block mask.
#[inline(always)]
pub fn forget(self) {
std::mem::forget(self);
}
/// Convenience helper to trigger the guard behavior immediately.
#[inline(always)]
pub fn trigger(self) {
std::mem::drop(self); // be explicit here...
}
}
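// Illustrative sketch: the intended pattern for the guard above. Unblock
// SIGTIMEOUT around an interruptible system call; the guard restores the
// previous per-thread mask when it drops.
#[allow(dead_code)]
fn example_guarded_call() {
    let _guard = unblock_timeout_signal();
    // ... perform a blocking syscall that SIGTIMEOUT may interrupt ...
} // `_guard` drops here, re-blocking the signal if it was blocked before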
/// Unblock the timeout signal for the current thread. By default we block the
/// signal; this behavior should be restored when done using timeouts, therefore this
/// returns a guard:
#[inline(always)]
pub fn unblock_timeout_signal() -> TimeoutBlockGuard {
// This calls std::sync::Once:
setup_timeout_handler();
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT.0);
//set.thread_unblock()?;
//Ok(TimeoutBlockGuard{})
// Again, nix crate and its signal limitations...
// NOTE:
// sigsetops(3) and pthread_sigmask(3) can only fail if invalid memory is
// passed to the kernel, or signal numbers are "invalid"; since we know
// neither is the case, we will panic on error...
let was_blocked = unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
let mut oldset = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(
libc::SIG_UNBLOCK,
&mask.assume_init(),
&mut *oldset.as_mut_ptr(),
) != 0
{
panic!("Impossibly failed to unblock SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
libc::sigismember(&oldset.assume_init(), SIGTIMEOUT.0) == 1
};
TimeoutBlockGuard(was_blocked)
}
/// Block the timeout signal for the current thread. This is the default.
#[inline(always)]
pub fn block_timeout_signal() {
//let mut set = nix::sys::signal::SigSet::empty();
//set.add(SIGTIMEOUT);
//set.thread_block()
unsafe {
let mut mask = MaybeUninit::<libc::sigset_t>::uninit();
if libc::sigemptyset(&mut *mask.as_mut_ptr()) != 0
|| libc::sigaddset(&mut *mask.as_mut_ptr(), SIGTIMEOUT.0) != 0
|| libc::pthread_sigmask(libc::SIG_BLOCK, &mask.assume_init(), std::ptr::null_mut())
!= 0
{
panic!("Impossibly failed to block SIGTIMEOUT");
//return Err(io::Error::last_os_error());
}
}
} | Some(value) => libc::timespec {
tv_sec: value.as_secs() as i64,
tv_nsec: value.subsec_nanos() as i64, | random_line_split |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
*/
extern crate common;
extern crate futures;
extern crate hyper;
extern crate ias_client;
extern crate serde;
extern crate serde_json;
use self::futures::{future, Future};
use self::hyper::{
header::{HeaderMap, HeaderValue},
service::service_fn,
Body, Error, Method, Request, Response, Server, StatusCode,
};
use self::ias_client::{
client_utils,
client_utils::{ClientError, ClientResponse},
ias_client::IasClient,
};
use common::lru_cache::LruCache;
use common::utils::read_binary_file;
use ias_proxy_config::IasProxyConfig;
use std::{borrow::Borrow, net::SocketAddr, str::FromStr, sync::Mutex};
/// type definition for response sent from web server
type ResponseBox = Box<Future<Item = Response<Body>, Error = Error> + Send>;
/// structure defining IAS proxy server
pub struct IasProxyServer {
ias_proxy_ip: String,
ias_proxy_port: String,
ias_client: Box<IasClient>,
}
/// Request body from the client; the proxy server may deserialize the request in order to get the
/// quote if the request is for an attestation verification report.
#[derive(Deserialize)]
struct IasAVRRequestBody {
#[serde(rename = "isvEnclaveQuote")]
isv_enclave_quote: String,
#[serde(rename = "pseManifest")]
pse_manifest: String,
nonce: String,
}
/// ClientResponse decoded information stored in cache
#[derive(Debug, Clone)]
struct IasResponse {
body_string: String,
header_map: HeaderMap,
}
lazy_static! {
static ref sig_rl_cache: Mutex<LruCache<String, IasResponse>> = Mutex::new(LruCache::new(None));
static ref attestation_cache: Mutex<LruCache<String, IasResponse>> =
Mutex::new(LruCache::new(None));
}
const SIG_RL_LINK: &str = "/attestation/sgx/v2/sigrl";
const AVR_LINK: &str = "/attestation/sgx/v2/report";
const IP_PORT_DELIMITER: &str = ":";
const UNKNOWN_ERROR_STATUS_CODE: u16 = 520;
impl IasProxyServer {
/// Create new instance of IasProxyServer
fn new(config: &IasProxyConfig) -> Self {
IasProxyServer {
ias_proxy_ip: config.get_proxy_ip(),
ias_proxy_port: config.get_proxy_port(),
// Construct new IasClient with input config parameters
ias_client: Box::new(IasClient::new(
config.get_ias_url(),
read_binary_file(config.get_spid_cert_file().as_str()),
config.get_password(),
None,
)),
}
}
/// run method to start listening on the identified IP and port
pub fn run(&self) {
// Start the web server on the configured URL
let mut path = String::new();
path.push_str(self.ias_proxy_ip.as_str());
path.push_str(IP_PORT_DELIMITER);
path.push_str(self.ias_proxy_port.as_str());
info!("Proxy server will be started as {}", path);
// Construct socket address, panics if binding fails
let socket_addr: SocketAddr = match SocketAddr::from_str(&path) {
Ok(address_bind_successful) => address_bind_successful,
Err(err) => panic!("Error binding the address: {}", err),
};
info!("Socket binding successful");
// The service closure below requires 'static, so clone ias_client rather than borrow it
let ias_client = self.ias_client.clone();
// TODO: Store this server instance and call shutdown
let new_service = move || {
let ias_client = ias_client.clone();
// service_fn() creates a hyper's Service. It accepts a closure for handling the
// request, future response is constructed when request is served.
service_fn(move |req| respond_to_request(req, ias_client.borrow()))
};
// Run proxy server in current thread, serve or panic
hyper::rt::run(Server::bind(&socket_addr).serve(new_service).map_err(|e| {
panic!("Server error: {}", e);
}))
}
/// Stop listening on the port
#[allow(dead_code)]
pub fn stop(&self) {
// TODO: Need to stop the server started and clear the cache
unimplemented!()
}
}
/// Function to construct response by parsing request from IasClient. Accepts the request
/// parameter and reference to IasClient object. First checks if cached content has the response
/// corresponding to the request, if not present go and request IAS, get response, store in
/// cache, construct response back.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn respond_to_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get response parsing request parameters
match *req.method() {
Method::GET => handle_get_request(&req, ias_client_obj),
Method::POST => handle_post_request(req, ias_client_obj),
// Proxy server doesn't support any other request types other than GET and POST.
_ => send_response(StatusCode::NOT_FOUND, None, None),
}
}
/// Handle a GET request to the proxy; this should only be valid for getting the signature revocation
/// list. The proxy server doesn't support other GET requests. See ```respond_to_request()``` for a
/// detailed description.
fn handle_get_request(req: &Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(SIG_RL_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// Search cache for the signature revocation list
let mut sig_rl_cache_lock = sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock");
let cached = sig_rl_cache_lock.get(&path);
// If there's cache, send it as response, otherwise request from IAS
let response_to_send = match cached {
Some(cached_revocation_list) => Ok(cached_revocation_list.clone()),
None => {
// The request has the gid in its URI path, so we do not need to send the gid explicitly
let result = ias_client_obj.get_signature_revocation_list(None, Some(path.as_str()));
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock")
.set(path, ias_response);
}
ias_response_result
}
};
match response_to_send {
Ok(ias_response) => {
// Send the response to requester
let mut headers = ias_response.header_map;
let body = Body::from(ias_response.body_string);
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error, ideally this case should not occur. Cache must be corrupted or
// IAS returned error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Handle a POST request to the proxy; this should only be valid for getting an attestation
/// verification report. The proxy server doesn't support other POST requests. See
/// ```respond_to_request()``` for a detailed description.
fn handle_post_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(AVR_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// read json input data
let read_body_result = client_utils::read_body_as_string(req.into_body());
if read_body_result.is_err() {
return send_response(StatusCode::INTERNAL_SERVER_ERROR, None, None);
}
let read_body = read_body_result.unwrap();
let json_body: IasAVRRequestBody = serde_json::from_str(read_body.as_str())
.expect("Error deserializing IAS verification report");
let quote = json_body.isv_enclave_quote;
if quote.is_empty() {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// If no input quote in attestation cache (isvEnclaveQuote) then return 404
// otherwise check the cache or send the request to actual IAS server
let mut attestation_cache_lock = attestation_cache | // Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not present, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_body.pse_manifest.as_str()),
Option::from(json_body.nonce.as_str()),
);
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
// Store the response to the cache
attestation_cache
.lock()
.expect("Error acquiring AVR cache lock")
.set(quote, ias_response);
}
ias_response_result
}
};
match avr {
Ok(avr_content) => {
// AVR is read, send it to the requester
let body = Body::from(avr_content.body_string);
let mut headers = avr_content.header_map;
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error, ideally this case should not occur. Cache must be corrupted or
// IAS returned error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Function to construct ```hyper::Response``` for the supplied input parameters.
/// Accepts http status code and Optional headers, body to be packed in response object.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn send_response(
status_code: StatusCode,
headers: Option<HeaderMap<HeaderValue>>,
body: Option<Body>,
) -> ResponseBox {
// Construct response with empty body, then fill input parameters
let mut response = Response::new(Body::empty());
*response.status_mut() = status_code;
if body.is_some() {
*response.body_mut() = body.unwrap();
};
if headers.is_some() {
*response.headers_mut() = headers.unwrap();
}
Box::new(future::ok(response))
}
/// Function to convert a Result<ClientResponse, ClientError> into a Result<IasResponse, ClientError>.
/// This is done so that the response can be stored in the LRU cache; a ClientResponse cannot be
/// stored directly because it holds a ```hyper::Body```, which is a stream and cannot be cloned.
///
/// return: Result<IasResponse, ClientError>
fn ias_response_from_client_response(
client_response: Result<ClientResponse, ClientError>,
) -> Result<IasResponse, ClientError> {
// Start conversion; we need to parse the client_response
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading body as string is successful then construct IasResponse
match body_string_result {
Ok(body_read_successfully) => Ok(IasResponse {
body_string: body_read_successfully,
header_map: successful_response.header_map,
}),
// Conversion of body to string failed
Err(body_read_failed) => Err(body_read_failed),
}
}
// ClientError occurred, there's no valid response to convert
Err(error_response) => Err(error_response),
}
}
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
// Read toml config file as input.
// Conversion to the struct would have failed if fields in the file didn't match expectations.
// So the config map here has all required values set in it.
IasProxyServer::new(proxy_config)
}
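// Illustrative sketch (hypothetical values, mirroring the test below):
// construct a config, build the proxy and serve. Note that `run()` blocks
// the current thread and `new()` reads the configured certificate file.
#[allow(dead_code)]
fn example_start_proxy() {
    let config = IasProxyConfig::new(
        "127.0.0.1".to_string(),
        "8000".to_string(),
        "https://dummy-ias-url".to_string(),
        "src/tests/dummy_cert.pfx".to_string(),
        "".to_string(),
    );
    get_proxy_server(&config).run();
}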
#[cfg(test)]
mod tests {
use self::hyper::header::HeaderName;
use super::*;
#[test]
fn test_get_proxy_server() {
let ias_proxy_config = IasProxyConfig::new(
"127.0.0.1".to_string(),
"8000".to_string(),
"https://dummy-ias-url".to_string(),
"src/tests/dummy_cert.pfx".to_string(),
"".to_string(),
);
// This would also test new function of IasProxyServer
let ias_server = get_proxy_server(&ias_proxy_config);
assert_eq!(ias_server.ias_proxy_ip, "127.0.0.1");
assert_eq!(ias_server.ias_proxy_port, "8000");
}
#[test]
fn test_ias_response_from_client_response() {
let mut header_map = HeaderMap::new();
header_map.insert(
HeaderName::from_static("content-type"),
HeaderValue::from_static("plain/text"),
);
let client_response = Ok(ClientResponse {
body: Body::from("dummy text"),
header_map,
});
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(expected) => assert_eq!(expected.body_string, "dummy text"),
Err(_unexpected) => assert!(false),
};
}
#[test]
fn test_erraneous_ias_response_from_client_response() {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
} | .lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get("e);
let avr = match cached_avr { | random_line_split |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
*/
extern crate common;
extern crate futures;
extern crate hyper;
extern crate ias_client;
extern crate serde;
extern crate serde_json;
use self::futures::{future, Future};
use self::hyper::{
header::{HeaderMap, HeaderValue},
service::service_fn,
Body, Error, Method, Request, Response, Server, StatusCode,
};
use self::ias_client::{
client_utils,
client_utils::{ClientError, ClientResponse},
ias_client::IasClient,
};
use common::lru_cache::LruCache;
use common::utils::read_binary_file;
use ias_proxy_config::IasProxyConfig;
use std::{borrow::Borrow, net::SocketAddr, str::FromStr, sync::Mutex};
/// type definition for response sent from web server
type ResponseBox = Box<Future<Item = Response<Body>, Error = Error> + Send>;
/// structure defining IAS proxy server
pub struct IasProxyServer {
ias_proxy_ip: String,
ias_proxy_port: String,
ias_client: Box<IasClient>,
}
/// Request body from the client; the proxy server may deserialize the request in order to get the
/// quote if the request is for an attestation verification report.
#[derive(Deserialize)]
struct IasAVRRequestBody {
#[serde(rename = "isvEnclaveQuote")]
isv_enclave_quote: String,
#[serde(rename = "pseManifest")]
pse_manifest: String,
nonce: String,
}
/// ClientResponse decoded information stored in cache
#[derive(Debug, Clone)]
struct IasResponse {
body_string: String,
header_map: HeaderMap,
}
lazy_static! {
static ref sig_rl_cache: Mutex<LruCache<String, IasResponse>> = Mutex::new(LruCache::new(None));
static ref attestation_cache: Mutex<LruCache<String, IasResponse>> =
Mutex::new(LruCache::new(None));
}
const SIG_RL_LINK: &str = "/attestation/sgx/v2/sigrl";
const AVR_LINK: &str = "/attestation/sgx/v2/report";
const IP_PORT_DELIMITER: &str = ":";
const UNKNOWN_ERROR_STATUS_CODE: u16 = 520;
impl IasProxyServer {
/// Create new instance of IasProxyServer
fn new(config: &IasProxyConfig) -> Self {
IasProxyServer {
ias_proxy_ip: config.get_proxy_ip(),
ias_proxy_port: config.get_proxy_port(),
// Construct new IasClient with input config parameters
ias_client: Box::new(IasClient::new(
config.get_ias_url(),
read_binary_file(config.get_spid_cert_file().as_str()),
config.get_password(),
None,
)),
}
}
/// run method to start listening on the identified IP and port
pub fn run(&self) {
// Start the web server on the configured URL
let mut path = String::new();
path.push_str(self.ias_proxy_ip.as_str());
path.push_str(IP_PORT_DELIMITER);
path.push_str(self.ias_proxy_port.as_str());
info!("Proxy server will be started as {}", path);
// Construct socket address, panics if binding fails
let socket_addr: SocketAddr = match SocketAddr::from_str(&path) {
Ok(address_bind_successful) => address_bind_successful,
Err(err) => panic!("Error binding the address: {}", err),
};
info!("Socket binding successful");
// The service closure below requires 'static, so clone ias_client rather than borrow it
let ias_client = self.ias_client.clone();
// TODO: Store this server instance and call shutdown
let new_service = move || {
let ias_client = ias_client.clone();
// service_fn() creates a hyper's Service. It accepts a closure for handling the
// request, future response is constructed when request is served.
service_fn(move |req| respond_to_request(req, ias_client.borrow()))
};
// Run proxy server in current thread, serve or panic
hyper::rt::run(Server::bind(&socket_addr).serve(new_service).map_err(|e| {
panic!("Server error: {}", e);
}))
}
/// Stop listening on the port
#[allow(dead_code)]
pub fn stop(&self) {
// TODO: Need to stop the server started and clear the cache
unimplemented!()
}
}
/// Function to construct response by parsing request from IasClient. Accepts the request
/// parameter and reference to IasClient object. First checks if cached content has the response
/// corresponding to the request, if not present go and request IAS, get response, store in
/// cache, construct response back.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn respond_to_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get response parsing request parameters
match *req.method() {
Method::GET => handle_get_request(&req, ias_client_obj),
Method::POST => handle_post_request(req, ias_client_obj),
// Proxy server doesn't support any other request types other than GET and POST.
_ => send_response(StatusCode::NOT_FOUND, None, None),
}
}
/// Handle a GET request to the proxy; this should only be valid for getting the signature revocation
/// list. The proxy server doesn't support other GET requests. See ```respond_to_request()``` for a
/// detailed description.
fn handle_get_request(req: &Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(SIG_RL_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// Search cache for the signature revocation list
let mut sig_rl_cache_lock = sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock");
let cached = sig_rl_cache_lock.get(&path);
// If there's cache, send it as response, otherwise request from IAS
let response_to_send = match cached {
Some(cached_revocation_list) => Ok(cached_revocation_list.clone()),
None => {
// The request has the gid in its URI path, so we do not need to send the gid explicitly
let result = ias_client_obj.get_signature_revocation_list(None, Some(path.as_str()));
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock")
.set(path, ias_response);
}
ias_response_result
}
};
match response_to_send {
Ok(ias_response) => {
// Send the response to requester
let mut headers = ias_response.header_map;
let body = Body::from(ias_response.body_string);
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error, ideally this case should not occur. Cache must be corrupted or
// IAS returned error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Handle a POST request to the proxy; this should only be valid for getting an attestation
/// verification report. The proxy server doesn't support other POST requests. See
/// ```respond_to_request()``` for a detailed description.
fn handle_post_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(AVR_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// read json input data
let read_body_result = client_utils::read_body_as_string(req.into_body());
if read_body_result.is_err() {
return send_response(StatusCode::INTERNAL_SERVER_ERROR, None, None);
}
let read_body = read_body_result.unwrap();
let json_body: IasAVRRequestBody = serde_json::from_str(read_body.as_str())
.expect("Error deserializing IAS verification report");
let quote = json_body.isv_enclave_quote;
if quote.is_empty() {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// If no input quote in attestation cache (isvEnclaveQuote) then return 404
// otherwise check the cache or send the request to actual IAS server
let mut attestation_cache_lock = attestation_cache
.lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get("e);
let avr = match cached_avr {
// Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not present, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_body.pse_manifest.as_str()),
Option::from(json_body.nonce.as_str()),
);
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
// Store the response to the cache
attestation_cache
.lock()
.expect("Error acquiring AVR cache lock")
.set(quote, ias_response);
}
ias_response_result
}
};
match avr {
Ok(avr_content) => {
// AVR is read, send it to the requester
let body = Body::from(avr_content.body_string);
let mut headers = avr_content.header_map;
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error, ideally this case should not occur. Cache must be corrupted or
// IAS returned error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Function to construct ```hyper::Response``` for the supplied input parameters.
/// Accepts http status code and Optional headers, body to be packed in response object.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn send_response(
status_code: StatusCode,
headers: Option<HeaderMap<HeaderValue>>,
body: Option<Body>,
) -> ResponseBox {
// Construct response with empty body, then fill input parameters
let mut response = Response::new(Body::empty());
*response.status_mut() = status_code;
if body.is_some() {
*response.body_mut() = body.unwrap();
};
if headers.is_some() {
*response.headers_mut() = headers.unwrap();
}
Box::new(future::ok(response))
}
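// Illustrative sketch: a minimal use of the helper above; a bare 404 with
// neither headers nor body, as returned for unknown routes.
#[allow(dead_code)]
fn example_not_found() -> ResponseBox {
    send_response(StatusCode::NOT_FOUND, None, None)
}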
/// Function to convert a Result<ClientResponse, ClientError> into a Result<IasResponse, ClientError>.
/// This is done so that the response can be stored in the LRU cache; a ClientResponse cannot be
/// stored directly because it holds a ```hyper::Body```, which is a stream and cannot be cloned.
///
/// return: Result<IasResponse, ClientError>
fn ias_response_from_client_response(
client_response: Result<ClientResponse, ClientError>,
) -> Result<IasResponse, ClientError> {
// Start conversion; we need to parse the client_response
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading body as string is successful then construct IasResponse
match body_string_result {
Ok(body_read_successfully) => Ok(IasResponse {
body_string: body_read_successfully,
header_map: successful_response.header_map,
}),
// Conversion of body to string failed
Err(body_read_failed) => Err(body_read_failed),
}
}
// ClientError occurred, there's no valid response to convert
Err(error_response) => Err(error_response),
}
}
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
// Read toml config file as input.
// Conversion to the struct would have failed if fields in the file didn't match expectations.
// So the config map here has all required values set in it.
IasProxyServer::new(proxy_config)
}
#[cfg(test)]
mod tests {
use self::hyper::header::HeaderName;
use super::*;
#[test]
fn test_get_proxy_server() {
let ias_proxy_config = IasProxyConfig::new(
"127.0.0.1".to_string(),
"8000".to_string(),
"https://dummy-ias-url".to_string(),
"src/tests/dummy_cert.pfx".to_string(),
"".to_string(),
);
// This would also test new function of IasProxyServer
let ias_server = get_proxy_server(&ias_proxy_config);
assert_eq!(ias_server.ias_proxy_ip, "127.0.0.1");
assert_eq!(ias_server.ias_proxy_port, "8000");
}
#[test]
fn test_ias_response_from_client_response() {
let mut header_map = HeaderMap::new();
header_map.insert(
HeaderName::from_static("content-type"),
HeaderValue::from_static("plain/text"),
);
let client_response = Ok(ClientResponse {
body: Body::from("dummy text"),
header_map,
});
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(expected) => assert_eq!(expected.body_string, "dummy text"),
Err(_unexpected) => assert!(false),
};
}
#[test]
fn | () {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
}
| test_erraneous_ias_response_from_client_response | identifier_name |
ias_proxy_server.rs | /*
Copyright 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
*/
extern crate common;
extern crate futures;
extern crate hyper;
extern crate ias_client;
extern crate serde;
extern crate serde_json;
use self::futures::{future, Future};
use self::hyper::{
header::{HeaderMap, HeaderValue},
service::service_fn,
Body, Error, Method, Request, Response, Server, StatusCode,
};
use self::ias_client::{
client_utils,
client_utils::{ClientError, ClientResponse},
ias_client::IasClient,
};
use common::lru_cache::LruCache;
use common::utils::read_binary_file;
use ias_proxy_config::IasProxyConfig;
use std::{borrow::Borrow, net::SocketAddr, str::FromStr, sync::Mutex};
/// type definition for response sent from web server
type ResponseBox = Box<Future<Item = Response<Body>, Error = Error> + Send>;
/// structure defining IAS proxy server
pub struct IasProxyServer {
ias_proxy_ip: String,
ias_proxy_port: String,
ias_client: Box<IasClient>,
}
/// Request body from the client; the proxy server may deserialize the request in order to get the
/// quote if the request is for an attestation verification report.
#[derive(Deserialize)]
struct IasAVRRequestBody {
#[serde(rename = "isvEnclaveQuote")]
isv_enclave_quote: String,
#[serde(rename = "pseManifest")]
pse_manifest: String,
nonce: String,
}
/// ClientResponse decoded information stored in cache
#[derive(Debug, Clone)]
struct IasResponse {
body_string: String,
header_map: HeaderMap,
}
lazy_static! {
static ref sig_rl_cache: Mutex<LruCache<String, IasResponse>> = Mutex::new(LruCache::new(None));
static ref attestation_cache: Mutex<LruCache<String, IasResponse>> =
Mutex::new(LruCache::new(None));
}
const SIG_RL_LINK: &str = "/attestation/sgx/v2/sigrl";
const AVR_LINK: &str = "/attestation/sgx/v2/report";
const IP_PORT_DELIMITER: &str = ":";
const UNKNOWN_ERROR_STATUS_CODE: u16 = 520;
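// Example request shapes served below (the gid suffix on the sigrl path is
// an assumption based on the "gid in URI path" note in handle_get_request):
//   GET  /attestation/sgx/v2/sigrl/<gid>
//   POST /attestation/sgx/v2/report  (JSON body with "isvEnclaveQuote")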
impl IasProxyServer {
/// Create new instance of IasProxyServer
fn new(config: &IasProxyConfig) -> Self {
IasProxyServer {
ias_proxy_ip: config.get_proxy_ip(),
ias_proxy_port: config.get_proxy_port(),
// Construct new IasClient with input config parameters
ias_client: Box::new(IasClient::new(
config.get_ias_url(),
read_binary_file(config.get_spid_cert_file().as_str()),
config.get_password(),
None,
)),
}
}
/// run method to start listening on the identified IP and port
pub fn run(&self) {
// Start the web server on the configured URL
let mut path = String::new();
path.push_str(self.ias_proxy_ip.as_str());
path.push_str(IP_PORT_DELIMITER);
path.push_str(self.ias_proxy_port.as_str());
info!("Proxy server will be started as {}", path);
// Construct socket address, panics if binding fails
let socket_addr: SocketAddr = match SocketAddr::from_str(&path) {
Ok(address_bind_successful) => address_bind_successful,
Err(err) => panic!("Error binding the address: {}", err),
};
info!("Socket binding successful");
// The service closure below requires 'static, so clone ias_client rather than borrow it
let ias_client = self.ias_client.clone();
// TODO: Store this server instance and call shutdown
let new_service = move || {
let ias_client = ias_client.clone();
// service_fn() creates a hyper's Service. It accepts a closure for handling the
// request, future response is constructed when request is served.
service_fn(move |req| respond_to_request(req, ias_client.borrow()))
};
// Run proxy server in current thread, serve or panic
hyper::rt::run(Server::bind(&socket_addr).serve(new_service).map_err(|e| {
panic!("Server error: {}", e);
}))
}
/// Stop listening on the port
#[allow(dead_code)]
pub fn stop(&self) {
// TODO: Need to stop the server started and clear the cache
unimplemented!()
}
}
/// Function to construct response by parsing request from IasClient. Accepts the request
/// parameter and reference to IasClient object. First checks if cached content has the response
/// corresponding to the request, if not present go and request IAS, get response, store in
/// cache, construct response back.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn respond_to_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get response parsing request parameters
match *req.method() {
Method::GET => handle_get_request(&req, ias_client_obj),
Method::POST => handle_post_request(req, ias_client_obj),
// Proxy server doesn't support any other request types other than GET and POST.
_ => send_response(StatusCode::NOT_FOUND, None, None),
}
}
/// Handle a GET request to the proxy; this should only be valid for getting the signature revocation
/// list. The proxy server doesn't support other GET requests. See ```respond_to_request()``` for a
/// detailed description.
fn handle_get_request(req: &Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(SIG_RL_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// Search cache for the signature revocation list
let mut sig_rl_cache_lock = sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock");
let cached = sig_rl_cache_lock.get(&path);
// If there's cache, send it as response, otherwise request from IAS
let response_to_send = match cached {
Some(cached_revocation_list) => Ok(cached_revocation_list.clone()),
None => {
// The request has the gid in its URI path, so we do not need to send the gid explicitly
let result = ias_client_obj.get_signature_revocation_list(None, Some(path.as_str()));
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
sig_rl_cache
.lock()
.expect("Error acquiring SigRL cache lock")
.set(path, ias_response);
}
ias_response_result
}
};
match response_to_send {
Ok(ias_response) => {
// Send the response to requester
let mut headers = ias_response.header_map;
let body = Body::from(ias_response.body_string);
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error, ideally this case should not occur. Cache must be corrupted or
// IAS returned error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Handle a POST request to the proxy; this should only be valid for getting an attestation
/// verification report. The proxy server doesn't support other POST requests. See
/// ```respond_to_request()``` for a detailed description.
fn handle_post_request(req: Request<Body>, ias_client_obj: &IasClient) -> ResponseBox {
// Get path from request
let path = req.uri().path().to_owned();
if !path.contains(AVR_LINK) {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// read json input data
let read_body_result = client_utils::read_body_as_string(req.into_body());
if read_body_result.is_err() {
return send_response(StatusCode::INTERNAL_SERVER_ERROR, None, None);
}
let read_body = read_body_result.unwrap();
let json_body: IasAVRRequestBody = serde_json::from_str(read_body.as_str())
.expect("Error deserializing IAS verification report");
let quote = json_body.isv_enclave_quote;
if quote.is_empty() {
return send_response(StatusCode::NOT_FOUND, None, None);
}
// If no input quote in attestation cache (isvEnclaveQuote) then return 404
// otherwise check the cache or send the request to actual IAS server
let mut attestation_cache_lock = attestation_cache
.lock()
.expect("Error acquiring AVR cache lock");
let cached_avr = attestation_cache_lock.get("e);
let avr = match cached_avr {
// Cache is present, it can be sent
Some(cache_present) => Ok(cache_present.clone()),
// Cache is not present, request from IAS and add to cache
None => {
let result = ias_client_obj.post_verify_attestation(
quote.as_bytes(),
Option::from(json_body.pse_manifest.as_str()),
Option::from(json_body.nonce.as_str()),
);
let ias_response_result = ias_response_from_client_response(result);
if ias_response_result.is_ok() {
let ias_response = ias_response_result.clone().unwrap();
// Store the response to the cache
attestation_cache
.lock()
.expect("Error acquiring AVR cache lock")
.set(quote, ias_response);
}
ias_response_result
}
};
match avr {
Ok(avr_content) => {
// AVR is read, send it to the requester
let body = Body::from(avr_content.body_string);
let mut headers = avr_content.header_map;
send_response(StatusCode::OK, Option::from(headers), Option::from(body))
}
Err(error) => {
error!("Error occurred {}", error);
// Unknown error, ideally this case should not occur. Cache must be corrupted or
// IAS returned error.
send_response(
StatusCode::from_u16(UNKNOWN_ERROR_STATUS_CODE)
.expect("Error converting status code"),
None,
None,
)
}
}
}
/// Function to construct ```hyper::Response``` for the supplied input parameters.
/// Accepts http status code and Optional headers, body to be packed in response object.
///
/// return: A ```Box<Future<Item=Response<Body>, Error=hyper::Error> + Send>``` object:
/// Response message to be sent back for the request.
fn send_response(
status_code: StatusCode,
headers: Option<HeaderMap<HeaderValue>>,
body: Option<Body>,
) -> ResponseBox {
// Construct response with empty body, then fill input parameters
let mut response = Response::new(Body::empty());
*response.status_mut() = status_code;
if body.is_some() {
*response.body_mut() = body.unwrap();
};
if headers.is_some() {
*response.headers_mut() = headers.unwrap();
}
Box::new(future::ok(response))
}
/// Function to convert a Result<ClientResponse, ClientError> into a Result<IasResponse, ClientError>.
/// This is done so that the response can be stored in the LRU cache; a ClientResponse cannot be
/// stored directly because it holds a ```hyper::Body```, which is a stream and cannot be cloned.
///
/// return: Result<IasResponse, ClientError>
fn ias_response_from_client_response(
client_response: Result<ClientResponse, ClientError>,
) -> Result<IasResponse, ClientError> | Err(error_response) => Err(error_response),
}
}
/// Function to construct ```IasProxyServer``` object with the input proxy configuration file.
/// 'new()' for ```IasProxyServer``` is private, so use this public method to get instance of it.
///
/// return: A ```IasProxyServer``` object
pub fn get_proxy_server(proxy_config: &IasProxyConfig) -> IasProxyServer {
// Read toml config file as input.
// Conversion to the struct would have failed if fields in the file didn't match expectations.
// So the config map here has all required values set in it.
IasProxyServer::new(proxy_config)
}
#[cfg(test)]
mod tests {
use self::hyper::header::HeaderName;
use super::*;
#[test]
fn test_get_proxy_server() {
let ias_proxy_config = IasProxyConfig::new(
"127.0.0.1".to_string(),
"8000".to_string(),
"https://dummy-ias-url".to_string(),
"src/tests/dummy_cert.pfx".to_string(),
"".to_string(),
);
// This would also test new function of IasProxyServer
let ias_server = get_proxy_server(&ias_proxy_config);
assert_eq!(ias_server.ias_proxy_ip, "127.0.0.1");
assert_eq!(ias_server.ias_proxy_port, "8000");
}
#[test]
fn test_ias_response_from_client_response() {
let mut header_map = HeaderMap::new();
header_map.insert(
HeaderName::from_static("content-type"),
HeaderValue::from_static("plain/text"),
);
let client_response = Ok(ClientResponse {
body: Body::from("dummy text"),
header_map,
});
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(expected) => assert_eq!(expected.body_string, "dummy text"),
Err(_unexpected) => assert!(false),
};
}
#[test]
fn test_erraneous_ias_response_from_client_response() {
let client_response = Err(ClientError);
let ias_response = ias_response_from_client_response(client_response);
match ias_response {
Ok(_unexpected) => assert!(false),
Err(_expected) => assert!(true),
};
}
}
| {
// Start conversion; we need to parse the client_response
match client_response {
Ok(successful_response) => {
// If there's successful response, then read body to string
let body_string_result = client_utils::read_body_as_string(successful_response.body);
// If reading body as string is successful then construct IasResponse
match body_string_result {
Ok(body_read_successfully) => Ok(IasResponse {
body_string: body_read_successfully,
header_map: successful_response.header_map,
}),
// Conversion of body to string failed
Err(body_read_failed) => Err(body_read_failed),
}
}
// ClientError occurred, there's no valid response to convert | identifier_body |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use builder::CrosswordGridBuilder;
pub use pdf_conversion::CrosswordPrinter;
static VALID_ANSWERCHARS: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static VALID_CLUECHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_– -;:,.?!@'‘’`“”\"&*()$£%";
#[derive(Error,Debug,PartialEq)]
pub enum CellError {
#[error("Attempted to add word {0} to cell in direction {2:?} but cell already has id {1}")]
WordIdMismatch(usize, usize, Direction),
#[error("Attempted to add letter {0} to cell, but cell already has letter {1}")]
LetterMismatch(char, char), | FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but the words don't match: {2} {3}")]
AdjacentCellsMismatchedLinkWord(Location, Location, usize, usize),
#[error("Error updating cell at location {0:?}")]
CellError(Location, CellError),
#[error("Cell {0:?} at start/end of word not empty. Last/first cell in word is {1:?}")]
NonEmptyWordBoundary(Location, Location),
#[error("Cell not found in grid {0:?}")]
CellNotFound(Location),
#[error("Word {1} with id {0} already placed at {2:?}")]
WordAlreadyPlaced(usize, String, Location),
#[error("Attempted to place word {1} with id {0} with invalid direction {2:?}")]
InvalidWordDirection(usize, String, Direction),
#[error("Word not found in grid object {0}")]
WordNotFound(usize),
}
#[derive(Clone,Copy,Debug,PartialEq,Eq,Ord,PartialOrd,Hash)]
pub enum Direction {
Across,
Down,
}
impl Direction {
fn rotate(&self) -> Self {
match self {
Direction::Across => Direction::Down,
Direction::Down => Direction::Across,
}
}
}
#[derive(Clone,Copy,Eq,Hash)]
pub struct Location(pub isize, pub isize);
impl fmt::Debug for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Location({}, {})", self.0, self.1)
}
}
impl PartialEq for Location {
fn eq(&self, other: &Location) -> bool {
self.0 == other.0 && self.1 == other.1
}
}
impl Location {
fn relative_location(&self, move_across: isize, move_down: isize) -> Location {
Location(self.0 + move_across, self.1 + move_down)
}
fn relative_location_directed(&self, move_size: isize, direction: Direction) -> Location {
match direction {
Direction::Across => Location(self.0, self.1 + move_size),
Direction::Down => Location(self.0 + move_size, self.1),
}
}
}
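// Illustrative sketch: `relative_location_directed` steps along a word, so
// one step Across moves one column right and one step Down moves one row down.
#[allow(dead_code)]
fn example_location_steps() {
    let start = Location(3, 5);
    assert_eq!(start.relative_location_directed(1, Direction::Across), Location(3, 6));
    assert_eq!(start.relative_location_directed(1, Direction::Down), Location(4, 5));
}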
#[derive(Clone)]
pub struct CrosswordGrid {
cell_map: HashMap<Location, Cell>,
word_map: HashMap<usize, Word>,
top_left_cell_index: Location,
bottom_right_cell_index: Location,
}
impl fmt::Debug for CrosswordGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut words: Vec<(&usize, &Word)> = self.word_map.iter().collect();
words.sort_by_key(|a| *a.0);
let word_strs: Vec<String> = words.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
let mut cells: Vec<(&Location, &Cell)> = self.cell_map.iter().collect();
cells.sort_by_key(|a| (a.0.0, a.0.1));
let cell_strs: Vec<String> = cells.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
write!(f, "CrosswordGrid(\nIndices: Top left {:?} Bottom right {:?}\nWords:{:#?}\nCells:{:#?}\n))",
self.top_left_cell_index, self.bottom_right_cell_index, word_strs, cell_strs)
}
}
impl CrosswordGrid {
fn get_word(&self, word_id: usize) -> Result<&Word, CrosswordError> {
match self.word_map.get(&word_id) {
Some(word) => Ok(word),
None => Err(CrosswordError::WordNotFound(word_id)),
}
}
fn get_cell(&self, location: &Location) -> Result<&Cell, CrosswordError> {
match self.cell_map.get(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
fn get_cell_mut(&mut self, location: &Location) -> Result<&mut Cell, CrosswordError> {
match self.cell_map.get_mut(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
pub fn new_single_word(word: &str) -> Self {
let mut builder = builder::CrosswordGridBuilder::new();
builder.from_string(word)
}
fn new_from_wordmap_single_placed(word_id: usize,
direction: Direction,
mut word_map: HashMap<usize, Word>) -> Self {
let mut location = Location(0, 0);
let across_id: Option<usize>;
let down_id: Option<usize>;
let mut cell_map: HashMap<Location, Cell> = HashMap::new();
match direction {
Direction::Across => {
across_id = Some(word_id);
down_id = None;
},
Direction::Down => {
across_id = None;
down_id = Some(word_id);
},
};
let word = word_map.get_mut(&word_id).unwrap();
word.update_location(location, direction);
for c in word.word_text.chars() {
cell_map.insert(location, Cell::new(c, across_id, down_id));
location = location.relative_location_directed(1, direction);
}
let mut grid = CrosswordGrid {
cell_map,
word_map,
top_left_cell_index: Location(0, 0),
bottom_right_cell_index: location.relative_location_directed(-1, direction),
};
grid.fit_to_size();
grid
}
fn get_all_intersections(&self) -> Vec<(usize, usize)> {
let mut edges: Vec<(usize, usize)> = vec![];
for cell in self.cell_map.values().filter(|c| c.is_intersection()) {
edges.push((cell.get_across_word_id().unwrap(),
cell.get_down_word_id().unwrap()));
}
edges.sort();
debug!("All intersections found {:#?}", edges);
edges
}
pub fn to_graph_adjacency_matrix(&self) -> Array2<u8> {
let edges = self.get_all_intersections();
let mut word_ids: Vec<usize> = self.word_map.keys().cloned().collect();
word_ids.sort();
let max_word_id = word_ids[word_ids.len() - 1] + 1;
let mut adjacency: Array2<u8> = Array2::zeros((max_word_id, max_word_id));
for (word1, word2) in edges.iter() {
adjacency[[*word1, *word2]] += 1;
adjacency[[*word2, *word1]] += 1;
}
adjacency
}
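// Illustrative sketch (hypothetical helper): entry [i][j] of the matrix
// above counts intersections between words i and j, so the matrix is
// symmetric with a zero diagonal.
#[allow(dead_code)]
fn example_adjacency_is_symmetric(&self) {
    let m = self.to_graph_adjacency_matrix();
    assert_eq!(m, m.t());
}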
pub fn to_graph(&self) -> Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
pub fn to_string_with_coords(&self) -> String {
let mut string: String = String::from("");
let coord_string: String = format!("Top-left: ({}, {}), bottom-right: ({}, {})\n",
self.top_left_cell_index.0 + 1,
self.top_left_cell_index.1 + 1,
self.bottom_right_cell_index.0 - 1,
self.bottom_right_cell_index.1 - 1);
string.push_str(&coord_string);
string.push_str(&self.to_string());
string
}
pub fn to_string(&self) -> String {
let mut string: String = String::from("");
let mut row = self.top_left_cell_index.0 + 1;
let mut col = self.top_left_cell_index.1 + 1;
while row < self.bottom_right_cell_index.0 {
while col < self.bottom_right_cell_index.1 {
let c = self.cell_map.get(&Location(row, col)).unwrap().to_char();
string.push(c);
col += 1;
}
col = self.top_left_cell_index.1 + 1;
row += 1;
string.push('\n');
}
string
}
pub fn check_valid(&self) {
assert!(self.top_left_cell_index.0 <= self.bottom_right_cell_index.0);
assert!(self.top_left_cell_index.1 <= self.bottom_right_cell_index.1);
let mut row = self.top_left_cell_index.0;
let mut col = self.top_left_cell_index.1;
while row <= self.bottom_right_cell_index.0 {
while col <= self.bottom_right_cell_index.1 {
let present = self.cell_map.contains_key(&Location(row, col));
if !present {
panic!("Cell not present in grid {}, {}", row, col);
}
col += 1;
}
col = self.top_left_cell_index.1;
row += 1;
}
for cell in self.cell_map.values() {
if let Some(word_id) = cell.get_across_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
if let Some(word_id) = cell.get_down_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
}
let graph = self.to_graph();
debug!("{:#?}", graph);
debug!("{:#?}", self.word_map);
debug!("Checking grid connected {}", self.to_string());
assert!(graph.is_connected());
}
fn find_lowest_unused_word_id(&self) -> usize {
let mut word_id: usize = 0;
while self.word_map.contains_key(&word_id) {
word_id += 1;
}
word_id
}
pub fn add_unplaced_word_at_id(&mut self, word_text: &str, clue: &str, word_id: usize, required_direction: Option<Direction>) {
let word = Word::new_unplaced(word_text, clue, required_direction);
self.word_map.insert(word_id, word);
}
pub fn add_unplaced_word(&mut self, word_text: &str, clue: &str, required_direction: Option<Direction>) -> usize {
let word_id = self.find_lowest_unused_word_id();
self.add_unplaced_word_at_id(word_text, clue, word_id, required_direction);
word_id
}
pub fn update_word_id(&mut self, old_word_id: usize, new_word_id: usize) {
// Move in hashmap
let word: Word = self.word_map.remove(&old_word_id).unwrap();
self.word_map.insert(new_word_id, word);
for (_location, cell) in self.cell_map.iter_mut() {
cell.update_word_id(old_word_id, new_word_id);
}
}
pub fn delete_word(&mut self, word_id: usize) {
self.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub fn unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} words in grid", self.word_map.len());
}
}
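// Illustrative sketch (hypothetical flow): add an unplaced word to a grid,
// then remove it again; `delete_word` unplaces the word and drops its entry.
#[allow(dead_code)]
fn example_word_lifecycle() {
    let mut grid = CrosswordGrid::new_single_word("ALPHA");
    let word_id = grid.add_unplaced_word("BETA", "a Greek letter", None);
    grid.delete_word(word_id);
}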
#[cfg(test)]
mod tests {
use super::*;
use ndarray::array;
#[test]
fn test_adjacency() -> Result<(), CrosswordError> {
crate::logging::init_logger(true);
let mut grid = CrosswordGrid::new_single_word("ALPHA");
let arrival_word_id = grid.add_unplaced_word("ARRIVAL", "", None);
let bear_word_id = grid.add_unplaced_word("BEARER", "", None);
let innards_word_id = grid.add_unplaced_word("INNARDS", "", None);
let cup_word_id = grid.add_unplaced_word("CUP", "", None);
grid.add_unplaced_word("CAP", "", None);
grid.check_valid();
debug!("{:#?}", grid);
grid.place_word_in_cell(Location(0, 0), arrival_word_id, 0, Direction::Down)?;
grid.place_word_in_cell(Location(0, 4), bear_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(0, 2), cup_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(3, 0), innards_word_id, 0, Direction::Across)?;
debug!("{:#?}", grid);
grid.check_valid();
let adjacency = grid.to_graph_adjacency_matrix();
assert_eq!(adjacency, array![[0, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]);
Ok(())
}
} |
#[error("Attempted to fill a cell already marked as black")] | random_line_split |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use builder::CrosswordGridBuilder;
pub use pdf_conversion::CrosswordPrinter;
static VALID_ANSWERCHARS: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static VALID_CLUECHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_– -;:,.?!@'‘’`“”\"&*()$£%";
#[derive(Error,Debug,PartialEq)]
pub enum CellError {
#[error("Attempted to add word {0} to cell in direction {2:?} but cell already has id {1}")]
WordIdMismatch(usize, usize, Direction),
#[error("Attempted to add letter {0} to cell, but cell already has letter {1}")]
LetterMismatch(char, char),
#[error("Attempted to fill a cell already marked as black")]
FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but the words don't match: {2} {3}")]
AdjacentCellsMismatchedLinkWord(Location, Location, usize, usize),
#[error("Error updating cell at location {0:?}")]
CellError(Location, CellError),
#[error("Cell {0:?} at start/end of word not empty. Last/first cell in word is {1:?}")]
NonEmptyWordBoundary(Location, Location),
#[error("Cell not found in grid {0:?}")]
CellNotFound(Location),
#[error("Word {1} with id {0} already placed at {2:?}")]
WordAlreadyPlaced(usize, String, Location),
#[error("Attempted to place word {1} with id {0} with invalid direction {2:?}")]
InvalidWordDirection(usize, String, Direction),
#[error("Word not found in grid object {0}")]
WordNotFound(usize),
}
#[derive(Clone,Copy,Debug,PartialEq,Eq,Ord,PartialOrd,Hash)]
pub enum Direction {
Across,
Down,
}
impl Direction {
fn rotate(&self) -> Self {
match self {
Direction::Across => Direction::Down,
Direction::Down => Direction::Across,
}
}
}
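// Illustrative check: `rotate` flips the axis, so applying it twice is a
// round trip.
#[allow(dead_code)]
fn example_rotate_roundtrip() {
    assert_eq!(Direction::Across.rotate(), Direction::Down);
    assert_eq!(Direction::Down.rotate().rotate(), Direction::Down);
}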
#[derive(Clone,Copy,Eq,Hash)]
pub struct Location(pub isize, pub isize);
impl fmt::Debug for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Location({}, {})", self.0, self.1)
}
}
impl PartialEq for Location {
fn eq(&self, other: &Location) -> bool {
self.0 == other.0 && self.1 == other.1
}
}
impl Location {
fn relative_location(&self, move_across: isize, move_down: isize) -> Location {
Location(self.0 + move_across, self.1 + move_down)
}
fn relative_location_directed(&self, move_size: isize, direction: Direction) -> Location {
match direction {
Direction::Across => Location(self.0, self.1 + move_size),
Direction::Down => Location(self.0 + move_size, self.1),
}
}
}
#[derive(Clone)]
pub struct CrosswordGrid {
cell_map: HashMap<Location, Cell>,
word_map: HashMap<usize, Word>,
top_left_cell_index: Location,
bottom_right_cell_index: Location,
}
impl fmt::Debug for CrosswordGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut words: Vec<(&usize, &Word)> = self.word_map.iter().collect();
words.sort_by_key(|a| *a.0);
let word_strs: Vec<String> = words.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
let mut cells: Vec<(&Location, &Cell)> = self.cell_map.iter().collect();
cells.sort_by_key(|a| (a.0.0, a.0.1));
let cell_strs: Vec<String> = cells.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
write!(f, "CrosswordGrid(\nIndices: Top left {:?} Bottom right {:?}\nWords:{:#?}\nCells:{:#?}\n))",
self.top_left_cell_index, self.bottom_right_cell_index, word_strs, cell_strs)
}
}
impl CrosswordGrid {
fn get_word(&self, word_id: usize) -> Result<&Word, CrosswordError> {
match self.word_map.get(&word_id) {
Some(word) => Ok(word),
None => Err(CrosswordError::WordNotFound(word_id)),
}
}
fn get_cell(&self, location: &Location) -> Result<&Cell, CrosswordError> {
match self.cell_map.get(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
fn get_cell_mut(&mut self, location: &Location) -> Result<&mut Cell, CrosswordError> {
match self.cell_map.get_mut(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
pub fn new_single_word(word: &str) -> Self {
let mut builder = builder::CrosswordGridBuilder::new();
builder.from_string(word)
}
fn new_from_wordmap_single_placed(word_id: usize,
direction: Direction,
mut word_map: HashMap<usize, Word>) -> Self {
let mut location = Location(0, 0);
let across_id: Option<usize>;
let down_id: Option<usize>;
let mut cell_map: HashMap<Location, Cell> = HashMap::new();
match direction {
Direction::Across => {
across_id = Some(word_id);
down_id = None;
},
Direction::Down => {
across_id = None;
down_id = Some(word_id);
},
};
let word = word_map.get_mut(&word_id).unwrap();
word.update_location(location, direction);
for c in word.word_text.chars() {
cell_map.insert(location, Cell::new(c, across_id, down_id));
location = location.relative_location_directed(1, direction);
}
let mut grid = CrosswordGrid {
cell_map,
word_map,
top_left_cell_index: Location(0, 0),
bottom_right_cell_index: location.relative_location_directed(-1, direction),
};
grid.fit_to_size();
grid
}
fn get_all_intersections(&self) -> Vec<(usize, usize)> {
let mut edges: Vec<(usize, usize)> = vec![];
for cell in self.cell_map.values().filter(|c| c.is_intersection()) {
edges.push((cell.get_across_word_id().unwrap(),
cell.get_down_word_id().unwrap()));
}
edges.sort();
debug!("All intersections found {:#?}", edges);
edges
}
pub fn to_graph_adjacency_matrix(&self) -> Array2<u8> {
let edges = self.get_all_intersections();
let mut word_ids: Vec<usize> = self.word_map.keys().cloned().collect();
word_ids.sort();
let max_word_id = word_ids[word_ids.len() - 1] + 1;
let mut adjacency: Array2<u8> = Array2::zeros((max_word_id, max_word_id));
for (word1, word2) in edges.iter() {
adjacency[[*word1, *word2]] += 1;
adjacency[[*word2, *word1]] += 1;
}
adjacency
}
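// Illustrative sketch (consistent with test_adjacency below): a single
// intersection joining words 0 and 1 yields a symmetric matrix with
//   adjacency[[0, 1]] == 1 && adjacency[[1, 0]] == 1
// and the matrix is square with side (largest word id + 1), so unplaced
// high ids still get (empty) rows.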
pub fn to_graph(&s | Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
pub fn to_string_with_coords(&self) -> String {
let mut string: String = String::from("");
let coord_string: String = format!("Top-left: ({}, {}), bottom-right: ({}, {})\n",
self.top_left_cell_index.0 + 1,
self.top_left_cell_index.1 + 1,
self.bottom_right_cell_index.0 - 1,
self.bottom_right_cell_index.1 - 1);
string.push_str(&coord_string);
string.push_str(&self.to_string());
string
}
pub fn to_string(&self) -> String {
let mut string: String = String::from("");
let mut row = self.top_left_cell_index.0 + 1;
let mut col = self.top_left_cell_index.1 + 1;
while row < self.bottom_right_cell_index.0 {
while col < self.bottom_right_cell_index.1 {
let c = self.cell_map.get(&Location(row, col)).unwrap().to_char();
string.push(c);
col += 1;
}
col = self.top_left_cell_index.1 + 1;
row += 1;
string.push('\n');
}
string
}
pub fn check_valid(&self) {
assert!(self.top_left_cell_index.0 <= self.bottom_right_cell_index.0);
assert!(self.top_left_cell_index.1 <= self.bottom_right_cell_index.1);
let mut row = self.top_left_cell_index.0;
let mut col = self.top_left_cell_index.1;
while row <= self.bottom_right_cell_index.0 {
while col <= self.bottom_right_cell_index.1 {
let present = self.cell_map.contains_key(&Location(row, col));
if !present {
panic!("Cell not present in grid {}, {}", row, col);
}
col += 1;
}
col = self.top_left_cell_index.1;
row += 1;
}
for cell in self.cell_map.values() {
if let Some(word_id) = cell.get_across_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
if let Some(word_id) = cell.get_down_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
}
let graph = self.to_graph();
debug!("{:#?}", graph);
debug!("{:#?}", self.word_map);
debug!("Checking grid connected {}", self.to_string());
assert!(graph.is_connected());
}
fn find_lowest_unused_word_id(&self) -> usize {
let mut word_id: usize = 0;
while self.word_map.contains_key(&word_id) {
word_id += 1;
}
word_id
}
pub fn add_unplaced_word_at_id(&mut self, word_text: &str, clue: &str, word_id: usize, required_direction: Option<Direction>) {
let word = Word::new_unplaced(word_text, clue, required_direction);
self.word_map.insert(word_id, word);
}
pub fn add_unplaced_word(&mut self, word_text: &str, clue: &str, required_direction: Option<Direction>) -> usize {
let word_id = self.find_lowest_unused_word_id();
self.add_unplaced_word_at_id(word_text, clue, word_id, required_direction);
word_id
}
pub fn update_word_id(&mut self, old_word_id: usize, new_word_id: usize) {
// Move in hashmap
let word: Word = self.word_map.remove(&old_word_id).unwrap();
self.word_map.insert(new_word_id, word);
for (_location, cell) in self.cell_map.iter_mut() {
cell.update_word_id(old_word_id, new_word_id);
}
}
pub fn delete_word(&mut self, word_id: usize) {
self.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub fn unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} words in grid", self.word_map.len());
}
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::array;
#[test]
fn test_adjacency() -> Result<(), CrosswordError> {
crate::logging::init_logger(true);
let mut grid = CrosswordGrid::new_single_word("ALPHA");
let arrival_word_id = grid.add_unplaced_word("ARRIVAL", "", None);
let bear_word_id = grid.add_unplaced_word("BEARER", "", None);
let innards_word_id = grid.add_unplaced_word("INNARDS", "", None);
let cup_word_id = grid.add_unplaced_word("CUP", "", None);
grid.add_unplaced_word("CAP", "", None);
grid.check_valid();
debug!("{:#?}", grid);
grid.place_word_in_cell(Location(0, 0), arrival_word_id, 0, Direction::Down)?;
grid.place_word_in_cell(Location(0, 4), bear_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(0, 2), cup_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(3, 0), innards_word_id, 0, Direction::Across)?;
debug!("{:#?}", grid);
grid.check_valid();
let adjacency = grid.to_graph_adjacency_matrix();
assert_eq!(adjacency, array![[0, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]);
Ok(())
}
}
| elf) -> | identifier_name |
mod.rs | use crate::graph::Graph;
use log::debug;
use std::collections::HashMap;
use std::fmt;
use ndarray::Array2;
use thiserror::Error;
mod builder;
mod word;
mod cell;
mod add_word;
mod random;
mod spacing;
mod properties;
mod pdf_conversion;
mod matrix;
mod merge;
mod validity;
use word::Word;
use cell::Cell;
pub use builder::CrosswordGridBuilder;
pub use pdf_conversion::CrosswordPrinter;
static VALID_ANSWERCHARS: &str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
static VALID_CLUECHARS: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_– -;:,.?!@'‘’`“”\"&*()$£%";
#[derive(Error,Debug,PartialEq)]
pub enum CellError {
#[error("Attempted to add word {0} to cell in direction {2:?} but cell already has id {1}")]
WordIdMismatch(usize, usize, Direction),
#[error("Attempted to add letter {0} to cell, but cell already has letter {1}")]
LetterMismatch(char, char),
#[error("Attempted to fill a cell already marked as black")]
FillBlack,
}
#[derive(Error,Debug,PartialEq)]
pub enum CrosswordError {
#[error("Adjacent cells {0:?} {1:?} incompatible - no word found that links them.")]
AdjacentCellsNoLinkWord(Location, Location),
#[error("Adjacent cells {0:?} {1:?} incompatible - should have a shared word which links them, but the words don't match: {2} {3}")]
AdjacentCellsMismatchedLinkWord(Location, Location, usize, usize),
#[error("Error updating cell at location {0:?}")]
CellError(Location, CellError),
#[error("Cell {0:?} at start/end of word not empty. Last/first cell in word is {1:?}")]
NonEmptyWordBoundary(Location, Location),
#[error("Cell not found in grid {0:?}")]
CellNotFound(Location),
#[error("Word {1} with id {0} already placed at {2:?}")]
WordAlreadyPlaced(usize, String, Location),
#[error("Attempted to place word {1} with id {0} with invalid direction {2:?}")]
InvalidWordDirection(usize, String, Direction),
#[error("Word not found in grid object {0}")]
WordNotFound(usize),
}
#[derive(Clone,Copy,Debug,PartialEq,Eq,Ord,PartialOrd,Hash)]
pub enum Direction {
Across,
Down,
}
impl Direction {
fn rotate(&self) -> Self {
match self {
Direction::Across => Direction::Down,
Direction::Down => Direction::Across,
}
}
}
#[derive(Clone,Copy,Eq,Hash)]
pub struct Location(pub isize, pub isize);
impl fmt::Debug for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Location({}, {})", self.0, self.1)
}
}
impl PartialEq for Location {
fn eq(&self, other: &Location) -> bool {
self.0 == other.0 && self.1 == other.1
}
}
impl Location {
fn relative_location(&self, move_across: isize, move_down: isize) -> Location {
Location(self.0 + move_across, self.1 + move_down)
}
fn relative_location_directed(&self, move_size: isize, direction: Direction) -> Location {
match direction {
Direction::Across => Location(self.0, self.1 + move_size),
Direction::Down => Location(self.0 + move_size, self.1),
}
}
}
#[derive(Clone)]
pub struct CrosswordGrid {
cell_map: HashMap<Location, Cell>,
word_map: HashMap<usize, Word>,
top_left_cell_index: Location,
bottom_right_cell_index: Location,
}
impl fmt::Debug for CrosswordGrid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut words: Vec<(&usize, &Word)> = self.word_map.iter().collect();
words.sort_by_key(|a| *a.0);
let word_strs: Vec<String> = words.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
let mut cells: Vec<(&Location, &Cell)> = self.cell_map.iter().collect();
cells.sort_by_key(|a| (a.0.0, a.0.1));
let cell_strs: Vec<String> = cells.iter().map(|x| format!("{:?}: {:?}", x.0, x.1)).collect();
write!(f, "CrosswordGrid(\nIndices: Top left {:?} Bottom right {:?}\nWords:{:#?}\nCells:{:#?}\n))",
self.top_left_cell_index, self.bottom_right_cell_index, word_strs, cell_strs)
}
}
impl CrosswordGrid {
fn get_word(&self, word_id: usize) -> Result<&Word, CrosswordError> {
match self.word_map.get(&word_id) {
Some(word) => Ok(word),
None => Err(CrosswordError::WordNotFound(word_id)),
}
}
fn get_cell(&self, location: &Location) -> Result<&Cell, CrosswordError> {
match self.cell_map.get(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
fn get_cell_mut(&mut self, location: &Location) -> Result<&mut Cell, CrosswordError> {
match self.cell_map.get_mut(location) {
Some(cell) => Ok(cell),
None => Err(CrosswordError::CellNotFound(*location)),
}
}
pub fn new_single_word(word: &str) -> Self {
let mut builder = builder::CrosswordGridBuilder::new();
builder.from_string(word)
}
fn new_from_wordmap_single_placed(word_id: usize,
direction: Direction,
mut word_map: HashMap<usize, Word>) -> Self {
let mut location = Location(0, 0);
let across_id: Option<usize>;
let down_id: Option<usize>;
let mut cell_map: HashMap<Location, Cell> = HashMap::new();
match direction {
Direction::Across => {
across_id = Some(word_id);
down_id = None;
},
Direction::Down => {
across_id = None;
down_id = Some(word_id);
},
};
let word = word_map.get_mut(&word_id).unwrap();
word.update_location(location, direction);
for c in word.word_text.chars() {
cell_map.insert(location, Cell::new(c, across_id, down_id));
location = location.relative_location_directed(1, direction);
}
let mut grid = CrosswordGrid {
cell_map,
word_map,
top_left_cell_index: Location(0, 0),
bottom_right_cell_index: location.relative_location_directed(-1, direction),
};
grid.fit_to_size();
grid
}
fn get_all_intersections(&self) -> Vec<(usize, usize)> {
let mut edges: Vec<(usize, usize)> = vec![];
for cell in self.cell_map.values().filter(|c| c.is_intersection()) {
edges.push((cell.get_across_word_id().unwrap(),
cell.get_down_word_id().unwrap()));
}
edges.sort();
debug!("All intersections found {:#?}", edges);
edges
}
pub fn to_graph_adjacency_matrix(&self) -> Array2<u8> {
let edges = self.get_all_intersections();
let mut word_ids: Vec<usize> = self.word_map.keys().cloned().collect();
word_ids.sort();
let max_word_id = word_ids[word_ids.len() - 1] + 1;
let mut adjacency: Array2<u8> = Array2::zeros((max_word_id, max_word_id));
for (word1, word2) in edges.iter() {
adjacency[[*word1, *word2]] += 1;
adjacency[[*word2, *word1]] += 1;
}
adjacency
}
pub fn to_graph(&self) -> Graph {
let edges = self.get_all_intersections();
let mut graph = Graph::new_from_edges(edges);
for (word_id, _word) in self.word_map.iter().filter(|(_id, w)| w.is_placed()) {
graph.add_node(*word_id);
}
graph
}
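// Hedged note: placed words with no intersections still become isolated
// nodes here, which is what lets check_valid()'s is_connected() assertion
// below detect words that are placed but not linked into the rest of the grid.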
pub fn to_string_with_coords(&self) -> String {
let mut string: String = String::from("");
let coord_string: String = format!("Top-left: ({}, {}), bottom-right: ({}, {})\n",
self.top_left_cell_index.0 + 1,
self.top_left_cell_index.1 + 1,
self.bottom_right_cell_index.0 - 1,
self.bottom_right_cell_index.1 - 1);
string.push_str(&coord_string);
string.push_str(&self.to_string());
string
}
pub fn to_string(&self) -> String {
let mut string: String = String::from("");
let mut row = self.top_left_cell_index.0 + 1;
let mut col = self.top_left_cell_index.1 + 1;
while row < self.bottom_right_cell_index.0 {
while col < self.bottom_right_cell_index.1 {
let c = self.cell_map.get(&Location(row, col)).unwrap().to_char();
string.push(c);
col += 1;
}
col = self.top_left_cell_index.1 + 1;
row += 1;
string.push('\n');
}
string
}
pub fn check_valid(&self) {
assert!(self.top_left_cell_index.0 <= self.bottom_right_cell_index.0);
assert!(self.top_left_cell_index.1 <= self.bottom_right_cell_index.1);
let mut row = self.top_left_cell_index.0;
let mut col = self.top_left_cell_index.1;
while row <= self.bottom_right_cell_index.0 {
while col <= self.bottom_right_cell_index.1 {
let present = self.cell_map.contains_key(&Location(row, col));
if !present {
panic!("Cell not present in grid {}, {}", row, col);
}
col += 1;
}
col = self.top_left_cell_index.1;
row += 1;
}
for cell in self.cell_map.values() {
if let Some(word_id) = cell.get_across_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
if let Some(word_id) = cell.get_down_word_id() {
assert!(self.word_map.contains_key(&word_id));
}
}
let graph = self.to_graph();
debug!("{:#?}", graph);
debug!("{:#?}", self.word_map);
debug!("Checking grid connected {}", self.to_string());
assert!(graph.is_connected());
}
fn find_lowest_unused_word_id(&self) -> usize {
let mut word_id: usize = 0;
while self.word_map.contains_key(&word_id) {
word_id += 1;
}
word_id
}
pub fn add_unplaced_word_at_id(&mut self, word_text: &str, clue: &str, word_id: usize, required_direction: Option<Direction>) {
let word = Word::new_unplaced(word_text, clue, required_direction);
self.word_map.insert(word_id, word);
}
pub fn add_unplaced_word(&mut self, word_text: &str, clue: &str, required_direction: Option<Direction>) -> usize {
let word_id = self.find_lowest_unused_word_id();
self.add_unplaced_word_at_id(word_text, clue, word_id, required_direction);
word_id
}
pub fn update_word_id(&mut self, old_word_id: usize, new_word_id: usize) {
// Move in hashmap
let word: Word = self.word_map.remove(&old_word_id).unwrap();
self.word_map.insert(new_word_id, word);
for (_location, cell) in self.cell_map.iter_mut() {
cell.update_word_id(old_word_id, new_word_id);
}
}
pub fn delete_word(&mut self, word_id: usize) {
s | n unplace_word(&mut self, word_id: usize) {
for (_location, cell) in self.cell_map.iter_mut() {
cell.remove_word(word_id);
}
if let Some(word) = self.word_map.get_mut(&word_id) {
word.remove_placement();
}
self.fit_to_size();
debug!("Now have {} words in grid", self.word_map.len());
}
}
#[cfg(test)]
mod tests {
use super::*;
use ndarray::array;
#[test]
fn test_adjacency() -> Result<(), CrosswordError> {
crate::logging::init_logger(true);
let mut grid = CrosswordGrid::new_single_word("ALPHA");
let arrival_word_id = grid.add_unplaced_word("ARRIVAL", "", None);
let bear_word_id = grid.add_unplaced_word("BEARER", "", None);
let innards_word_id = grid.add_unplaced_word("INNARDS", "", None);
let cup_word_id = grid.add_unplaced_word("CUP", "", None);
grid.add_unplaced_word("CAP", "", None);
grid.check_valid();
debug!("{:#?}", grid);
grid.place_word_in_cell(Location(0, 0), arrival_word_id, 0, Direction::Down)?;
grid.place_word_in_cell(Location(0, 4), bear_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(0, 2), cup_word_id, 2, Direction::Down)?;
grid.place_word_in_cell(Location(3, 0), innards_word_id, 0, Direction::Across)?;
debug!("{:#?}", grid);
grid.check_valid();
let adjacency = grid.to_graph_adjacency_matrix();
assert_eq!(adjacency, array![[0, 1, 1, 0, 1, 0],
[1, 0, 0, 1, 0, 0],
[1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]]);
Ok(())
}
}
| elf.unplace_word(word_id);
self.word_map.remove(&word_id);
}
pub f | identifier_body |
codec.rs | //! encode and decode the frames for the mux protocol.
//! The frames include the length of a PDU as well as an identifier
//! that informs us how to decode it. The length, ident and serial
//! number are encoded using a variable length integer encoding.
//! Rather than rely solely on serde to serialize and deserialize an
//! enum, we encode the enum variants with a version/identifier tag
//! for ourselves. This will make it a little easier to manage
//! client and server instances that are built from different versions
//! of this code; in this way the client and server can more gracefully
//! manage unknown enum variants.
#![allow(dead_code)]
use crate::mux::domain::DomainId;
use crate::mux::tab::TabId;
use crate::mux::window::WindowId;
use anyhow::{bail, Error};
use leb128;
use log::debug;
use portable_pty::{CommandBuilder, PtySize};
use serde_derive::*;
use std::io::Cursor;
use std::sync::Arc;
use term::selection::SelectionRange;
use termwiz::hyperlink::Hyperlink;
use termwiz::surface::{Change, SequenceNo};
use varbincode;
/// Returns the encoded length of the leb128 representation of value
fn | (value: u64) -> usize {
struct NullWrite {}
impl std::io::Write for NullWrite {
fn write(&mut self, buf: &[u8]) -> std::result::Result<usize, std::io::Error> {
Ok(buf.len())
}
fn flush(&mut self) -> std::result::Result<(), std::io::Error> {
Ok(())
}
}
leb128::write::unsigned(&mut NullWrite {}, value).unwrap()
}
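// Worked example (values match test_frame below): values under 0x80 need
// one leb128 byte, while 0x81 needs two (0x81, 0x01):
//   assert_eq!(encoded_length(0x42), 1);
//   assert_eq!(encoded_length(0x81), 2);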
const COMPRESSED_MASK: u64 = 1 << 63;
/// Encode a frame. If the data is compressed, the high bit of the length
/// is set to indicate that. The data written out has the format:
/// tagged_len: leb128 (u64 msb is set if data is compressed)
/// serial: leb128
/// ident: leb128
/// data bytes
fn encode_raw<W: std::io::Write>(
ident: u64,
serial: u64,
data: &[u8],
is_compressed: bool,
mut w: W,
) -> Result<(), std::io::Error> {
let len = data.len() + encoded_length(ident) + encoded_length(serial);
let masked_len = if is_compressed {
(len as u64) | COMPRESSED_MASK
} else {
len as u64
};
// Double-buffer the data; since we run with nodelay enabled, it is
// desirable for the write to be a single packet (or at least, for
// the header portion to go out in a single packet)
let mut buffer = Vec::with_capacity(len + encoded_length(masked_len));
leb128::write::unsigned(&mut buffer, masked_len)?;
leb128::write::unsigned(&mut buffer, serial)?;
leb128::write::unsigned(&mut buffer, ident)?;
buffer.extend_from_slice(data);
w.write_all(&buffer)
}
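// Hedged wire-layout sketch, mirroring test_frame below:
// encode_raw(0x81, 0x42, b"hello", false, w) emits
//   0x08      - tagged_len: 5 data bytes + 1 (serial) + 2 (ident)
//   0x42      - serial
//   0x81 0x01 - ident as leb128
//   b"hello"  - data
// i.e. exactly b"\x08\x42\x81\x01hello".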
/// Read a single leb128 encoded value from the stream
fn read_u64<R: std::io::Read>(mut r: R) -> Result<u64, std::io::Error> {
leb128::read::unsigned(&mut r).map_err(|err| match err {
leb128::read::Error::IoError(ioerr) => ioerr,
err => std::io::Error::new(std::io::ErrorKind::Other, format!("{}", err)),
})
}
#[derive(Debug)]
struct Decoded {
ident: u64,
serial: u64,
data: Vec<u8>,
is_compressed: bool,
}
/// Decode a frame.
/// See encode_raw() for the frame format.
fn decode_raw<R: std::io::Read>(mut r: R) -> Result<Decoded, std::io::Error> {
let len = read_u64(r.by_ref())?;
let (len, is_compressed) = if (len & COMPRESSED_MASK) != 0 {
(len & !COMPRESSED_MASK, true)
} else {
(len, false)
};
let serial = read_u64(r.by_ref())?;
let ident = read_u64(r.by_ref())?;
let data_len = len as usize - (encoded_length(ident) + encoded_length(serial));
let mut data = vec![0u8; data_len];
r.read_exact(&mut data)?;
Ok(Decoded {
ident,
serial,
data,
is_compressed,
})
}
#[derive(Debug, PartialEq)]
pub struct DecodedPdu {
pub serial: u64,
pub pdu: Pdu,
}
/// If the serialized size is larger than this, then we'll consider compressing it
const COMPRESS_THRESH: usize = 32;
fn serialize<T: serde::Serialize>(t: &T) -> Result<(Vec<u8>, bool), Error> {
let mut uncompressed = Vec::new();
let mut encode = varbincode::Serializer::new(&mut uncompressed);
t.serialize(&mut encode)?;
if uncompressed.len() <= COMPRESS_THRESH {
return Ok((uncompressed, false));
}
// It's a little heavy; let's try compressing it
let mut compressed = Vec::new();
let mut compress = zstd::Encoder::new(&mut compressed, zstd::DEFAULT_COMPRESSION_LEVEL)?;
let mut encode = varbincode::Serializer::new(&mut compress);
t.serialize(&mut encode)?;
drop(encode);
compress.finish()?;
debug!(
"serialized+compress len {} vs {}",
compressed.len(),
uncompressed.len()
);
if compressed.len() < uncompressed.len() {
Ok((compressed, true))
} else {
Ok((uncompressed, false))
}
}
fn deserialize<T: serde::de::DeserializeOwned, R: std::io::Read>(
mut r: R,
is_compressed: bool,
) -> Result<T, Error> {
if is_compressed {
let mut decompress = zstd::Decoder::new(r)?;
let mut decode = varbincode::Deserializer::new(&mut decompress);
serde::Deserialize::deserialize(&mut decode).map_err(Into::into)
} else {
let mut decode = varbincode::Deserializer::new(&mut r);
serde::Deserialize::deserialize(&mut decode).map_err(Into::into)
}
}
macro_rules! pdu {
($( $name:ident:$vers:expr),* $(,)?) => {
#[derive(PartialEq, Debug)]
pub enum Pdu {
Invalid{ident: u64},
$(
$name($name)
,)*
}
impl Pdu {
pub fn encode<W: std::io::Write>(&self, w: W, serial: u64) -> Result<(), Error> {
match self {
Pdu::Invalid{..} => bail!("attempted to serialize Pdu::Invalid"),
$(
Pdu::$name(s) => {
let (data, is_compressed) = serialize(s)?;
encode_raw($vers, serial, &data, is_compressed, w)?;
Ok(())
}
,)*
}
}
pub fn decode<R: std::io::Read>(r:R) -> Result<DecodedPdu, Error> {
let decoded = decode_raw(r)?;
match decoded.ident {
$(
$vers => {
Ok(DecodedPdu {
serial: decoded.serial,
pdu: Pdu::$name(deserialize(decoded.data.as_slice(), decoded.is_compressed)?)
})
}
,)*
_ => Ok(DecodedPdu {
serial: decoded.serial,
pdu: Pdu::Invalid{ident:decoded.ident}
}),
}
}
}
}
}
// Defines the Pdu enum.
// Each struct has an explicit identifying number.
// This allows removal of obsolete structs,
// and the definition of newer structs as the protocol evolves.
pdu! {
ErrorResponse: 0,
Ping: 1,
Pong: 2,
ListTabs: 3,
ListTabsResponse: 4,
Spawn: 7,
SpawnResponse: 8,
WriteToTab: 9,
UnitResponse: 10,
SendKeyDown: 11,
SendMouseEvent: 12,
SendPaste: 13,
Resize: 14,
SendMouseEventResponse: 17,
GetTabRenderChanges: 18,
GetTabRenderChangesResponse: 19,
SetClipboard: 20,
OpenURL: 21,
}
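// Hedged illustration: a hypothetical new message would be registered by
// defining its struct and appending a fresh entry, e.g. `SomeNewRequest: 22,`
// above; identifiers are never reused, so deleting a variant leaves a gap.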
impl Pdu {
pub fn stream_decode(buffer: &mut Vec<u8>) -> anyhow::Result<Option<DecodedPdu>> {
let mut cursor = Cursor::new(buffer.as_slice());
match Self::decode(&mut cursor) {
Ok(decoded) => {
let consumed = cursor.position() as usize;
let remain = buffer.len() - consumed;
// Remove `consumed` bytes from the start of the vec.
// This is safe because the vec is just bytes and we have
// constrained the offsets accordingly.
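// (Hedged aside: a safe equivalent is `buffer.drain(..consumed);`,
// which performs the same front-removal via the standard library.)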
unsafe {
std::ptr::copy_nonoverlapping(
buffer.as_ptr().add(consumed),
buffer.as_mut_ptr(),
remain,
);
}
buffer.truncate(remain);
Ok(Some(decoded))
}
Err(err) => {
if let Some(ioerr) = err.downcast_ref::<std::io::Error>() {
match ioerr.kind() {
std::io::ErrorKind::UnexpectedEof | std::io::ErrorKind::WouldBlock => {
return Ok(None);
}
_ => {}
}
}
Err(err)
}
}
}
pub fn try_read_and_decode<R: std::io::Read>(
r: &mut R,
buffer: &mut Vec<u8>,
) -> anyhow::Result<Option<DecodedPdu>> {
loop {
if let Some(decoded) = Self::stream_decode(buffer)? {
return Ok(Some(decoded));
}
let mut buf = [0u8; 4096];
let size = match r.read(&mut buf) {
Ok(size) => size,
Err(err) => {
if err.kind() == std::io::ErrorKind::WouldBlock {
return Ok(None);
}
return Err(err.into());
}
};
if size == 0 {
return Err(
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "End Of File").into(),
);
}
buffer.extend_from_slice(&buf[0..size]);
}
}
pub fn tab_id(&self) -> Option<TabId> {
match self {
Pdu::GetTabRenderChangesResponse(GetTabRenderChangesResponse { tab_id, .. }) => {
Some(*tab_id)
}
Pdu::SetClipboard(SetClipboard { tab_id, .. }) => Some(*tab_id),
Pdu::OpenURL(OpenURL { tab_id, .. }) => Some(*tab_id),
_ => None,
}
}
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct UnitResponse {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ErrorResponse {
pub reason: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Ping {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Pong {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ListTabs {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct WindowAndTabEntry {
pub window_id: WindowId,
pub tab_id: TabId,
pub title: String,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ListTabsResponse {
pub tabs: Vec<WindowAndTabEntry>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Spawn {
pub domain_id: DomainId,
/// If None, create a new window for this new tab
pub window_id: Option<WindowId>,
pub command: Option<CommandBuilder>,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SpawnResponse {
pub tab_id: TabId,
pub window_id: WindowId,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct WriteToTab {
pub tab_id: TabId,
pub data: Vec<u8>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendPaste {
pub tab_id: TabId,
pub data: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendKeyDown {
pub tab_id: TabId,
pub event: termwiz::input::KeyEvent,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendMouseEvent {
pub tab_id: TabId,
pub event: term::input::MouseEvent,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendMouseEventResponse {
pub selection_range: Option<SelectionRange>,
pub highlight: Option<Arc<Hyperlink>>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SetClipboard {
pub tab_id: TabId,
pub clipboard: Option<String>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct OpenURL {
pub tab_id: TabId,
pub url: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Resize {
pub tab_id: TabId,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct GetTabRenderChanges {
pub tab_id: TabId,
pub sequence_no: SequenceNo,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct GetTabRenderChangesResponse {
pub tab_id: TabId,
pub sequence_no: SequenceNo,
pub changes: Vec<Change>,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_frame() {
let mut encoded = Vec::new();
encode_raw(0x81, 0x42, b"hello", false, &mut encoded).unwrap();
assert_eq!(&encoded, b"\x08\x42\x81\x01hello");
let decoded = decode_raw(encoded.as_slice()).unwrap();
assert_eq!(decoded.ident, 0x81);
assert_eq!(decoded.serial, 0x42);
assert_eq!(decoded.data, b"hello");
}
#[test]
fn test_frame_lengths() {
let mut serial = 1;
for target_len in &[128, 247, 256, 65536, 16777216] {
let mut payload = Vec::with_capacity(*target_len);
payload.resize(*target_len, b'a');
let mut encoded = Vec::new();
encode_raw(0x42, serial, payload.as_slice(), false, &mut encoded).unwrap();
let decoded = decode_raw(encoded.as_slice()).unwrap();
assert_eq!(decoded.ident, 0x42);
assert_eq!(decoded.serial, serial);
assert_eq!(decoded.data, payload);
serial += 1;
}
}
#[test]
fn test_pdu_ping() {
let mut encoded = Vec::new();
Pdu::Ping(Ping {}).encode(&mut encoded, 0x40).unwrap();
assert_eq!(&encoded, &[2, 0x40, 1]);
assert_eq!(
DecodedPdu {
serial: 0x40,
pdu: Pdu::Ping(Ping {})
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
#[test]
fn stream_decode() {
let mut encoded = Vec::new();
Pdu::Ping(Ping {}).encode(&mut encoded, 0x1).unwrap();
Pdu::Pong(Pong {}).encode(&mut encoded, 0x2).unwrap();
assert_eq!(encoded.len(), 6);
let mut cursor = Cursor::new(encoded.as_slice());
let mut read_buffer = Vec::new();
assert_eq!(
Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap(),
Some(DecodedPdu {
serial: 1,
pdu: Pdu::Ping(Ping {})
})
);
assert_eq!(
Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap(),
Some(DecodedPdu {
serial: 2,
pdu: Pdu::Pong(Pong {})
})
);
let err = Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap_err();
assert_eq!(
err.downcast_ref::<std::io::Error>().unwrap().kind(),
std::io::ErrorKind::UnexpectedEof
);
}
#[test]
fn test_pdu_ping_base91() {
let mut encoded = Vec::new();
{
let mut encoder = base91::Base91Encoder::new(&mut encoded);
Pdu::Ping(Ping {}).encode(&mut encoder, 0x41).unwrap();
}
assert_eq!(&encoded, &[60, 67, 75, 65]);
let decoded = base91::decode(&encoded);
assert_eq!(
DecodedPdu {
serial: 0x41,
pdu: Pdu::Ping(Ping {})
},
Pdu::decode(decoded.as_slice()).unwrap()
);
}
#[test]
fn test_pdu_pong() {
let mut encoded = Vec::new();
Pdu::Pong(Pong {}).encode(&mut encoded, 0x42).unwrap();
assert_eq!(&encoded, &[2, 0x42, 2]);
assert_eq!(
DecodedPdu {
serial: 0x42,
pdu: Pdu::Pong(Pong {})
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
#[test]
fn test_bogus_pdu() {
let mut encoded = Vec::new();
encode_raw(0xdeadbeef, 0x42, b"hello", false, &mut encoded).unwrap();
assert_eq!(
DecodedPdu {
serial: 0x42,
pdu: Pdu::Invalid { ident: 0xdeadbeef }
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
}
| encoded_length | identifier_name |
codec.rs | //! encode and decode the frames for the mux protocol.
//! The frames include the length of a PDU as well as an identifier
//! that informs us how to decode it. The length, ident and serial
//! number are encoded using a variable length integer encoding.
//! Rather than rely solely on serde to serialize and deserialize an
//! enum, we encode the enum variants with a version/identifier tag
//! for ourselves. This will make it a little easier to manage
//! client and server instances that are built from different versions
//! of this code; in this way the client and server can more gracefully
//! manage unknown enum variants.
#![allow(dead_code)]
use crate::mux::domain::DomainId;
use crate::mux::tab::TabId;
use crate::mux::window::WindowId;
use anyhow::{bail, Error};
use leb128;
use log::debug;
use portable_pty::{CommandBuilder, PtySize};
use serde_derive::*;
use std::io::Cursor;
use std::sync::Arc;
use term::selection::SelectionRange;
use termwiz::hyperlink::Hyperlink;
use termwiz::surface::{Change, SequenceNo};
use varbincode;
/// Returns the encoded length of the leb128 representation of value
fn encoded_length(value: u64) -> usize {
struct NullWrite {}
impl std::io::Write for NullWrite {
fn write(&mut self, buf: &[u8]) -> std::result::Result<usize, std::io::Error> {
Ok(buf.len())
}
fn flush(&mut self) -> std::result::Result<(), std::io::Error> {
Ok(())
}
}
leb128::write::unsigned(&mut NullWrite {}, value).unwrap()
}
const COMPRESSED_MASK: u64 = 1 << 63;
/// Encode a frame. If the data is compressed, the high bit of the length
/// is set to indicate that. The data written out has the format:
/// tagged_len: leb128 (u64 msb is set if data is compressed)
/// serial: leb128
/// ident: leb128
/// data bytes
fn encode_raw<W: std::io::Write>(
ident: u64,
serial: u64,
data: &[u8],
is_compressed: bool,
mut w: W,
) -> Result<(), std::io::Error> {
let len = data.len() + encoded_length(ident) + encoded_length(serial);
let masked_len = if is_compressed {
(len as u64) | COMPRESSED_MASK
} else {
len as u64
};
// Double-buffer the data; since we run with nodelay enabled, it is
// desirable for the write to be a single packet (or at least, for
// the header portion to go out in a single packet)
let mut buffer = Vec::with_capacity(len + encoded_length(masked_len));
leb128::write::unsigned(&mut buffer, masked_len)?;
leb128::write::unsigned(&mut buffer, serial)?;
leb128::write::unsigned(&mut buffer, ident)?;
buffer.extend_from_slice(data);
w.write_all(&buffer)
}
/// Read a single leb128 encoded value from the stream
fn read_u64<R: std::io::Read>(mut r: R) -> Result<u64, std::io::Error> {
leb128::read::unsigned(&mut r).map_err(|err| match err {
leb128::read::Error::IoError(ioerr) => ioerr,
err => std::io::Error::new(std::io::ErrorKind::Other, format!("{}", err)),
})
}
#[derive(Debug)]
struct Decoded {
ident: u64,
serial: u64,
data: Vec<u8>,
is_compressed: bool,
}
/// Decode a frame.
/// See encode_raw() for the frame format.
fn decode_raw<R: std::io::Read>(mut r: R) -> Result<Decoded, std::io::Error> {
let len = read_u64(r.by_ref())?;
let (len, is_compressed) = if (len & COMPRESSED_MASK) != 0 {
(len & !COMPRESSED_MASK, true)
} else {
(len, false)
};
let serial = read_u64(r.by_ref())?;
let ident = read_u64(r.by_ref())?;
let data_len = len as usize - (encoded_length(ident) + encoded_length(serial));
let mut data = vec![0u8; data_len];
r.read_exact(&mut data)?;
Ok(Decoded {
ident,
serial,
data,
is_compressed,
})
}
| pub serial: u64,
pub pdu: Pdu,
}
/// If the serialized size is larger than this, then we'll consider compressing it
const COMPRESS_THRESH: usize = 32;
fn serialize<T: serde::Serialize>(t: &T) -> Result<(Vec<u8>, bool), Error> {
let mut uncompressed = Vec::new();
let mut encode = varbincode::Serializer::new(&mut uncompressed);
t.serialize(&mut encode)?;
if uncompressed.len() <= COMPRESS_THRESH {
return Ok((uncompressed, false));
}
// It's a little heavy; let's try compressing it
let mut compressed = Vec::new();
let mut compress = zstd::Encoder::new(&mut compressed, zstd::DEFAULT_COMPRESSION_LEVEL)?;
let mut encode = varbincode::Serializer::new(&mut compress);
t.serialize(&mut encode)?;
drop(encode);
compress.finish()?;
debug!(
"serialized+compress len {} vs {}",
compressed.len(),
uncompressed.len()
);
if compressed.len() < uncompressed.len() {
Ok((compressed, true))
} else {
Ok((uncompressed, false))
}
}
fn deserialize<T: serde::de::DeserializeOwned, R: std::io::Read>(
mut r: R,
is_compressed: bool,
) -> Result<T, Error> {
if is_compressed {
let mut decompress = zstd::Decoder::new(r)?;
let mut decode = varbincode::Deserializer::new(&mut decompress);
serde::Deserialize::deserialize(&mut decode).map_err(Into::into)
} else {
let mut decode = varbincode::Deserializer::new(&mut r);
serde::Deserialize::deserialize(&mut decode).map_err(Into::into)
}
}
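// Hedged round-trip sketch for the pair above: for any T that is
// Serialize + DeserializeOwned,
//   let (bytes, compressed) = serialize(&value)?;
//   let back: T = deserialize(bytes.as_slice(), compressed)?;
// should reproduce `value`; compression stays transparent to callers.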
macro_rules! pdu {
($( $name:ident:$vers:expr),* $(,)?) => {
#[derive(PartialEq, Debug)]
pub enum Pdu {
Invalid{ident: u64},
$(
$name($name)
,)*
}
impl Pdu {
pub fn encode<W: std::io::Write>(&self, w: W, serial: u64) -> Result<(), Error> {
match self {
Pdu::Invalid{..} => bail!("attempted to serialize Pdu::Invalid"),
$(
Pdu::$name(s) => {
let (data, is_compressed) = serialize(s)?;
encode_raw($vers, serial, &data, is_compressed, w)?;
Ok(())
}
,)*
}
}
pub fn decode<R: std::io::Read>(r:R) -> Result<DecodedPdu, Error> {
let decoded = decode_raw(r)?;
match decoded.ident {
$(
$vers => {
Ok(DecodedPdu {
serial: decoded.serial,
pdu: Pdu::$name(deserialize(decoded.data.as_slice(), decoded.is_compressed)?)
})
}
,)*
_ => Ok(DecodedPdu {
serial: decoded.serial,
pdu: Pdu::Invalid{ident:decoded.ident}
}),
}
}
}
}
}
// Defines the Pdu enum.
// Each struct has an explicit identifying number.
// This allows removal of obsolete structs,
// and the definition of newer structs as the protocol evolves.
pdu! {
ErrorResponse: 0,
Ping: 1,
Pong: 2,
ListTabs: 3,
ListTabsResponse: 4,
Spawn: 7,
SpawnResponse: 8,
WriteToTab: 9,
UnitResponse: 10,
SendKeyDown: 11,
SendMouseEvent: 12,
SendPaste: 13,
Resize: 14,
SendMouseEventResponse: 17,
GetTabRenderChanges: 18,
GetTabRenderChangesResponse: 19,
SetClipboard: 20,
OpenURL: 21,
}
impl Pdu {
pub fn stream_decode(buffer: &mut Vec<u8>) -> anyhow::Result<Option<DecodedPdu>> {
let mut cursor = Cursor::new(buffer.as_slice());
match Self::decode(&mut cursor) {
Ok(decoded) => {
let consumed = cursor.position() as usize;
let remain = buffer.len() - consumed;
// Remove `consumed` bytes from the start of the vec.
// This is safe because the vec is just bytes and we have
// constrained the offsets accordingly.
unsafe {
std::ptr::copy_nonoverlapping(
buffer.as_ptr().add(consumed),
buffer.as_mut_ptr(),
remain,
);
}
buffer.truncate(remain);
Ok(Some(decoded))
}
Err(err) => {
if let Some(ioerr) = err.downcast_ref::<std::io::Error>() {
match ioerr.kind() {
std::io::ErrorKind::UnexpectedEof | std::io::ErrorKind::WouldBlock => {
return Ok(None);
}
_ => {}
}
}
Err(err)
}
}
}
pub fn try_read_and_decode<R: std::io::Read>(
r: &mut R,
buffer: &mut Vec<u8>,
) -> anyhow::Result<Option<DecodedPdu>> {
loop {
if let Some(decoded) = Self::stream_decode(buffer)? {
return Ok(Some(decoded));
}
let mut buf = [0u8; 4096];
let size = match r.read(&mut buf) {
Ok(size) => size,
Err(err) => {
if err.kind() == std::io::ErrorKind::WouldBlock {
return Ok(None);
}
return Err(err.into());
}
};
if size == 0 {
return Err(
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "End Of File").into(),
);
}
buffer.extend_from_slice(&buf[0..size]);
}
}
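// Hedged usage sketch: callers keep one persistent buffer per connection
// and poll in a loop; Ok(None) means "no complete frame yet", so partial
// bytes stay in `buffer` for the next call:
//   let mut buffer = Vec::new();
//   while let Some(decoded) = Pdu::try_read_and_decode(&mut stream, &mut buffer)? {
//       handle(decoded);
//   }
// (`stream` and `handle` are illustrative names, not part of this module.)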
pub fn tab_id(&self) -> Option<TabId> {
match self {
Pdu::GetTabRenderChangesResponse(GetTabRenderChangesResponse { tab_id, .. }) => {
Some(*tab_id)
}
Pdu::SetClipboard(SetClipboard { tab_id, .. }) => Some(*tab_id),
Pdu::OpenURL(OpenURL { tab_id, .. }) => Some(*tab_id),
_ => None,
}
}
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct UnitResponse {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ErrorResponse {
pub reason: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Ping {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Pong {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ListTabs {}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct WindowAndTabEntry {
pub window_id: WindowId,
pub tab_id: TabId,
pub title: String,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct ListTabsResponse {
pub tabs: Vec<WindowAndTabEntry>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Spawn {
pub domain_id: DomainId,
/// If None, create a new window for this new tab
pub window_id: Option<WindowId>,
pub command: Option<CommandBuilder>,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SpawnResponse {
pub tab_id: TabId,
pub window_id: WindowId,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct WriteToTab {
pub tab_id: TabId,
pub data: Vec<u8>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendPaste {
pub tab_id: TabId,
pub data: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendKeyDown {
pub tab_id: TabId,
pub event: termwiz::input::KeyEvent,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendMouseEvent {
pub tab_id: TabId,
pub event: term::input::MouseEvent,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SendMouseEventResponse {
pub selection_range: Option<SelectionRange>,
pub highlight: Option<Arc<Hyperlink>>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct SetClipboard {
pub tab_id: TabId,
pub clipboard: Option<String>,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct OpenURL {
pub tab_id: TabId,
pub url: String,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct Resize {
pub tab_id: TabId,
pub size: PtySize,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct GetTabRenderChanges {
pub tab_id: TabId,
pub sequence_no: SequenceNo,
}
#[derive(Deserialize, Serialize, PartialEq, Debug)]
pub struct GetTabRenderChangesResponse {
pub tab_id: TabId,
pub sequence_no: SequenceNo,
pub changes: Vec<Change>,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_frame() {
let mut encoded = Vec::new();
encode_raw(0x81, 0x42, b"hello", false, &mut encoded).unwrap();
assert_eq!(&encoded, b"\x08\x42\x81\x01hello");
let decoded = decode_raw(encoded.as_slice()).unwrap();
assert_eq!(decoded.ident, 0x81);
assert_eq!(decoded.serial, 0x42);
assert_eq!(decoded.data, b"hello");
}
#[test]
fn test_frame_lengths() {
let mut serial = 1;
for target_len in &[128, 247, 256, 65536, 16777216] {
let mut payload = Vec::with_capacity(*target_len);
payload.resize(*target_len, b'a');
let mut encoded = Vec::new();
encode_raw(0x42, serial, payload.as_slice(), false, &mut encoded).unwrap();
let decoded = decode_raw(encoded.as_slice()).unwrap();
assert_eq!(decoded.ident, 0x42);
assert_eq!(decoded.serial, serial);
assert_eq!(decoded.data, payload);
serial += 1;
}
}
#[test]
fn test_pdu_ping() {
let mut encoded = Vec::new();
Pdu::Ping(Ping {}).encode(&mut encoded, 0x40).unwrap();
assert_eq!(&encoded, &[2, 0x40, 1]);
assert_eq!(
DecodedPdu {
serial: 0x40,
pdu: Pdu::Ping(Ping {})
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
#[test]
fn stream_decode() {
let mut encoded = Vec::new();
Pdu::Ping(Ping {}).encode(&mut encoded, 0x1).unwrap();
Pdu::Pong(Pong {}).encode(&mut encoded, 0x2).unwrap();
assert_eq!(encoded.len(), 6);
let mut cursor = Cursor::new(encoded.as_slice());
let mut read_buffer = Vec::new();
assert_eq!(
Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap(),
Some(DecodedPdu {
serial: 1,
pdu: Pdu::Ping(Ping {})
})
);
assert_eq!(
Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap(),
Some(DecodedPdu {
serial: 2,
pdu: Pdu::Pong(Pong {})
})
);
let err = Pdu::try_read_and_decode(&mut cursor, &mut read_buffer).unwrap_err();
assert_eq!(
err.downcast_ref::<std::io::Error>().unwrap().kind(),
std::io::ErrorKind::UnexpectedEof
);
}
#[test]
fn test_pdu_ping_base91() {
let mut encoded = Vec::new();
{
let mut encoder = base91::Base91Encoder::new(&mut encoded);
Pdu::Ping(Ping {}).encode(&mut encoder, 0x41).unwrap();
}
assert_eq!(&encoded, &[60, 67, 75, 65]);
let decoded = base91::decode(&encoded);
assert_eq!(
DecodedPdu {
serial: 0x41,
pdu: Pdu::Ping(Ping {})
},
Pdu::decode(decoded.as_slice()).unwrap()
);
}
#[test]
fn test_pdu_pong() {
let mut encoded = Vec::new();
Pdu::Pong(Pong {}).encode(&mut encoded, 0x42).unwrap();
assert_eq!(&encoded, &[2, 0x42, 2]);
assert_eq!(
DecodedPdu {
serial: 0x42,
pdu: Pdu::Pong(Pong {})
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
#[test]
fn test_bogus_pdu() {
let mut encoded = Vec::new();
encode_raw(0xdeadbeef, 0x42, b"hello", false, &mut encoded).unwrap();
assert_eq!(
DecodedPdu {
serial: 0x42,
pdu: Pdu::Invalid { ident: 0xdeadbeef }
},
Pdu::decode(encoded.as_slice()).unwrap()
);
}
} | #[derive(Debug, PartialEq)]
pub struct DecodedPdu { | random_line_split |
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use std::collections::HashMap;
use rustc_plugin::Registry;
use syntax::{ast, codemap, tokenstream};
use syntax::errors::DiagnosticBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::ext::build::AstBuilder;
use syntax::fold::Folder;
use syntax::parse::common::SeqSep;
use syntax::parse::parser::Parser;
use syntax::parse::token;
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::symbol;
use syntax::util::small_vector::SmallVector;
use docopt::{Docopt, ArgvMap};
use docopt::parse::{Options, Atom, Positional, Zero, One};
type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("docopt", expand);
}
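// Hedged invocation sketch (struct name, usage text and field annotation
// are illustrative only; parse() below defines the accepted grammar):
//   docopt!(pub Args derive Debug, "
//   Usage: prog [--verbose] <name>
//   ", arg_name: String);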
fn expand(cx: &mut ExtCtxt, span: codemap::Span, tts: &[tokenstream::TokenTree])
-> Box<MacResult+'static> {
let parsed = match MacParser::new(cx, tts).parse() {
Ok(parsed) => parsed,
Err(_) => return DummyResult::any(span),
};
parsed.items(cx)
}
/// Parsed corresponds to the result of parsing a `docopt` macro call.
/// It can be used to write a corresponding struct.
struct Parsed {
struct_info: StructInfo,
doc: Docopt,
/// Overridden type annotations for struct members. May be empty.
/// When a type annotation for an atom doesn't exist, then one is
/// inferred automatically. It is one of: `bool`, `usize`, `String` or
/// `Vec<String>`.
types: HashMap<Atom, P<ast::Ty>>,
}
impl Parsed {
/// Returns a macro result suitable for expansion.
/// Contains two items: one for the struct and one for the struct impls.
fn items(&self, cx: &ExtCtxt) -> Box<MacResult+'static> {
let mut its = vec!();
its.push(self.struct_decl(cx));
let struct_name = self.struct_info.name;
let full_doc = &*self.doc.parser().full_doc;
its.push(quote_item!(cx,
impl $struct_name {
#[allow(dead_code)]
fn docopt() -> docopt::Docopt {
// The unwrap is justified here because this code
// gen only happens if the Docopt usage string is valid.
docopt::Docopt::new($full_doc).unwrap()
}
}
).unwrap());
MacEager::items(SmallVector::many(its))
}
/// Returns an item for the struct definition.
fn struct_decl(&self, cx: &ExtCtxt) -> P<ast::Item> {
let name = self.struct_info.name.clone();
let vis = if self.struct_info.public { ast::Visibility::Public }
else { ast::Visibility::Inherited };
let def = ast::VariantData::Struct(
self.struct_fields(cx), ast::DUMMY_NODE_ID);
let mut traits = vec!["RustcDecodable".to_string()];
traits.extend(self.struct_info.deriving.iter().cloned());
let attrs = vec![attribute(cx, "allow", vec!["non_snake_case"]),
attribute(cx, "derive", traits)];
let st = cx.item_struct(codemap::DUMMY_SP, name.clone(), def);
cx.item(codemap::DUMMY_SP, name, attrs, st.node.clone()).map(|mut it| {
it.vis = vis;
it
})
}
/// Returns a list of fields for the struct definition.
/// Handles type annotations.
fn struct_fields(&self, cx: &ExtCtxt) -> Vec<ast::StructField> {
let mut fields: Vec<ast::StructField> = vec!();
for (atom, opts) in self.doc.parser().descs.iter() {
let name = ArgvMap::key_to_struct_field(&*atom.to_string());
let ty = match self.types.get(atom) {
None => self.pat_type(cx, atom, opts),
Some(ty) => ty.clone(),
};
fields.push(self.mk_struct_field(&*name, ty));
}
fields
}
/// Returns an inferred type for a usage pattern.
/// This is only invoked when a type annotation is not present.
fn pat_type(&self, cx: &ExtCtxt, atom: &Atom, opts: &Options) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
match (opts.repeats, &opts.arg) {
(false, &Zero) => {
match atom {
&Positional(_) => cx.ty_ident(sp, ident("String")),
_ => cx.ty_ident(sp, ident("bool")),
}
}
(true, &Zero) => {
match atom {
&Positional(_) => ty_vec_string(cx),
_ => cx.ty_ident(sp, ident("usize")),
}
}
(false, &One(_)) => cx.ty_ident(sp, ident("String")),
(true, &One(_)) => ty_vec_string(cx),
}
}
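// Inference summary (read off the match arms above):
//   flag               -> bool     repeated flag        -> usize
//   positional         -> String   repeated positional  -> Vec<String>
//   option with value  -> String   repeated with value  -> Vec<String>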
/// Creates a struct field from a member name and type.
fn mk_struct_field(&self, name: &str, ty: P<ast::Ty>) -> ast::StructField {
ast::StructField {
span: codemap::DUMMY_SP,
ident: Some(ident(name)),
vis: ast::Visibility::Public,
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: vec!(),
}
}
}
/// State for parsing a `docopt` macro invocation.
struct MacParser<'a, 'b: 'a> {
cx: &'a mut ExtCtxt<'b>,
p: Parser<'b>,
}
impl<'a, 'b> MacParser<'a, 'b> {
fn new(cx: &'a mut ExtCtxt<'b>, tts: &[tokenstream::TokenTree]) -> MacParser<'a, 'b> {
let p = cx.new_parser_from_tts(tts);
MacParser { cx: cx, p: p }
}
/// Main entry point for parsing arguments to `docopt` macro.
/// First looks for an identifier for the struct name.
/// Second, a string containing the docopt usage patterns.
/// Third, an optional list of type annotations.
fn parse(&mut self) -> PResult<'b, Parsed> {
if self.p.token == token::Eof {
let err = self.cx.struct_span_err(
self.cx.call_site(), "macro expects arguments");
return Err(err);
}
let struct_info = try!(self.parse_struct_info());
let docstr = try!(self.parse_str());
let mut types = HashMap::new();
if !self.p.check(&token::Eof) {
let sep = SeqSep {
sep: Some(token::Comma),
trailing_sep_allowed: true,
};
types = self.p.parse_seq_to_before_end(
&token::Eof, sep, |p| MacParser::parse_type_annotation(p)
).into_iter()
.map(|(ident, ty)| {
let field_name = ident.to_string();
let key = ArgvMap::struct_field_to_key(&*field_name);
(Atom::new(&*key), ty)
})
.collect::<HashMap<Atom, P<ast::Ty>>>();
try!(self.p.expect(&token::Eof));
}
// This config does not matter because we're only asking for the
// usage patterns in the Docopt string. The configuration does not
// affect the retrieval of usage patterns.
let doc = match Docopt::new(docstr) {
Ok(doc) => doc,
Err(err) => {
let err = self.cx.struct_span_err(
self.cx.call_site(),
&format!("Invalid Docopt usage: {}", err));
return Err(err);
}
};
Ok(Parsed {
struct_info: struct_info,
doc: doc,
types: types,
})
}
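// Hedged note: each annotation's field name is mapped back to a Docopt
// key via ArgvMap::struct_field_to_key above, so a field like
// `flag_verbose` would be matched against the `--verbose` atom (prefix
// convention assumed from the docopt crate).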
/// Parses a single string literal. On failure, a diagnostic describing
/// the problem is returned as the error.
fn parse_str(&mut self) -> PResult<'b, String> {
fn lit_is_str(lit: &ast::Lit) -> bool {
match lit.node {
ast::LitKind::Str(_, _) => true,
_ => false,
}
}
fn lit_to_string(lit: &ast::Lit) -> String {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => panic!("BUG: expected string literal"),
}
}
let exp = self.cx.expander().fold_expr(self.p.parse_expr().unwrap());
let s = match exp.node {
ast::ExprKind::Lit(ref lit) if lit_is_str(&**lit) => {
lit_to_string(&**lit)
}
_ => {
let err = format!("Expected string literal but got {}",
pprust::expr_to_string(&*exp));
let err = self.cx.struct_span_err(exp.span, &*err);
return Err(err);
}
};
self.p.bump();
Ok(s)
}
/// Parses a type annotation in a `docopt` invocation of the form
/// `ident: Ty`.
/// Note that this is a static method as it is used as a HOF.
fn parse_type_annotation(p: &mut Parser<'b>)
-> PResult<'b, (ast::Ident, P<ast::Ty>)> {
let ident = try!(p.parse_ident());
try!(p.expect(&token::Colon));
let ty = p.parse_ty().unwrap();
Ok((ident, ty))
}
/// Parses struct information, like visibility, name and deriving.
fn parse_struct_info(&mut self) -> PResult<'b, StructInfo> { | let public = self.p.eat_keyword(symbol::keywords::Pub);
let mut info = StructInfo {
name: try!(self.p.parse_ident()),
public: public,
deriving: vec![],
};
if self.p.eat(&token::Comma) { return Ok(info); }
let deriving = try!(self.p.parse_ident());
if *deriving.name.as_str() != *"derive" {
let err = format!("Expected 'derive' keyword but got '{}'",
deriving);
let err = self.cx.struct_span_err(self.cx.call_site(), &*err);
return Err(err);
}
while !self.p.eat(&token::Comma) {
info.deriving.push(
try!(self.p.parse_ident()).name.to_string());
}
Ok(info)
}
}
struct StructInfo {
name: ast::Ident,
public: bool,
deriving: Vec<String>,
}
// Convenience functions for building intermediate values.
fn ident(s: &str) -> ast::Ident {
ast::Ident::with_empty_ctxt(symbol::Symbol::intern(s))
}
fn attribute<S, T>(cx: &ExtCtxt, name: S, items: Vec<T>) -> ast::Attribute
where S: Borrow<str>, T: Borrow<str> {
let sp = codemap::DUMMY_SP;
let its = items.into_iter().map(|s| meta_item(cx, s.borrow())).collect();
let mi = cx.meta_list(sp, intern(name.borrow()), its);
cx.attribute(sp, mi)
}
fn meta_item(cx: &ExtCtxt, s: &str) -> codemap::Spanned<ast::NestedMetaItemKind> {
codemap::Spanned {
node: ast::NestedMetaItemKind::MetaItem(cx.meta_word(codemap::DUMMY_SP, intern(s))),
span: cx.call_site(),
}
}
fn intern(s: &str) -> symbol::Symbol {
symbol::Symbol::intern(s)
}
fn ty_vec_string(cx: &ExtCtxt) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
let tystr = ast::AngleBracketedParameterData {
lifetimes: vec![],
types: vec![cx.ty_ident(sp, ident("String"))],
bindings: vec![],
};
cx.ty_path(ast::Path {
span: sp,
segments: vec![ast::PathSegment {
identifier: ident("Vec"),
parameters: Some(P(ast::PathParameters::AngleBracketed(tystr))),
}]
})
} | random_line_split |
|
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use std::collections::HashMap;
use rustc_plugin::Registry;
use syntax::{ast, codemap, tokenstream};
use syntax::errors::DiagnosticBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::ext::build::AstBuilder;
use syntax::fold::Folder;
use syntax::parse::common::SeqSep;
use syntax::parse::parser::Parser;
use syntax::parse::token;
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::symbol;
use syntax::util::small_vector::SmallVector;
use docopt::{Docopt, ArgvMap};
use docopt::parse::{Options, Atom, Positional, Zero, One};
type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("docopt", expand);
}
fn expand(cx: &mut ExtCtxt, span: codemap::Span, tts: &[tokenstream::TokenTree])
-> Box<MacResult+'static> {
let parsed = match MacParser::new(cx, tts).parse() {
Ok(parsed) => parsed,
Err(_) => return DummyResult::any(span),
};
parsed.items(cx)
}
/// Parsed corresponds to the result of parsing a `docopt` macro call.
/// It can be used to write a corresponding struct.
struct Parsed {
struct_info: StructInfo,
doc: Docopt,
/// Overridden type annotations for struct members. May be empty.
/// When a type annotation for an atom doesn't exist, then one is
/// inferred automatically. It is one of: `bool`, `usize`, `String` or
/// `Vec<String>`.
types: HashMap<Atom, P<ast::Ty>>,
}
impl Parsed {
/// Returns a macro result suitable for expansion.
/// Contains two items: one for the struct and one for the struct impls.
fn items(&self, cx: &ExtCtxt) -> Box<MacResult+'static> {
let mut its = vec!();
its.push(self.struct_decl(cx));
let struct_name = self.struct_info.name;
let full_doc = &*self.doc.parser().full_doc;
its.push(quote_item!(cx,
impl $struct_name {
#[allow(dead_code)]
fn docopt() -> docopt::Docopt {
// The unwrap is justified here because this code
// gen only happens if the Docopt usage string is valid.
docopt::Docopt::new($full_doc).unwrap()
}
}
).unwrap());
MacEager::items(SmallVector::many(its))
}
/// Returns an item for the struct definition.
fn struct_decl(&self, cx: &ExtCtxt) -> P<ast::Item> {
let name = self.struct_info.name.clone();
let vis = if self.struct_info.public { ast::Visibility::Public }
else { ast::Visibility::Inherited };
let def = ast::VariantData::Struct(
self.struct_fields(cx), ast::DUMMY_NODE_ID);
let mut traits = vec!["RustcDecodable".to_string()];
traits.extend(self.struct_info.deriving.iter().cloned());
let attrs = vec![attribute(cx, "allow", vec!["non_snake_case"]),
attribute(cx, "derive", traits)];
let st = cx.item_struct(codemap::DUMMY_SP, name.clone(), def);
cx.item(codemap::DUMMY_SP, name, attrs, st.node.clone()).map(|mut it| {
it.vis = vis;
it
})
}
/// Returns a list of fields for the struct definition.
/// Handles type annotations.
fn struct_fields(&self, cx: &ExtCtxt) -> Vec<ast::StructField> {
let mut fields: Vec<ast::StructField> = vec!();
for (atom, opts) in self.doc.parser().descs.iter() {
let name = ArgvMap::key_to_struct_field(&*atom.to_string());
let ty = match self.types.get(atom) {
None => self.pat_type(cx, atom, opts),
Some(ty) => ty.clone(),
};
fields.push(self.mk_struct_field(&*name, ty));
}
fields
}
/// Returns an inferred type for a usage pattern.
/// This is only invoked when a type annotation is not present.
fn pat_type(&self, cx: &ExtCtxt, atom: &Atom, opts: &Options) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
match (opts.repeats, &opts.arg) {
(false, &Zero) => {
match atom {
&Positional(_) => cx.ty_ident(sp, ident("String")),
_ => cx.ty_ident(sp, ident("bool")),
}
}
(true, &Zero) => {
match atom {
&Positional(_) => ty_vec_string(cx),
_ => cx.ty_ident(sp, ident("usize")),
}
}
(false, &One(_)) => cx.ty_ident(sp, ident("String")),
(true, &One(_)) => ty_vec_string(cx),
}
}
/// Creates a struct field from a member name and type.
fn mk_struct_field(&self, name: &str, ty: P<ast::Ty>) -> ast::StructField {
ast::StructField {
span: codemap::DUMMY_SP,
ident: Some(ident(name)),
vis: ast::Visibility::Public,
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: vec!(),
}
}
}
/// State for parsing a `docopt` macro invocation.
struct MacParser<'a, 'b: 'a> {
cx: &'a mut ExtCtxt<'b>,
p: Parser<'b>,
}
impl<'a, 'b> MacParser<'a, 'b> {
fn new(cx: &'a mut ExtCtxt<'b>, tts: &[tokenstream::TokenTree]) -> MacParser<'a, 'b> {
let p = cx.new_parser_from_tts(tts);
MacParser { cx: cx, p: p }
}
/// Main entry point for parsing arguments to `docopt` macro.
/// First looks for an identifier for the struct name.
/// Second, a string containing the docopt usage patterns.
/// Third, an optional list of type annotations.
fn parse(&mut self) -> PResult<'b, Parsed> {
if self.p.token == token::Eof {
let err = self.cx.struct_span_err(
self.cx.call_site(), "macro expects arguments");
return Err(err);
}
let struct_info = try!(self.parse_struct_info());
let docstr = try!(self.parse_str());
let mut types = HashMap::new();
if !self.p.check(&token::Eof) {
let sep = SeqSep {
sep: Some(token::Comma),
trailing_sep_allowed: true,
};
types = self.p.parse_seq_to_before_end(
&token::Eof, sep, |p| MacParser::parse_type_annotation(p)
).into_iter()
.map(|(ident, ty)| {
let field_name = ident.to_string();
let key = ArgvMap::struct_field_to_key(&*field_name);
(Atom::new(&*key), ty)
})
.collect::<HashMap<Atom, P<ast::Ty>>>();
try!(self.p.expect(&token::Eof));
}
// This config does not matter because we're only asking for the
// usage patterns in the Docopt string. The configuration does not
// affect the retrieval of usage patterns.
let doc = match Docopt::new(docstr) {
Ok(doc) => doc,
Err(err) => {
let err = self.cx.struct_span_err(
self.cx.call_site(),
&format!("Invalid Docopt usage: {}", err));
return Err(err);
}
};
Ok(Parsed {
struct_info: struct_info,
doc: doc,
types: types,
})
}
/// Parses a single string literal. On failure, a spanned diagnostic
/// error is returned.
fn parse_str(&mut self) -> PResult<'b, String> {
fn lit_is_str(lit: &ast::Lit) -> bool {
match lit.node {
ast::LitKind::Str(_, _) => true,
_ => false,
}
}
fn lit_to_string(lit: &ast::Lit) -> String {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => panic!("BUG: expected string literal"),
}
}
let exp = self.cx.expander().fold_expr(self.p.parse_expr().unwrap());
let s = match exp.node {
ast::ExprKind::Lit(ref lit) if lit_is_str(&**lit) => {
lit_to_string(&**lit)
}
_ => {
let err = format!("Expected string literal but got {}",
pprust::expr_to_string(&*exp));
let err = self.cx.struct_span_err(exp.span, &*err);
return Err(err);
}
};
self.p.bump();
Ok(s)
}
/// Parses a type annotation in a `docopt` invocation of the form
/// `ident: Ty`.
/// Note that this is a static method as it is used as a HOF.
fn parse_type_annotation(p: &mut Parser<'b>)
-> PResult<'b, (ast::Ident, P<ast::Ty>)> {
let ident = try!(p.parse_ident());
try!(p.expect(&token::Colon));
let ty = p.parse_ty().unwrap();
Ok((ident, ty))
}
/// Parses struct information, like visibility, name and deriving.
fn parse_struct_info(&mut self) -> PResult<'b, StructInfo> {
let public = self.p.eat_keyword(symbol::keywords::Pub);
let mut info = StructInfo {
name: try!(self.p.parse_ident()),
public: public,
deriving: vec![],
};
if self.p.eat(&token::Comma) { return Ok(info); }
let deriving = try!(self.p.parse_ident());
if *deriving.name.as_str() != *"derive" {
let err = format!("Expected 'derive' keyword but got '{}'",
deriving);
let err = self.cx.struct_span_err(self.cx.call_site(), &*err);
return Err(err);
}
while !self.p.eat(&token::Comma) {
info.deriving.push(
try!(self.p.parse_ident()).name.to_string());
}
Ok(info)
}
}
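// Hedged sketch of an invocation this parser accepts; the struct name,
// derive list, usage string, and `flag_verbose` annotation below are all
// hypothetical examples, not taken from this file:
//
// docopt!(pub Args derive Debug Clone, "
// Usage: prog [--verbose] <file>...
// ", flag_verbose: bool);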
struct StructInfo {
name: ast::Ident,
public: bool,
deriving: Vec<String>,
}
// Convenience functions for building intermediate values.
fn ident(s: &str) -> ast::Ident {
ast::Ident::with_empty_ctxt(symbol::Symbol::intern(s))
}
fn attribute<S, T>(cx: &ExtCtxt, name: S, items: Vec<T>) -> ast::Attribute
where S: Borrow<str>, T: Borrow<str> |
fn meta_item(cx: &ExtCtxt, s: &str) -> codemap::Spanned<ast::NestedMetaItemKind> {
codemap::Spanned {
node: ast::NestedMetaItemKind::MetaItem(cx.meta_word(codemap::DUMMY_SP, intern(s))),
span: cx.call_site(),
}
}
fn intern(s: &str) -> symbol::Symbol {
symbol::Symbol::intern(s)
}
fn ty_vec_string(cx: &ExtCtxt) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
let tystr = ast::AngleBracketedParameterData {
lifetimes: vec![],
types: vec![cx.ty_ident(sp, ident("String"))],
bindings: vec![],
};
cx.ty_path(ast::Path {
span: sp,
segments: vec![ast::PathSegment {
identifier: ident("Vec"),
parameters: Some(P(ast::PathParameters::AngleBracketed(tystr))),
}]
})
}
| {
let sp = codemap::DUMMY_SP;
let its = items.into_iter().map(|s| meta_item(cx, s.borrow())).collect();
let mi = cx.meta_list(sp, intern(name.borrow()), its);
cx.attribute(sp, mi)
} | identifier_body |
macro.rs | #![crate_name = "docopt_macros"]
#![crate_type = "dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
//! This crate defines the `docopt!` macro. It is documented in the
//! documentation of the `docopt` crate.
extern crate syntax;
extern crate rustc_plugin;
extern crate docopt;
use std::borrow::Borrow;
use std::collections::HashMap;
use rustc_plugin::Registry;
use syntax::{ast, codemap, tokenstream};
use syntax::errors::DiagnosticBuilder;
use syntax::ext::base::{ExtCtxt, MacResult, MacEager, DummyResult};
use syntax::ext::build::AstBuilder;
use syntax::fold::Folder;
use syntax::parse::common::SeqSep;
use syntax::parse::parser::Parser;
use syntax::parse::token;
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::symbol;
use syntax::util::small_vector::SmallVector;
use docopt::{Docopt, ArgvMap};
use docopt::parse::{Options, Atom, Positional, Zero, One};
type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("docopt", expand);
}
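// Hedged note: as a legacy rustc compiler plugin, this macro is loaded from
// a consumer crate on nightly roughly as sketched below (assumed usage):
//
// #![feature(plugin)]
// #![plugin(docopt_macros)]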
fn expand(cx: &mut ExtCtxt, span: codemap::Span, tts: &[tokenstream::TokenTree])
-> Box<MacResult+'static> {
let parsed = match MacParser::new(cx, tts).parse() {
Ok(parsed) => parsed,
Err(_) => return DummyResult::any(span),
};
parsed.items(cx)
}
/// Parsed corresponds to the result of parsing a `docopt` macro call.
/// It can be used to write a corresponding struct.
struct Parsed {
struct_info: StructInfo,
doc: Docopt,
/// Overridden type annotations for struct members. May be empty.
/// When a type annotation for an atom doesn't exist, then one is
/// inferred automatically. It is one of: `bool`, `usize`, `String` or
/// `Vec<String>`.
types: HashMap<Atom, P<ast::Ty>>,
}
impl Parsed {
/// Returns a macro result suitable for expansion.
/// Contains two items: one for the struct and one for the struct impls.
fn items(&self, cx: &ExtCtxt) -> Box<MacResult+'static> {
let mut its = vec!();
its.push(self.struct_decl(cx));
let struct_name = self.struct_info.name;
let full_doc = &*self.doc.parser().full_doc;
its.push(quote_item!(cx,
impl $struct_name {
#[allow(dead_code)]
fn docopt() -> docopt::Docopt {
// The unwrap is justified here because this code
// gen only happens if the Docopt usage string is valid.
docopt::Docopt::new($full_doc).unwrap()
}
}
).unwrap());
MacEager::items(SmallVector::many(its))
}
/// Returns an item for the struct definition.
fn struct_decl(&self, cx: &ExtCtxt) -> P<ast::Item> {
let name = self.struct_info.name.clone();
let vis = if self.struct_info.public { ast::Visibility::Public }
else { ast::Visibility::Inherited };
let def = ast::VariantData::Struct(
self.struct_fields(cx), ast::DUMMY_NODE_ID);
let mut traits = vec!["RustcDecodable".to_string()];
traits.extend(self.struct_info.deriving.iter().cloned());
let attrs = vec![attribute(cx, "allow", vec!["non_snake_case"]),
attribute(cx, "derive", traits)];
let st = cx.item_struct(codemap::DUMMY_SP, name.clone(), def);
cx.item(codemap::DUMMY_SP, name, attrs, st.node.clone()).map(|mut it| {
it.vis = vis;
it
})
}
/// Returns a list of fields for the struct definition.
/// Handles type annotations.
fn | (&self, cx: &ExtCtxt) -> Vec<ast::StructField> {
let mut fields: Vec<ast::StructField> = vec!();
for (atom, opts) in self.doc.parser().descs.iter() {
let name = ArgvMap::key_to_struct_field(&*atom.to_string());
let ty = match self.types.get(atom) {
None => self.pat_type(cx, atom, opts),
Some(ty) => ty.clone(),
};
fields.push(self.mk_struct_field(&*name, ty));
}
fields
}
/// Returns an inferred type for a usage pattern.
/// This is only invoked when a type annotation is not present.
fn pat_type(&self, cx: &ExtCtxt, atom: &Atom, opts: &Options) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
match (opts.repeats, &opts.arg) {
(false, &Zero) => {
match atom {
&Positional(_) => cx.ty_ident(sp, ident("String")),
_ => cx.ty_ident(sp, ident("bool")),
}
}
(true, &Zero) => {
match atom {
&Positional(_) => ty_vec_string(cx),
_ => cx.ty_ident(sp, ident("usize")),
}
}
(false, &One(_)) => cx.ty_ident(sp, ident("String")),
(true, &One(_)) => ty_vec_string(cx),
}
}
/// Creates a struct field from a member name and type.
fn mk_struct_field(&self, name: &str, ty: P<ast::Ty>) -> ast::StructField {
ast::StructField {
span: codemap::DUMMY_SP,
ident: Some(ident(name)),
vis: ast::Visibility::Public,
id: ast::DUMMY_NODE_ID,
ty: ty,
attrs: vec!(),
}
}
}
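// Illustrative sketch (assumed, following the docopt crate's key-to-field
// naming) of what the expansion above yields for a hypothetical
// `docopt!(Args, "Usage: prog --count <file>")` invocation:
//
// #[allow(non_snake_case)]
// #[derive(RustcDecodable)]
// struct Args {
//     pub flag_count: bool,
//     pub arg_file: String,
// }
// // plus the inherent `fn docopt() -> docopt::Docopt` impl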
/// State for parsing a `docopt` macro invocation.
struct MacParser<'a, 'b:'a> {
cx: &'a mut ExtCtxt<'b>,
p: Parser<'b>,
}
impl<'a, 'b> MacParser<'a, 'b> {
fn new(cx: &'a mut ExtCtxt<'b>, tts: &[tokenstream::TokenTree]) -> MacParser<'a, 'b> {
let p = cx.new_parser_from_tts(tts);
MacParser { cx: cx, p: p }
}
/// Main entry point for parsing arguments to `docopt` macro.
/// First looks for an identifier for the struct name.
/// Second, a string containing the docopt usage patterns.
/// Third, an optional list of type annotations.
fn parse(&mut self) -> PResult<'b, Parsed> {
if self.p.token == token::Eof {
let err = self.cx.struct_span_err(
self.cx.call_site(), "macro expects arguments");
return Err(err);
}
let struct_info = try!(self.parse_struct_info());
let docstr = try!(self.parse_str());
let mut types = HashMap::new();
if !self.p.check(&token::Eof) {
let sep = SeqSep {
sep: Some(token::Comma),
trailing_sep_allowed: true,
};
types = self.p.parse_seq_to_before_end(
&token::Eof, sep, |p| MacParser::parse_type_annotation(p)
).into_iter()
.map(|(ident, ty)| {
let field_name = ident.to_string();
let key = ArgvMap::struct_field_to_key(&*field_name);
(Atom::new(&*key), ty)
})
.collect::<HashMap<Atom, P<ast::Ty>>>();
try!(self.p.expect(&token::Eof));
}
// This config does not matter because we're only asking for the
// usage patterns in the Docopt string. The configuration does not
// affect the retrieval of usage patterns.
let doc = match Docopt::new(docstr) {
Ok(doc) => doc,
Err(err) => {
let err = self.cx.struct_span_err(
self.cx.call_site(),
&format!("Invalid Docopt usage: {}", err));
return Err(err);
}
};
Ok(Parsed {
struct_info: struct_info,
doc: doc,
types: types,
})
}
/// Parses a single string literal. On failure, a spanned diagnostic
/// error is returned.
fn parse_str(&mut self) -> PResult<'b, String> {
fn lit_is_str(lit: &ast::Lit) -> bool {
match lit.node {
ast::LitKind::Str(_, _) => true,
_ => false,
}
}
fn lit_to_string(lit: &ast::Lit) -> String {
match lit.node {
ast::LitKind::Str(ref s, _) => s.to_string(),
_ => panic!("BUG: expected string literal"),
}
}
let exp = self.cx.expander().fold_expr(self.p.parse_expr().unwrap());
let s = match exp.node {
ast::ExprKind::Lit(ref lit) if lit_is_str(&**lit) => {
lit_to_string(&**lit)
}
_ => {
let err = format!("Expected string literal but got {}",
pprust::expr_to_string(&*exp));
let err = self.cx.struct_span_err(exp.span, &*err);
return Err(err);
}
};
self.p.bump();
Ok(s)
}
/// Parses a type annotation in a `docopt` invocation of the form
/// `ident: Ty`.
/// Note that this is a static method as it is used as a HOF.
fn parse_type_annotation(p: &mut Parser<'b>)
-> PResult<'b, (ast::Ident, P<ast::Ty>)> {
let ident = try!(p.parse_ident());
try!(p.expect(&token::Colon));
let ty = p.parse_ty().unwrap();
Ok((ident, ty))
}
/// Parses struct information, like visibility, name and deriving.
fn parse_struct_info(&mut self) -> PResult<'b, StructInfo> {
let public = self.p.eat_keyword(symbol::keywords::Pub);
let mut info = StructInfo {
name: try!(self.p.parse_ident()),
public: public,
deriving: vec![],
};
if self.p.eat(&token::Comma) { return Ok(info); }
let deriving = try!(self.p.parse_ident());
if *deriving.name.as_str() != *"derive" {
let err = format!("Expected 'derive' keyword but got '{}'",
deriving);
let err = self.cx.struct_span_err(self.cx.call_site(), &*err);
return Err(err);
}
while !self.p.eat(&token::Comma) {
info.deriving.push(
try!(self.p.parse_ident()).name.to_string());
}
Ok(info)
}
}
struct StructInfo {
name: ast::Ident,
public: bool,
deriving: Vec<String>,
}
// Convenience functions for building intermediate values.
fn ident(s: &str) -> ast::Ident {
ast::Ident::with_empty_ctxt(symbol::Symbol::intern(s))
}
fn attribute<S, T>(cx: &ExtCtxt, name: S, items: Vec<T>) -> ast::Attribute
where S: Borrow<str>, T: Borrow<str> {
let sp = codemap::DUMMY_SP;
let its = items.into_iter().map(|s| meta_item(cx, s.borrow())).collect();
let mi = cx.meta_list(sp, intern(name.borrow()), its);
cx.attribute(sp, mi)
}
fn meta_item(cx: &ExtCtxt, s: &str) -> codemap::Spanned<ast::NestedMetaItemKind> {
codemap::Spanned {
node: ast::NestedMetaItemKind::MetaItem(cx.meta_word(codemap::DUMMY_SP, intern(s))),
span: cx.call_site(),
}
}
fn intern(s: &str) -> symbol::Symbol {
symbol::Symbol::intern(s)
}
fn ty_vec_string(cx: &ExtCtxt) -> P<ast::Ty> {
let sp = codemap::DUMMY_SP;
let tystr = ast::AngleBracketedParameterData {
lifetimes: vec![],
types: vec![cx.ty_ident(sp, ident("String"))],
bindings: vec![],
};
cx.ty_path(ast::Path {
span: sp,
segments: vec![ast::PathSegment {
identifier: ident("Vec"),
parameters: Some(P(ast::PathParameters::AngleBracketed(tystr))),
}]
})
}
| struct_fields | identifier_name |
table.rs | use std::sync::Arc;
use arrow::array::*;
use arrow::datatypes::*;
use arrow::record_batch::RecordBatch;
use crate::error::*;
#[derive(Clone)] | chunks: Vec<Arc<dyn Array>>,
num_rows: usize,
null_count: usize,
}
impl ChunkedArray {
/// Construct a `ChunkedArray` from a list of `Array`s.
///
/// There must be at least 1 array, and all arrays must have the same data type.
fn from_arrays(arrays: Vec<Arc<dyn Array>>) -> Self {
assert!(!arrays.is_empty());
let mut num_rows = 0;
let mut null_count = 0;
// check that arrays have the same type
let data_type = &arrays[0].data_type();
arrays.iter().for_each(|array| {
assert!(&array.data_type() == data_type);
num_rows += array.len();
null_count += array.null_count();
});
ChunkedArray {
chunks: arrays,
num_rows,
null_count,
}
}
/// Return the length of the arrays in the chunk. This value is pre-computed.
pub fn num_rows(&self) -> usize {
self.num_rows
}
pub fn null_count(&self) -> usize {
self.null_count
}
pub fn num_chunks(&self) -> usize {
self.chunks.len()
}
/// Get the count per chunk
///
/// This is useful for repartitioning
pub(crate) fn chunk_counts(&self) -> Vec<usize> {
self.chunks().iter().map(|chunk| chunk.len()).collect()
}
/// Get a chunk from the chunked array by index
/// TODO: should this have bounds-checking?
pub fn chunk(&self, i: usize) -> &Arc<dyn Array> {
&self.chunks[i]
}
pub fn chunks(&self) -> &Vec<Arc<dyn Array>> {
&self.chunks
}
/// Construct a zero-copy slice of the chunked array with the indicated offset and length.
///
/// The `offset` is the position of the first element in the constructed slice.
/// `length` is the length of the slice. If there are not enough elements in the chunked array,
/// the length will be adjusted accordingly.
fn slice(&self, offset: usize, length: Option<usize>) -> Self {
let mut offset = offset;
let mut length = length.unwrap_or(std::usize::MAX);
length = std::cmp::min(length, self.num_rows());
let mut current_chunk: usize = 0;
let mut new_chunks: Vec<ArrayRef> = vec![];
// compute the first offset. If offset > whole chunks' lengths, skip those chunks
while current_chunk < self.num_chunks() && offset >= self.chunk(current_chunk).len() {
offset -= self.chunk(current_chunk).len();
current_chunk += 1;
}
while current_chunk < self.num_chunks() && length > 0 {
// Clamp to what remains in this chunk so the arrow slice stays in bounds.
let take = std::cmp::min(length, self.chunk(current_chunk).len() - offset);
new_chunks.push(self.chunk(current_chunk).slice(offset, take));
length -= take;
offset = 0;
current_chunk += 1;
}
Self::from_arrays(new_chunks)
}
fn filter(&self, condition: &Self) -> Self {
let filtered: arrow::error::Result<Vec<ArrayRef>> = self
.chunks()
.iter()
.zip(condition.chunks())
.map(|(a, b): (&ArrayRef, &ArrayRef)| {
arrow::compute::filter(a.as_ref(), &BooleanArray::from(b.data()))
})
.collect();
Self::from_arrays(filtered.unwrap())
}
fn flatten(&self) {
unimplemented!("This is for flattening struct columns, we aren't yet there")
}
}
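// Hedged test-style sketch of the slicing semantics above (assumes arrow's
// Int32Array; `from_arrays` and `slice` are crate-private):
//
// let a = Arc::new(Int32Array::from(vec![0, 1, 2])) as Arc<dyn Array>;
// let b = Arc::new(Int32Array::from(vec![3, 4, 5])) as Arc<dyn Array>;
// let chunked = ChunkedArray::from_arrays(vec![a, b]);
// let sliced = chunked.slice(2, Some(3)); // rows 2, 3, 4 span both chunks
// assert_eq!(sliced.num_rows(), 3);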
pub fn col_to_prim_arrays<T>(column: &Column) -> Vec<&PrimitiveArray<T>>
where
T: ArrowPrimitiveType,
{
let mut arrays: Vec<&PrimitiveArray<T>> = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<PrimitiveArray<T>>().unwrap())
}
arrays
}
pub fn col_to_string_arrays(column: &Column) -> Vec<&StringArray> {
let mut arrays = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<StringArray>().unwrap())
}
arrays
}
/// A column data structure consisting of a `Field` and `ChunkedArray`
#[derive(Clone)]
pub struct Column {
pub(crate) data: ChunkedArray,
field: arrow::datatypes::Field,
}
impl Column {
pub fn from_chunked_array(chunk: ChunkedArray, field: arrow::datatypes::Field) -> Self {
Column { data: chunk, field }
}
pub fn from_arrays(arrays: Vec<Arc<dyn Array>>, field: arrow::datatypes::Field) -> Self {
assert!(!arrays.is_empty());
for array in &arrays {
assert!(array.data_type() == field.data_type());
}
Column {
data: ChunkedArray::from_arrays(arrays),
field,
}
}
/// Merge the chunk arrays into a single array
///
/// Returns an error if concatenating the array type is not supported,
/// or the dataframe is empty
pub fn to_array(&self) -> Result<ArrayRef> {
Ok(arrow::compute::concat(self.data().chunks())?)
}
pub fn name(&self) -> &str {
self.field.name()
}
pub fn data_type(&self) -> &DataType {
self.field.data_type()
}
pub fn data(&self) -> &ChunkedArray {
&self.data
}
pub(crate) fn field(&self) -> &Field {
&self.field
}
pub fn slice(&self, offset: usize, length: Option<usize>) -> Self {
Self::from_chunked_array(self.data().slice(offset, length), self.field().clone())
}
pub fn null_count(&self) -> usize {
self.data().null_count()
}
pub fn num_rows(&self) -> usize {
self.data().num_rows()
}
/// Filter this column using a Boolean column as the mask
pub fn filter(&self, condition: &Self) -> Self {
Self::from_chunked_array(self.data.filter(condition.data()), self.field.clone())
}
/// Create a new column by taking values at indices, while repartitioning to the chunk size
pub fn take(&self, indices: &UInt32Array, chunk_size: usize) -> Result<Self> {
let mut consumed_len = 0;
let total_len = indices.len();
let values = self.to_array()?;
let mut outputs = vec![];
while consumed_len < total_len {
let bounded_len = if total_len < chunk_size {
total_len
} else if consumed_len + chunk_size > total_len {
// Final chunk: only the remaining indices are left.
total_len - consumed_len
} else {
chunk_size
};
let slice = indices.slice(consumed_len, bounded_len);
let slice = slice.as_any().downcast_ref::<UInt32Array>().unwrap();
let taken = arrow::compute::take(&values, slice, None)?;
outputs.push(taken);
consumed_len += bounded_len;
}
Ok(Self {
data: ChunkedArray::from_arrays(outputs),
field: self.field.clone(),
})
}
fn flatten() {}
}
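// Hedged test-style sketch: with the corrected partitioning above, taking
// five indices with chunk_size = 2 yields chunk sizes [2, 2, 1]:
//
// let indices = UInt32Array::from(vec![4, 3, 2, 1, 0]);
// let taken = column.take(&indices, 2)?;
// assert_eq!(taken.data().chunk_counts(), vec![2, 2, 1]);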
/// A logical table as a sequence of chunked arrays
pub struct Table {
schema: Arc<Schema>,
pub(crate) columns: Vec<Column>,
}
impl Table {
pub fn new(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// assert that there are some columns
assert!(
!columns.is_empty(),
"at least one column must be defined to create a record batch"
);
// assert that all columns have the same row count
let len = columns[0].data().num_rows();
for column in &columns {
assert_eq!(
len,
column.data().num_rows(),
"all columns in a record batch must have the same length"
);
}
Table { schema, columns }
}
pub fn schema(&self) -> &Arc<Schema> {
&self.schema
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[0].data().num_rows()
}
// keep fn
pub fn column(&self, i: usize) -> &Column {
&self.columns[i]
}
pub fn columns(&self) -> &Vec<Column> {
&self.columns
}
// new fns
fn add_column() {}
// fn remove_column(&self, _i: usize) -> Self {
// Table {
// schema: self.schema.clone(),
// columns: self.columns
// }
// }
/// Replace a column in the table, producing a new `Table`
fn set_column() {}
fn replace_schema_metadata() {}
/// Each column with a struct type is flattened into one column per struct field.
/// Other columns are left unchanged.
fn flatten() {}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make(columns: Vec<Column>) -> Self {
let fields: Vec<Field> = columns.iter().map(|column| column.field.clone()).collect();
Self {
schema: Arc::new(Schema::new(fields)),
columns,
}
}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make_with_schema(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// TODO validate that schema and columns match
Self { schema, columns }
}
/// Slice the table from an offset
pub fn slice(&self, offset: usize, limit: usize) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.slice(offset, Some(limit)))
.collect(),
}
}
pub fn filter(&self, condition: &Column) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.filter(condition))
.collect(),
}
}
/// Construct a `Table` from a sequence of Arrow `RecordBatch`es.
///
/// Columns are first created from the `RecordBatch`es, with schema validations being performed.
/// A table is then created
pub fn from_record_batches(schema: Arc<Schema>, record_batches: Vec<RecordBatch>) -> Self {
if record_batches.is_empty() {
panic!("Error about record batches (copy from cpp)")
}
let num_columns = record_batches[0].num_columns();
// let mut arrays: Vec<Vec<&Arc<Array>>> = vec![vec![]; num_columns];
let mut arrays: Vec<Vec<Arc<dyn Array>>> = vec![vec![]; num_columns];
// create columns from record batches
for batch in &record_batches {
assert!(
batch.num_columns() == num_columns,
"Each record batch should have the same length as the first batch"
);
batch.columns().iter().enumerate().for_each(|(i, array)| {
arrays[i].push(array.to_owned());
});
}
let columns = arrays
.iter()
.enumerate()
.map(|(i, array)| Column::from_arrays(array.to_owned(), schema.field(i).clone()))
.collect();
Table { schema, columns }
}
fn concatenate_tables() {}
fn to_record_batches() {}
}
unsafe impl Send for Table {}
unsafe impl Sync for Table {}
#[cfg(test)]
mod tests {} | pub struct ChunkedArray { | random_line_split |
table.rs | use std::sync::Arc;
use arrow::array::*;
use arrow::datatypes::*;
use arrow::record_batch::RecordBatch;
use crate::error::*;
#[derive(Clone)]
pub struct ChunkedArray {
chunks: Vec<Arc<dyn Array>>,
num_rows: usize,
null_count: usize,
}
impl ChunkedArray {
/// Construct a `ChunkedArray` from a list of `Array`s.
///
/// There must be at least 1 array, and all arrays must have the same data type.
fn from_arrays(arrays: Vec<Arc<dyn Array>>) -> Self {
assert!(!arrays.is_empty());
let mut num_rows = 0;
let mut null_count = 0;
// check that arrays have the same type
let data_type = &arrays[0].data_type();
arrays.iter().for_each(|array| {
assert!(&array.data_type() == data_type);
num_rows += array.len();
null_count += array.null_count();
});
ChunkedArray {
chunks: arrays,
num_rows,
null_count,
}
}
/// Return the length of the arrays in the chunk. This value is pre-computed.
pub fn num_rows(&self) -> usize {
self.num_rows
}
pub fn null_count(&self) -> usize {
self.null_count
}
pub fn num_chunks(&self) -> usize {
self.chunks.len()
}
/// Get the count per chunk
///
/// This is useful for repartitioning
pub(crate) fn chunk_counts(&self) -> Vec<usize> {
self.chunks().iter().map(|chunk| chunk.len()).collect()
}
/// Get a chunk from the chunked array by index
/// TODO: should this have bounds-checking?
pub fn | (&self, i: usize) -> &Arc<dyn Array> {
&self.chunks[i]
}
pub fn chunks(&self) -> &Vec<Arc<dyn Array>> {
&self.chunks
}
/// Construct a zero-copy slice of the chunked array with the indicated offset and length.
///
/// The `offset` is the position of the first element in the constructed slice.
/// `length` is the length of the slice. If there are not enough elements in the chunked array,
/// the length will be adjusted accordingly.
fn slice(&self, offset: usize, length: Option<usize>) -> Self {
let mut offset = offset;
let mut length = length.unwrap_or(std::usize::MAX);
length = std::cmp::min(length, self.num_rows());
let mut current_chunk: usize = 0;
let mut new_chunks: Vec<ArrayRef> = vec![];
// compute the first offset. If offset > whole chunks' lengths, skip those chunks
while current_chunk < self.num_chunks() && offset >= self.chunk(current_chunk).len() {
offset -= self.chunk(current_chunk).len();
current_chunk += 1;
}
while current_chunk < self.num_chunks() && length > 0 {
// Clamp to what remains in this chunk so the arrow slice stays in bounds.
let take = std::cmp::min(length, self.chunk(current_chunk).len() - offset);
new_chunks.push(self.chunk(current_chunk).slice(offset, take));
length -= take;
offset = 0;
current_chunk += 1;
}
Self::from_arrays(new_chunks)
}
fn filter(&self, condition: &Self) -> Self {
let filtered: arrow::error::Result<Vec<ArrayRef>> = self
.chunks()
.iter()
.zip(condition.chunks())
.map(|(a, b): (&ArrayRef, &ArrayRef)| {
arrow::compute::filter(a.as_ref(), &BooleanArray::from(b.data()))
})
.collect();
Self::from_arrays(filtered.unwrap())
}
fn flatten(&self) {
unimplemented!("This is for flattening struct columns, we aren't yet there")
}
}
pub fn col_to_prim_arrays<T>(column: &Column) -> Vec<&PrimitiveArray<T>>
where
T: ArrowPrimitiveType,
{
let mut arrays: Vec<&PrimitiveArray<T>> = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<PrimitiveArray<T>>().unwrap())
}
arrays
}
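// Hedged usage sketch: downcast a Float64 column's chunks to typed arrays
// (assumes the column truly holds Float64 data; the downcast panics otherwise):
//
// let arrays = col_to_prim_arrays::<Float64Type>(&column);
// let first: f64 = arrays[0].value(0);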
pub fn col_to_string_arrays(column: &Column) -> Vec<&StringArray> {
let mut arrays = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<StringArray>().unwrap())
}
arrays
}
/// A column data structure consisting of a `Field` and `ChunkedArray`
#[derive(Clone)]
pub struct Column {
pub(crate) data: ChunkedArray,
field: arrow::datatypes::Field,
}
impl Column {
pub fn from_chunked_array(chunk: ChunkedArray, field: arrow::datatypes::Field) -> Self {
Column { data: chunk, field }
}
pub fn from_arrays(arrays: Vec<Arc<dyn Array>>, field: arrow::datatypes::Field) -> Self {
assert!(!arrays.is_empty());
for array in &arrays {
assert!(array.data_type() == field.data_type());
}
Column {
data: ChunkedArray::from_arrays(arrays),
field,
}
}
/// Merge the chunk arrays into a single array
///
/// Returns an error if concatenating the array type is not supported,
/// or the dataframe is empty
pub fn to_array(&self) -> Result<ArrayRef> {
Ok(arrow::compute::concat(self.data().chunks())?)
}
pub fn name(&self) -> &str {
self.field.name()
}
pub fn data_type(&self) -> &DataType {
self.field.data_type()
}
pub fn data(&self) -> &ChunkedArray {
&self.data
}
pub(crate) fn field(&self) -> &Field {
&self.field
}
pub fn slice(&self, offset: usize, length: Option<usize>) -> Self {
Self::from_chunked_array(self.data().slice(offset, length), self.field().clone())
}
pub fn null_count(&self) -> usize {
self.data().null_count()
}
pub fn num_rows(&self) -> usize {
self.data().num_rows()
}
/// Filter this column using a Boolean column as the mask
pub fn filter(&self, condition: &Self) -> Self {
Self::from_chunked_array(self.data.filter(condition.data()), self.field.clone())
}
/// Create a new column by taking values at indices, while repartitioning to the chunk size
pub fn take(&self, indices: &UInt32Array, chunk_size: usize) -> Result<Self> {
let mut consumed_len = 0;
let total_len = indices.len();
let values = self.to_array()?;
let mut outputs = vec![];
while consumed_len < total_len {
let bounded_len = if total_len < chunk_size {
total_len
} else if consumed_len + chunk_size > total_len {
// Final chunk: only the remaining indices are left.
total_len - consumed_len
} else {
chunk_size
};
let slice = indices.slice(consumed_len, bounded_len);
let slice = slice.as_any().downcast_ref::<UInt32Array>().unwrap();
let taken = arrow::compute::take(&values, slice, None)?;
outputs.push(taken);
consumed_len += bounded_len;
}
Ok(Self {
data: ChunkedArray::from_arrays(outputs),
field: self.field.clone(),
})
}
fn flatten() {}
}
/// A logical table as a sequence of chunked arrays
pub struct Table {
schema: Arc<Schema>,
pub(crate) columns: Vec<Column>,
}
impl Table {
pub fn new(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// assert that there are some columns
assert!(
!columns.is_empty(),
"at least one column must be defined to create a record batch"
);
// assert that all columns have the same row count
let len = columns[0].data().num_rows();
for column in &columns {
assert_eq!(
len,
column.data().num_rows(),
"all columns in a record batch must have the same length"
);
}
Table { schema, columns }
}
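// Hedged construction sketch using only public constructors from this file
// (a single non-null Int32 column named "id"):
//
// let field = Field::new("id", DataType::Int32, false);
// let array = Arc::new(Int32Array::from(vec![1, 2, 3])) as Arc<dyn Array>;
// let column = Column::from_arrays(vec![array], field.clone());
// let table = Table::new(Arc::new(Schema::new(vec![field])), vec![column]);
// assert_eq!(table.num_rows(), 3);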
pub fn schema(&self) -> &Arc<Schema> {
&self.schema
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[0].data().num_rows()
}
// keep fn
pub fn column(&self, i: usize) -> &Column {
&self.columns[i]
}
pub fn columns(&self) -> &Vec<Column> {
&self.columns
}
// new fns
fn add_column() {}
// fn remove_column(&self, _i: usize) -> Self {
// Table {
// schema: self.schema.clone(),
// columns: self.columns
// }
// }
/// Replace a column in the table, producing a new `Table`
fn set_column() {}
fn replace_schema_metadata() {}
/// Each column with a struct type is flattened into one column per struct field.
/// Other columns are left unchanged.
fn flatten() {}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make(columns: Vec<Column>) -> Self {
let fields: Vec<Field> = columns.iter().map(|column| column.field.clone()).collect();
Self {
schema: Arc::new(Schema::new(fields)),
columns,
}
}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make_with_schema(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// TODO validate that schema and columns match
Self { schema, columns }
}
/// Slice the table from an offset
pub fn slice(&self, offset: usize, limit: usize) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.slice(offset, Some(limit)))
.collect(),
}
}
pub fn filter(&self, condition: &Column) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.filter(condition))
.collect(),
}
}
/// Construct a `Table` from a sequence of Arrow `RecordBatch`es.
///
/// Columns are first created from the `RecordBatch`es, with schema validations being performed.
/// A table is then created
pub fn from_record_batches(schema: Arc<Schema>, record_batches: Vec<RecordBatch>) -> Self {
if record_batches.is_empty() {
panic!("Error about record batches (copy from cpp)")
}
let num_columns = record_batches[0].num_columns();
// let mut arrays: Vec<Vec<&Arc<Array>>> = vec![vec![]; num_columns];
let mut arrays: Vec<Vec<Arc<dyn Array>>> = vec![vec![]; num_columns];
// create columns from record batches
for batch in &record_batches {
assert!(
batch.num_columns() == num_columns,
"Each record batch should have the same length as the first batch"
);
batch.columns().iter().enumerate().for_each(|(i, array)| {
arrays[i].push(array.to_owned());
});
}
let columns = arrays
.iter()
.enumerate()
.map(|(i, array)| Column::from_arrays(array.to_owned(), schema.field(i).clone()))
.collect();
Table { schema, columns }
}
fn concatenate_tables() {}
fn to_record_batches() {}
}
unsafe impl Send for Table {}
unsafe impl Sync for Table {}
#[cfg(test)]
mod tests {}
| chunk | identifier_name |
table.rs | use std::sync::Arc;
use arrow::array::*;
use arrow::datatypes::*;
use arrow::record_batch::RecordBatch;
use crate::error::*;
#[derive(Clone)]
pub struct ChunkedArray {
chunks: Vec<Arc<dyn Array>>,
num_rows: usize,
null_count: usize,
}
impl ChunkedArray {
/// Construct a `ChunkedArray` from a list of `Array`s.
///
/// There must be at least 1 array, and all arrays must have the same data type.
fn from_arrays(arrays: Vec<Arc<dyn Array>>) -> Self {
assert!(!arrays.is_empty());
let mut num_rows = 0;
let mut null_count = 0;
// check that arrays have the same type
let data_type = &arrays[0].data_type();
arrays.iter().for_each(|array| {
assert!(&array.data_type() == data_type);
num_rows += array.len();
null_count += array.null_count();
});
ChunkedArray {
chunks: arrays,
num_rows,
null_count,
}
}
/// Return the length of the arrays in the chunk. This value is pre-computed.
pub fn num_rows(&self) -> usize {
self.num_rows
}
pub fn null_count(&self) -> usize {
self.null_count
}
pub fn num_chunks(&self) -> usize {
self.chunks.len()
}
/// Get the count per chunk
///
/// This is useful for repartitioning
pub(crate) fn chunk_counts(&self) -> Vec<usize> {
self.chunks().iter().map(|chunk| chunk.len()).collect()
}
/// Get a chunk from the chunked array by index
/// TODO: should this have bounds-checking?
pub fn chunk(&self, i: usize) -> &Arc<dyn Array> {
&self.chunks[i]
}
pub fn chunks(&self) -> &Vec<Arc<dyn Array>> {
&self.chunks
}
/// Construct a zero-copy slice of the chunked array with the indicated offset and length.
///
/// The `offset` is the position of the first element in the constructed slice.
/// `length` is the length of the slice. If there are not enough elements in the chunked array,
/// the length will be adjusted accordingly.
fn slice(&self, offset: usize, length: Option<usize>) -> Self {
let mut offset = offset;
let mut length = length.unwrap_or(std::usize::MAX);
length = std::cmp::min(length, self.num_rows());
let mut current_chunk: usize = 0;
let mut new_chunks: Vec<ArrayRef> = vec![];
// compute the first offset. If offset > whole chunks' lengths, skip those chunks
while current_chunk < self.num_chunks() && offset >= self.chunk(current_chunk).len() {
offset -= self.chunk(current_chunk).len();
current_chunk += 1;
}
while current_chunk < self.num_chunks() && length > 0 {
// Clamp to what remains in this chunk so the arrow slice stays in bounds.
let take = std::cmp::min(length, self.chunk(current_chunk).len() - offset);
new_chunks.push(self.chunk(current_chunk).slice(offset, take));
length -= take;
offset = 0;
current_chunk += 1;
}
Self::from_arrays(new_chunks)
}
fn filter(&self, condition: &Self) -> Self {
let filtered: arrow::error::Result<Vec<ArrayRef>> = self
.chunks()
.iter()
.zip(condition.chunks())
.map(|(a, b): (&ArrayRef, &ArrayRef)| {
arrow::compute::filter(a.as_ref(), &BooleanArray::from(b.data()))
})
.collect();
Self::from_arrays(filtered.unwrap())
}
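// Hedged note: the zip above pairs chunks positionally, so the boolean
// `condition` must share this array's chunking; illustrative test-style use:
//
// let mask = ChunkedArray::from_arrays(vec![
//     Arc::new(BooleanArray::from(vec![true, false, true])) as Arc<dyn Array>,
// ]);
// let kept = data.filter(&mask); // keeps rows 0 and 2 of a single-chunk array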
fn flatten(&self) {
unimplemented!("This is for flattening struct columns, we aren't yet there")
}
}
pub fn col_to_prim_arrays<T>(column: &Column) -> Vec<&PrimitiveArray<T>>
where
T: ArrowPrimitiveType,
{
let mut arrays: Vec<&PrimitiveArray<T>> = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<PrimitiveArray<T>>().unwrap())
}
arrays
}
pub fn col_to_string_arrays(column: &Column) -> Vec<&StringArray> {
let mut arrays = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<StringArray>().unwrap())
}
arrays
}
/// A column data structure consisting of a `Field` and `ChunkedArray`
#[derive(Clone)]
pub struct Column {
pub(crate) data: ChunkedArray,
field: arrow::datatypes::Field,
}
impl Column {
pub fn from_chunked_array(chunk: ChunkedArray, field: arrow::datatypes::Field) -> Self {
Column { data: chunk, field }
}
pub fn from_arrays(arrays: Vec<Arc<dyn Array>>, field: arrow::datatypes::Field) -> Self {
assert!(!arrays.is_empty());
for array in &arrays {
assert!(array.data_type() == field.data_type());
}
Column {
data: ChunkedArray::from_arrays(arrays),
field,
}
}
/// Merge the chunk arrays into a single array
///
/// Returns an error if concatenating the array type is not supported,
/// or the dataframe is empty
pub fn to_array(&self) -> Result<ArrayRef> {
Ok(arrow::compute::concat(self.data().chunks())?)
}
pub fn name(&self) -> &str {
self.field.name()
}
pub fn data_type(&self) -> &DataType {
self.field.data_type()
}
pub fn data(&self) -> &ChunkedArray {
&self.data
}
pub(crate) fn field(&self) -> &Field {
&self.field
}
pub fn slice(&self, offset: usize, length: Option<usize>) -> Self {
Self::from_chunked_array(self.data().slice(offset, length), self.field().clone())
}
pub fn null_count(&self) -> usize {
self.data().null_count()
}
pub fn num_rows(&self) -> usize {
self.data().num_rows()
}
/// Filter this column using a Boolean column as the mask
pub fn filter(&self, condition: &Self) -> Self {
Self::from_chunked_array(self.data.filter(condition.data()), self.field.clone())
}
/// Create a new column by taking values at indices, while repartitioning to the chunk size
pub fn take(&self, indices: &UInt32Array, chunk_size: usize) -> Result<Self> {
let mut consumed_len = 0;
let total_len = indices.len();
let values = self.to_array()?;
let mut outputs = vec![];
while consumed_len < total_len {
let bounded_len = if total_len < chunk_size | else if consumed_len + chunk_size > total_len {
// Final chunk: only the remaining indices are left.
total_len - consumed_len
} else {
chunk_size
};
let slice = indices.slice(consumed_len, bounded_len);
let slice = slice.as_any().downcast_ref::<UInt32Array>().unwrap();
let taken = arrow::compute::take(&values, slice, None)?;
outputs.push(taken);
consumed_len += bounded_len;
}
Ok(Self {
data: ChunkedArray::from_arrays(outputs),
field: self.field.clone(),
})
}
fn flatten() {}
}
/// A logical table as a sequence of chunked arrays
pub struct Table {
schema: Arc<Schema>,
pub(crate) columns: Vec<Column>,
}
impl Table {
pub fn new(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// assert that there are some columns
assert!(
!columns.is_empty(),
"at least one column must be defined to create a record batch"
);
// assert that all columns have the same row count
let len = columns[0].data().num_rows();
for column in &columns {
assert_eq!(
len,
column.data().num_rows(),
"all columns in a record batch must have the same length"
);
}
Table { schema, columns }
}
pub fn schema(&self) -> &Arc<Schema> {
&self.schema
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[0].data().num_rows()
}
// keep fn
pub fn column(&self, i: usize) -> &Column {
&self.columns[i]
}
pub fn columns(&self) -> &Vec<Column> {
&self.columns
}
// new fns
fn add_column() {}
// fn remove_column(&self, _i: usize) -> Self {
// Table {
// schema: self.schema.clone(),
// columns: self.columns
// }
// }
/// Replace a column in the table, producing a new `Table`
fn set_column() {}
fn replace_schema_metadata() {}
/// Each column with a struct type is flattened into one column per struct field.
/// Other columns are left unchanged.
fn flatten() {}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make(columns: Vec<Column>) -> Self {
let fields: Vec<Field> = columns.iter().map(|column| column.field.clone()).collect();
Self {
schema: Arc::new(Schema::new(fields)),
columns,
}
}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make_with_schema(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// TODO validate that schema and columns match
Self { schema, columns }
}
/// Slice the table from an offset
pub fn slice(&self, offset: usize, limit: usize) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.slice(offset, Some(limit)))
.collect(),
}
}
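// Hedged usage sketch: paging ten rows in, five rows long; each column is
// sliced zero-copy and the page may be shorter near the end of the table:
//
// let page = table.slice(10, 5);
// assert!(page.num_rows() <= 5);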
pub fn filter(&self, condition: &Column) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.filter(condition))
.collect(),
}
}
/// Construct a `Table` from a sequence of Arrow `RecordBatch`es.
///
/// Columns are first created from the `RecordBatch`es, with schema validations being performed.
/// A table is then created
pub fn from_record_batches(schema: Arc<Schema>, record_batches: Vec<RecordBatch>) -> Self {
if record_batches.is_empty() {
panic!("Error about record batches (copy from cpp)")
}
let num_columns = record_batches[0].num_columns();
// let mut arrays: Vec<Vec<&Arc<Array>>> = vec![vec![]; num_columns];
let mut arrays: Vec<Vec<Arc<dyn Array>>> = vec![vec![]; num_columns];
// create columns from record batches
for batch in &record_batches {
assert!(
batch.num_columns() == num_columns,
"Each record batch should have the same length as the first batch"
);
batch.columns().iter().enumerate().for_each(|(i, array)| {
arrays[i].push(array.to_owned());
});
}
let columns = arrays
.iter()
.enumerate()
.map(|(i, array)| Column::from_arrays(array.to_owned(), schema.field(i).clone()))
.collect();
Table { schema, columns }
}
fn concatenate_tables() {}
fn to_record_batches() {}
}
unsafe impl Send for Table {}
unsafe impl Sync for Table {}
#[cfg(test)]
mod tests {}
| {
total_len
} | conditional_block |
table.rs | use std::sync::Arc;
use arrow::array::*;
use arrow::datatypes::*;
use arrow::record_batch::RecordBatch;
use crate::error::*;
#[derive(Clone)]
pub struct ChunkedArray {
chunks: Vec<Arc<dyn Array>>,
num_rows: usize,
null_count: usize,
}
impl ChunkedArray {
/// Construct a `ChunkedArray` from a list of `Array`s.
///
/// There must be at least 1 array, and all arrays must have the same data type.
fn from_arrays(arrays: Vec<Arc<dyn Array>>) -> Self {
assert!(!arrays.is_empty());
let mut num_rows = 0;
let mut null_count = 0;
// check that arrays have the same type
let data_type = &arrays[0].data_type();
arrays.iter().for_each(|array| {
assert!(&array.data_type() == data_type);
num_rows += array.len();
null_count += array.null_count();
});
ChunkedArray {
chunks: arrays,
num_rows,
null_count,
}
}
/// Return the length of the arrays in the chunk. This value is pre-computed.
pub fn num_rows(&self) -> usize {
self.num_rows
}
pub fn null_count(&self) -> usize {
self.null_count
}
pub fn num_chunks(&self) -> usize {
self.chunks.len()
}
/// Get the count per chunk
///
/// This is useful for repartitioning
pub(crate) fn chunk_counts(&self) -> Vec<usize> {
self.chunks().iter().map(|chunk| chunk.len()).collect()
}
/// Get a chunk from the chunked array by index
/// TODO: should this have bounds-checking?
pub fn chunk(&self, i: usize) -> &Arc<dyn Array> {
&self.chunks[i]
}
pub fn chunks(&self) -> &Vec<Arc<dyn Array>> {
&self.chunks
}
/// Construct a zero-copy slice of the chunked array with the indicated offset and length.
///
/// The `offset` is the position of the first element in the constructed slice.
/// `length` is the length of the slice. If there are not enough elements in the chunked array,
/// the length will be adjusted accordingly.
fn slice(&self, offset: usize, length: Option<usize>) -> Self {
let mut offset = offset;
let mut length = length.unwrap_or(std::usize::MAX);
length = std::cmp::min(length, self.num_rows());
let mut current_chunk: usize = 0;
let mut new_chunks: Vec<ArrayRef> = vec![];
// compute the first offset. If offset > whole chunks' lengths, skip those chunks
while current_chunk < self.num_chunks() && offset >= self.chunk(current_chunk).len() {
offset -= self.chunk(current_chunk).len();
current_chunk += 1;
}
while current_chunk < self.num_chunks() && length > 0 {
// Clamp to what remains in this chunk so the arrow slice stays in bounds.
let take = std::cmp::min(length, self.chunk(current_chunk).len() - offset);
new_chunks.push(self.chunk(current_chunk).slice(offset, take));
length -= take;
offset = 0;
current_chunk += 1;
}
Self::from_arrays(new_chunks)
}
fn filter(&self, condition: &Self) -> Self {
let filtered: arrow::error::Result<Vec<ArrayRef>> = self
.chunks()
.iter()
.zip(condition.chunks())
.map(|(a, b): (&ArrayRef, &ArrayRef)| {
arrow::compute::filter(a.as_ref(), &BooleanArray::from(b.data()))
})
.collect();
Self::from_arrays(filtered.unwrap())
}
fn flatten(&self) {
unimplemented!("This is for flattening struct columns, we aren't yet there")
}
}
pub fn col_to_prim_arrays<T>(column: &Column) -> Vec<&PrimitiveArray<T>>
where
T: ArrowPrimitiveType,
{
let mut arrays: Vec<&PrimitiveArray<T>> = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<PrimitiveArray<T>>().unwrap())
}
arrays
}
pub fn col_to_string_arrays(column: &Column) -> Vec<&StringArray> {
let mut arrays = vec![];
for chunk in column.data().chunks() {
arrays.push(chunk.as_any().downcast_ref::<StringArray>().unwrap())
}
arrays
}
/// A column data structure consisting of a `Field` and `ChunkedArray`
#[derive(Clone)]
pub struct Column {
pub(crate) data: ChunkedArray,
field: arrow::datatypes::Field,
}
impl Column {
pub fn from_chunked_array(chunk: ChunkedArray, field: arrow::datatypes::Field) -> Self {
Column { data: chunk, field }
}
pub fn from_arrays(arrays: Vec<Arc<dyn Array>>, field: arrow::datatypes::Field) -> Self {
assert!(!arrays.is_empty());
for array in &arrays {
assert!(array.data_type() == field.data_type());
}
Column {
data: ChunkedArray::from_arrays(arrays),
field,
}
}
/// Merge the chunk arrays into a single array
///
/// Returns an error if concatenating the array type is not supported,
/// or the dataframe is empty
pub fn to_array(&self) -> Result<ArrayRef> {
Ok(arrow::compute::concat(self.data().chunks())?)
}
pub fn name(&self) -> &str {
self.field.name()
}
pub fn data_type(&self) -> &DataType {
self.field.data_type()
}
pub fn data(&self) -> &ChunkedArray {
&self.data
}
pub(crate) fn field(&self) -> &Field {
&self.field
}
pub fn slice(&self, offset: usize, length: Option<usize>) -> Self {
Self::from_chunked_array(self.data().slice(offset, length), self.field().clone())
}
pub fn null_count(&self) -> usize {
self.data().null_count()
}
pub fn num_rows(&self) -> usize {
self.data().num_rows()
}
/// Filter this column using a Boolean column as the mask
pub fn filter(&self, condition: &Self) -> Self {
Self::from_chunked_array(self.data.filter(condition.data()), self.field.clone())
}
/// Create a new column by taking values at indices, while repartitioning to the chunk size
pub fn take(&self, indices: &UInt32Array, chunk_size: usize) -> Result<Self> {
let mut consumed_len = 0;
let total_len = indices.len();
let values = self.to_array()?;
let mut outputs = vec![];
while consumed_len < total_len {
let bounded_len = if total_len < chunk_size {
total_len
} else if consumed_len + chunk_size > total_len {
// Final chunk: only the remaining indices are left.
total_len - consumed_len
} else {
chunk_size
};
let slice = indices.slice(consumed_len, bounded_len);
let slice = slice.as_any().downcast_ref::<UInt32Array>().unwrap();
let taken = arrow::compute::take(&values, slice, None)?;
outputs.push(taken);
consumed_len += bounded_len;
}
Ok(Self {
data: ChunkedArray::from_arrays(outputs),
field: self.field.clone(),
})
}
fn flatten() {}
}
/// A logical table as a sequence of chunked arrays
pub struct Table {
schema: Arc<Schema>,
pub(crate) columns: Vec<Column>,
}
impl Table {
pub fn new(schema: Arc<Schema>, columns: Vec<Column>) -> Self {
// assert that there are some columns
assert!(
!columns.is_empty(),
"at least one column must be defined to create a record batch"
);
// assert that all columns have the same row count
let len = columns[0].data().num_rows();
for column in &columns {
assert_eq!(
len,
column.data().num_rows(),
"all columns in a record batch must have the same length"
);
}
Table { schema, columns }
}
pub fn schema(&self) -> &Arc<Schema> {
&self.schema
}
pub fn num_columns(&self) -> usize {
self.columns.len()
}
pub fn num_rows(&self) -> usize {
self.columns[0].data().num_rows()
}
// keep fn
pub fn column(&self, i: usize) -> &Column {
&self.columns[i]
}
pub fn columns(&self) -> &Vec<Column> {
&self.columns
}
// new fns
fn add_column() {}
// fn remove_column(&self, _i: usize) -> Self {
// Table {
// schema: self.schema.clone(),
// columns: self.columns
// }
// }
/// Replace a column in the table, producing a new `Table`
fn set_column() {}
fn replace_schema_metadata() {}
/// Each column with a struct type is flattened into one column per struct field.
/// Other columns are left unchanged.
fn flatten() {}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make(columns: Vec<Column>) -> Self {
let fields: Vec<Field> = columns.iter().map(|column| column.field.clone()).collect();
Self {
schema: Arc::new(Schema::new(fields)),
columns,
}
}
/// Construct a `Table` from a sequence of `Column`s and a schema
fn make_with_schema(schema: Arc<Schema>, columns: Vec<Column>) -> Self |
/// Slice the table from an offset
pub fn slice(&self, offset: usize, limit: usize) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.slice(offset, Some(limit)))
.collect(),
}
}
pub fn filter(&self, condition: &Column) -> Self {
Self {
schema: self.schema.clone(),
columns: self
.columns
.clone()
.into_iter()
.map(|col| col.filter(condition))
.collect(),
}
}
/// Construct a `Table` from a sequence of Arrow `RecordBatch`es.
///
/// Columns are first created from the `RecordBatch`es, with schema validations being performed.
/// A table is then created
pub fn from_record_batches(schema: Arc<Schema>, record_batches: Vec<RecordBatch>) -> Self {
if record_batches.is_empty() {
panic!("Error about record batches (copy from cpp)")
}
let num_columns = record_batches[0].num_columns();
// let mut arrays: Vec<Vec<&Arc<Array>>> = vec![vec![]; num_columns];
let mut arrays: Vec<Vec<Arc<dyn Array>>> = vec![vec![]; num_columns];
// create columns from record batches
for batch in &record_batches {
assert!(
batch.num_columns() == num_columns,
"Each record batch should have the same length as the first batch"
);
batch.columns().iter().enumerate().for_each(|(i, array)| {
arrays[i].push(array.to_owned());
});
}
let columns = arrays
.iter()
.enumerate()
.map(|(i, array)| Column::from_arrays(array.to_owned(), schema.field(i).clone()))
.collect();
Table { schema, columns }
}
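// Hedged usage sketch (assumes `batch1` and `batch2` share `schema`): each
// record batch becomes one chunk in every resulting column:
//
// let table = Table::from_record_batches(schema.clone(), vec![batch1, batch2]);
// assert_eq!(table.column(0).data().num_chunks(), 2);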
fn concatenate_tables() {}
fn to_record_batches() {}
}
unsafe impl Send for Table {}
unsafe impl Sync for Table {}
#[cfg(test)]
mod tests {}
| {
// TODO validate that schema and columns match
Self { schema, columns }
} | identifier_body |
client.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Arc;
use log::debug;
use log::info;
use crate::backoff::DEFAULT_REGION_BACKOFF;
use crate::config::Config;
use crate::pd::PdClient;
use crate::pd::PdRpcClient;
use crate::proto::pdpb::Timestamp;
use crate::request::plan::CleanupLocksResult;
use crate::request::Plan;
use crate::timestamp::TimestampExt;
use crate::transaction::lock::ResolveLocksOptions;
use crate::transaction::lowering::new_scan_lock_request;
use crate::transaction::ResolveLocksContext;
use crate::transaction::Snapshot;
use crate::transaction::Transaction;
use crate::transaction::TransactionOptions;
use crate::Backoff;
use crate::BoundRange;
use crate::Result;
// FIXME: cargo-culted value
const SCAN_LOCK_BATCH_SIZE: u32 = 1024;
/// The TiKV transactional `Client` is used to interact with TiKV using transactional requests.
///
/// Transactions support optimistic and pessimistic modes. For more details see the SIG-transaction
/// [docs](https://github.com/tikv/sig-transaction/tree/master/doc/tikv#optimistic-and-pessimistic-transactions).
///
/// Begin a [`Transaction`] by calling [`begin_optimistic`](Client::begin_optimistic) or
/// [`begin_pessimistic`](Client::begin_pessimistic). A transaction must be rolled back or committed.
///
/// Besides transactions, the client provides some further functionality:
/// - `gc`: trigger a GC process which clears stale data in the cluster.
/// - `current_timestamp`: get the current `Timestamp` from PD.
/// - `snapshot`: get a [`Snapshot`] of the database at a specified timestamp.
/// A `Snapshot` is a read-only transaction.
///
/// The returned results of transactional requests are [`Future`](std::future::Future)s that must be
/// awaited to execute.
pub struct Client {
pd: Arc<PdRpcClient>,
}
impl Clone for Client {
fn clone(&self) -> Self {
Self {
pd: self.pd.clone(),
}
}
}
impl Client {
/// Create a transactional [`Client`] and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
/// (include all endpoints, if possible), this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// # });
/// ```
pub async fn new<S: Into<String>>(pd_endpoints: Vec<S>) -> Result<Client> {
// debug!("creating transactional client");
Self::new_with_config(pd_endpoints, Config::default()).await
}
/// Create a transactional [`Client`] with a custom configuration, and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
/// (include all endpoints, if possible), this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # use std::time::Duration;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new_with_config(
/// vec!["192.168.0.100"],
/// Config::default().with_timeout(Duration::from_secs(60)),
/// )
/// .await
/// .unwrap();
/// # });
/// ```
pub async fn new_with_config<S: Into<String>>(
pd_endpoints: Vec<S>,
config: Config,
) -> Result<Client> {
debug!("creating new transactional client");
let pd_endpoints: Vec<String> = pd_endpoints.into_iter().map(Into::into).collect();
let pd = Arc::new(PdRpcClient::connect(&pd_endpoints, config, true).await?);
Ok(Client { pd })
}
/// Creates a new optimistic [`Transaction`].
///
/// Use the transaction to issue requests like [`get`](Transaction::get) or
/// [`put`](Transaction::put).
///
/// Write operations do not lock data in TiKV, thus the commit request may fail due to a write
/// conflict.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_optimistic().await.unwrap();
/// //... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_optimistic(&self) -> Result<Transaction> {
debug!("creating new optimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_optimistic()))
}
/// Creates a new pessimistic [`Transaction`].
///
/// Write operations will lock the data until committed, thus commit requests should not suffer
/// from write conflicts.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_pessimistic().await.unwrap();
/// //... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_pessimistic(&self) -> Result<Transaction> {
debug!("creating new pessimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_pessimistic()))
}
/// Create a new customized [`Transaction`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient, TransactionOptions};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client
/// .begin_with_options(TransactionOptions::default().use_async_commit())
/// .await
/// .unwrap();
/// //... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_with_options(&self, options: TransactionOptions) -> Result<Transaction> {
debug!("creating new customized transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, options))
}
/// Create a new [`Snapshot`](Snapshot) at the given [`Timestamp`](Timestamp).
pub fn snapshot(&self, timestamp: Timestamp, options: TransactionOptions) -> Snapshot {
debug!("creating new snapshot"); | /// Retrieve the current [`Timestamp`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let timestamp = client.current_timestamp().await.unwrap();
/// # });
/// ```
pub async fn current_timestamp(&self) -> Result<Timestamp> {
self.pd.clone().get_timestamp().await
}
/// Request garbage collection (GC) of the TiKV cluster.
///
    /// GC deletes MVCC records whose timestamp is lower than the given `safepoint`. We must guarantee
    /// that all transactions started before this timestamp have committed. We can keep a list of
    /// active transactions in the application to determine the minimal start timestamp among them.
///
/// For each key, the last mutation record (unless it's a deletion) before `safepoint` is retained.
///
/// GC is performed by:
/// 1. resolving all locks with timestamp <= `safepoint`
/// 2. updating PD's known safepoint
///
/// This is a simplified version of [GC in TiDB](https://docs.pingcap.com/tidb/stable/garbage-collection-overview).
/// We skip the second step "delete ranges" which is an optimization for TiDB.
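    ///
    /// # Examples
    ///
    /// A minimal sketch; the endpoint is a placeholder, and in practice the safepoint
    /// must be no newer than the start timestamp of any live transaction:
    ///
    /// ```rust,no_run
    /// # use tikv_client::TransactionClient;
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let safepoint = client.current_timestamp().await.unwrap();
    /// let accepted = client.gc(safepoint).await.unwrap();
    /// # });
    /// ```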
pub async fn gc(&self, safepoint: Timestamp) -> Result<bool> {
debug!("invoking transactional gc request");
let options = ResolveLocksOptions {
batch_size: SCAN_LOCK_BATCH_SIZE,
..Default::default()
};
self.cleanup_locks(.., &safepoint, options).await?;
// update safepoint to PD
let res: bool = self
.pd
.clone()
.update_safepoint(safepoint.version())
.await?;
        if !res {
            info!("new safepoint != user-specified safepoint");
        }
Ok(res)
}
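    /// Scan the given `range` and resolve all locks whose timestamp is no greater
    /// than `safepoint`.
    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes `ResolveLocksOptions` is re-exported at the
    /// crate root (adjust the import path if it is not):
    ///
    /// ```rust,no_run
    /// # use tikv_client::{ResolveLocksOptions, TransactionClient};
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let safepoint = client.current_timestamp().await.unwrap();
    /// let result = client
    ///     .cleanup_locks(.., &safepoint, ResolveLocksOptions::default())
    ///     .await
    ///     .unwrap();
    /// # });
    /// ```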
pub async fn cleanup_locks(
&self,
range: impl Into<BoundRange>,
safepoint: &Timestamp,
options: ResolveLocksOptions,
) -> Result<CleanupLocksResult> {
debug!("invoking cleanup async commit locks");
// scan all locks with ts <= safepoint
let ctx = ResolveLocksContext::default();
let backoff = Backoff::equal_jitter_backoff(100, 10000, 50);
let req = new_scan_lock_request(range.into(), safepoint, options.batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.cleanup_locks(ctx.clone(), options, backoff)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.extract_error()
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
// For test.
// Note: `batch_size` must be >= expected number of locks.
#[cfg(feature = "integration-tests")]
pub async fn scan_locks(
&self,
safepoint: &Timestamp,
range: impl Into<BoundRange>,
batch_size: u32,
) -> Result<Vec<crate::proto::kvrpcpb::LockInfo>> {
let req = new_scan_lock_request(range.into(), safepoint, batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
fn new_transaction(&self, timestamp: Timestamp, options: TransactionOptions) -> Transaction {
Transaction::new(timestamp, self.pd.clone(), options)
}
} | Snapshot::new(self.new_transaction(timestamp, options.read_only()))
}
| random_line_split |
client.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Arc;
use log::debug;
use log::info;
use crate::backoff::DEFAULT_REGION_BACKOFF;
use crate::config::Config;
use crate::pd::PdClient;
use crate::pd::PdRpcClient;
use crate::proto::pdpb::Timestamp;
use crate::request::plan::CleanupLocksResult;
use crate::request::Plan;
use crate::timestamp::TimestampExt;
use crate::transaction::lock::ResolveLocksOptions;
use crate::transaction::lowering::new_scan_lock_request;
use crate::transaction::ResolveLocksContext;
use crate::transaction::Snapshot;
use crate::transaction::Transaction;
use crate::transaction::TransactionOptions;
use crate::Backoff;
use crate::BoundRange;
use crate::Result;
// FIXME: cargo-culted value
const SCAN_LOCK_BATCH_SIZE: u32 = 1024;
/// The TiKV transactional `Client` is used to interact with TiKV using transactional requests.
///
/// Transactions support optimistic and pessimistic modes. For more details see the SIG-transaction
/// [docs](https://github.com/tikv/sig-transaction/tree/master/doc/tikv#optimistic-and-pessimistic-transactions).
///
/// Begin a [`Transaction`] by calling [`begin_optimistic`](Client::begin_optimistic) or
/// [`begin_pessimistic`](Client::begin_pessimistic). A transaction must be rolled back or committed.
///
/// Besides transactions, the client provides some further functionality:
/// - `gc`: trigger a GC process which clears stale data in the cluster.
/// - `current_timestamp`: get the current `Timestamp` from PD.
/// - `snapshot`: get a [`Snapshot`] of the database at a specified timestamp.
/// A `Snapshot` is a read-only transaction.
///
/// The returned results of transactional requests are [`Future`](std::future::Future)s that must be
/// awaited to execute.
pub struct Client {
pd: Arc<PdRpcClient>,
}
impl Clone for Client {
fn clone(&self) -> Self {
Self {
pd: self.pd.clone(),
}
}
}
impl Client {
/// Create a transactional [`Client`] and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
    /// (include all endpoints, if possible); this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// # });
/// ```
pub async fn new<S: Into<String>>(pd_endpoints: Vec<S>) -> Result<Client> {
// debug!("creating transactional client");
Self::new_with_config(pd_endpoints, Config::default()).await
}
/// Create a transactional [`Client`] with a custom configuration, and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
    /// (include all endpoints, if possible); this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # use std::time::Duration;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new_with_config(
/// vec!["192.168.0.100"],
/// Config::default().with_timeout(Duration::from_secs(60)),
/// )
    /// .await
    /// .unwrap();
/// # });
/// ```
pub async fn new_with_config<S: Into<String>>(
pd_endpoints: Vec<S>,
config: Config,
) -> Result<Client> {
debug!("creating new transactional client");
let pd_endpoints: Vec<String> = pd_endpoints.into_iter().map(Into::into).collect();
let pd = Arc::new(PdRpcClient::connect(&pd_endpoints, config, true).await?);
Ok(Client { pd })
}
/// Creates a new optimistic [`Transaction`].
///
/// Use the transaction to issue requests like [`get`](Transaction::get) or
/// [`put`](Transaction::put).
///
/// Write operations do not lock data in TiKV, thus the commit request may fail due to a write
/// conflict.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_optimistic().await.unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_optimistic(&self) -> Result<Transaction> {
debug!("creating new optimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_optimistic()))
}
/// Creates a new pessimistic [`Transaction`].
///
/// Write operations will lock the data until committed, thus commit requests should not suffer
/// from write conflicts.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_pessimistic().await.unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_pessimistic(&self) -> Result<Transaction> {
debug!("creating new pessimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_pessimistic()))
}
/// Create a new customized [`Transaction`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient, TransactionOptions};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client
/// .begin_with_options(TransactionOptions::default().use_async_commit())
/// .await
/// .unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_with_options(&self, options: TransactionOptions) -> Result<Transaction> {
debug!("creating new customized transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, options))
}
/// Create a new [`Snapshot`](Snapshot) at the given [`Timestamp`](Timestamp).
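    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes `Snapshot` exposes a `get` method mirroring
    /// [`Transaction::get`]:
    ///
    /// ```rust,no_run
    /// # use tikv_client::{TransactionClient, TransactionOptions};
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let timestamp = client.current_timestamp().await.unwrap();
    /// let mut snapshot = client.snapshot(timestamp, TransactionOptions::default());
    /// let _value = snapshot.get("key".to_owned()).await.unwrap();
    /// # });
    /// ```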
pub fn snapshot(&self, timestamp: Timestamp, options: TransactionOptions) -> Snapshot {
debug!("creating new snapshot");
Snapshot::new(self.new_transaction(timestamp, options.read_only()))
}
/// Retrieve the current [`Timestamp`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let timestamp = client.current_timestamp().await.unwrap();
/// # });
/// ```
pub async fn current_timestamp(&self) -> Result<Timestamp> {
self.pd.clone().get_timestamp().await
}
/// Request garbage collection (GC) of the TiKV cluster.
///
    /// GC deletes MVCC records whose timestamp is lower than the given `safepoint`. We must guarantee
    /// that all transactions started before this timestamp have committed. We can keep a list of
    /// active transactions in the application to determine the minimal start timestamp among them.
///
/// For each key, the last mutation record (unless it's a deletion) before `safepoint` is retained.
///
/// GC is performed by:
/// 1. resolving all locks with timestamp <= `safepoint`
/// 2. updating PD's known safepoint
///
/// This is a simplified version of [GC in TiDB](https://docs.pingcap.com/tidb/stable/garbage-collection-overview).
/// We skip the second step "delete ranges" which is an optimization for TiDB.
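    ///
    /// # Examples
    ///
    /// A minimal sketch; the endpoint is a placeholder, and in practice the safepoint
    /// must be no newer than the start timestamp of any live transaction:
    ///
    /// ```rust,no_run
    /// # use tikv_client::TransactionClient;
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let safepoint = client.current_timestamp().await.unwrap();
    /// let accepted = client.gc(safepoint).await.unwrap();
    /// # });
    /// ```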
pub async fn gc(&self, safepoint: Timestamp) -> Result<bool> {
debug!("invoking transactional gc request");
let options = ResolveLocksOptions {
batch_size: SCAN_LOCK_BATCH_SIZE,
..Default::default()
};
self.cleanup_locks(.., &safepoint, options).await?;
// update safepoint to PD
let res: bool = self
.pd
.clone()
.update_safepoint(safepoint.version())
.await?;
        if !res |
Ok(res)
}
pub async fn cleanup_locks(
&self,
range: impl Into<BoundRange>,
safepoint: &Timestamp,
options: ResolveLocksOptions,
) -> Result<CleanupLocksResult> {
debug!("invoking cleanup async commit locks");
// scan all locks with ts <= safepoint
let ctx = ResolveLocksContext::default();
let backoff = Backoff::equal_jitter_backoff(100, 10000, 50);
let req = new_scan_lock_request(range.into(), safepoint, options.batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.cleanup_locks(ctx.clone(), options, backoff)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.extract_error()
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
// For test.
// Note: `batch_size` must be >= expected number of locks.
#[cfg(feature = "integration-tests")]
pub async fn scan_locks(
&self,
safepoint: &Timestamp,
range: impl Into<BoundRange>,
batch_size: u32,
) -> Result<Vec<crate::proto::kvrpcpb::LockInfo>> {
let req = new_scan_lock_request(range.into(), safepoint, batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
fn new_transaction(&self, timestamp: Timestamp, options: TransactionOptions) -> Transaction {
Transaction::new(timestamp, self.pd.clone(), options)
}
}
| {
info!("new safepoint != user-specified safepoint");
} | conditional_block |
client.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Arc;
use log::debug;
use log::info;
use crate::backoff::DEFAULT_REGION_BACKOFF;
use crate::config::Config;
use crate::pd::PdClient;
use crate::pd::PdRpcClient;
use crate::proto::pdpb::Timestamp;
use crate::request::plan::CleanupLocksResult;
use crate::request::Plan;
use crate::timestamp::TimestampExt;
use crate::transaction::lock::ResolveLocksOptions;
use crate::transaction::lowering::new_scan_lock_request;
use crate::transaction::ResolveLocksContext;
use crate::transaction::Snapshot;
use crate::transaction::Transaction;
use crate::transaction::TransactionOptions;
use crate::Backoff;
use crate::BoundRange;
use crate::Result;
// FIXME: cargo-culted value
const SCAN_LOCK_BATCH_SIZE: u32 = 1024;
/// The TiKV transactional `Client` is used to interact with TiKV using transactional requests.
///
/// Transactions support optimistic and pessimistic modes. For more details see the SIG-transaction
/// [docs](https://github.com/tikv/sig-transaction/tree/master/doc/tikv#optimistic-and-pessimistic-transactions).
///
/// Begin a [`Transaction`] by calling [`begin_optimistic`](Client::begin_optimistic) or
/// [`begin_pessimistic`](Client::begin_pessimistic). A transaction must be rolled back or committed.
///
/// Besides transactions, the client provides some further functionality:
/// - `gc`: trigger a GC process which clears stale data in the cluster.
/// - `current_timestamp`: get the current `Timestamp` from PD.
/// - `snapshot`: get a [`Snapshot`] of the database at a specified timestamp.
/// A `Snapshot` is a read-only transaction.
///
/// The returned results of transactional requests are [`Future`](std::future::Future)s that must be
/// awaited to execute.
pub struct Client {
pd: Arc<PdRpcClient>,
}
impl Clone for Client {
fn clone(&self) -> Self {
Self {
pd: self.pd.clone(),
}
}
}
impl Client {
/// Create a transactional [`Client`] and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
    /// (include all endpoints, if possible); this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// # });
/// ```
pub async fn new<S: Into<String>>(pd_endpoints: Vec<S>) -> Result<Client> {
// debug!("creating transactional client");
Self::new_with_config(pd_endpoints, Config::default()).await
}
/// Create a transactional [`Client`] with a custom configuration, and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
    /// (include all endpoints, if possible); this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # use std::time::Duration;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new_with_config(
/// vec!["192.168.0.100"],
/// Config::default().with_timeout(Duration::from_secs(60)),
/// )
    /// .await
    /// .unwrap();
/// # });
/// ```
pub async fn new_with_config<S: Into<String>>(
pd_endpoints: Vec<S>,
config: Config,
) -> Result<Client> {
debug!("creating new transactional client");
let pd_endpoints: Vec<String> = pd_endpoints.into_iter().map(Into::into).collect();
let pd = Arc::new(PdRpcClient::connect(&pd_endpoints, config, true).await?);
Ok(Client { pd })
}
/// Creates a new optimistic [`Transaction`].
///
/// Use the transaction to issue requests like [`get`](Transaction::get) or
/// [`put`](Transaction::put).
///
/// Write operations do not lock data in TiKV, thus the commit request may fail due to a write
/// conflict.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_optimistic().await.unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_optimistic(&self) -> Result<Transaction> {
debug!("creating new optimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_optimistic()))
}
/// Creates a new pessimistic [`Transaction`].
///
/// Write operations will lock the data until committed, thus commit requests should not suffer
/// from write conflicts.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_pessimistic().await.unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_pessimistic(&self) -> Result<Transaction> {
debug!("creating new pessimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_pessimistic()))
}
/// Create a new customized [`Transaction`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient, TransactionOptions};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client
/// .begin_with_options(TransactionOptions::default().use_async_commit())
/// .await
/// .unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_with_options(&self, options: TransactionOptions) -> Result<Transaction> {
debug!("creating new customized transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, options))
}
/// Create a new [`Snapshot`](Snapshot) at the given [`Timestamp`](Timestamp).
pub fn snapshot(&self, timestamp: Timestamp, options: TransactionOptions) -> Snapshot |
/// Retrieve the current [`Timestamp`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let timestamp = client.current_timestamp().await.unwrap();
/// # });
/// ```
pub async fn current_timestamp(&self) -> Result<Timestamp> {
self.pd.clone().get_timestamp().await
}
/// Request garbage collection (GC) of the TiKV cluster.
///
    /// GC deletes MVCC records whose timestamp is lower than the given `safepoint`. We must guarantee
    /// that all transactions started before this timestamp have committed. We can keep a list of
    /// active transactions in the application to determine the minimal start timestamp among them.
///
/// For each key, the last mutation record (unless it's a deletion) before `safepoint` is retained.
///
/// GC is performed by:
/// 1. resolving all locks with timestamp <= `safepoint`
/// 2. updating PD's known safepoint
///
/// This is a simplified version of [GC in TiDB](https://docs.pingcap.com/tidb/stable/garbage-collection-overview).
/// We skip the second step "delete ranges" which is an optimization for TiDB.
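    ///
    /// # Examples
    ///
    /// A minimal sketch; the endpoint is a placeholder, and in practice the safepoint
    /// must be no newer than the start timestamp of any live transaction:
    ///
    /// ```rust,no_run
    /// # use tikv_client::TransactionClient;
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let safepoint = client.current_timestamp().await.unwrap();
    /// let accepted = client.gc(safepoint).await.unwrap();
    /// # });
    /// ```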
pub async fn gc(&self, safepoint: Timestamp) -> Result<bool> {
debug!("invoking transactional gc request");
let options = ResolveLocksOptions {
batch_size: SCAN_LOCK_BATCH_SIZE,
..Default::default()
};
self.cleanup_locks(.., &safepoint, options).await?;
// update safepoint to PD
let res: bool = self
.pd
.clone()
.update_safepoint(safepoint.version())
.await?;
        if !res {
            info!("new safepoint != user-specified safepoint");
        }
Ok(res)
}
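    /// Scan the given `range` and resolve all locks whose timestamp is no greater
    /// than `safepoint`.
    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes `ResolveLocksOptions` is re-exported at the
    /// crate root (adjust the import path if it is not):
    ///
    /// ```rust,no_run
    /// # use tikv_client::{ResolveLocksOptions, TransactionClient};
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let safepoint = client.current_timestamp().await.unwrap();
    /// let result = client
    ///     .cleanup_locks(.., &safepoint, ResolveLocksOptions::default())
    ///     .await
    ///     .unwrap();
    /// # });
    /// ```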
pub async fn cleanup_locks(
&self,
range: impl Into<BoundRange>,
safepoint: &Timestamp,
options: ResolveLocksOptions,
) -> Result<CleanupLocksResult> {
debug!("invoking cleanup async commit locks");
// scan all locks with ts <= safepoint
let ctx = ResolveLocksContext::default();
let backoff = Backoff::equal_jitter_backoff(100, 10000, 50);
let req = new_scan_lock_request(range.into(), safepoint, options.batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.cleanup_locks(ctx.clone(), options, backoff)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.extract_error()
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
// For test.
// Note: `batch_size` must be >= expected number of locks.
#[cfg(feature = "integration-tests")]
pub async fn scan_locks(
&self,
safepoint: &Timestamp,
range: impl Into<BoundRange>,
batch_size: u32,
) -> Result<Vec<crate::proto::kvrpcpb::LockInfo>> {
let req = new_scan_lock_request(range.into(), safepoint, batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
fn new_transaction(&self, timestamp: Timestamp, options: TransactionOptions) -> Transaction {
Transaction::new(timestamp, self.pd.clone(), options)
}
}
| {
debug!("creating new snapshot");
Snapshot::new(self.new_transaction(timestamp, options.read_only()))
} | identifier_body |
client.rs | // Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use std::sync::Arc;
use log::debug;
use log::info;
use crate::backoff::DEFAULT_REGION_BACKOFF;
use crate::config::Config;
use crate::pd::PdClient;
use crate::pd::PdRpcClient;
use crate::proto::pdpb::Timestamp;
use crate::request::plan::CleanupLocksResult;
use crate::request::Plan;
use crate::timestamp::TimestampExt;
use crate::transaction::lock::ResolveLocksOptions;
use crate::transaction::lowering::new_scan_lock_request;
use crate::transaction::ResolveLocksContext;
use crate::transaction::Snapshot;
use crate::transaction::Transaction;
use crate::transaction::TransactionOptions;
use crate::Backoff;
use crate::BoundRange;
use crate::Result;
// FIXME: cargo-culted value
const SCAN_LOCK_BATCH_SIZE: u32 = 1024;
/// The TiKV transactional `Client` is used to interact with TiKV using transactional requests.
///
/// Transactions support optimistic and pessimistic modes. For more details see the SIG-transaction
/// [docs](https://github.com/tikv/sig-transaction/tree/master/doc/tikv#optimistic-and-pessimistic-transactions).
///
/// Begin a [`Transaction`] by calling [`begin_optimistic`](Client::begin_optimistic) or
/// [`begin_pessimistic`](Client::begin_pessimistic). A transaction must be rolled back or committed.
///
/// Besides transactions, the client provides some further functionality:
/// - `gc`: trigger a GC process which clears stale data in the cluster.
/// - `current_timestamp`: get the current `Timestamp` from PD.
/// - `snapshot`: get a [`Snapshot`] of the database at a specified timestamp.
/// A `Snapshot` is a read-only transaction.
///
/// The returned results of transactional requests are [`Future`](std::future::Future)s that must be
/// awaited to execute.
pub struct Client {
pd: Arc<PdRpcClient>,
}
impl Clone for Client {
fn clone(&self) -> Self {
Self {
pd: self.pd.clone(),
}
}
}
impl Client {
/// Create a transactional [`Client`] and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
    /// (include all endpoints, if possible); this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// # });
/// ```
pub async fn | <S: Into<String>>(pd_endpoints: Vec<S>) -> Result<Client> {
// debug!("creating transactional client");
Self::new_with_config(pd_endpoints, Config::default()).await
}
/// Create a transactional [`Client`] with a custom configuration, and connect to the TiKV cluster.
///
/// Because TiKV is managed by a [PD](https://github.com/pingcap/pd/) cluster, the endpoints for
/// PD must be provided, not the TiKV nodes. It's important to include more than one PD endpoint
    /// (include all endpoints, if possible); this helps avoid having a single point of failure.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # use std::time::Duration;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new_with_config(
/// vec!["192.168.0.100"],
/// Config::default().with_timeout(Duration::from_secs(60)),
/// )
    /// .await
    /// .unwrap();
/// # });
/// ```
pub async fn new_with_config<S: Into<String>>(
pd_endpoints: Vec<S>,
config: Config,
) -> Result<Client> {
debug!("creating new transactional client");
let pd_endpoints: Vec<String> = pd_endpoints.into_iter().map(Into::into).collect();
let pd = Arc::new(PdRpcClient::connect(&pd_endpoints, config, true).await?);
Ok(Client { pd })
}
/// Creates a new optimistic [`Transaction`].
///
/// Use the transaction to issue requests like [`get`](Transaction::get) or
/// [`put`](Transaction::put).
///
/// Write operations do not lock data in TiKV, thus the commit request may fail due to a write
/// conflict.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_optimistic().await.unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_optimistic(&self) -> Result<Transaction> {
debug!("creating new optimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_optimistic()))
}
/// Creates a new pessimistic [`Transaction`].
///
/// Write operations will lock the data until committed, thus commit requests should not suffer
/// from write conflicts.
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client.begin_pessimistic().await.unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_pessimistic(&self) -> Result<Transaction> {
debug!("creating new pessimistic transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, TransactionOptions::new_pessimistic()))
}
/// Create a new customized [`Transaction`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient, TransactionOptions};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let mut transaction = client
/// .begin_with_options(TransactionOptions::default().use_async_commit())
/// .await
/// .unwrap();
    /// // ... Issue some commands.
/// transaction.commit().await.unwrap();
/// # });
/// ```
pub async fn begin_with_options(&self, options: TransactionOptions) -> Result<Transaction> {
debug!("creating new customized transaction");
let timestamp = self.current_timestamp().await?;
Ok(self.new_transaction(timestamp, options))
}
/// Create a new [`Snapshot`](Snapshot) at the given [`Timestamp`](Timestamp).
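    ///
    /// # Examples
    ///
    /// A minimal sketch; it assumes `Snapshot` exposes a `get` method mirroring
    /// [`Transaction::get`]:
    ///
    /// ```rust,no_run
    /// # use tikv_client::{TransactionClient, TransactionOptions};
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let timestamp = client.current_timestamp().await.unwrap();
    /// let mut snapshot = client.snapshot(timestamp, TransactionOptions::default());
    /// let _value = snapshot.get("key".to_owned()).await.unwrap();
    /// # });
    /// ```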
pub fn snapshot(&self, timestamp: Timestamp, options: TransactionOptions) -> Snapshot {
debug!("creating new snapshot");
Snapshot::new(self.new_transaction(timestamp, options.read_only()))
}
/// Retrieve the current [`Timestamp`].
///
/// # Examples
///
/// ```rust,no_run
/// # use tikv_client::{Config, TransactionClient};
/// # use futures::prelude::*;
/// # futures::executor::block_on(async {
/// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
/// let timestamp = client.current_timestamp().await.unwrap();
/// # });
/// ```
pub async fn current_timestamp(&self) -> Result<Timestamp> {
self.pd.clone().get_timestamp().await
}
/// Request garbage collection (GC) of the TiKV cluster.
///
    /// GC deletes MVCC records whose timestamp is lower than the given `safepoint`. We must guarantee
    /// that all transactions started before this timestamp have committed. We can keep a list of
    /// active transactions in the application to determine the minimal start timestamp among them.
///
/// For each key, the last mutation record (unless it's a deletion) before `safepoint` is retained.
///
/// GC is performed by:
/// 1. resolving all locks with timestamp <= `safepoint`
/// 2. updating PD's known safepoint
///
/// This is a simplified version of [GC in TiDB](https://docs.pingcap.com/tidb/stable/garbage-collection-overview).
/// We skip the second step "delete ranges" which is an optimization for TiDB.
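    ///
    /// # Examples
    ///
    /// A minimal sketch; the endpoint is a placeholder, and in practice the safepoint
    /// must be no newer than the start timestamp of any live transaction:
    ///
    /// ```rust,no_run
    /// # use tikv_client::TransactionClient;
    /// # use futures::prelude::*;
    /// # futures::executor::block_on(async {
    /// let client = TransactionClient::new(vec!["192.168.0.100"]).await.unwrap();
    /// let safepoint = client.current_timestamp().await.unwrap();
    /// let accepted = client.gc(safepoint).await.unwrap();
    /// # });
    /// ```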
pub async fn gc(&self, safepoint: Timestamp) -> Result<bool> {
debug!("invoking transactional gc request");
let options = ResolveLocksOptions {
batch_size: SCAN_LOCK_BATCH_SIZE,
..Default::default()
};
self.cleanup_locks(.., &safepoint, options).await?;
// update safepoint to PD
let res: bool = self
.pd
.clone()
.update_safepoint(safepoint.version())
.await?;
        if !res {
            info!("new safepoint != user-specified safepoint");
        }
Ok(res)
}
pub async fn cleanup_locks(
&self,
range: impl Into<BoundRange>,
safepoint: &Timestamp,
options: ResolveLocksOptions,
) -> Result<CleanupLocksResult> {
debug!("invoking cleanup async commit locks");
// scan all locks with ts <= safepoint
let ctx = ResolveLocksContext::default();
let backoff = Backoff::equal_jitter_backoff(100, 10000, 50);
let req = new_scan_lock_request(range.into(), safepoint, options.batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.cleanup_locks(ctx.clone(), options, backoff)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.extract_error()
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
// For test.
// Note: `batch_size` must be >= expected number of locks.
#[cfg(feature = "integration-tests")]
pub async fn scan_locks(
&self,
safepoint: &Timestamp,
range: impl Into<BoundRange>,
batch_size: u32,
) -> Result<Vec<crate::proto::kvrpcpb::LockInfo>> {
let req = new_scan_lock_request(range.into(), safepoint, batch_size);
let plan = crate::request::PlanBuilder::new(self.pd.clone(), req)
.retry_multi_region(DEFAULT_REGION_BACKOFF)
.merge(crate::request::Collect)
.plan();
plan.execute().await
}
fn new_transaction(&self, timestamp: Timestamp, options: TransactionOptions) -> Transaction {
Transaction::new(timestamp, self.pd.clone(), options)
}
}
| new | identifier_name |
main.rs | 0);
let c = v / 100.0 * s / 100.0;
let x = c * (1.0 - (((h / 60.0) % 2.0) - 1.0).abs());
let m = (v / 100.0) - c;
let (mut r, mut g, mut b) = match h {
h if h < 60.0 => (c, x, 0.0),
h if h < 120.0 => (x, c, 0.0),
h if h < 180.0 => (0.0, c, x),
h if h < 240.0 => (0.0, x, c),
h if h < 300.0 => (x, 0.0, c),
_ => (c, 0.0, x),
};
r += m;
g += m;
b += m;
return [(r * 255.0) as u8, (g * 255.0) as u8, (b * 255.0) as u8, 255];
}
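// Quick sanity checks for the conversion above (illustrative spot checks, not part
// of the original example): pure red is hue 0.0 and pure green is hue 120.0, both
// at full saturation and value.
//     assert_eq!(hsv_to_rgba(0.0, 100.0, 100.0), [255, 0, 0, 255]);
//     assert_eq!(hsv_to_rgba(120.0, 100.0, 100.0), [0, 255, 0, 255]);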
enum MessageInternal {
CreateView(fviews::ViewCreationToken, fviews::ViewRefControl, fviews::ViewRef),
OnPresentError {
error: fland::FlatlandError,
},
OnNextFrameBegin {
additional_present_credits: u32,
future_presentation_infos: Vec<flatland::PresentationInfo>,
},
#[allow(dead_code)]
OnFramePresented {
frame_presented_info: fidl_fuchsia_scenic_scheduling::FramePresentedInfo,
},
Relayout {
width: u32,
height: u32,
},
}
struct AppModel<'a> {
flatland: &'a fland::FlatlandProxy,
allocator: fland::AllocatorProxy,
internal_sender: UnboundedSender<MessageInternal>,
allocation: Option<fsysmem::BufferCollectionInfo2>,
sched_lib: &'a dyn SchedulingLib,
hue: f32,
page_size: usize,
last_expected_presentation_time: zx::Time,
}
impl<'a> AppModel<'a> {
fn new(
flatland: &'a fland::FlatlandProxy,
allocator: fland::AllocatorProxy,
internal_sender: UnboundedSender<MessageInternal>,
sched_lib: &'a dyn SchedulingLib,
) -> AppModel<'a> {
AppModel {
flatland,
allocator,
internal_sender,
sched_lib,
allocation: None,
hue: 0.0,
page_size: zx::system_get_page_size().try_into().unwrap(),
last_expected_presentation_time: zx::Time::from_nanos(0),
}
}
async fn init_scene(&mut self) {
// BufferAllocator is a helper which makes it easier to obtain and set constraints on a
// sysmem::BufferCollectionToken. This token can then be registered with Scenic, which will
// set its own constraints; see below.
let mut buffer_allocator = BufferCollectionAllocator::new(
IMAGE_WIDTH,
IMAGE_HEIGHT,
fidl_fuchsia_sysmem::PixelFormatType::Bgra32,
FrameUsage::Cpu,
1,
)
.expect("failed to create BufferCollectionAllocator");
buffer_allocator.set_name(100, "Flatland ViewProvider Example").expect("fidl error");
let sysmem_buffer_collection_token =
buffer_allocator.duplicate_token().await.expect("error duplicating token");
// Register the sysmem BufferCollectionToken with the Scenic Allocator API. This is done by
// creating an import/export token pair, which is fundamentally a pair of zx::event. The
// export token is used as a key to register the sysmem BufferCollectionToken. The
// corresponding import token can be used to access the allocated buffers via other Scenic
// APIs, such as the "Gfx" and "Flatland" APIs, the latter being used in this example. See
// the following invocation of "flatland.create_image()".
let mut buffer_tokens = BufferCollectionTokenPair::new();
let args = fland::RegisterBufferCollectionArgs {
export_token: Some(buffer_tokens.export_token),
buffer_collection_token: Some(sysmem_buffer_collection_token),
..fland::RegisterBufferCollectionArgs::EMPTY
};
self.allocator
.register_buffer_collection(args)
.await
.expect("fidl error")
.expect("error registering buffer collection");
// Now that the BufferCollectionToken has been registered, Scenic is able to set constraints
// on it so that the eventually-allocated buffer can be used by e.g. both Vulkan and the
// hardware display controller. Allocate the buffer and wait for the allocation to finish,
// which cannot happen until Scenic has set all necessary constraints of its own.
self.allocation =
Some(buffer_allocator.allocate_buffers(true).await.expect("buffer allocation failed"));
self.set_image_colors();
// Create an image in the Flatland session, using the sysmem buffer we just allocated.
// As mentioned above, this uses the import token corresponding to the export token that was
// used to register the BufferCollectionToken with the Scenic Allocator.
let image_props = fland::ImageProperties {
size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }),
..fland::ImageProperties::EMPTY
};
// TODO(fxbug.dev/76640): generated FIDL methods currently expect "&mut" args. This will
// change; according to fxbug.dev/65845 the generated FIDL will use "&" instead (at least
// for POD structs like these). When this lands we can remove the ".clone()" from the call
// sites below.
self.flatland
.create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props)
.expect("fidl error");
// Populate the rest of the Flatland scene. There is a single transform which is set as the
// root transform; the newly-created image is set as the content of that transform.
self.flatland.create_transform(&mut TRANSFORM_ID.clone()).expect("fidl error");
self.flatland.set_root_transform(&mut TRANSFORM_ID.clone()).expect("fidl error");
self.flatland
.set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone())
.expect("fidl error");
}
fn create_parent_viewport_watcher(
&mut self,
mut view_creation_token: fviews::ViewCreationToken,
) {
let (parent_viewport_watcher, server_end) =
create_proxy::<fland::ParentViewportWatcherMarker>()
.expect("failed to create ParentViewportWatcherProxy");
// NOTE: it isn't necessary to call maybe_present() for this to take effect, because we will
        // relayout when we receive the initial layout info. See CreateView() FIDL docs.
self.flatland.create_view(&mut view_creation_token, server_end).expect("fidl error");
// NOTE: there may be a race condition if TemporaryFlatlandViewProvider.CreateView() is
// invoked a second time, causing us to create another graph link. Because Zircon doesn't
// guarantee ordering on responses of different channels, we might receive data from the old | // should be more careful (this assumes that the client expects CreateView() to be called
// multiple times, which clients commonly don't).
let sender = self.internal_sender.clone();
fasync::Task::spawn(async move {
let mut layout_info_stream =
HangingGetStream::new(Box::new(move || Some(parent_viewport_watcher.get_layout())));
while let Some(result) = layout_info_stream.next().await {
match result {
Ok(layout_info) => {
let mut width = 0;
let mut height = 0;
if let Some(logical_size) = layout_info.logical_size {
width = logical_size.width;
height = logical_size.height;
}
sender
.unbounded_send(MessageInternal::Relayout { width, height })
.expect("failed to send MessageInternal.");
}
                    Err(fidl::Error::ClientChannelClosed { .. }) => {
info!("graph link connection closed.");
return; // from spawned task closure
}
Err(fidl_error) => {
warn!("graph link GetLayout() error: {:?}", fidl_error);
return; // from spawned task closure
}
}
}
})
.detach();
}
fn draw(&mut self, expected_presentation_time: zx::Time) {
trace::duration!("gfx", "FlatlandViewProvider::draw");
let time_since_last_draw_in_seconds = ((expected_presentation_time.into_nanos()
- self.last_expected_presentation_time.into_nanos())
as f32)
/ 1_000_000_000.0;
self.last_expected_presentation_time = expected_presentation_time;
        let hue_change_time_per_second = 30.0;
self.hue =
(self.hue + hue_change_time_per_second * time_since_last_draw_in_seconds) % 360.0;
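        // For instance, at a 60 Hz presentation rate the frame delta is roughly
        // 16.7 ms, so the hue advances by about 30 * 0.0167 ≈ 0.5 degrees per
        // frame, completing a full 360-degree cycle every 12 seconds.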
self.set_image_colors();
self.sched_lib.request_present();
}
fn on_relayout(&mut self, width: u32, height: u32) {
self.flatland
.set_image_destination_size(&mut IMAGE_ID.clone(), &mut fmath::SizeU { width, height })
.expect("fidl error");
self.sched_lib.request_present();
}
fn set_image_colors(&mut self) {
let allocation = self.allocation.as_ref().unwrap();
// Write pixel values into the allocated buffer.
match &allocation.buffers[0].vmo {
Some(vmo) => {
assert!(IMAGE_WIDTH == 2);
assert!(IMAGE_HEIGHT == 2);
// Compute the same row-pitch as Flatland will compute internally.
assert!(allocation.settings.has_image_format_constraints);
let row_pitch: usize =
minimum_row_bytes(allocation.settings.image_format_constraints, IMAGE_WIDTH)
.expect("failed to compute row-pitch")
.try_into()
.unwrap();
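                // For this 2-pixel-wide BGRA image the minimum row size would be
                // 2 * 4 = 8 bytes; the negotiated sysmem constraints may round
                // the actual pitch up, so the computed value is used as-is.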
// TODO(fxbug.dev/76640): should look at pixel-format, instead of assuming 32-bit
// BGRA pixels. For now, format is hard-coded anyway.
let p00: [u8; 4] = hsv_to_rgba(self.hue, 30.0, 75.0);
let p10: [u8; 4] = hsv_to_rgba(self.hue + 20.0, 30.0, 75.0);
let p11: [u8; 4] = hsv_to_rgba(self.hue + 60.0, 30.0, 75.0);
let p01: [u8; 4] = hsv_to_rgba(self.hue + 40.0, 30.0, 75.0);
// The size used to map a VMO must be a multiple of the page size. Ensure that the
// VMO is at least one page in size, and that the size returned by sysmem is no
// larger than this. Neither of these should ever fail.
{
let vmo_size: usize =
vmo.get_size().expect("failed to obtain VMO size").try_into().unwrap();
let sysmem_size: usize =
allocation.settings.buffer_settings.size_bytes.try_into().unwrap();
assert!(self.page_size <= vmo_size);
assert!(self.page_size >= sysmem_size);
}
// create_from_vmo() uses an offset of 0 when mapping the VMO; verify that this
// matches the sysmem allocation.
let offset: usize = allocation.buffers[0].vmo_usable_start.try_into().unwrap();
assert_eq!(offset, 0);
let mapping = mapped_vmo::Mapping::create_from_vmo(
&vmo,
self.page_size,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.expect("failed to map VMO");
mapping.write_at(0, &p00);
mapping.write_at(4, &p10);
mapping.write_at(row_pitch, &p01);
mapping.write_at(row_pitch + 4, &p11);
}
None => unreachable!(),
}
}
}
fn setup_fidl_services(sender: UnboundedSender<MessageInternal>) {
let view_provider_cb = move |stream: fapp::ViewProviderRequestStream| {
let sender = sender.clone();
fasync::Task::local(
stream
.try_for_each(move |req| {
match req {
                    fapp::ViewProviderRequest::CreateView2 { args, .. } => {
let view_creation_token = args.view_creation_token.unwrap();
// We do not get passed a view ref so create our own.
let ViewRefPair { control_ref, view_ref } =
ViewRefPair::new().expect("unable to create view ref pair");
sender
.unbounded_send(MessageInternal::CreateView(
view_creation_token,
control_ref,
view_ref,
))
.expect("failed to send MessageInternal.");
}
unhandled_req => {
warn!("Unhandled ViewProvider request: {:?}", unhandled_req);
}
};
future::ok(())
})
.unwrap_or_else(|e| {
eprintln!("error running TemporaryFlatlandViewProvider server: {:?}", e)
}),
)
.detach()
};
let mut fs = component::server::ServiceFs::new();
fs.dir("svc").add_fidl_service(view_provider_cb);
fs.take_and_serve_directory_handle().expect("failed to serve directory handle");
fasync::Task::local(fs.collect()).detach();
}
fn setup_handle_flatland_events(
event_stream: fland::FlatlandEventStream,
sender: UnboundedSender<MessageInternal>,
) {
fasync::Task::local(
event_stream
.try_for_each(move |event| {
match event {
fland::FlatlandEvent::OnNextFrameBegin { values } => {
if let (Some(additional_present_credits), Some(future_presentation_infos)) =
(values.additional_present_credits, values.future_presentation_infos)
{
sender
.unbounded_send(MessageInternal::OnNextFrameBegin {
additional_present_credits,
future_presentation_infos,
})
.expect("failed to send MessageInternal");
} else {
// If not an error, all table fields are guaranteed to be present.
unreachable!()
}
}
fland::FlatlandEvent::OnFramePresented { frame_presented_info } => {
sender
.unbounded_send(MessageInternal::OnFramePresented {
frame_presented_info,
})
.expect("failed to send MessageInternal");
}
fland::FlatlandEvent::OnError { error } => {
sender
.unbounded_send(MessageInternal::OnPresentError { error })
.expect("failed to send MessageInternal.");
}
};
future::ok(())
})
.unwrap_or_else(|e| eprintln!("error listening for Flatland Events: {:?}", e)),
)
.detach();
}
#[fasync::run_singlethreaded]
async fn main() {
fuchsia_trace_provider::trace_provider_create_with_fdio();
fuchsia_syslog::init_with_tags(&["flatland-display"]).expect("failed to initialize logger");
let (internal_sender, mut internal_receiver) = unbounded::<MessageInternal>();
let flatland =
connect_to_protocol::<fland::FlatlandMarker>().expect("error connecting to Flatland");
let sched_lib = ThroughputScheduler::new();
let allocator = connect_to_protocol::<fland::AllocatorMarker>()
.expect("error connecting to Scenic allocator");
info!("Established connections to Flatland and Allocator");
setup_fidl_services(internal_sender.clone());
setup_handle_flatland_events(flatland.take_event_stream(), internal_sender.clone());
let mut app = AppModel::new(&flatland, allocator, internal_sender.clone(), &sched_lib);
app.init_scene().await;
let mut present_count = 0;
loop {
futures::select! {
message = internal_receiver.next().fuse() => {
if let Some(message) = message {
match message {
MessageInternal::CreateView(view_creation_token, _view_ref_control, _view_ref) => {
// TODO(fxbug.dev/78866): handling ViewRefs is necessary for focus management.
// For now, input is unsupported, and so we drop the ViewRef and ViewRefControl.
app.create_parent_viewport_watcher(view_creation_token);
}
MessageInternal::Relayout { width, height } => {
app.on_relayout(width, height);
}
MessageInternal::OnPresentError { error } => {
error!("OnPresentError({:?})", error);
break;
}
MessageInternal::OnNextFrameBegin {
| // link after data from the new link, just before the old link is closed. Non-example code | random_line_split |
main.rs | (h: f32, s: f32, v: f32) -> [u8; 4] {
assert!(s <= 100.0);
assert!(v <= 100.0);
let h = pos_mod(h, 360.0);
let c = v / 100.0 * s / 100.0;
let x = c * (1.0 - (((h / 60.0) % 2.0) - 1.0).abs());
let m = (v / 100.0) - c;
let (mut r, mut g, mut b) = match h {
h if h < 60.0 => (c, x, 0.0),
h if h < 120.0 => (x, c, 0.0),
h if h < 180.0 => (0.0, c, x),
h if h < 240.0 => (0.0, x, c),
h if h < 300.0 => (x, 0.0, c),
_ => (c, 0.0, x),
};
r += m;
g += m;
b += m;
return [(r * 255.0) as u8, (g * 255.0) as u8, (b * 255.0) as u8, 255];
}
enum MessageInternal {
CreateView(fviews::ViewCreationToken, fviews::ViewRefControl, fviews::ViewRef),
OnPresentError {
error: fland::FlatlandError,
},
OnNextFrameBegin {
additional_present_credits: u32,
future_presentation_infos: Vec<flatland::PresentationInfo>,
},
#[allow(dead_code)]
OnFramePresented {
frame_presented_info: fidl_fuchsia_scenic_scheduling::FramePresentedInfo,
},
Relayout {
width: u32,
height: u32,
},
}
struct AppModel<'a> {
flatland: &'a fland::FlatlandProxy,
allocator: fland::AllocatorProxy,
internal_sender: UnboundedSender<MessageInternal>,
allocation: Option<fsysmem::BufferCollectionInfo2>,
sched_lib: &'a dyn SchedulingLib,
hue: f32,
page_size: usize,
last_expected_presentation_time: zx::Time,
}
impl<'a> AppModel<'a> {
fn new(
flatland: &'a fland::FlatlandProxy,
allocator: fland::AllocatorProxy,
internal_sender: UnboundedSender<MessageInternal>,
sched_lib: &'a dyn SchedulingLib,
) -> AppModel<'a> {
AppModel {
flatland,
allocator,
internal_sender,
sched_lib,
allocation: None,
hue: 0.0,
page_size: zx::system_get_page_size().try_into().unwrap(),
last_expected_presentation_time: zx::Time::from_nanos(0),
}
}
async fn init_scene(&mut self) {
// BufferAllocator is a helper which makes it easier to obtain and set constraints on a
// sysmem::BufferCollectionToken. This token can then be registered with Scenic, which will
// set its own constraints; see below.
let mut buffer_allocator = BufferCollectionAllocator::new(
IMAGE_WIDTH,
IMAGE_HEIGHT,
fidl_fuchsia_sysmem::PixelFormatType::Bgra32,
FrameUsage::Cpu,
1,
)
.expect("failed to create BufferCollectionAllocator");
buffer_allocator.set_name(100, "Flatland ViewProvider Example").expect("fidl error");
let sysmem_buffer_collection_token =
buffer_allocator.duplicate_token().await.expect("error duplicating token");
// Register the sysmem BufferCollectionToken with the Scenic Allocator API. This is done by
// creating an import/export token pair, which is fundamentally a pair of zx::event. The
// export token is used as a key to register the sysmem BufferCollectionToken. The
// corresponding import token can be used to access the allocated buffers via other Scenic
// APIs, such as the "Gfx" and "Flatland" APIs, the latter being used in this example. See
// the following invocation of "flatland.create_image()".
let mut buffer_tokens = BufferCollectionTokenPair::new();
let args = fland::RegisterBufferCollectionArgs {
export_token: Some(buffer_tokens.export_token),
buffer_collection_token: Some(sysmem_buffer_collection_token),
..fland::RegisterBufferCollectionArgs::EMPTY
};
self.allocator
.register_buffer_collection(args)
.await
.expect("fidl error")
.expect("error registering buffer collection");
// Now that the BufferCollectionToken has been registered, Scenic is able to set constraints
// on it so that the eventually-allocated buffer can be used by e.g. both Vulkan and the
// hardware display controller. Allocate the buffer and wait for the allocation to finish,
// which cannot happen until Scenic has set all necessary constraints of its own.
self.allocation =
Some(buffer_allocator.allocate_buffers(true).await.expect("buffer allocation failed"));
self.set_image_colors();
// Create an image in the Flatland session, using the sysmem buffer we just allocated.
// As mentioned above, this uses the import token corresponding to the export token that was
// used to register the BufferCollectionToken with the Scenic Allocator.
let image_props = fland::ImageProperties {
size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }),
..fland::ImageProperties::EMPTY
};
// TODO(fxbug.dev/76640): generated FIDL methods currently expect "&mut" args. This will
// change; according to fxbug.dev/65845 the generated FIDL will use "&" instead (at least
// for POD structs like these). When this lands we can remove the ".clone()" from the call
// sites below.
self.flatland
.create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props)
.expect("fidl error");
// Populate the rest of the Flatland scene. There is a single transform which is set as the
// root transform; the newly-created image is set as the content of that transform.
self.flatland.create_transform(&mut TRANSFORM_ID.clone()).expect("fidl error");
self.flatland.set_root_transform(&mut TRANSFORM_ID.clone()).expect("fidl error");
self.flatland
.set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone())
.expect("fidl error");
}
fn create_parent_viewport_watcher(
&mut self,
mut view_creation_token: fviews::ViewCreationToken,
) {
let (parent_viewport_watcher, server_end) =
create_proxy::<fland::ParentViewportWatcherMarker>()
.expect("failed to create ParentViewportWatcherProxy");
// NOTE: it isn't necessary to call maybe_present() for this to take effect, because we will
        // relayout when we receive the initial layout info. See CreateView() FIDL docs.
self.flatland.create_view(&mut view_creation_token, server_end).expect("fidl error");
// NOTE: there may be a race condition if TemporaryFlatlandViewProvider.CreateView() is
// invoked a second time, causing us to create another graph link. Because Zircon doesn't
// guarantee ordering on responses of different channels, we might receive data from the old
// link after data from the new link, just before the old link is closed. Non-example code
// should be more careful (this assumes that the client expects CreateView() to be called
// multiple times, which clients commonly don't).
let sender = self.internal_sender.clone();
fasync::Task::spawn(async move {
let mut layout_info_stream =
HangingGetStream::new(Box::new(move || Some(parent_viewport_watcher.get_layout())));
while let Some(result) = layout_info_stream.next().await {
match result {
Ok(layout_info) => {
let mut width = 0;
let mut height = 0;
if let Some(logical_size) = layout_info.logical_size {
width = logical_size.width;
height = logical_size.height;
}
sender
.unbounded_send(MessageInternal::Relayout { width, height })
.expect("failed to send MessageInternal.");
}
                    Err(fidl::Error::ClientChannelClosed { .. }) => {
info!("graph link connection closed.");
return; // from spawned task closure
}
Err(fidl_error) => {
warn!("graph link GetLayout() error: {:?}", fidl_error);
return; // from spawned task closure
}
}
}
})
.detach();
}
fn draw(&mut self, expected_presentation_time: zx::Time) {
trace::duration!("gfx", "FlatlandViewProvider::draw");
let time_since_last_draw_in_seconds = ((expected_presentation_time.into_nanos()
- self.last_expected_presentation_time.into_nanos())
as f32)
/ 1_000_000_000.0;
self.last_expected_presentation_time = expected_presentation_time;
        let hue_change_time_per_second = 30.0;
self.hue =
(self.hue + hue_change_time_per_second * time_since_last_draw_in_seconds) % 360.0;
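        // For instance, at a 60 Hz presentation rate the frame delta is roughly
        // 16.7 ms, so the hue advances by about 30 * 0.0167 ≈ 0.5 degrees per
        // frame, completing a full 360-degree cycle every 12 seconds.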
self.set_image_colors();
self.sched_lib.request_present();
}
fn on_relayout(&mut self, width: u32, height: u32) {
self.flatland
.set_image_destination_size(&mut IMAGE_ID.clone(), &mut fmath::SizeU { width, height })
.expect("fidl error");
self.sched_lib.request_present();
}
fn set_image_colors(&mut self) {
let allocation = self.allocation.as_ref().unwrap();
// Write pixel values into the allocated buffer.
match &allocation.buffers[0].vmo {
Some(vmo) => {
assert!(IMAGE_WIDTH == 2);
assert!(IMAGE_HEIGHT == 2);
// Compute the same row-pitch as Flatland will compute internally.
assert!(allocation.settings.has_image_format_constraints);
let row_pitch: usize =
minimum_row_bytes(allocation.settings.image_format_constraints, IMAGE_WIDTH)
.expect("failed to compute row-pitch")
.try_into()
.unwrap();
// TODO(fxbug.dev/76640): should look at pixel-format, instead of assuming 32-bit
// BGRA pixels. For now, format is hard-coded anyway.
let p00: [u8; 4] = hsv_to_rgba(self.hue, 30.0, 75.0);
let p10: [u8; 4] = hsv_to_rgba(self.hue + 20.0, 30.0, 75.0);
let p11: [u8; 4] = hsv_to_rgba(self.hue + 60.0, 30.0, 75.0);
let p01: [u8; 4] = hsv_to_rgba(self.hue + 40.0, 30.0, 75.0);
// The size used to map a VMO must be a multiple of the page size. Ensure that the
// VMO is at least one page in size, and that the size returned by sysmem is no
// larger than this. Neither of these should ever fail.
{
let vmo_size: usize =
vmo.get_size().expect("failed to obtain VMO size").try_into().unwrap();
let sysmem_size: usize =
allocation.settings.buffer_settings.size_bytes.try_into().unwrap();
assert!(self.page_size <= vmo_size);
assert!(self.page_size >= sysmem_size);
}
// create_from_vmo() uses an offset of 0 when mapping the VMO; verify that this
// matches the sysmem allocation.
let offset: usize = allocation.buffers[0].vmo_usable_start.try_into().unwrap();
assert_eq!(offset, 0);
let mapping = mapped_vmo::Mapping::create_from_vmo(
&vmo,
self.page_size,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.expect("failed to map VMO");
mapping.write_at(0, &p00);
mapping.write_at(4, &p10);
mapping.write_at(row_pitch, &p01);
mapping.write_at(row_pitch + 4, &p11);
}
None => unreachable!(),
}
}
}
fn setup_fidl_services(sender: UnboundedSender<MessageInternal>) {
let view_provider_cb = move |stream: fapp::ViewProviderRequestStream| {
let sender = sender.clone();
fasync::Task::local(
stream
.try_for_each(move |req| {
match req {
                    fapp::ViewProviderRequest::CreateView2 { args, .. } => {
let view_creation_token = args.view_creation_token.unwrap();
// We do not get passed a view ref so create our own.
let ViewRefPair { control_ref, view_ref } =
ViewRefPair::new().expect("unable to create view ref pair");
sender
.unbounded_send(MessageInternal::CreateView(
view_creation_token,
control_ref,
view_ref,
))
.expect("failed to send MessageInternal.");
}
unhandled_req => {
warn!("Unhandled ViewProvider request: {:?}", unhandled_req);
}
};
future::ok(())
})
.unwrap_or_else(|e| {
eprintln!("error running TemporaryFlatlandViewProvider server: {:?}", e)
}),
)
.detach()
};
let mut fs = component::server::ServiceFs::new();
fs.dir("svc").add_fidl_service(view_provider_cb);
fs.take_and_serve_directory_handle().expect("failed to serve directory handle");
fasync::Task::local(fs.collect()).detach();
}
fn setup_handle_flatland_events(
event_stream: fland::FlatlandEventStream,
sender: UnboundedSender<MessageInternal>,
) {
fasync::Task::local(
event_stream
.try_for_each(move |event| {
match event {
fland::FlatlandEvent::OnNextFrameBegin { values } => {
if let (Some(additional_present_credits), Some(future_presentation_infos)) =
(values.additional_present_credits, values.future_presentation_infos)
{
sender
.unbounded_send(MessageInternal::OnNextFrameBegin {
additional_present_credits,
future_presentation_infos,
})
.expect("failed to send MessageInternal");
} else {
// If not an error, all table fields are guaranteed to be present.
unreachable!()
}
}
fland::FlatlandEvent::OnFramePresented { frame_presented_info } => {
sender
.unbounded_send(MessageInternal::OnFramePresented {
frame_presented_info,
})
.expect("failed to send MessageInternal");
}
fland::FlatlandEvent::OnError { error } => {
sender
.unbounded_send(MessageInternal::OnPresentError { error })
.expect("failed to send MessageInternal.");
}
};
future::ok(())
})
.unwrap_or_else(|e| eprintln!("error listening for Flatland Events: {:?}", e)),
)
.detach();
}
#[fasync::run_singlethreaded]
async fn main() {
fuchsia_trace_provider::trace_provider_create_with_fdio();
fuchsia_syslog::init_with_tags(&["flatland-display"]).expect("failed to initialize logger");
let (internal_sender, mut internal_receiver) = unbounded::<MessageInternal>();
let flatland =
connect_to_protocol::<fland::FlatlandMarker>().expect("error connecting to Flatland");
let sched_lib = ThroughputScheduler::new();
let allocator = connect_to_protocol::<fland::AllocatorMarker>()
.expect("error connecting to Scenic allocator");
info!("Established connections to Flatland and Allocator");
setup_fidl_services(internal_sender.clone());
setup_handle_flatland_events(flatland.take_event_stream(), internal_sender.clone());
let mut app = AppModel::new(&flatland, allocator, internal_sender.clone(), &sched_lib);
app.init_scene().await;
let mut present_count = 0;
loop {
futures::select! {
message = internal_receiver.next().fuse() => {
if let Some(message) = message {
match message {
MessageInternal::CreateView(view_creation_token, _view_ref_control, _view_ref) => {
// TODO(fxbug.dev/78866): handling ViewRefs is necessary for focus management.
// For now, input is unsupported, and so we drop the ViewRef and ViewRefControl.
app.create_parent_viewport_watcher(view_creation_token);
}
MessageInternal::Relayout { width, height } => {
app.on_relayout(width, height);
}
MessageInternal::OnPresentError { error } => {
error!("OnPresentError({:?})", error);
| hsv_to_rgba | identifier_name |
|
main.rs |
let c = v / 100.0 * s / 100.0;
let x = c * (1.0 - (((h / 60.0) % 2.0) - 1.0).abs());
let m = (v / 100.0) - c;
let (mut r, mut g, mut b) = match h {
h if h < 60.0 => (c, x, 0.0),
h if h < 120.0 => (x, c, 0.0),
h if h < 180.0 => (0.0, c, x),
h if h < 240.0 => (0.0, x, c),
h if h < 300.0 => (x, 0.0, c),
_ => (c, 0.0, x),
};
r += m;
g += m;
b += m;
return [(r * 255.0) as u8, (g * 255.0) as u8, (b * 255.0) as u8, 255];
}
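// Illustrative sketch, not part of the original file: at the primary hues
// with full saturation and value, the piecewise (c, x, 0) pattern above
// collapses to pure red, green, and blue. The signature elided by the split
// above is assumed to be `fn hsv_to_rgba(h: f32, s: f32, v: f32) -> [u8; 4]`.
#[cfg(test)]
#[test]
fn hsv_to_rgba_primaries_sketch() {
    assert_eq!(hsv_to_rgba(0.0, 100.0, 100.0), [255, 0, 0, 255]); // red
    assert_eq!(hsv_to_rgba(120.0, 100.0, 100.0), [0, 255, 0, 255]); // green
    assert_eq!(hsv_to_rgba(240.0, 100.0, 100.0), [0, 0, 255, 255]); // blue
}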
enum MessageInternal {
CreateView(fviews::ViewCreationToken, fviews::ViewRefControl, fviews::ViewRef),
OnPresentError {
error: fland::FlatlandError,
},
OnNextFrameBegin {
additional_present_credits: u32,
future_presentation_infos: Vec<flatland::PresentationInfo>,
},
#[allow(dead_code)]
OnFramePresented {
frame_presented_info: fidl_fuchsia_scenic_scheduling::FramePresentedInfo,
},
Relayout {
width: u32,
height: u32,
},
}
struct AppModel<'a> {
flatland: &'a fland::FlatlandProxy,
allocator: fland::AllocatorProxy,
internal_sender: UnboundedSender<MessageInternal>,
allocation: Option<fsysmem::BufferCollectionInfo2>,
sched_lib: &'a dyn SchedulingLib,
hue: f32,
page_size: usize,
last_expected_presentation_time: zx::Time,
}
impl<'a> AppModel<'a> {
fn new(
flatland: &'a fland::FlatlandProxy,
allocator: fland::AllocatorProxy,
internal_sender: UnboundedSender<MessageInternal>,
sched_lib: &'a dyn SchedulingLib,
) -> AppModel<'a> {
AppModel {
flatland,
allocator,
internal_sender,
sched_lib,
allocation: None,
hue: 0.0,
page_size: zx::system_get_page_size().try_into().unwrap(),
last_expected_presentation_time: zx::Time::from_nanos(0),
}
}
async fn init_scene(&mut self) {
// BufferAllocator is a helper which makes it easier to obtain and set constraints on a
// sysmem::BufferCollectionToken. This token can then be registered with Scenic, which will
// set its own constraints; see below.
let mut buffer_allocator = BufferCollectionAllocator::new(
IMAGE_WIDTH,
IMAGE_HEIGHT,
fidl_fuchsia_sysmem::PixelFormatType::Bgra32,
FrameUsage::Cpu,
1,
)
.expect("failed to create BufferCollectionAllocator");
buffer_allocator.set_name(100, "Flatland ViewProvider Example").expect("fidl error");
let sysmem_buffer_collection_token =
buffer_allocator.duplicate_token().await.expect("error duplicating token");
// Register the sysmem BufferCollectionToken with the Scenic Allocator API. This is done by
// creating an import/export token pair, which is fundamentally a pair of zx::event. The
// export token is used as a key to register the sysmem BufferCollectionToken. The
// corresponding import token can be used to access the allocated buffers via other Scenic
// APIs, such as the "Gfx" and "Flatland" APIs, the latter being used in this example. See
// the following invocation of "flatland.create_image()".
let mut buffer_tokens = BufferCollectionTokenPair::new();
let args = fland::RegisterBufferCollectionArgs {
export_token: Some(buffer_tokens.export_token),
buffer_collection_token: Some(sysmem_buffer_collection_token),
..fland::RegisterBufferCollectionArgs::EMPTY
};
self.allocator
.register_buffer_collection(args)
.await
.expect("fidl error")
.expect("error registering buffer collection");
// Now that the BufferCollectionToken has been registered, Scenic is able to set constraints
// on it so that the eventually-allocated buffer can be used by e.g. both Vulkan and the
// hardware display controller. Allocate the buffer and wait for the allocation to finish,
// which cannot happen until Scenic has set all necessary constraints of its own.
self.allocation =
Some(buffer_allocator.allocate_buffers(true).await.expect("buffer allocation failed"));
self.set_image_colors();
// Create an image in the Flatland session, using the sysmem buffer we just allocated.
// As mentioned above, this uses the import token corresponding to the export token that was
// used to register the BufferCollectionToken with the Scenic Allocator.
let image_props = fland::ImageProperties {
size: Some(fmath::SizeU { width: IMAGE_WIDTH, height: IMAGE_HEIGHT }),
..fland::ImageProperties::EMPTY
};
// TODO(fxbug.dev/76640): generated FIDL methods currently expect "&mut" args. This will
// change; according to fxbug.dev/65845 the generated FIDL will use "&" instead (at least
// for POD structs like these). When this lands we can remove the ".clone()" from the call
// sites below.
self.flatland
.create_image(&mut IMAGE_ID.clone(), &mut buffer_tokens.import_token, 0, image_props)
.expect("fidl error");
// Populate the rest of the Flatland scene. There is a single transform which is set as the
// root transform; the newly-created image is set as the content of that transform.
self.flatland.create_transform(&mut TRANSFORM_ID.clone()).expect("fidl error");
self.flatland.set_root_transform(&mut TRANSFORM_ID.clone()).expect("fidl error");
self.flatland
.set_content(&mut TRANSFORM_ID.clone(), &mut IMAGE_ID.clone())
.expect("fidl error");
}
fn create_parent_viewport_watcher(
&mut self,
mut view_creation_token: fviews::ViewCreationToken,
) | while let Some(result) = layout_info_stream.next().await {
match result {
Ok(layout_info) => {
let mut width = 0;
let mut height = 0;
if let Some(logical_size) = layout_info.logical_size {
width = logical_size.width;
height = logical_size.height;
}
sender
.unbounded_send(MessageInternal::Relayout { width, height })
.expect("failed to send MessageInternal.");
}
Err(fidl::Error::ClientChannelClosed { .. }) => {
info!("graph link connection closed.");
return; // from spawned task closure
}
Err(fidl_error) => {
warn!("graph link GetLayout() error: {:?}", fidl_error);
return; // from spawned task closure
}
}
}
})
.detach();
}
fn draw(&mut self, expected_presentation_time: zx::Time) {
trace::duration!("gfx", "FlatlandViewProvider::draw");
let time_since_last_draw_in_seconds = ((expected_presentation_time.into_nanos()
- self.last_expected_presentation_time.into_nanos())
as f32)
/ 1_000_000_000.0;
self.last_expected_presentation_time = expected_presentation_time;
let hue_change_time_per_second = 30 as f32;
self.hue =
(self.hue + hue_change_time_per_second * time_since_last_draw_in_seconds) % 360.0;
self.set_image_colors();
self.sched_lib.request_present();
}
fn on_relayout(&mut self, width: u32, height: u32) {
self.flatland
.set_image_destination_size(&mut IMAGE_ID.clone(), &mut fmath::SizeU { width, height })
.expect("fidl error");
self.sched_lib.request_present();
}
fn set_image_colors(&mut self) {
let allocation = self.allocation.as_ref().unwrap();
// Write pixel values into the allocated buffer.
match &allocation.buffers[0].vmo {
Some(vmo) => {
assert!(IMAGE_WIDTH == 2);
assert!(IMAGE_HEIGHT == 2);
// Compute the same row-pitch as Flatland will compute internally.
assert!(allocation.settings.has_image_format_constraints);
let row_pitch: usize =
minimum_row_bytes(allocation.settings.image_format_constraints, IMAGE_WIDTH)
.expect("failed to compute row-pitch")
.try_into()
.unwrap();
// TODO(fxbug.dev/76640): should look at pixel-format, instead of assuming 32-bit
// BGRA pixels. For now, format is hard-coded anyway.
let p00: [u8; 4] = hsv_to_rgba(self.hue, 30.0, 75.0);
let p10: [u8; 4] = hsv_to_rgba(self.hue + 20.0, 30.0, 75.0);
let p11: [u8; 4] = hsv_to_rgba(self.hue + 60.0, 30.0, 75.0);
let p01: [u8; 4] = hsv_to_rgba(self.hue + 40.0, 30.0, 75.0);
// The size used to map a VMO must be a multiple of the page size. Ensure that the
// VMO is at least one page in size, and that the size returned by sysmem is no
// larger than this. Neither of these should ever fail.
{
let vmo_size: usize =
vmo.get_size().expect("failed to obtain VMO size").try_into().unwrap();
let sysmem_size: usize =
allocation.settings.buffer_settings.size_bytes.try_into().unwrap();
assert!(self.page_size <= vmo_size);
assert!(self.page_size >= sysmem_size);
}
// create_from_vmo() uses an offset of 0 when mapping the VMO; verify that this
// matches the sysmem allocation.
let offset: usize = allocation.buffers[0].vmo_usable_start.try_into().unwrap();
assert_eq!(offset, 0);
let mapping = mapped_vmo::Mapping::create_from_vmo(
&vmo,
self.page_size,
zx::VmarFlags::PERM_READ | zx::VmarFlags::PERM_WRITE,
)
.expect("failed to map VMO");
mapping.write_at(0, &p00);
mapping.write_at(4, &p10);
mapping.write_at(row_pitch, &p01);
mapping.write_at(row_pitch + 4, &p11);
}
None => unreachable!(),
}
}
}
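// Illustrative std-only sketch, not part of the original file: the writes in
// set_image_colors() above address pixels as
// `offset = y * row_pitch + x * bytes_per_pixel`. The same scheme, written out
// for an arbitrary CPU-mapped BGRA buffer:
#[allow(dead_code)]
fn write_bgra_pixel(buf: &mut [u8], row_pitch: usize, x: usize, y: usize, bgra: [u8; 4]) {
    // 4 bytes per BGRA pixel; row_pitch may exceed width * 4 due to alignment
    // constraints reported by sysmem.
    let offset = y * row_pitch + x * 4;
    buf[offset..offset + 4].copy_from_slice(&bgra);
}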
fn setup_fidl_services(sender: UnboundedSender<MessageInternal>) {
let view_provider_cb = move |stream: fapp::ViewProviderRequestStream| {
let sender = sender.clone();
fasync::Task::local(
stream
.try_for_each(move |req| {
match req {
fapp::ViewProviderRequest::CreateView2 { args, .. } => {
let view_creation_token = args.view_creation_token.unwrap();
// We do not get passed a view ref so create our own.
let ViewRefPair { control_ref, view_ref } =
ViewRefPair::new().expect("unable to create view ref pair");
sender
.unbounded_send(MessageInternal::CreateView(
view_creation_token,
control_ref,
view_ref,
))
.expect("failed to send MessageInternal.");
}
unhandled_req => {
warn!("Unhandled ViewProvider request: {:?}", unhandled_req);
}
};
future::ok(())
})
.unwrap_or_else(|e| {
eprintln!("error running TemporaryFlatlandViewProvider server: {:?}", e)
}),
)
.detach()
};
let mut fs = component::server::ServiceFs::new();
fs.dir("svc").add_fidl_service(view_provider_cb);
fs.take_and_serve_directory_handle().expect("failed to serve directory handle");
fasync::Task::local(fs.collect()).detach();
}
fn setup_handle_flatland_events(
event_stream: fland::FlatlandEventStream,
sender: UnboundedSender<MessageInternal>,
) {
fasync::Task::local(
event_stream
.try_for_each(move |event| {
match event {
fland::FlatlandEvent::OnNextFrameBegin { values } => {
if let (Some(additional_present_credits), Some(future_presentation_infos)) =
(values.additional_present_credits, values.future_presentation_infos)
{
sender
.unbounded_send(MessageInternal::OnNextFrameBegin {
additional_present_credits,
future_presentation_infos,
})
.expect("failed to send MessageInternal");
} else {
// If not an error, all table fields are guaranteed to be present.
unreachable!()
}
}
fland::FlatlandEvent::OnFramePresented { frame_presented_info } => {
sender
.unbounded_send(MessageInternal::OnFramePresented {
frame_presented_info,
})
.expect("failed to send MessageInternal");
}
fland::FlatlandEvent::OnError { error } => {
sender
.unbounded_send(MessageInternal::OnPresentError { error })
.expect("failed to send MessageInternal.");
}
};
future::ok(())
})
.unwrap_or_else(|e| eprintln!("error listening for Flatland Events: {:?}", e)),
)
.detach();
}
#[fasync::run_singlethreaded]
async fn main() {
fuchsia_trace_provider::trace_provider_create_with_fdio();
fuchsia_syslog::init_with_tags(&["flatland-display"]).expect("failed to initialize logger");
let (internal_sender, mut internal_receiver) = unbounded::<MessageInternal>();
let flatland =
connect_to_protocol::<fland::FlatlandMarker>().expect("error connecting to Flatland");
let sched_lib = ThroughputScheduler::new();
let allocator = connect_to_protocol::<fland::AllocatorMarker>()
.expect("error connecting to Scenic allocator");
info!("Established connections to Flatland and Allocator");
setup_fidl_services(internal_sender.clone());
setup_handle_flatland_events(flatland.take_event_stream(), internal_sender.clone());
let mut app = AppModel::new(&flatland, allocator, internal_sender.clone(), &sched_lib);
app.init_scene().await;
let mut present_count = 0;
loop {
futures::select! {
message = internal_receiver.next().fuse() => {
if let Some(message) = message {
match message {
MessageInternal::CreateView(view_creation_token, _view_ref_control, _view_ref) => {
// TODO(fxbug.dev/78866): handling ViewRefs is necessary for focus management.
// For now, input is unsupported, and so we drop the ViewRef and ViewRefControl.
app.create_parent_viewport_watcher(view_creation_token);
}
MessageInternal::Relayout { width, height } => {
app.on_relayout(width, height);
}
MessageInternal::OnPresentError { error } => {
error!("OnPresentError({:?})", error);
break;
}
MessageInternal::OnNextFrameBegin {
| {
let (parent_viewport_watcher, server_end) =
create_proxy::<fland::ParentViewportWatcherMarker>()
.expect("failed to create ParentViewportWatcherProxy");
// NOTE: it isn't necessary to call maybe_present() for this to take effect, because we will
// relayout when we receive the initial layout info. See CreateView() FIDL docs.
self.flatland.create_view(&mut view_creation_token, server_end).expect("fidl error");
// NOTE: there may be a race condition if TemporaryFlatlandViewProvider.CreateView() is
// invoked a second time, causing us to create another graph link. Because Zircon doesn't
// guarantee ordering on responses of different channels, we might receive data from the old
// link after data from the new link, just before the old link is closed. Non-example code
// should be more careful (this assumes that the client expects CreateView() to be called
// multiple times, which clients commonly don't).
let sender = self.internal_sender.clone();
fasync::Task::spawn(async move {
let mut layout_info_stream =
HangingGetStream::new(Box::new(move || Some(parent_viewport_watcher.get_layout())));
| identifier_body |
main.rs | self.1.x {
if self.0.y < self.1.y {
(
Polarity::Vertical,
Bounds {
low: self.0.y,
high: self.1.y,
bar: self.0.x,
},
)
} else {
(
Polarity::Vertical,
Bounds {
low: self.1.y,
high: self.0.y,
bar: self.0.x,
},
)
}
} else {
if self.0.x < self.1.x {
(
Polarity::Horizontal,
Bounds {
low: self.0.x,
high: self.1.x,
bar: self.0.y,
},
)
} else {
(
Polarity::Horizontal,
Bounds {
low: self.1.x,
high: self.0.x,
bar: self.0.y,
},
)
}
}
}
fn crosses(&self, other: &Segment) -> Option<(Point, u64, u64)> {
let point = match (self.polarity_and_bounds(), other.polarity_and_bounds()) {
((Polarity::Horizontal, ..), (Polarity::Horizontal, ..))
| ((Polarity::Vertical, ..), (Polarity::Vertical, ..)) => None,
((Polarity::Vertical, v_bounds), (Polarity::Horizontal, h_bounds))
| ((Polarity::Horizontal, h_bounds), (Polarity::Vertical, v_bounds)) => {
if h_bounds.bar <= v_bounds.low
|| h_bounds.bar >= v_bounds.high
|| v_bounds.bar <= h_bounds.low
|| v_bounds.bar >= h_bounds.high
{
None
} else {
Some(Point {
x: v_bounds.bar,
y: h_bounds.bar,
})
}
}
};
point.map(|p| (p, self.0.flat_distance_to(&p), other.0.flat_distance_to(&p)))
}
fn length(&self) -> u64 {
match self.polarity_and_bounds() {
(_, Bounds { low, high, .. }) => (high - low) as u64,
}
}
}
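// Illustrative check, not part of the original file: polarity_and_bounds()
// normalizes a segment so that `low <= high` along the varying axis while
// `bar` holds the fixed coordinate, regardless of the direction the segment
// was drawn in.
#[cfg(test)]
#[test]
fn polarity_and_bounds_sketch() {
    let seg = Segment(Point { x: 3, y: 7 }, Point { x: 3, y: 2 });
    let (polarity, bounds) = seg.polarity_and_bounds();
    assert!(polarity.is_vertical());
    assert!(bounds.low == 2 && bounds.high == 7 && bounds.bar == 3);
}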
impl Debug for Segment {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"{{({}, {}) - ({}, {})}}",
self.0.x, self.0.y, self.1.x, self.1.y
)
}
}
#[derive(Copy, Clone, Debug)]
enum Route {
Up(u32),
Down(u32),
Left(u32),
Right(u32),
}
#[derive(Debug)]
enum Either<T, U> {
A(T),
B(U),
}
impl FromStr for Route {
type Err = Either<char, ParseIntError>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let first_char = s.as_bytes()[0] as char;
let num = s[1..].parse().map_err(Either::B)?;
match first_char {
'U' => Ok(Route::Up(num)),
'D' => Ok(Route::Down(num)),
'L' => Ok(Route::Left(num)),
'R' => Ok(Route::Right(num)),
_ => Err(Either::A(first_char)),
}
}
}
struct Runner {
path: Vec<Segment>,
cursor: Point,
}
impl Runner {
fn new() -> Self {
Self {
path: vec![],
cursor: Point { x: 0, y: 0 },
}
}
fn follow(&mut self, route: Route) {
let next = match route {
Route::Up(u) => Point {
y: self.cursor.y + u as i64,
..self.cursor
},
Route::Down(d) => Point {
y: self.cursor.y - d as i64,
..self.cursor
},
Route::Left(l) => Point {
x: self.cursor.x - l as i64,
..self.cursor
},
Route::Right(r) => Point {
x: self.cursor.x + r as i64,
..self.cursor
},
};
let segment = Segment(self.cursor, next);
self.path.push(segment);
self.cursor = next;
}
fn finish(self) -> Vec<Segment> {
self.path
}
}
fn run(route1: Vec<Route>, route2: Vec<Route>) -> (u64, u64) {
let mut runner1 = Runner::new();
for route in &route1 {
runner1.follow(*route);
}
let segments1 = runner1.finish();
let mut runner2 = Runner::new();
for route in &route2 {
runner2.follow(*route);
}
let segments2 = runner2.finish();
// This whole section could definitely be optimized...
// O(n*m)
#[cfg(not(feature = "optimized"))]
{
let mut crosses = vec![];
let mut cross_distances = HashMap::<Point, u64>::new();
let mut s1sum = 0;
for s1 in &segments1 {
let mut s2sum = 0;
for s2 in &segments2 {
if let Some((p, s1dist, s2dist)) = s1.crosses(s2) {
crosses.push(p);
if !cross_distances.contains_key(&p) {
cross_distances.insert(p, s1sum + s1dist + s2sum + s2dist);
}
}
s2sum += s2.length();
}
s1sum += s1.length();
}
let min_manhattan = crosses
.into_iter()
.map(|p| p.manhattan_distance())
.min()
.unwrap();
let min_sum_dist = cross_distances.into_iter().map(|(_, v)| v).min().unwrap();
(min_manhattan, min_sum_dist)
}
// optimized
// O(n log n + m log m)
#[cfg(feature = "optimized")]
{
struct ComputeData {
segment: Segment,
polarity: Polarity,
bounds: Bounds,
previous_length: u64,
}
// First we compute the lengths to get to each segment
// and store them together
fn compute_data(seg: Vec<Segment>) -> Vec<ComputeData> {
let mut length = 0;
seg.into_iter()
.map(|segment| {
let next_length = segment.length();
let (polarity, bounds) = segment.polarity_and_bounds();
let result = ComputeData {
segment,
polarity,
bounds,
previous_length: length,
};
length += next_length;
result
})
.collect()
}
let data1 = compute_data(segments1);
let data2 = compute_data(segments2);
// Next we split each segment into horizontal and vertical
// vectors, then sort them according to their horizontal component
fn partition_and_sort(seg: &[ComputeData]) -> (Vec<&ComputeData>, Vec<&ComputeData>) {
let (mut horizontals, mut verticals): (Vec<_>, Vec<_>) =
seg.iter().partition(|data| data.polarity.is_horizontal());
horizontals.sort_by_key(|data| data.segment.0.x);
verticals.sort_by_key(|data| data.segment.0.x);
(horizontals, verticals)
}
let (h1s, v1s) = partition_and_sort(&data1);
let (h2s, v2s) = partition_and_sort(&data2);
// now we can iterate over each horizontal and vertical pair in O(n+m)
fn find_manhattan_and_min_sum_distances(
horizontals: &[&ComputeData],
verticals: &[&ComputeData],
) -> (u64, u64) {
let mut h_iter = horizontals.iter();
let mut v_iter = verticals.iter();
let h_item = h_iter.next();
let v_item = v_iter.next();
// huh? Why the inner stuff here?
// We might run into cases where there are multiple horizontals
// and verticals crossing each other (think of the pound sign -> #).
// Iterating to the next vertical or horizontal after a successful
// intersection would be incorrect. Here, I've chosen to clone the
// verticals' iterator and run nested checks against the following
// vertical segments until they extend past the current horizontal
// segment. After that nested scan is complete, we could then move
// on to the next horizontal segment in the outer loop. ^
// P.S. would you look at that alignment!? ----------------------^
fn inner_find<'a>(
mut h_item: Option<&'a &'a ComputeData>,
mut v_item: Option<&'a &'a ComputeData>,
h_iter: &mut impl Iterator<Item = &'a &'a ComputeData>,
v_iter: &mut (impl Iterator<Item = &'a &'a ComputeData> + Clone),
nested: bool,
) -> (u64, u64) {
let mut min_manhattan = std::u64::MAX;
let mut min_sum = std::u64::MAX;
while let (Some(h_data), Some(v_data)) = (h_item, v_item) {
// In these cases, the vertical segment doesn't intersect, and
// there still might be other vertical segments that could
// intersect with this horizontal segment (due to sorting by x)
// so just move to the next vertical segment.
if v_data.bounds.bar <= h_data.bounds.low
|| h_data.bounds.bar <= v_data.bounds.low
|| h_data.bounds.bar >= v_data.bounds.high
{
v_item = v_iter.next();
continue;
}
// Here the vertical segment is beyond the current horizontal
// segment. Given that we sort by x, no more vertical
// segments will intersect with this horizontal segment. Move
// to the next horizontal segment. If we're in the nested
// loop, then just exit. The outer loop will increment for us.
if v_data.bounds.bar >= h_data.bounds.high {
if nested {
return (min_manhattan, min_sum);
}
h_item = h_iter.next();
continue;
}
let p = Point {
x: v_data.bounds.bar,
y: h_data.bounds.bar,
};
let p_manhattan = p.manhattan_distance();
if p_manhattan < min_manhattan {
min_manhattan = p_manhattan;
}
let p_min_sum = h_data.previous_length
+ h_data.segment.0.flat_distance_to(&p)
+ v_data.previous_length
+ v_data.segment.0.flat_distance_to(&p);
if p_min_sum < min_sum {
min_sum = p_min_sum;
}
if nested {
v_item = v_iter.next();
continue;
}
let (inner_manhattan, inner_min_sum) =
inner_find(h_item, v_item, h_iter, &mut v_iter.clone(), true);
if inner_manhattan < min_manhattan {
min_manhattan = inner_manhattan;
}
if inner_min_sum < min_sum {
min_sum = inner_min_sum;
}
h_item = h_iter.next();
}
(min_manhattan, min_sum)
}
inner_find(h_item, v_item, &mut h_iter, &mut v_iter, false)
}
let (manhattan_a, min_sum_a) = find_manhattan_and_min_sum_distances(&h1s, &v2s);
let (manhattan_b, min_sum_b) = find_manhattan_and_min_sum_distances(&h2s, &v1s);
(manhattan_a.min(manhattan_b), min_sum_a.min(min_sum_b))
}
}
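// Illustrative end-to-end check, not part of the original file: it exercises
// run() on the published AoC 2019 day 3 sample, whose expected answers are
// 159 (closest crossing by Manhattan distance) and 610 (fewest combined steps).
#[cfg(test)]
#[test]
fn run_sample_sketch() {
    let parse = |s: &str| -> Vec<Route> { s.split(',').map(|r| r.parse().unwrap()).collect() };
    let r1 = parse("R75,D30,R83,U83,L12,D49,R71,U7,L72");
    let r2 = parse("U62,R66,U55,R34,D71,R55,D58,R83");
    assert_eq!(run(r1, r2), (159, 610));
}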
#[cfg(test)]
mod test {
use super::*;
macro_rules! segments_cross {
{(($x1:expr, $y1:expr), ($x2:expr, $y2:expr)) <-|-> (($x3:expr, $y3:expr), ($x4:expr, $y4:expr)) @ ($c1:expr, $c2:expr)} => {
let segment1 = Segment(Point{x: $x1, y: $y1}, Point{x: $x2, y: $y2});
let segment2 = Segment(Point{x: $x3, y: $y3}, Point{x: $x4, y: $y4});
let cross = segment1.crosses(&segment2);
assert!(cross.is_some());
let (cross, ..) = cross.unwrap();
assert_eq!(cross.x, $c1);
assert_eq!(cross.y, $c2);
};
{(($x1:expr, $y1:expr), ($x2:expr, $y2:expr)) <---> (($x3:expr, $y3:expr), ($x4:expr, $y4:expr))} => {
let segment1 = Segment(Point{x: $x1, y: $y1}, Point{x: $x2, y: $y2});
let segment2 = Segment(Point{x: $x3, y: $y3}, Point{x: $x4, y: $y4});
assert!(segment1.crosses(&segment2).is_none());
};
}
#[test]
fn segments_cross() {
// two vertical | |
segments_cross!((( 0, 1), ( 0, -1)) <---> (( 0, 2), ( 0, -2)));
// two horizontal =
segments_cross!((( 1, 0), (-1, 0)) <---> (( 2, 0), (-2, 0)));
// too far left - |
segments_cross!(((-1, 0), ( 1, 0)) <---> ((-2, 1), (-2, -1)));
// too far right | -
segments_cross!(((-1, 0), ( 1, 0)) <---> (( 2, 1), ( 2, -1)));
// too far up |
// -
segments_cross!((( 2, -1), ( 2, 1)) <---> ((-1, 0), ( 1, 0)));
// too far down -
// |
segments_cross!(((-2, -1), (-2, 1)) <---> ((-1, 0), ( 1, 0)));
// cross +
segments_cross!(((-1, 0), ( 1, 0)) <-|-> (( 0, -1), ( 0, 1)) @ (0, 0));
// on-edge should not cross -|
segments_cross!(((-1, 0), ( 1, 0)) <---> (( 1, -1), ( 1, 1)));
}
macro_rules! assert_segments_eq {
($seg:expr, {($x1:expr, $y1:expr), ($x2:expr, $y2:expr)}) => {
assert_eq!($seg.0.x, $x1);
assert_eq!($seg.0.y, $y1);
assert_eq!($seg.1.x, $x2);
assert_eq!($seg.1.y, $y2);
};
}
#[test]
fn runner() {
let mut runner = Runner::new();
runner.follow(Route::Up(4));
runner.follow(Route::Right(4));
runner.follow(Route::Down(4));
runner.follow(Route::Left(4));
let path = runner.finish();
assert_segments_eq!(path[0], {(0, 0), (0, 4)});
assert_segments_eq!(path[1], {(0, 4), (4, 4)});
assert_segments_eq!(path[2], {(4, 4), (4, 0)});
assert_segments_eq!(path[3], {(4, 0), (0, 0)}); | }
macro_rules! route_vec {
(@route R $num:expr) => {
| random_line_split |
|
main.rs | (&self) -> u64 {
self.x.abs() as u64 + self.y.abs() as u64
}
fn flat_distance_to(&self, other: &Self) -> u64 {
(self.x - other.x).abs() as u64 + (self.y - other.y).abs() as u64
}
}
enum Polarity {
Vertical,
Horizontal,
}
#[allow(dead_code)]
impl Polarity {
fn is_horizontal(&self) -> bool {
match self {
Polarity::Horizontal => true,
_ => false,
}
}
fn is_vertical(&self) -> bool {
match self {
Polarity::Vertical => true,
_ => false,
}
}
}
struct Bounds {
low: i64,
high: i64,
bar: i64,
}
struct Segment(Point, Point);
impl Segment {
fn polarity_and_bounds(&self) -> (Polarity, Bounds) {
if self.0.x == self.1.x {
if self.0.y < self.1.y {
(
Polarity::Vertical,
Bounds {
low: self.0.y,
high: self.1.y,
bar: self.0.x,
},
)
} else {
(
Polarity::Vertical,
Bounds {
low: self.1.y,
high: self.0.y,
bar: self.0.x,
},
)
}
} else {
if self.0.x < self.1.x {
(
Polarity::Horizontal,
Bounds {
low: self.0.x,
high: self.1.x,
bar: self.0.y,
},
)
} else {
(
Polarity::Horizontal,
Bounds {
low: self.1.x,
high: self.0.x,
bar: self.0.y,
},
)
}
}
}
fn crosses(&self, other: &Segment) -> Option<(Point, u64, u64)> {
let point = match (self.polarity_and_bounds(), other.polarity_and_bounds()) {
((Polarity::Horizontal, ..), (Polarity::Horizontal, ..))
| ((Polarity::Vertical, ..), (Polarity::Vertical, ..)) => None,
((Polarity::Vertical, v_bounds), (Polarity::Horizontal, h_bounds))
| ((Polarity::Horizontal, h_bounds), (Polarity::Vertical, v_bounds)) => {
if h_bounds.bar <= v_bounds.low
|| h_bounds.bar >= v_bounds.high
|| v_bounds.bar <= h_bounds.low
|| v_bounds.bar >= h_bounds.high
{
None
} else {
Some(Point {
x: v_bounds.bar,
y: h_bounds.bar,
})
}
}
};
point.map(|p| (p, self.0.flat_distance_to(&p), other.0.flat_distance_to(&p)))
}
fn length(&self) -> u64 {
match self.polarity_and_bounds() {
(_, Bounds { low, high, .. }) => (high - low) as u64,
}
}
}
impl Debug for Segment {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"{{({}, {}) - ({}, {})}}",
self.0.x, self.0.y, self.1.x, self.1.y
)
}
}
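// Illustrative check, not part of the original file: besides the crossing
// point, crosses() reports how far along each wire (from its own starting
// endpoint) the crossing lies, which the min-sum part of the puzzle sums up.
#[cfg(test)]
#[test]
fn crosses_distances_sketch() {
    let h = Segment(Point { x: -1, y: 0 }, Point { x: 3, y: 0 });
    let v = Segment(Point { x: 2, y: -1 }, Point { x: 2, y: 1 });
    let (p, d_h, d_v) = h.crosses(&v).unwrap();
    assert!(p.x == 2 && p.y == 0);
    assert!(d_h == 3 && d_v == 1);
}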
#[derive(Copy, Clone, Debug)]
enum Route {
Up(u32),
Down(u32),
Left(u32),
Right(u32),
}
#[derive(Debug)]
enum Either<T, U> {
A(T),
B(U),
}
impl FromStr for Route {
type Err = Either<char, ParseIntError>;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let first_char = s.as_bytes()[0] as char;
let num = s[1..].parse().map_err(Either::B)?;
match first_char {
'U' => Ok(Route::Up(num)),
'D' => Ok(Route::Down(num)),
'L' => Ok(Route::Left(num)),
'R' => Ok(Route::Right(num)),
_ => Err(Either::A(first_char)),
}
}
}
struct Runner {
path: Vec<Segment>,
cursor: Point,
}
impl Runner {
fn new() -> Self {
Self {
path: vec![],
cursor: Point { x: 0, y: 0 },
}
}
fn follow(&mut self, route: Route) {
let next = match route {
Route::Up(u) => Point {
y: self.cursor.y + u as i64,
..self.cursor
},
Route::Down(d) => Point {
y: self.cursor.y - d as i64,
..self.cursor
},
Route::Left(l) => Point {
x: self.cursor.x - l as i64,
..self.cursor
},
Route::Right(r) => Point {
x: self.cursor.x + r as i64,
..self.cursor
},
};
let segment = Segment(self.cursor, next);
self.path.push(segment);
self.cursor = next;
}
fn finish(self) -> Vec<Segment> {
self.path
}
}
fn run(route1: Vec<Route>, route2: Vec<Route>) -> (u64, u64) {
let mut runner1 = Runner::new();
for route in &route1 {
runner1.follow(*route);
}
let segments1 = runner1.finish();
let mut runner2 = Runner::new();
for route in &route2 {
runner2.follow(*route);
}
let segments2 = runner2.finish();
// This whole section could definitely be optimized...
// O(n*m)
#[cfg(not(feature = "optimized"))]
{
let mut crosses = vec![];
let mut cross_distances = HashMap::<Point, u64>::new();
let mut s1sum = 0;
for s1 in &segments1 {
let mut s2sum = 0;
for s2 in &segments2 {
if let Some((p, s1dist, s2dist)) = s1.crosses(s2) {
crosses.push(p);
if !cross_distances.contains_key(&p) {
cross_distances.insert(p, s1sum + s1dist + s2sum + s2dist);
}
}
s2sum += s2.length();
}
s1sum += s1.length();
}
let min_manhattan = crosses
.into_iter()
.map(|p| p.manhattan_distance())
.min()
.unwrap();
let min_sum_dist = cross_distances.into_iter().map(|(_, v)| v).min().unwrap();
(min_manhattan, min_sum_dist)
}
// optimized
// O(n log n + m log m)
#[cfg(feature = "optimized")]
{
struct ComputeData {
segment: Segment,
polarity: Polarity,
bounds: Bounds,
previous_length: u64,
}
// First we compute the lengths to get to each segment
// and store them together
fn compute_data(seg: Vec<Segment>) -> Vec<ComputeData> {
let mut length = 0;
seg.into_iter()
.map(|segment| {
let next_length = segment.length();
let (polarity, bounds) = segment.polarity_and_bounds();
let result = ComputeData {
segment,
polarity,
bounds,
previous_length: length,
};
length += next_length;
result
})
.collect()
}
let data1 = compute_data(segments1);
let data2 = compute_data(segments2);
// Next we split each segment into horizontal and vertical
// vectors, then sort them according to their horizontal component
fn partition_and_sort(seg: &[ComputeData]) -> (Vec<&ComputeData>, Vec<&ComputeData>) {
let (mut horizontals, mut verticals): (Vec<_>, Vec<_>) =
seg.iter().partition(|data| data.polarity.is_horizontal());
horizontals.sort_by_key(|data| data.segment.0.x);
verticals.sort_by_key(|data| data.segment.0.x);
(horizontals, verticals)
}
let (h1s, v1s) = partition_and_sort(&data1);
let (h2s, v2s) = partition_and_sort(&data2);
// now we can iterate over each horizontal and vertical pair in O(n+m)
fn find_manhattan_and_min_sum_distances(
horizontals: &[&ComputeData],
verticals: &[&ComputeData],
) -> (u64, u64) {
let mut h_iter = horizontals.iter();
let mut v_iter = verticals.iter();
let h_item = h_iter.next();
let v_item = v_iter.next();
// huh? Why the inner stuff here?
// We might run into cases where there are multiple horizontals
// and verticals crossing each other (think of the pound sign -> #).
// Iterating to the next vertical or horizontal after a successful
// intersection would be incorrect. Here, I've chosen to clone the
// verticals' iterator and run nested checks against the following
// vertical segments until they extend past the current horizontal
// segment. After that nested scan is complete, we could then move
// on to the next horizontal segment in the outer loop. ^
// P.S. would you look at that alignment!? ----------------------^
fn inner_find<'a>(
mut h_item: Option<&'a &'a ComputeData>,
mut v_item: Option<&'a &'a ComputeData>,
h_iter: &mut impl Iterator<Item = &'a &'a ComputeData>,
v_iter: &mut (impl Iterator<Item = &'a &'a ComputeData> + Clone),
nested: bool,
) -> (u64, u64) {
let mut min_manhattan = std::u64::MAX;
let mut min_sum = std::u64::MAX;
while let (Some(h_data), Some(v_data)) = (h_item, v_item) {
// In these cases, the vertical segment doesn't intersect, and
// there still might be other vertical segments that could
// intersect with this horizontal segment (due to sorting by x)
// so just move to the next vertical segment.
if v_data.bounds.bar <= h_data.bounds.low
|| h_data.bounds.bar <= v_data.bounds.low
|| h_data.bounds.bar >= v_data.bounds.high
{
v_item = v_iter.next();
continue;
}
// Here the vertical segment is beyond the current horizontal
// segment. Given that we sort by x, no more vertical
// segments will intersect with this horizontal segment. Move
// to the next horizontal segment. If we're in the nested
// loop, then just exit. The outer loop will increment for us.
if v_data.bounds.bar >= h_data.bounds.high {
if nested {
return (min_manhattan, min_sum);
}
h_item = h_iter.next();
continue;
}
let p = Point {
x: v_data.bounds.bar,
y: h_data.bounds.bar,
};
let p_manhattan = p.manhattan_distance();
if p_manhattan < min_manhattan {
min_manhattan = p_manhattan;
}
let p_min_sum = h_data.previous_length
+ h_data.segment.0.flat_distance_to(&p)
+ v_data.previous_length
+ v_data.segment.0.flat_distance_to(&p);
if p_min_sum < min_sum {
min_sum = p_min_sum;
}
if nested {
v_item = v_iter.next();
continue;
}
let (inner_manhattan, inner_min_sum) =
inner_find(h_item, v_item, h_iter, &mut v_iter.clone(), true);
if inner_manhattan < min_manhattan {
min_manhattan = inner_manhattan;
}
if inner_min_sum < min_sum {
min_sum = inner_min_sum;
}
h_item = h_iter.next();
}
(min_manhattan, min_sum)
}
inner_find(h_item, v_item, &mut h_iter, &mut v_iter, false)
}
let (manhattan_a, min_sum_a) = find_manhattan_and_min_sum_distances(&h1s, &v2s);
let (manhattan_b, min_sum_b) = find_manhattan_and_min_sum_distances(&h2s, &v1s);
(manhattan_a.min(manhattan_b), min_sum_a.min(min_sum_b))
}
}
#[cfg(test)]
mod test {
use super::*;
macro_rules! segments_cross {
{(($x1:expr, $y1:expr), ($x2:expr, $y2:expr)) <-|-> (($x3:expr, $y3:expr), ($x4:expr, $y4:expr)) @ ($c1:expr, $c2:expr)} => {
let segment1 = Segment(Point{x: $x1, y: $y1}, Point{x: $x2, y: $y2});
let segment2 = Segment(Point{x: $x3, y: $y3}, Point{x: $x4, y: $y4});
let cross = segment1.crosses(&segment2);
assert!(cross.is_some());
let (cross, ..) = cross.unwrap();
assert_eq!(cross.x, $c1);
assert_eq!(cross.y, $c2);
};
{(($x1:expr, $y1:expr), ($x2:expr, $y2:expr)) <---> (($x3:expr, $y3:expr), ($x4:expr, $y4:expr))} => {
let segment1 = Segment(Point{x: $x1, y: $y1}, Point{x: $x2, y: $y2});
let segment2 = Segment(Point{x: $x3, y: $y3}, Point{x: $x4, y: $y4});
assert!(segment1.crosses(&segment2).is_none());
};
}
#[test]
fn segments_cross() {
// two vertical | |
segments_cross!((( 0, 1), ( 0, -1)) <---> (( 0, 2), ( 0, -2)));
// two horizontal =
segments_cross!((( 1, 0), (-1, 0)) <---> (( 2, 0), (-2, 0)));
// too far left - |
segments_cross!(((-1, 0), ( 1, 0)) <---> ((-2, 1), (-2, -1)));
// too far right | -
segments_cross!(((-1, 0), ( 1, 0)) <---> (( 2, 1), ( 2, -1)));
// too far up |
// -
segments_cross!((( 2, -1), ( 2, 1)) <---> ((-1, 0), ( 1, 0)));
// too far down -
// |
segments_cross!(((-2, -1), (-2, 1)) <---> ((-1, 0), ( 1, 0)));
// cross +
segments_cross!(((-1, 0), ( 1, 0)) <-|-> (( 0, -1), ( 0, 1)) @ (0, 0));
// on-edge should not cross -|
segments_cross!(((-1, 0), ( 1, 0)) <---> (( 1, -1), ( 1, 1)));
}
macro_rules! assert_segments_eq {
($seg:expr, {($x1:expr, $y1:expr), ($x2:expr, $y2:expr)}) => {
assert_ | manhattan_distance | identifier_name |
|
translate.rs | use rustast;
use rustast::DUMMY_SP;
use rustast::AstBuilder;
pub use self::RustUse::*;
pub use self::Expr::*;
pub struct Grammar {
pub imports: Vec<RustUse>,
pub rules: Vec<Rule>,
}
#[deriving(Clone)]
pub enum RustUse {
RustUseSimple(String),
RustUseGlob(String),
RustUseList(String, Vec<String>),
}
pub struct Rule {
pub name: String,
pub expr: Box<Expr>,
pub ret_type: String, |
pub struct CharSetCase {
pub start: char,
pub end: char
}
pub struct TaggedExpr {
pub name: Option<String>,
pub expr: Box<Expr>,
}
pub enum Expr {
AnyCharExpr,
LiteralExpr(String),
CharSetExpr(bool, Vec<CharSetCase>),
RuleExpr(String),
SequenceExpr(Vec<Expr>),
ChoiceExpr(Vec<Expr>),
OptionalExpr(Box<Expr>),
Repeat(Box<Expr>, /*min*/ uint, /*max*/ Option<uint>, /*sep*/ Option<Box<Expr>>),
PosAssertExpr(Box<Expr>),
NegAssertExpr(Box<Expr>),
ActionExpr(Vec<TaggedExpr>, String),
}
pub fn compile_grammar(ctxt: &rustast::ExtCtxt, grammar: &Grammar) -> rustast::P<rustast::Mod> {
let mut imports = grammar.imports.clone();
imports.push(RustUseGlob("self::ParseResult".to_string()));
let view_items = translate_view_items(ctxt, imports.as_slice());
let items = header_items(ctxt).into_iter()
.chain(grammar.rules.iter().map(|rule| {
compile_rule(ctxt, rule)
}))
.chain(grammar.rules.iter().filter(|rule| rule.exported).map(|rule| {
compile_rule_export(ctxt, rule)
}))
.collect::<Vec<_>>();
rustast::module(view_items, items)
}
pub fn translate_view_items(ctxt: &rustast::ExtCtxt, imports: &[RustUse]) -> Vec<rustast::ViewItem> {
imports.iter().map(|i| {
match *i {
RustUseSimple(ref p) => ctxt.view_use_simple(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path(p.as_slice())),
RustUseGlob(ref p) => ctxt.view_use_glob(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path_vec(p.as_slice())),
RustUseList(ref p, ref v) => ctxt.view_use_list(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path_vec(p.as_slice()),
v.iter().map(|s| rustast::str_to_ident(s.as_slice())).collect::<Vec<_>>().as_slice()
),
}
}).collect()
}
pub fn header_items(ctxt: &rustast::ExtCtxt) -> Vec<rustast::P<rustast::Item>> {
let mut items = Vec::new();
items.push(quote_item!(ctxt,
enum ParseResult<T> {
Matched(uint, T),
Failed,
}
).unwrap());
items.push(quote_item!(ctxt,
struct ParseState {
max_err_pos: uint,
expected: ::std::collections::HashSet<&'static str>,
}
).unwrap());
items.push(quote_item!(ctxt,
impl ParseState {
fn new() -> ParseState {
ParseState{ max_err_pos: 0, expected: ::std::collections::HashSet::new() }
}
fn mark_failure(&mut self, pos: uint, expected: &'static str) -> ParseResult<()> {
if pos > self.max_err_pos {
self.max_err_pos = pos;
self.expected.clear();
}
if pos == self.max_err_pos {
self.expected.insert(expected);
}
Failed
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn slice_eq(input: &str, state: &mut ParseState, pos: uint, m: &'static str) -> ParseResult<()> {
#![inline]
#![allow(dead_code)]
let l = m.len();
if input.len() >= pos + l && input.as_bytes().slice(pos, pos+l) == m.as_bytes() {
Matched(pos+l, ())
} else {
state.mark_failure(pos, m)
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn any_char(input: &str, state: &mut ParseState, pos: uint) -> ParseResult<()> {
#![inline]
#![allow(dead_code)]
if input.len() > pos {
Matched(input.char_range_at(pos).next, ())
} else {
state.mark_failure(pos, "<character>")
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn pos_to_line(input: &str, pos: uint) -> (uint, uint) {
let mut remaining = pos;
let mut lineno: uint = 1;
for line in input.lines() {
let line_length = line.len() + 1;
if remaining < line_length {
return (lineno, remaining + 1);
}
remaining -= line_length;
lineno += 1;
}
return (lineno, remaining + 1);
}
).unwrap());
items
}
fn compile_rule(ctxt: &rustast::ExtCtxt, rule: &Rule) -> rustast::P<rustast::Item> {
let name = rustast::str_to_ident(format!("parse_{}", rule.name).as_slice());
let ret = rustast::parse_type(rule.ret_type.as_slice());
let body = compile_expr(ctxt, &*rule.expr, rule.ret_type.as_slice() != "()");
(quote_item!(ctxt,
fn $name<'input>(input: &'input str, state: &mut ParseState, pos: uint) -> ParseResult<$ret> {
$body
}
)).unwrap()
}
fn compile_rule_export(ctxt: &rustast::ExtCtxt, rule: &Rule) -> rustast::P<rustast::Item> {
let name = rustast::str_to_ident(rule.name.as_slice());
let ret = rustast::parse_type(rule.ret_type.as_slice());
let parse_fn = rustast::str_to_ident(format!("parse_{}", rule.name).as_slice());
(quote_item!(ctxt,
pub fn $name<'input>(input: &'input str) -> Result<$ret, String> {
let mut state = ParseState::new();
match $parse_fn(input, &mut state, 0) {
Matched(pos, value) => {
if pos == input.len() {
return Ok(value)
}
}
_ => {}
}
let expected = state.expected.to_string().escape_default();
Err(format!("Error at {}: Expected {}", pos_to_line(input, state.max_err_pos), expected))
}
)).unwrap()
}
fn compile_match_and_then(ctxt: &rustast::ExtCtxt, e: &Expr, value_name: Option<&str>, then: rustast::P<rustast::Expr>) -> rustast::P<rustast::Expr> {
let seq_res = compile_expr(ctxt, e, value_name.is_some());
let name_pat = match value_name {
Some(name) => rustast::str_to_ident(name),
None => rustast::str_to_ident("_")
};
quote_expr!(ctxt, {
let seq_res = $seq_res;
match seq_res {
Matched(pos, $name_pat) => { $then }
Failed => Failed,
}
})
}
fn cond_swap<T>(swap: bool, tup: (T, T)) -> (T, T) {
let (a, b) = tup;
if swap {
(b, a)
} else {
(a, b)
}
}
fn format_char_set(cases: &[CharSetCase]) -> String {
let mut r = "[".into_string();
for &CharSetCase{start, end} in cases.iter() {
r.push(start);
if start != end {
r.push('-');
r.push(end);
}
}
r.push(']');
r
}
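// Illustrative check, not part of the original file, written in the same
// pre-1.0 dialect as the surrounding code. Multi-char cases render as ranges
// and single-char cases collapse to one character:
#[test]
fn format_char_set_sketch() {
    let cases = vec!(
        CharSetCase { start: 'a', end: 'z' },
        CharSetCase { start: '_', end: '_' },
    );
    assert_eq!(format_char_set(cases.as_slice()).as_slice(), "[a-z_]");
}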
#[allow(unused_imports)] // quote_tokens! imports things
fn compile_expr(ctxt: &rustast::ExtCtxt, e: &Expr, result_used: bool) -> rustast::P<rustast::Expr> {
match *e {
AnyCharExpr => {
quote_expr!(ctxt, any_char(input, state, pos))
}
LiteralExpr(ref s) => {
let sl = s.as_slice();
quote_expr!(ctxt, slice_eq(input, state, pos, $sl))
}
CharSetExpr(invert, ref cases) => {
let expected_set = format_char_set(cases.as_slice());
let expected_str = expected_set.as_slice();
let (in_set, not_in_set) = cond_swap(invert, (
quote_expr!(ctxt, Matched(next, ())),
quote_expr!(ctxt, state.mark_failure(pos, $expected_str)),
));
let m = ctxt.expr_match(DUMMY_SP, quote_expr!(ctxt, ch), vec!(
ctxt.arm(DUMMY_SP, cases.iter().map(|case| {
if case.start == case.end {
ctxt.pat_lit(DUMMY_SP, ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.start)))
} else {
ctxt.pat(DUMMY_SP, rustast::ast::PatRange(
ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.start)),
ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.end))
))
}
}).collect::<Vec<_>>(), in_set),
ctxt.arm(DUMMY_SP, vec!(ctxt.pat_wild(DUMMY_SP)), not_in_set)
));
quote_expr!(ctxt, if input.len() > pos {
let ::std::str::CharRange {ch, next} = input.char_range_at(pos);
$m
} else {
state.mark_failure(pos, $expected_str)
})
}
RuleExpr(ref rule_name) => {
let func = rustast::str_to_ident(format!("parse_{}", *rule_name).as_slice());
quote_expr!(ctxt, $func(input, state, pos))
}
SequenceExpr(ref exprs) => {
fn write_seq(ctxt: &rustast::ExtCtxt, exprs: &[Expr]) -> rustast::P<rustast::Expr> {
if exprs.len() == 1 {
compile_expr(ctxt, &exprs[0], false)
} else {
compile_match_and_then(ctxt, &exprs[0], None, write_seq(ctxt, exprs.tail()))
}
}
if exprs.len() > 0 {
write_seq(ctxt, exprs.as_slice())
} else {
quote_expr!(ctxt, Matched(pos, ()))
}
}
ChoiceExpr(ref exprs) => {
fn write_choice(ctxt: &rustast::ExtCtxt, exprs: &[Expr], result_used: bool) -> rustast::P<rustast::Expr> {
if exprs.len() == 1 {
compile_expr(ctxt, &exprs[0], result_used)
} else {
let choice_res = compile_expr(ctxt, &exprs[0], result_used);
let next = write_choice(ctxt, exprs.tail(), result_used);
quote_expr!(ctxt, {
let choice_res = $choice_res;
match choice_res {
Matched(pos, value) => Matched(pos, value),
Failed => $next
}
})
}
}
if exprs.len() > 0 {
write_choice(ctxt, exprs.as_slice(), result_used)
} else {
quote_expr!(ctxt, Matched(pos, ()))
}
}
OptionalExpr(box ref e) => {
let optional_res = compile_expr(ctxt, e, result_used);
quote_expr!(ctxt, match $optional_res {
Matched(newpos, value) => { Matched(newpos, Some(value)) },
Failed => { Matched(pos, None) },
})
}
Repeat(box ref e, min, max, ref sep) => {
let inner = compile_expr(ctxt, e, result_used);
let match_sep = match *sep {
Some(box ref sep) => {
let sep_inner = compile_expr(ctxt, sep, false);
quote_tokens!(ctxt,
let pos = if repeat_value.len() > 0 {
let sep_res = $sep_inner;
match sep_res {
Matched(newpos, _) => { newpos },
Failed => break,
}
} else { pos };
)
}
None => vec!()
};
let result = if result_used {
quote_expr!(ctxt, repeat_value)
} else {
quote_expr!(ctxt, ())
};
let (repeat_vec, repeat_step) =
if result_used || min > 0 || max.is_some() || sep.is_some() {
(quote_tokens!(ctxt, let mut repeat_value = vec!();),
quote_tokens!(ctxt, repeat_value.push(value);))
} else {
(vec!(), vec!())
};
let max_check = match max {
None => vec!(),
Some(max) => quote_tokens!(ctxt,
if repeat_value.len() >= $max { break }
)
};
let result_check = if min > 0 {
quote_expr!(ctxt,
if repeat_value.len() >= $min {
Matched(repeat_pos, $result)
} else {
Failed
}
)
} else {
quote_expr!(ctxt, Matched(repeat_pos, $result))
};
quote_expr!(ctxt, {
let mut repeat_pos = pos;
$repeat_vec
loop {
let pos = repeat_pos;
$match_sep
$max_check
let step_res = $inner;
match step_res {
Matched(newpos, value) => {
repeat_pos = newpos;
$repeat_step
},
Failed => {
break;
}
}
}
$result_check
})
}
PosAssertExpr(box ref e) => {
let assert_res = compile_expr(ctxt, e, false);
quote_expr!(ctxt, {
let assert_res = $assert_res;
match assert_res {
Matched(..) => Matched(pos, ()),
Failed => Failed,
}
})
}
NegAssertExpr(box ref e) => {
let assert_res = compile_expr(ctxt, e, false);
quote_expr!(ctxt, {
let assert_res = $assert_res;
match assert_res {
Failed => Matched(pos, ()),
Matched(..) => Failed,
}
})
}
ActionExpr(ref exprs, ref code) => {
fn write_seq(ctxt: &rustast::ExtCtxt, exprs: &[TaggedExpr], code: &str) -> rustast::P<rustast::Expr> {
match exprs.head() {
Some(ref head) => {
let name = head.name.as_ref().map(|s| s.as_slice());
compile_match_and_then(ctxt, &*head.expr, name,
write_seq(ctxt, exprs.tail(), code)
)
}
None => {
let code_block = rustast::parse_block(code);
quote_expr!(ctxt, {
let match_str = input.slice(start_pos, pos);
Matched(pos, $code_block)
})
}
}
}
let body = write_seq(ctxt, exprs.as_slice(), code.as_slice());
quote_expr!(ctxt, {
let start_pos = pos;
$body
})
}
}
} | pub exported: bool,
} | random_line_split |
translate.rs | use rustast;
use rustast::DUMMY_SP;
use rustast::AstBuilder;
pub use self::RustUse::*;
pub use self::Expr::*;
pub struct Grammar {
pub imports: Vec<RustUse>,
pub rules: Vec<Rule>,
}
#[deriving(Clone)]
pub enum RustUse {
RustUseSimple(String),
RustUseGlob(String),
RustUseList(String, Vec<String>),
}
pub struct | {
pub name: String,
pub expr: Box<Expr>,
pub ret_type: String,
pub exported: bool,
}
pub struct CharSetCase {
pub start: char,
pub end: char
}
pub struct TaggedExpr {
pub name: Option<String>,
pub expr: Box<Expr>,
}
pub enum Expr {
AnyCharExpr,
LiteralExpr(String),
CharSetExpr(bool, Vec<CharSetCase>),
RuleExpr(String),
SequenceExpr(Vec<Expr>),
ChoiceExpr(Vec<Expr>),
OptionalExpr(Box<Expr>),
Repeat(Box<Expr>, /*min*/ uint, /*max*/ Option<uint>, /*sep*/ Option<Box<Expr>>),
PosAssertExpr(Box<Expr>),
NegAssertExpr(Box<Expr>),
ActionExpr(Vec<TaggedExpr>, String),
}
pub fn compile_grammar(ctxt: &rustast::ExtCtxt, grammar: &Grammar) -> rustast::P<rustast::Mod> {
let mut imports = grammar.imports.clone();
imports.push(RustUseGlob("self::ParseResult".to_string()));
let view_items = translate_view_items(ctxt, imports.as_slice());
let items = header_items(ctxt).into_iter()
.chain(grammar.rules.iter().map(|rule| {
compile_rule(ctxt, rule)
}))
.chain(grammar.rules.iter().filter(|rule| rule.exported).map(|rule| {
compile_rule_export(ctxt, rule)
}))
.collect::<Vec<_>>();
rustast::module(view_items, items)
}
pub fn translate_view_items(ctxt: &rustast::ExtCtxt, imports: &[RustUse]) -> Vec<rustast::ViewItem> {
imports.iter().map(|i| {
match *i {
RustUseSimple(ref p) => ctxt.view_use_simple(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path(p.as_slice())),
RustUseGlob(ref p) => ctxt.view_use_glob(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path_vec(p.as_slice())),
RustUseList(ref p, ref v) => ctxt.view_use_list(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path_vec(p.as_slice()),
v.iter().map(|s| rustast::str_to_ident(s.as_slice())).collect::<Vec<_>>().as_slice()
),
}
}).collect()
}
pub fn header_items(ctxt: &rustast::ExtCtxt) -> Vec<rustast::P<rustast::Item>> {
let mut items = Vec::new();
items.push(quote_item!(ctxt,
enum ParseResult<T> {
Matched(uint, T),
Failed,
}
).unwrap());
items.push(quote_item!(ctxt,
struct ParseState {
max_err_pos: uint,
expected: ::std::collections::HashSet<&'static str>,
}
).unwrap());
items.push(quote_item!(ctxt,
impl ParseState {
fn new() -> ParseState {
ParseState{ max_err_pos: 0, expected: ::std::collections::HashSet::new() }
}
fn mark_failure(&mut self, pos: uint, expected: &'static str) -> ParseResult<()> {
if pos > self.max_err_pos {
self.max_err_pos = pos;
self.expected.clear();
}
if pos == self.max_err_pos {
self.expected.insert(expected);
}
Failed
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn slice_eq(input: &str, state: &mut ParseState, pos: uint, m: &'static str) -> ParseResult<()> {
#![inline]
#![allow(dead_code)]
let l = m.len();
if input.len() >= pos + l && input.as_bytes().slice(pos, pos+l) == m.as_bytes() {
Matched(pos+l, ())
} else {
state.mark_failure(pos, m)
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn any_char(input: &str, state: &mut ParseState, pos: uint) -> ParseResult<()> {
#![inline]
#![allow(dead_code)]
if input.len() > pos {
Matched(input.char_range_at(pos).next, ())
} else {
state.mark_failure(pos, "<character>")
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn pos_to_line(input: &str, pos: uint) -> (uint, uint) {
let mut remaining = pos;
let mut lineno: uint = 1;
for line in input.lines() {
let line_length = line.len() + 1;
if remaining < line_length {
return (lineno, remaining + 1);
}
remaining -= line_length;
lineno += 1;
}
return (lineno, remaining + 1);
}
).unwrap());
items
}
fn compile_rule(ctxt: &rustast::ExtCtxt, rule: &Rule) -> rustast::P<rustast::Item> {
let name = rustast::str_to_ident(format!("parse_{}", rule.name).as_slice());
let ret = rustast::parse_type(rule.ret_type.as_slice());
let body = compile_expr(ctxt, &*rule.expr, rule.ret_type.as_slice() != "()");
(quote_item!(ctxt,
fn $name<'input>(input: &'input str, state: &mut ParseState, pos: uint) -> ParseResult<$ret> {
$body
}
)).unwrap()
}
fn compile_rule_export(ctxt: &rustast::ExtCtxt, rule: &Rule) -> rustast::P<rustast::Item> {
let name = rustast::str_to_ident(rule.name.as_slice());
let ret = rustast::parse_type(rule.ret_type.as_slice());
let parse_fn = rustast::str_to_ident(format!("parse_{}", rule.name).as_slice());
(quote_item!(ctxt,
pub fn $name<'input>(input: &'input str) -> Result<$ret, String> {
let mut state = ParseState::new();
match $parse_fn(input, &mut state, 0) {
Matched(pos, value) => {
if pos == input.len() {
return Ok(value)
}
}
_ => {}
}
let expected = state.expected.to_string().escape_default();
Err(format!("Error at {}: Expected {}", pos_to_line(input, state.max_err_pos), expected))
}
)).unwrap()
}
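// Illustrative usage sketch, not part of the original file: for a grammar
// rule declared as, say, `pub expr -> i64`, the exported wrapper generated
// above is called like ordinary Rust. The module and rule names here
// (`grammar`, `expr`) are hypothetical:
//
//     match grammar::expr("1+2") {
//         Ok(value) => println!("parsed: {}", value),
//         Err(msg) => println!("parse failed: {}", msg),
//     }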
fn compile_match_and_then(ctxt: &rustast::ExtCtxt, e: &Expr, value_name: Option<&str>, then: rustast::P<rustast::Expr>) -> rustast::P<rustast::Expr> {
let seq_res = compile_expr(ctxt, e, value_name.is_some());
let name_pat = match value_name {
Some(name) => rustast::str_to_ident(name),
None => rustast::str_to_ident("_")
};
quote_expr!(ctxt, {
let seq_res = $seq_res;
match seq_res {
Matched(pos, $name_pat) => { $then }
Failed => Failed,
}
})
}
fn cond_swap<T>(swap: bool, tup: (T, T)) -> (T, T) {
let (a, b) = tup;
if swap {
(b, a)
} else {
(a, b)
}
}
fn format_char_set(cases: &[CharSetCase]) -> String {
let mut r = "[".into_string();
for &CharSetCase{start, end} in cases.iter() {
r.push(start);
if start != end {
r.push('-');
r.push(end);
}
}
r.push(']');
r
}
#[allow(unused_imports)] // quote_tokens! imports things
fn compile_expr(ctxt: &rustast::ExtCtxt, e: &Expr, result_used: bool) -> rustast::P<rustast::Expr> {
match *e {
AnyCharExpr => {
quote_expr!(ctxt, any_char(input, state, pos))
}
LiteralExpr(ref s) => {
let sl = s.as_slice();
quote_expr!(ctxt, slice_eq(input, state, pos, $sl))
}
CharSetExpr(invert, ref cases) => {
let expected_set = format_char_set(cases.as_slice());
let expected_str = expected_set.as_slice();
let (in_set, not_in_set) = cond_swap(invert, (
quote_expr!(ctxt, Matched(next, ())),
quote_expr!(ctxt, state.mark_failure(pos, $expected_str)),
));
let m = ctxt.expr_match(DUMMY_SP, quote_expr!(ctxt, ch), vec!(
ctxt.arm(DUMMY_SP, cases.iter().map(|case| {
if case.start == case.end {
ctxt.pat_lit(DUMMY_SP, ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.start)))
} else {
ctxt.pat(DUMMY_SP, rustast::ast::PatRange(
ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.start)),
ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.end))
))
}
}).collect::<Vec<_>>(), in_set),
ctxt.arm(DUMMY_SP, vec!(ctxt.pat_wild(DUMMY_SP)), not_in_set)
));
quote_expr!(ctxt, if input.len() > pos {
let ::std::str::CharRange {ch, next} = input.char_range_at(pos);
$m
} else {
state.mark_failure(pos, $expected_str)
})
}
RuleExpr(ref rule_name) => {
let func = rustast::str_to_ident(format!("parse_{}", *rule_name).as_slice());
quote_expr!(ctxt, $func(input, state, pos))
}
SequenceExpr(ref exprs) => {
fn write_seq(ctxt: &rustast::ExtCtxt, exprs: &[Expr]) -> rustast::P<rustast::Expr> {
if exprs.len() == 1 {
compile_expr(ctxt, &exprs[0], false)
} else {
compile_match_and_then(ctxt, &exprs[0], None, write_seq(ctxt, exprs.tail()))
}
}
if exprs.len() > 0 {
write_seq(ctxt, exprs.as_slice())
} else {
quote_expr!(ctxt, Matched(pos, ()))
}
}
ChoiceExpr(ref exprs) => {
fn write_choice(ctxt: &rustast::ExtCtxt, exprs: &[Expr], result_used: bool) -> rustast::P<rustast::Expr> {
if exprs.len() == 1 {
compile_expr(ctxt, &exprs[0], result_used)
} else {
let choice_res = compile_expr(ctxt, &exprs[0], result_used);
let next = write_choice(ctxt, exprs.tail(), result_used);
quote_expr!(ctxt, {
let choice_res = $choice_res;
match choice_res {
Matched(pos, value) => Matched(pos, value),
Failed => $next
}
})
}
}
if exprs.len() > 0 {
write_choice(ctxt, exprs.as_slice(), result_used)
} else {
quote_expr!(ctxt, Matched(pos, ()))
}
}
OptionalExpr(box ref e) => {
let optional_res = compile_expr(ctxt, e, result_used);
quote_expr!(ctxt, match $optional_res {
Matched(newpos, value) => { Matched(newpos, Some(value)) },
Failed => { Matched(pos, None) },
})
}
Repeat(box ref e, min, max, ref sep) => {
let inner = compile_expr(ctxt, e, result_used);
let match_sep = match *sep {
Some(box ref sep) => {
let sep_inner = compile_expr(ctxt, sep, false);
quote_tokens!(ctxt,
let pos = if repeat_value.len() > 0 {
let sep_res = $sep_inner;
match sep_res {
Matched(newpos, _) => { newpos },
Failed => break,
}
} else { pos };
)
}
None => vec!()
};
let result = if result_used {
quote_expr!(ctxt, repeat_value)
} else {
quote_expr!(ctxt, ())
};
let (repeat_vec, repeat_step) =
if result_used || min > 0 || max.is_some() || sep.is_some() {
(quote_tokens!(ctxt, let mut repeat_value = vec!();),
quote_tokens!(ctxt, repeat_value.push(value);))
} else {
(vec!(), vec!())
};
let max_check = match max {
None => vec!(),
Some(max) => quote_tokens!(ctxt,
if repeat_value.len() >= $max { break }
)
};
let result_check = if min > 0 {
quote_expr!(ctxt,
if repeat_value.len() >= $min {
Matched(repeat_pos, $result)
} else {
Failed
}
)
} else {
quote_expr!(ctxt, Matched(repeat_pos, $result))
};
quote_expr!(ctxt, {
let mut repeat_pos = pos;
$repeat_vec
loop {
let pos = repeat_pos;
$match_sep
$max_check
let step_res = $inner;
match step_res {
Matched(newpos, value) => {
repeat_pos = newpos;
$repeat_step
},
Failed => {
break;
}
}
}
$result_check
})
}
PosAssertExpr(box ref e) => {
let assert_res = compile_expr(ctxt, e, false);
quote_expr!(ctxt, {
let assert_res = $assert_res;
match assert_res {
Matched(..) => Matched(pos, ()),
Failed => Failed,
}
})
}
NegAssertExpr(box ref e) => {
let assert_res = compile_expr(ctxt, e, false);
quote_expr!(ctxt, {
let assert_res = $assert_res;
match assert_res {
Failed => Matched(pos, ()),
Matched(..) => Failed,
}
})
}
ActionExpr(ref exprs, ref code) => {
fn write_seq(ctxt: &rustast::ExtCtxt, exprs: &[TaggedExpr], code: &str) -> rustast::P<rustast::Expr> {
match exprs.head() {
Some(ref head) => {
let name = head.name.as_ref().map(|s| s.as_slice());
compile_match_and_then(ctxt, &*head.expr, name,
write_seq(ctxt, exprs.tail(), code)
)
}
None => {
let code_block = rustast::parse_block(code);
quote_expr!(ctxt, {
let match_str = input.slice(start_pos, pos);
Matched(pos, $code_block)
})
}
}
}
let body = write_seq(ctxt, exprs.as_slice(), code.as_slice());
quote_expr!(ctxt, {
let start_pos = pos;
$body
})
}
}
}
| Rule | identifier_name |
translate.rs | use rustast;
use rustast::DUMMY_SP;
use rustast::AstBuilder;
pub use self::RustUse::*;
pub use self::Expr::*;
pub struct Grammar {
pub imports: Vec<RustUse>,
pub rules: Vec<Rule>,
}
#[deriving(Clone)]
pub enum RustUse {
RustUseSimple(String),
RustUseGlob(String),
RustUseList(String, Vec<String>),
}
pub struct Rule {
pub name: String,
pub expr: Box<Expr>,
pub ret_type: String,
pub exported: bool,
}
pub struct CharSetCase {
pub start: char,
pub end: char
}
pub struct TaggedExpr {
pub name: Option<String>,
pub expr: Box<Expr>,
}
pub enum Expr {
AnyCharExpr,
LiteralExpr(String),
CharSetExpr(bool, Vec<CharSetCase>),
RuleExpr(String),
SequenceExpr(Vec<Expr>),
ChoiceExpr(Vec<Expr>),
OptionalExpr(Box<Expr>),
Repeat(Box<Expr>, /*min*/ uint, /*max*/ Option<uint>, /*sep*/ Option<Box<Expr>>),
PosAssertExpr(Box<Expr>),
NegAssertExpr(Box<Expr>),
ActionExpr(Vec<TaggedExpr>, String),
}
pub fn compile_grammar(ctxt: &rustast::ExtCtxt, grammar: &Grammar) -> rustast::P<rustast::Mod> {
let mut imports = grammar.imports.clone();
imports.push(RustUseGlob("self::ParseResult".to_string()));
let view_items = translate_view_items(ctxt, imports.as_slice());
let items = header_items(ctxt).into_iter()
.chain(grammar.rules.iter().map(|rule| {
compile_rule(ctxt, rule)
}))
.chain(grammar.rules.iter().filter(|rule| rule.exported).map(|rule| {
compile_rule_export(ctxt, rule)
}))
.collect::<Vec<_>>();
rustast::module(view_items, items)
}
pub fn translate_view_items(ctxt: &rustast::ExtCtxt, imports: &[RustUse]) -> Vec<rustast::ViewItem> {
imports.iter().map(|i| {
match *i {
RustUseSimple(ref p) => ctxt.view_use_simple(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path(p.as_slice())),
RustUseGlob(ref p) => ctxt.view_use_glob(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path_vec(p.as_slice())),
RustUseList(ref p, ref v) => ctxt.view_use_list(DUMMY_SP, rustast::ast::Inherited, rustast::parse_path_vec(p.as_slice()),
v.iter().map(|s| rustast::str_to_ident(s.as_slice())).collect::<Vec<_>>().as_slice()
),
}
}).collect()
}
pub fn header_items(ctxt: &rustast::ExtCtxt) -> Vec<rustast::P<rustast::Item>> {
let mut items = Vec::new();
items.push(quote_item!(ctxt,
enum ParseResult<T> {
Matched(uint, T),
Failed,
}
).unwrap());
items.push(quote_item!(ctxt,
struct ParseState {
max_err_pos: uint,
expected: ::std::collections::HashSet<&'static str>,
}
).unwrap());
items.push(quote_item!(ctxt,
impl ParseState {
fn new() -> ParseState {
ParseState{ max_err_pos: 0, expected: ::std::collections::HashSet::new() }
}
fn mark_failure(&mut self, pos: uint, expected: &'static str) -> ParseResult<()> {
if pos > self.max_err_pos {
self.max_err_pos = pos;
self.expected.clear();
}
if pos == self.max_err_pos {
self.expected.insert(expected);
}
Failed
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn slice_eq(input: &str, state: &mut ParseState, pos: uint, m: &'static str) -> ParseResult<()> {
#![inline]
#![allow(dead_code)]
let l = m.len();
if input.len() >= pos + l && input.as_bytes().slice(pos, pos+l) == m.as_bytes() {
Matched(pos+l, ())
} else {
state.mark_failure(pos, m)
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn any_char(input: &str, state: &mut ParseState, pos: uint) -> ParseResult<()> {
#![inline]
#![allow(dead_code)]
if input.len() > pos {
Matched(input.char_range_at(pos).next, ())
} else {
state.mark_failure(pos, "<character>")
}
}
).unwrap());
items.push(quote_item!(ctxt,
fn pos_to_line(input: &str, pos: uint) -> (uint, uint) {
let mut remaining = pos;
let mut lineno: uint = 1;
for line in input.lines() {
let line_length = line.len() + 1;
if remaining < line_length {
return (lineno, remaining + 1);
}
remaining -= line_length;
lineno += 1;
}
return (lineno, remaining + 1);
}
).unwrap());
items
}
fn compile_rule(ctxt: &rustast::ExtCtxt, rule: &Rule) -> rustast::P<rustast::Item> |
fn compile_rule_export(ctxt: &rustast::ExtCtxt, rule: &Rule) -> rustast::P<rustast::Item> {
let name = rustast::str_to_ident(rule.name.as_slice());
let ret = rustast::parse_type(rule.ret_type.as_slice());
let parse_fn = rustast::str_to_ident(format!("parse_{}", rule.name).as_slice());
(quote_item!(ctxt,
pub fn $name<'input>(input: &'input str) -> Result<$ret, String> {
let mut state = ParseState::new();
match $parse_fn(input, &mut state, 0) {
Matched(pos, value) => {
if pos == input.len() {
return Ok(value)
}
}
_ => {}
}
let expected = state.expected.to_string().escape_default();
Err(format!("Error at {}: Expected {}", pos_to_line(input, state.max_err_pos), expected))
}
)).unwrap()
}
fn compile_match_and_then(ctxt: &rustast::ExtCtxt, e: &Expr, value_name: Option<&str>, then: rustast::P<rustast::Expr>) -> rustast::P<rustast::Expr> {
let seq_res = compile_expr(ctxt, e, value_name.is_some());
let name_pat = match value_name {
Some(name) => rustast::str_to_ident(name),
None => rustast::str_to_ident("_")
};
quote_expr!(ctxt, {
let seq_res = $seq_res;
match seq_res {
Matched(pos, $name_pat) => { $then }
Failed => Failed,
}
})
}
fn cond_swap<T>(swap: bool, tup: (T, T)) -> (T, T) {
let (a, b) = tup;
if swap {
(b, a)
} else {
(a, b)
}
}
fn format_char_set(cases: &[CharSetCase]) -> String {
let mut r = "[".into_string();
for &CharSetCase{start, end} in cases.iter() {
r.push(start);
if start != end {
r.push('-');
r.push(end);
}
}
r.push(']');
r
}
#[allow(unused_imports)] // quote_tokens! imports things
fn compile_expr(ctxt: &rustast::ExtCtxt, e: &Expr, result_used: bool) -> rustast::P<rustast::Expr> {
match *e {
AnyCharExpr => {
quote_expr!(ctxt, any_char(input, state, pos))
}
LiteralExpr(ref s) => {
let sl = s.as_slice();
quote_expr!(ctxt, slice_eq(input, state, pos, $sl))
}
CharSetExpr(invert, ref cases) => {
let expected_set = format_char_set(cases.as_slice());
let expected_str = expected_set.as_slice();
let (in_set, not_in_set) = cond_swap(invert, (
quote_expr!(ctxt, Matched(next, ())),
quote_expr!(ctxt, state.mark_failure(pos, $expected_str)),
));
let m = ctxt.expr_match(DUMMY_SP, quote_expr!(ctxt, ch), vec!(
ctxt.arm(DUMMY_SP, cases.iter().map(|case| {
if case.start == case.end {
ctxt.pat_lit(DUMMY_SP, ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.start)))
} else {
ctxt.pat(DUMMY_SP, rustast::ast::PatRange(
ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.start)),
ctxt.expr_lit(DUMMY_SP, rustast::ast::LitChar(case.end))
))
}
}).collect::<Vec<_>>(), in_set),
ctxt.arm(DUMMY_SP, vec!(ctxt.pat_wild(DUMMY_SP)), not_in_set)
));
quote_expr!(ctxt, if input.len() > pos {
let ::std::str::CharRange {ch, next} = input.char_range_at(pos);
$m
} else {
state.mark_failure(pos, $expected_str)
})
}
RuleExpr(ref rule_name) => {
let func = rustast::str_to_ident(format!("parse_{}", *rule_name).as_slice());
quote_expr!(ctxt, $func(input, state, pos))
}
SequenceExpr(ref exprs) => {
fn write_seq(ctxt: &rustast::ExtCtxt, exprs: &[Expr]) -> rustast::P<rustast::Expr> {
if exprs.len() == 1 {
compile_expr(ctxt, &exprs[0], false)
} else {
compile_match_and_then(ctxt, &exprs[0], None, write_seq(ctxt, exprs.tail()))
}
}
if exprs.len() > 0 {
write_seq(ctxt, exprs.as_slice())
} else {
quote_expr!(ctxt, Matched(pos, ()))
}
}
ChoiceExpr(ref exprs) => {
fn write_choice(ctxt: &rustast::ExtCtxt, exprs: &[Expr], result_used: bool) -> rustast::P<rustast::Expr> {
if exprs.len() == 1 {
compile_expr(ctxt, &exprs[0], result_used)
} else {
let choice_res = compile_expr(ctxt, &exprs[0], result_used);
let next = write_choice(ctxt, exprs.tail(), result_used);
quote_expr!(ctxt, {
let choice_res = $choice_res;
match choice_res {
Matched(pos, value) => Matched(pos, value),
Failed => $next
}
})
}
}
if exprs.len() > 0 {
write_choice(ctxt, exprs.as_slice(), result_used)
} else {
quote_expr!(ctxt, Matched(pos, ()))
}
}
OptionalExpr(box ref e) => {
let optional_res = compile_expr(ctxt, e, result_used);
quote_expr!(ctxt, match $optional_res {
Matched(newpos, value) => { Matched(newpos, Some(value)) },
Failed => { Matched(pos, None) },
})
}
Repeat(box ref e, min, max, ref sep) => {
let inner = compile_expr(ctxt, e, result_used);
let match_sep = match *sep {
Some(box ref sep) => {
let sep_inner = compile_expr(ctxt, sep, false);
quote_tokens!(ctxt,
let pos = if repeat_value.len() > 0 {
let sep_res = $sep_inner;
match sep_res {
Matched(newpos, _) => { newpos },
Failed => break,
}
} else { pos };
)
}
None => vec!()
};
let result = if result_used {
quote_expr!(ctxt, repeat_value)
} else {
quote_expr!(ctxt, ())
};
let (repeat_vec, repeat_step) =
if result_used || min > 0 || max.is_some() || sep.is_some() {
(quote_tokens!(ctxt, let mut repeat_value = vec!();),
quote_tokens!(ctxt, repeat_value.push(value);))
} else {
(vec!(), vec!())
};
let max_check = match max {
None => vec!(),
Some(max) => quote_tokens!(ctxt,
if repeat_value.len() >= $max { break }
)
};
let result_check = if min > 0 {
quote_expr!(ctxt,
if repeat_value.len() >= $min {
Matched(repeat_pos, $result)
} else {
Failed
}
)
} else {
quote_expr!(ctxt, Matched(repeat_pos, $result))
};
quote_expr!(ctxt, {
let mut repeat_pos = pos;
$repeat_vec
loop {
let pos = repeat_pos;
$match_sep
$max_check
let step_res = $inner;
match step_res {
Matched(newpos, value) => {
repeat_pos = newpos;
$repeat_step
},
Failed => {
break;
}
}
}
$result_check
})
}
PosAssertExpr(box ref e) => {
let assert_res = compile_expr(ctxt, e, false);
quote_expr!(ctxt, {
let assert_res = $assert_res;
match assert_res {
Matched(..) => Matched(pos, ()),
Failed => Failed,
}
})
}
NegAssertExpr(box ref e) => {
let assert_res = compile_expr(ctxt, e, false);
quote_expr!(ctxt, {
let assert_res = $assert_res;
match assert_res {
Failed => Matched(pos, ()),
Matched(..) => Failed,
}
})
}
ActionExpr(ref exprs, ref code) => {
fn write_seq(ctxt: &rustast::ExtCtxt, exprs: &[TaggedExpr], code: &str) -> rustast::P<rustast::Expr> {
match exprs.head() {
Some(ref head) => {
let name = head.name.as_ref().map(|s| s.as_slice());
compile_match_and_then(ctxt, &*head.expr, name,
write_seq(ctxt, exprs.tail(), code)
)
}
None => {
let code_block = rustast::parse_block(code);
quote_expr!(ctxt, {
let match_str = input.slice(start_pos, pos);
Matched(pos, $code_block)
})
}
}
}
let body = write_seq(ctxt, exprs.as_slice(), code.as_slice());
quote_expr!(ctxt, {
let start_pos = pos;
$body
})
}
}
}
| {
let name = rustast::str_to_ident(format!("parse_{}", rule.name).as_slice());
let ret = rustast::parse_type(rule.ret_type.as_slice());
let body = compile_expr(ctxt, &*rule.expr, rule.ret_type.as_slice() != "()");
(quote_item!(ctxt,
fn $name<'input>(input: &'input str, state: &mut ParseState, pos: uint) -> ParseResult<$ret> {
$body
}
)).unwrap()
} | identifier_body |
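// Usage sketch (assumed, not taken from a real grammar): a rule exported
// through compile_rule_export above becomes a public function over the whole
// input. ParseState::mark_failure records the farthest failure position, so
// the error message points at where parsing got stuck, not where backtracking
// started. `digits` is a hypothetical exported rule name.
fn demo() {
    match digits("123x") {
        Ok(value) => println!("parsed: {}", value),
        // pos_to_line converts max_err_pos into a (line, column) pair,
        // e.g. "Error at (1, 4): Expected [0-9]"
        Err(msg) => println!("{}", msg),
    }
}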
lib.rs | use std::fs ;
use std::collections::HashMap;
use std::collections::HashSet;
use std::iter::FromIterator;
use std::iter::Iterator;
use ndarray::Array2;
use rayon;
use rayon::prelude::*;
use flate2::read::GzDecoder;
use std::io::prelude::*;
use glob::glob;
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
#[derive(Debug)]
struct ClusterResults {
barcodes:Vec<i64>,
labels: Vec<i64>,
barcode_set:HashSet<i64>,
grouped_barcodes: HashMap<i64, HashSet<i64>>,
h_tot: f64,
exp_name:String
}
#[pyclass]
struct ExperimentResults{
#[pyo3(get)]
exp_param :String,
#[pyo3(get)]
cluster_ids : Vec<i64>,
#[pyo3(get)]
stability_scores: Vec<f64>,
#[pyo3(get)]
purity_scores:Vec<f64>
}
impl ExperimentResults{
fn pprint(&self){
for i in 0..self.cluster_ids.len(){
println!("{},{},{}",&self.cluster_ids[i], &self.stability_scores[i],&self.purity_scores[i] )
}
}
fn write_csv(&self, outpath:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outfile = format!("{}/{}", outpath, self.exp_param);
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
fn write_csv_simple(&self, outfile:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
}
fn entropy(group_map: &HashMap<i64, HashSet<i64>>, labels:&Vec<i64> ) -> f64{
let n = labels.len() as f64;
let res: f64 = group_map.values().map(|i|{
let p = i.len() as f64 /n;
p * p.ln()
}).sum();
return -res
}
impl ClusterResults{
fn new(barcodes:Vec<i64>, labels: Vec<i64>, exp_name: String) -> ClusterResults{
let barcode_set: HashSet<i64> = HashSet::from_iter(barcodes.clone());
let mut grouped_barcodes:HashMap<i64, HashSet<i64>> = HashMap::new();
let mut old_label = &labels[0];
let mut current_label = &labels[0];// declare out here so we can add the last set back in
let mut current_set: HashSet<i64> = HashSet::new();
for i in 0..barcodes.len(){
current_label = &labels[i];
let current_barcode = &barcodes[i];
if current_label == old_label{
current_set.insert(current_barcode.clone());
} else { // reached a new cluster
let dup_check = grouped_barcodes.insert(old_label.clone(), current_set);
if !dup_check.is_none() { // HashMap.insert returns None when a new key is added
panic!("A duplicate key was added when making a ClusterResults; input data is not sorted by label")
}
let ns: HashSet<i64> = HashSet::new();
current_set = ns;
current_set.insert(current_barcode.clone());
old_label = current_label;
}
}
grouped_barcodes.insert(current_label.clone(), current_set);
let h_tot = entropy(&grouped_barcodes, &labels);
ClusterResults{barcodes, labels, barcode_set, grouped_barcodes, h_tot, exp_name}
}
fn head(&self){
println!("{:?}", &self.barcodes[0..5]);
println!("{:?}", &self.labels[0..5])
}
}
fn stability_k(ref_bc: &HashSet<i64>, query:&ClusterResults) -> f64{
let intersect: HashSet<i64> = ref_bc.intersection(&query.barcode_set).cloned().collect::<HashSet<i64>>();
if intersect.len() == 0{
return 0.0
} else{
let mut new_bc :Vec<i64> = vec![-1; intersect.len()];
let mut new_labels : Vec<i64> = vec![-1; intersect.len()];
let mut j=0;
for i in 0..query.barcodes.len(){
if intersect.contains(&query.barcodes[i]){
new_bc[j] = query.barcodes[i].clone();
new_labels[j] = query.labels[i].clone();
j+=1;
}
}
let new_clu = ClusterResults::new(new_bc, new_labels, String::new());//use an empty string for these guys, as they get deleted later | let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
fn read_cluster_results( file: &str) ->ClusterResults {
let mut handle = fs::File::open(file).expect("Bad file input");
let mut buffer = Vec::new();
handle.read_to_end(&mut buffer).expect("couldn't read file");
let file_string = decode_reader(buffer).expect("bad gzip");
let file_string: Vec<&str> = file_string.lines().collect();
let mut barcodes: Vec<i64> = vec![-1; file_string.len()];
let mut labels: Vec<i64> = vec![-1; file_string.len()];
for i in 0..file_string.len(){
let line_split : Vec<&str> = file_string[i].split(",").collect();
barcodes[i] = String::from(line_split[0]).parse::<i64>().unwrap();
labels[i] = String::from(line_split[1]).parse::<i64>().unwrap();
}
let exp_name = file.split("/").last().unwrap() ;
ClusterResults::new(barcodes,labels, String::from(exp_name))
}
fn calculate_metrics(ref_cluster:&ClusterResults, query_clusters: &Vec<&ClusterResults>) -> ExperimentResults{
let mut stability_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
let mut purity_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
for (i, cluster) in ref_cluster.grouped_barcodes.values().enumerate(){
for (j, experiment) in query_clusters.iter().enumerate() {
let mut stab = stability_k(&cluster, &experiment) / experiment.h_tot ;
if stab.is_nan() { // can't compare a naturally occurring NAN to f64::NAN
stab = 1.0;
}
stability_results[[i, j]]= stab ;
purity_results[[i,j]] = purity_k(&cluster, &experiment.grouped_barcodes)
}
}
let stability_scores = stability_results.rows().into_iter().map(|x| 1.0 - x.mean().unwrap()).collect::<Vec<f64>>();
let purity_scores = purity_results.rows().into_iter().map( |x| {
let mut v = x.to_vec();
v.retain(|x| !x.is_nan()); // purity_k explicitly returns f64::NAN when a cluster has no overlap; `*x != f64::NAN` is always true, so filter with is_nan() instead
return vmean(v)
} ).collect::<Vec<f64>>();
let cluster_ids: Vec<i64> = ref_cluster.grouped_barcodes.keys().cloned().collect::<Vec<i64>>() ;
let exp_param = ref_cluster.exp_name.clone();
return ExperimentResults{ exp_param,cluster_ids, stability_scores, purity_scores }
}
fn vmean(v:Vec<f64>) -> f64{
return v.iter().sum::<f64>() / v.len() as f64
}
fn purity_k(ref_bc_set: &HashSet<i64>, query_map: &HashMap<i64, HashSet<i64>>) -> f64{
let mut max_overlap = 0;
let mut max_overlap_key:i64 = -100000000;
for query_key in query_map.keys(){
let q_cluster_set = query_map.get(query_key).unwrap();
let overlap = ref_bc_set.intersection(q_cluster_set).count();
if overlap > max_overlap{
max_overlap = overlap;
max_overlap_key = *query_key;
}
}
if max_overlap_key == -100000000{
return f64::NAN;
} else{
return max_overlap as f64 / query_map.get(&max_overlap_key).unwrap().len() as f64
}
}
fn run_pairwise_calculation_threaded(experiment_list:&Vec<&ClusterResults>, nthreads:usize) ->Vec<ExperimentResults>{
let pool = rayon::ThreadPoolBuilder::new().num_threads(nthreads).build().unwrap();
let dummy_array: Vec<usize> = (0..experiment_list.len()).collect();
let res: Vec<ExperimentResults> = pool.install(|| dummy_array.into_par_iter()
.map(|i:usize| {
let ref_clust = experiment_list[i];
let mut query_clusts = experiment_list.clone();
query_clusts.remove(i);
return calculate_metrics(ref_clust, &query_clusts)
})
.collect()
);
return res
}
#[pyfunction]
fn pairwise_metric_calculation_fromdisk(file_glob: &str, nthreads:usize) -> Vec<ExperimentResults> {
let test_clusters_objs:Vec<ClusterResults> = glob(file_glob)
.expect("Failed to read glob pattern")
.map(|x|{let file = String::from(x.unwrap().to_str().expect("Failed to unwrap filename"));
return read_cluster_results(&file)}
)
.collect();
if test_clusters_objs.len() == 0{
panic!("The provided glob string did not match any files!!")
}
let test_cluster_refs: Vec<&ClusterResults> = test_clusters_objs.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&test_cluster_refs, nthreads);
return c_res
}
#[pyfunction]
fn pairwise_metric_calculation_frommem(mut cluster_dfs: Vec<HashMap<String, Vec<i64>>>, exp_names:Vec<String>, nthreads:usize) -> Vec<ExperimentResults> {
let clusters_objs_owned = cluster_dfs.into_iter().enumerate().map(|(i, mut x)|{
ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
exp_names[i].clone() )}).collect::<Vec<ClusterResults>>();
let clusters_objs_refs: Vec<&ClusterResults> = clusters_objs_owned.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&clusters_objs_refs, nthreads);
return c_res
}
#[pyfunction]
fn metric_calculation_fromdf(mut ref_df: HashMap<String, Vec<i64>>, query_dfs:Vec<HashMap<String, Vec<i64>>>, exp_name: String)->ExperimentResults{
let ref_cluster = ClusterResults::new(ref_df.remove(&String::from("Barcode")).unwrap(),
ref_df.remove(&String::from("labels")).unwrap(),
exp_name);
let query_clusters_owned = query_dfs.into_iter().map(|mut x|ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
String::from("perturbation") )
).collect::<Vec<ClusterResults>>();
let query_clusters_refs = query_clusters_owned.iter().collect::<Vec<&ClusterResults>>();
let res = calculate_metrics(&ref_cluster, &query_clusters_refs);
return res
}
// fn calc_metrics(module: &PyModule) -> PyResult<()> {
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
// module.add_function(wrap_pyfunction!(oneway_metric_calculation, module)?)?;
// module.add_class::<ExperimentResults>()?;
// Ok(())
// }
#[pymodule]
fn _calc_metrics(py: Python, module: &PyModule) -> PyResult<()> {
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
module.add_function(wrap_pyfunction!(metric_calculation_fromdf, module)?)?;
module.add_class::<ExperimentResults>()?;
Ok(())
}
#[test]
fn check_reader(){
let obj = read_cluster_results("test_data/exp-0_resolution-0.4_knn-15_.csv.gz");
assert_eq!(obj.barcodes.len(), obj.labels.len());
} | return entropy(&new_clu.grouped_barcodes, &new_clu.labels);
}
}
fn decode_reader(bytes: Vec<u8>) -> std::io::Result<String> {
let mut gz = GzDecoder::new(&bytes[..]); | random_line_split |
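// Worked example (sketch, not part of the module): entropy over grouped
// barcodes. With two clusters of size 2 out of n = 4 labels, each p = 0.5, so
// H = -(0.5*ln(0.5) + 0.5*ln(0.5)) = ln(2) ≈ 0.693.
#[test]
fn entropy_example() {
    let mut groups: HashMap<i64, HashSet<i64>> = HashMap::new();
    groups.insert(0, HashSet::from_iter(vec![10, 11]));
    groups.insert(1, HashSet::from_iter(vec![12, 13]));
    let labels = vec![0, 0, 1, 1];
    assert!((entropy(&groups, &labels) - (2f64).ln()).abs() < 1e-12);
}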
lib.rs | use std::fs ;
use std::collections::HashMap;
use std::collections::HashSet;
use std::iter::FromIterator;
use std::iter::Iterator;
use ndarray::Array2;
use rayon;
use rayon::prelude::*;
use flate2::read::GzDecoder;
use std::io::prelude::*;
use glob::glob;
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
#[derive(Debug)]
struct ClusterResults {
barcodes:Vec<i64>,
labels: Vec<i64>,
barcode_set:HashSet<i64>,
grouped_barcodes: HashMap<i64, HashSet<i64>>,
h_tot: f64,
exp_name:String
}
#[pyclass]
struct ExperimentResults{
#[pyo3(get)]
exp_param :String,
#[pyo3(get)]
cluster_ids : Vec<i64>,
#[pyo3(get)]
stability_scores: Vec<f64>,
#[pyo3(get)]
purity_scores:Vec<f64>
}
impl ExperimentResults{
fn pprint(&self){
for i in 0..self.cluster_ids.len(){
println!("{},{},{}",&self.cluster_ids[i], &self.stability_scores[i],&self.purity_scores[i] )
}
}
fn write_csv(&self, outpath:&str)->std::io::Result<()> |
fn write_csv_simple(&self, outfile:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
}
fn entropy(group_map: &HashMap<i64, HashSet<i64>>, labels:&Vec<i64> ) -> f64{
let n = labels.len() as f64;
let res: f64 = group_map.values().map(|i|{
let p = i.len() as f64 /n;
p * p.ln()
}).sum();
return -res
}
impl ClusterResults{
fn new(barcodes:Vec<i64>, labels: Vec<i64>, exp_name: String) -> ClusterResults{
let barcode_set: HashSet<i64> = HashSet::from_iter(barcodes.clone());
let mut grouped_barcodes:HashMap<i64, HashSet<i64>> = HashMap::new();
let mut old_label = &labels[0];
let mut current_label = &labels[0];// declare out here so we can add the last set back in
let mut current_set: HashSet<i64> = HashSet::new();
for i in 0..barcodes.len(){
current_label = &labels[i];
let current_barcode = &barcodes[i];
if current_label == old_label{
current_set.insert(current_barcode.clone());
} else { // reached a new cluster
let dup_check = grouped_barcodes.insert(old_label.clone(), current_set);
if !dup_check.is_none() { // HashMap.insert returns None when a new key is added
panic!("A duplicate key was added when making a ClusterResults; input data is not sorted by label")
}
let ns: HashSet<i64> = HashSet::new();
current_set = ns;
current_set.insert(current_barcode.clone());
old_label = current_label;
}
}
grouped_barcodes.insert(current_label.clone(), current_set);
let h_tot = entropy(&grouped_barcodes, &labels);
ClusterResults{barcodes, labels, barcode_set, grouped_barcodes, h_tot, exp_name}
}
fn head(&self){
println!("{:?}", &self.barcodes[0..5]);
println!("{:?}", &self.labels[0..5])
}
}
fn stability_k(ref_bc: &HashSet<i64>, query:&ClusterResults) -> f64{
let intersect: HashSet<i64> = ref_bc.intersection(&query.barcode_set).cloned().collect::<HashSet<i64>>();
if intersect.len() == 0{
return 0.0
} else{
let mut new_bc :Vec<i64> = vec![-1; intersect.len()];
let mut new_labels : Vec<i64> = vec![-1; intersect.len()];
let mut j=0;
for i in 0..query.barcodes.len(){
if intersect.contains(&query.barcodes[i]){
new_bc[j] = query.barcodes[i].clone();
new_labels[j] = query.labels[i].clone();
j+=1;
}
}
let new_clu = ClusterResults::new(new_bc, new_labels, String::new());//use an empty string for these guys, as they get deleted later
return entropy(&new_clu.grouped_barcodes, &new_clu.labels);
}
}
fn decode_reader(bytes: Vec<u8>) -> std::io::Result<String> {
let mut gz = GzDecoder::new(&bytes[..]);
let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
fn read_cluster_results( file: &str) ->ClusterResults {
let mut handle = fs::File::open(file).expect("Bad file input");
let mut buffer = Vec::new();
handle.read_to_end(&mut buffer).expect("couldn't read file");
let file_string = decode_reader(buffer).expect("bad gzip");
let file_string: Vec<&str> = file_string.lines().collect();
let mut barcodes: Vec<i64> = vec![-1; file_string.len()];
let mut labels: Vec<i64> = vec![-1; file_string.len()];
for i in 0..file_string.len(){
let line_split : Vec<&str> = file_string[i].split(",").collect();
barcodes[i] = String::from(line_split[0]).parse::<i64>().unwrap();
labels[i] = String::from(line_split[1]).parse::<i64>().unwrap();
}
let exp_name = file.split("/").last().unwrap() ;
ClusterResults::new(barcodes,labels, String::from(exp_name))
}
fn calculate_metrics(ref_cluster:&ClusterResults, query_clusters: &Vec<&ClusterResults>) -> ExperimentResults{
let mut stability_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
let mut purity_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
for (i, cluster) in ref_cluster.grouped_barcodes.values().enumerate(){
for (j, experiment) in query_clusters.iter().enumerate() {
let mut stab = stability_k(&cluster, &experiment) / experiment.h_tot ;
if stab.is_nan() { // can't compare a naturally occurring NAN to f64::NAN
stab = 1.0;
}
stability_results[[i, j]]= stab ;
purity_results[[i,j]] = purity_k(&cluster, &experiment.grouped_barcodes)
}
}
let stability_scores = stability_results.rows().into_iter().map(|x| 1.0 - x.mean().unwrap()).collect::<Vec<f64>>();
let purity_scores = purity_results.rows().into_iter().map( |x| {
let mut v = x.to_vec();
v.retain(|x| !x.is_nan()); // purity_k explicitly returns f64::NAN when a cluster has no overlap; `*x != f64::NAN` is always true, so filter with is_nan() instead
return vmean(v)
} ).collect::<Vec<f64>>();
let cluster_ids: Vec<i64> = ref_cluster.grouped_barcodes.keys().cloned().collect::<Vec<i64>>() ;
let exp_param = ref_cluster.exp_name.clone();
return ExperimentResults{ exp_param,cluster_ids, stability_scores, purity_scores }
}
fn vmean(v:Vec<f64>) -> f64{
return v.iter().sum::<f64>() / v.len() as f64
}
fn purity_k(ref_bc_set: &HashSet<i64>, query_map: &HashMap<i64, HashSet<i64>>) -> f64{
let mut max_overlap = 0;
let mut max_overlap_key:i64 = -100000000;
for query_key in query_map.keys(){
let q_cluster_set = query_map.get(query_key).unwrap();
let overlap = ref_bc_set.intersection(q_cluster_set).count();
if overlap > max_overlap{
max_overlap = overlap;
max_overlap_key = *query_key;
}
}
if max_overlap_key == -100000000{
return f64::NAN;
} else{
return max_overlap as f64 / query_map.get(&max_overlap_key).unwrap().len() as f64
}
}
fn run_pairwise_calculation_threaded(experiment_list:&Vec<&ClusterResults>, nthreads:usize) ->Vec<ExperimentResults>{
let pool = rayon::ThreadPoolBuilder::new().num_threads(nthreads).build().unwrap();
let dummy_array: Vec<usize> = (0..experiment_list.len()).collect();
let res: Vec<ExperimentResults> = pool.install(|| dummy_array.into_par_iter()
.map(|i:usize| {
let ref_clust = experiment_list[i];
let mut query_clusts = experiment_list.clone();
query_clusts.remove(i);
return calculate_metrics(ref_clust, &query_clusts)
})
.collect()
);
return res
}
#[pyfunction]
fn pairwise_metric_calculation_fromdisk(file_glob: &str, nthreads:usize) -> Vec<ExperimentResults> {
let test_clusters_objs:Vec<ClusterResults> = glob(file_glob)
.expect("Failed to read glob pattern")
.map(|x|{let file = String::from(x.unwrap().to_str().expect("Failed to unwrap filename"));
return read_cluster_results(&file)}
)
.collect();
if test_clusters_objs.len() == 0{
panic!("The provided glob string did not match any files!!")
}
let test_cluster_refs: Vec<&ClusterResults> = test_clusters_objs.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&test_cluster_refs, nthreads);
return c_res
}
#[pyfunction]
fn pairwise_metric_calculation_frommem(mut cluster_dfs: Vec<HashMap<String, Vec<i64>>>, exp_names:Vec<String>, nthreads:usize) -> Vec<ExperimentResults> {
let clusters_objs_owned = cluster_dfs.into_iter().enumerate().map(|(i, mut x)|{
ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
exp_names[i].clone() )}).collect::<Vec<ClusterResults>>();
let clusters_objs_refs: Vec<&ClusterResults> = clusters_objs_owned.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&clusters_objs_refs, nthreads);
return c_res
}
#[pyfunction]
fn metric_calculation_fromdf(mut ref_df: HashMap<String, Vec<i64>>, query_dfs:Vec<HashMap<String, Vec<i64>>>, exp_name: String)->ExperimentResults{
let ref_cluster = ClusterResults::new(ref_df.remove(&String::from("Barcode")).unwrap(),
ref_df.remove(&String::from("labels")).unwrap(),
exp_name);
let query_clusters_owned = query_dfs.into_iter().map(|mut x|ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
String::from("perturbation") )
).collect::<Vec<ClusterResults>>();
let query_clusters_refs = query_clusters_owned.iter().collect::<Vec<&ClusterResults>>();
let res = calculate_metrics(&ref_cluster, &query_clusters_refs);
return res
}
// fn calc_metrics(module: &PyModule) -> PyResult<()> {
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
// module.add_function(wrap_pyfunction!(oneway_metric_calculation, module)?)?;
// module.add_class::<ExperimentResults>()?;
// Ok(())
// }
#[pymodule]
fn _calc_metrics(py: Python, module: &PyModule) -> PyResult<()> {
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
module.add_function(wrap_pyfunction!(metric_calculation_fromdf, module)?)?;
module.add_class::<ExperimentResults>()?;
Ok(())
}
#[test]
fn check_reader(){
let obj = read_cluster_results("test_data/exp-0_resolution-0.4_knn-15_.csv.gz");
assert_eq!(obj.barcodes.len(), obj.labels.len());
}
| {
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outfile = format!("{}/{}", outpath, self.exp_param);
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
} | identifier_body |
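// Sketch: how purity_k scores a reference cluster against a query clustering.
// The reference set {1, 2, 3} overlaps query cluster 7 (of size 4) in three
// barcodes, so purity = 3 / 4 = 0.75; with no overlap at all, the sentinel
// branch in purity_k returns f64::NAN instead.
#[test]
fn purity_example() {
    let ref_set: HashSet<i64> = HashSet::from_iter(vec![1, 2, 3]);
    let mut query: HashMap<i64, HashSet<i64>> = HashMap::new();
    query.insert(7, HashSet::from_iter(vec![1, 2, 3, 9]));
    query.insert(8, HashSet::from_iter(vec![4, 5]));
    assert_eq!(purity_k(&ref_set, &query), 0.75);
}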
lib.rs | use std::fs ;
use std::collections::HashMap;
use std::collections::HashSet;
use std::iter::FromIterator;
use std::iter::Iterator;
use ndarray::Array2;
use rayon;
use rayon::prelude::*;
use flate2::read::GzDecoder;
use std::io::prelude::*;
use glob::glob;
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
#[derive(Debug)]
struct ClusterResults {
barcodes:Vec<i64>,
labels: Vec<i64>,
barcode_set:HashSet<i64>,
grouped_barcodes: HashMap<i64, HashSet<i64>>,
h_tot: f64,
exp_name:String
}
#[pyclass]
struct ExperimentResults{
#[pyo3(get)]
exp_param :String,
#[pyo3(get)]
cluster_ids : Vec<i64>,
#[pyo3(get)]
stability_scores: Vec<f64>,
#[pyo3(get)]
purity_scores:Vec<f64>
}
impl ExperimentResults{
fn pprint(&self){
for i in 0..self.cluster_ids.len(){
println!("{},{},{}",&self.cluster_ids[i], &self.stability_scores[i],&self.purity_scores[i] )
}
}
fn write_csv(&self, outpath:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outfile = format!("{}/{}", outpath, self.exp_param);
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
fn write_csv_simple(&self, outfile:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
}
fn entropy(group_map: &HashMap<i64, HashSet<i64>>, labels:&Vec<i64> ) -> f64{
let n = labels.len() as f64;
let res: f64 = group_map.values().map(|i|{
let p = i.len() as f64 /n;
p * p.ln()
}).sum();
return -res
}
impl ClusterResults{
fn new(barcodes:Vec<i64>, labels: Vec<i64>, exp_name: String) -> ClusterResults{
let barcode_set: HashSet<i64> = HashSet::from_iter(barcodes.clone());
let mut grouped_barcodes:HashMap<i64, HashSet<i64>> = HashMap::new();
let mut old_label = &labels[0];
let mut current_label = &labels[0];// declare out here so we can add the last set back in
let mut current_set: HashSet<i64> = HashSet::new();
for i in 0..barcodes.len(){
current_label = &labels[i];
let current_barcode = &barcodes[i];
if current_label == old_label{
current_set.insert(current_barcode.clone());
} else { // reached a new cluster
let dup_check = grouped_barcodes.insert(old_label.clone(), current_set);
if !dup_check.is_none() { // HashMap.insert returns None when a new key is added
panic!("A duplicate key was added when making a ClusterResults; input data is not sorted by label")
}
let ns: HashSet<i64> = HashSet::new();
current_set = ns;
current_set.insert(current_barcode.clone());
old_label = current_label;
}
}
grouped_barcodes.insert(current_label.clone(), current_set);
let h_tot = entropy(&grouped_barcodes, &labels);
ClusterResults{barcodes, labels, barcode_set, grouped_barcodes, h_tot, exp_name}
}
fn head(&self){
println!("{:?}", &self.barcodes[0..5]);
println!("{:?}", &self.labels[0..5])
}
}
fn stability_k(ref_bc: &HashSet<i64>, query:&ClusterResults) -> f64{
let intersect: HashSet<i64> = ref_bc.intersection(&query.barcode_set).cloned().collect::<HashSet<i64>>();
if intersect.len() == 0{
return 0.0
} else{
let mut new_bc :Vec<i64> = vec![-1; intersect.len()];
let mut new_labels : Vec<i64> = vec![-1; intersect.len()];
let mut j=0;
for i in 0..query.barcodes.len(){
if intersect.contains(&query.barcodes[i]){
new_bc[j] = query.barcodes[i].clone();
new_labels[j] = query.labels[i].clone();
j+=1;
}
}
let new_clu = ClusterResults::new(new_bc, new_labels, String::new());//use an empty string for these guys, as they get deleted later
return entropy(&new_clu.grouped_barcodes, &new_clu.labels);
}
}
fn decode_reader(bytes: Vec<u8>) -> std::io::Result<String> {
let mut gz = GzDecoder::new(&bytes[..]);
let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
fn read_cluster_results( file: &str) ->ClusterResults {
let mut handle = fs::File::open(file).expect("Bad file input");
let mut buffer = Vec::new();
handle.read_to_end(&mut buffer).expect("couldn't read file");
let file_string = decode_reader(buffer).expect("bad gzip");
let file_string: Vec<&str> = file_string.lines().collect();
let mut barcodes: Vec<i64> = vec![-1; file_string.len()];
let mut labels: Vec<i64> = vec![-1; file_string.len()];
for i in 0..file_string.len(){
let line_split : Vec<&str> = file_string[i].split(",").collect();
barcodes[i] = String::from(line_split[0]).parse::<i64>().unwrap();
labels[i] = String::from(line_split[1]).parse::<i64>().unwrap();
}
let exp_name = file.split("/").last().unwrap() ;
ClusterResults::new(barcodes,labels, String::from(exp_name))
}
fn calculate_metrics(ref_cluster:&ClusterResults, query_clusters: &Vec<&ClusterResults>) -> ExperimentResults{
let mut stability_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
let mut purity_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
for (i, cluster) in ref_cluster.grouped_barcodes.values().enumerate(){
for (j, experiment) in query_clusters.iter().enumerate() {
let mut stab = stability_k(&cluster, &experiment) / experiment.h_tot ;
if stab.is_nan() { // can't compare a naturally occurring NAN to f64::NAN
stab = 1.0;
}
stability_results[[i, j]]= stab ;
purity_results[[i,j]] = purity_k(&cluster, &experiment.grouped_barcodes)
}
}
let stability_scores = stability_results.rows().into_iter().map(|x| 1.0 - x.mean().unwrap()).collect::<Vec<f64>>();
let purity_scores = purity_results.rows().into_iter().map( |x| {
let mut v = x.to_vec();
v.retain(|x| !x.is_nan()); // purity_k explicitly returns f64::NAN when a cluster has no overlap; `*x != f64::NAN` is always true, so filter with is_nan() instead
return vmean(v)
} ).collect::<Vec<f64>>();
let cluster_ids: Vec<i64> = ref_cluster.grouped_barcodes.keys().cloned().collect::<Vec<i64>>() ;
let exp_param = ref_cluster.exp_name.clone();
return ExperimentResults{ exp_param,cluster_ids, stability_scores, purity_scores }
}
fn | (v:Vec<f64>) -> f64{
return v.iter().sum::<f64>() / v.len() as f64
}
fn purity_k(ref_bc_set: &HashSet<i64>, query_map: &HashMap<i64, HashSet<i64>>) -> f64{
let mut max_overlap = 0;
let mut max_overlap_key:i64 = -100000000;
for query_key in query_map.keys(){
let q_cluster_set = query_map.get(query_key).unwrap();
let overlap = ref_bc_set.intersection(q_cluster_set).count();
if overlap > max_overlap{
max_overlap = overlap;
max_overlap_key = *query_key;
}
}
if max_overlap_key == -100000000{
return f64::NAN;
} else{
return max_overlap as f64 / query_map.get(&max_overlap_key).unwrap().len() as f64
}
}
fn run_pairwise_calculation_threaded(experiment_list:&Vec<&ClusterResults>, nthreads:usize) ->Vec<ExperimentResults>{
let pool = rayon::ThreadPoolBuilder::new().num_threads(nthreads).build().unwrap();
let dummy_array: Vec<usize> = (0..experiment_list.len()).collect();
let res: Vec<ExperimentResults> = pool.install(|| dummy_array.into_par_iter()
.map(|i:usize| {
let ref_clust = experiment_list[i];
let mut query_clusts = experiment_list.clone();
query_clusts.remove(i);
return calculate_metrics(ref_clust, &query_clusts)
})
.collect()
);
return res
}
#[pyfunction]
fn pairwise_metric_calculation_fromdisk(file_glob: &str, nthreads:usize) -> Vec<ExperimentResults> {
let test_clusters_objs:Vec<ClusterResults> = glob(file_glob)
.expect("Failed to read glob pattern")
.map(|x|{let file = String::from(x.unwrap().to_str().expect("Failed to unwrap filename"));
return read_cluster_results(&file)}
)
.collect();
if test_clusters_objs.len() == 0{
panic!("The provided glob string did not match any files!!")
}
let test_cluster_refs: Vec<&ClusterResults> = test_clusters_objs.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&test_cluster_refs, nthreads);
return c_res
}
#[pyfunction]
fn pairwise_metric_calculation_frommem(mut cluster_dfs: Vec<HashMap<String, Vec<i64>>>, exp_names:Vec<String>, nthreads:usize) -> Vec<ExperimentResults> {
let clusters_objs_owned = cluster_dfs.into_iter().enumerate().map(|(i, mut x)|{
ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
exp_names[i].clone() )}).collect::<Vec<ClusterResults>>();
let clusters_objs_refs: Vec<&ClusterResults> = clusters_objs_owned.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&clusters_objs_refs, nthreads);
return c_res
}
#[pyfunction]
fn metric_calculation_fromdf(mut ref_df: HashMap<String, Vec<i64>>, query_dfs:Vec<HashMap<String, Vec<i64>>>, exp_name: String)->ExperimentResults{
let ref_cluster = ClusterResults::new(ref_df.remove(&String::from("Barcode")).unwrap(),
ref_df.remove(&String::from("labels")).unwrap(),
exp_name);
let query_clusters_owned = query_dfs.into_iter().map(|mut x|ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
String::from("perturbation") )
).collect::<Vec<ClusterResults>>();
let query_clusters_refs = query_clusters_owned.iter().collect::<Vec<&ClusterResults>>();
let res = calculate_metrics(&ref_cluster, &query_clusters_refs);
return res
}
// fn calc_metrics(module: &PyModule) -> PyResult<()> {
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
// module.add_function(wrap_pyfunction!(oneway_metric_calculation, module)?)?;
// module.add_class::<ExperimentResults>()?;
// Ok(())
// }
#[pymodule]
fn _calc_metrics(py: Python, module: &PyModule) -> PyResult<()> {
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
module.add_function(wrap_pyfunction!(metric_calculation_fromdf, module)?)?;
module.add_class::<ExperimentResults>()?;
Ok(())
}
#[test]
fn check_reader(){
let obj = read_cluster_results("test_data/exp-0_resolution-0.4_knn-15_.csv.gz");
assert_eq!(obj.barcodes.len(), obj.labels.len());
}
| vmean | identifier_name |
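// Sketch: ClusterResults::new groups barcodes in a single pass, so the input
// must be sorted by label; a label that reappears after a different label hits
// the duplicate-key panic in the constructor.
#[test]
fn cluster_results_example() {
    let barcodes = vec![101, 102, 103, 104];
    let labels = vec![0, 0, 1, 1]; // already sorted by label
    let res = ClusterResults::new(barcodes, labels, String::from("demo"));
    assert_eq!(res.grouped_barcodes.len(), 2);
    assert!(res.grouped_barcodes[&0].contains(&101));
}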
lib.rs | use std::fs ;
use std::collections::HashMap;
use std::collections::HashSet;
use std::iter::FromIterator;
use std::iter::Iterator;
use ndarray::Array2;
use rayon;
use rayon::prelude::*;
use flate2::read::GzDecoder;
use std::io::prelude::*;
use glob::glob;
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
#[derive(Debug)]
struct ClusterResults {
barcodes:Vec<i64>,
labels: Vec<i64>,
barcode_set:HashSet<i64>,
grouped_barcodes: HashMap<i64, HashSet<i64>>,
h_tot: f64,
exp_name:String
}
#[pyclass]
struct ExperimentResults{
#[pyo3(get)]
exp_param :String,
#[pyo3(get)]
cluster_ids : Vec<i64>,
#[pyo3(get)]
stability_scores: Vec<f64>,
#[pyo3(get)]
purity_scores:Vec<f64>
}
impl ExperimentResults{
fn pprint(&self){
for i in 0..self.cluster_ids.len(){
println!("{},{},{}",&self.cluster_ids[i], &self.stability_scores[i],&self.purity_scores[i] )
}
}
fn write_csv(&self, outpath:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outfile = format!("{}/{}", outpath, self.exp_param);
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
fn write_csv_simple(&self, outfile:&str)->std::io::Result<()>{
let mut lines: Vec<String> = vec![String::new();self.cluster_ids.len()];
for i in 0..self.cluster_ids.len(){
lines[i] = format!("{},{},{}\n",self.cluster_ids[i], self.stability_scores[i],self.purity_scores[i])
}
let outstring = lines.join("");
fs::write(outfile, outstring).unwrap();
Ok(())
}
}
fn entropy(group_map: &HashMap<i64, HashSet<i64>>, labels:&Vec<i64> ) -> f64{
let n = labels.len() as f64;
let res: f64 = group_map.values().map(|i|{
let p = i.len() as f64 /n;
p * p.ln()
}).sum();
return -res
}
impl ClusterResults{
fn new(barcodes:Vec<i64>, labels: Vec<i64>, exp_name: String) -> ClusterResults{
let barcode_set: HashSet<i64> = HashSet::from_iter(barcodes.clone());
let mut grouped_barcodes:HashMap<i64, HashSet<i64>> = HashMap::new();
let mut old_label = &labels[0];
let mut current_label = &labels[0];// declare out here so we can add the last set back in
let mut current_set: HashSet<i64> = HashSet::new();
for i in 0..barcodes.len(){
current_label = &labels[i];
let current_barcode = &barcodes[i];
if current_label == old_label{
current_set.insert(current_barcode.clone());
} else { // reached a new cluster
let dup_check = grouped_barcodes.insert(old_label.clone(), current_set);
if !dup_check.is_none() |
let ns: HashSet<i64> = HashSet::new();
current_set = ns;
current_set.insert(current_barcode.clone());
old_label = current_label;
}
}
grouped_barcodes.insert(current_label.clone(), current_set);
let h_tot = entropy(&grouped_barcodes, &labels);
ClusterResults{barcodes, labels, barcode_set, grouped_barcodes, h_tot, exp_name}
}
fn head(&self){
println!("{:?}", &self.barcodes[0..5]);
println!("{:?}", &self.labels[0..5])
}
}
fn stability_k(ref_bc: &HashSet<i64>, query:&ClusterResults) -> f64{
let intersect: HashSet<i64> = ref_bc.intersection(&query.barcode_set).cloned().collect::<HashSet<i64>>();
if intersect.len() == 0{
return 0.0
} else{
let mut new_bc :Vec<i64> = vec![-1; intersect.len()];
let mut new_labels : Vec<i64> = vec![-1; intersect.len()];
let mut j=0;
for i in 0..query.barcodes.len(){
if intersect.contains(&query.barcodes[i]){
new_bc[j] = query.barcodes[i].clone();
new_labels[j] = query.labels[i].clone();
j+=1;
}
}
let new_clu = ClusterResults::new(new_bc, new_labels, String::new());//use an empty string for these guys, as they get deleted later
return entropy(&new_clu.grouped_barcodes, &new_clu.labels);
}
}
fn decode_reader(bytes: Vec<u8>) -> std::io::Result<String> {
let mut gz = GzDecoder::new(&bytes[..]);
let mut s = String::new();
gz.read_to_string(&mut s)?;
Ok(s)
}
fn read_cluster_results( file: &str) ->ClusterResults {
let mut handle = fs::File::open(file).expect("Bad file input");
let mut buffer = Vec::new();
handle.read_to_end(&mut buffer).expect("couldn't read file");
let file_string = decode_reader(buffer).expect("bad gzip");
let file_string: Vec<&str> = file_string.lines().collect();
let mut barcodes: Vec<i64> = vec![-1; file_string.len()];
let mut labels: Vec<i64> = vec![-1; file_string.len()];
for i in 0..file_string.len(){
let line_split : Vec<&str> = file_string[i].split(",").collect();
barcodes[i] = String::from(line_split[0]).parse::<i64>().unwrap();
labels[i] = String::from(line_split[1]).parse::<i64>().unwrap();
}
let exp_name = file.split("/").last().unwrap() ;
ClusterResults::new(barcodes,labels, String::from(exp_name))
}
fn calculate_metrics(ref_cluster:&ClusterResults, query_clusters: &Vec<&ClusterResults>) -> ExperimentResults{
let mut stability_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
let mut purity_results = Array2::<f64>::zeros(( ref_cluster.grouped_barcodes.len(),query_clusters.len() ));
for (i, cluster) in ref_cluster.grouped_barcodes.values().enumerate(){
for (j, experiment) in query_clusters.iter().enumerate() {
let mut stab = stability_k(&cluster, &experiment) / experiment.h_tot ;
if stab.is_nan() { // can't compare a naturally occurring NAN to f64::NAN
stab = 1.0;
}
stability_results[[i, j]]= stab ;
purity_results[[i,j]] = purity_k(&cluster, &experiment.grouped_barcodes)
}
}
let stability_scores = stability_results.rows().into_iter().map(|x| 1.0 - x.mean().unwrap()).collect::<Vec<f64>>();
let purity_scores = purity_results.rows().into_iter().map( |x| {
let mut v = x.to_vec();
v.retain(|x| !x.is_nan()); // purity_k explicitly returns f64::NAN when a cluster has no overlap; `*x != f64::NAN` is always true, so filter with is_nan() instead
return vmean(v)
} ).collect::<Vec<f64>>();
let cluster_ids: Vec<i64> = ref_cluster.grouped_barcodes.keys().cloned().collect::<Vec<i64>>() ;
let exp_param = ref_cluster.exp_name.clone();
return ExperimentResults{ exp_param,cluster_ids, stability_scores, purity_scores }
}
fn vmean(v:Vec<f64>) -> f64{
return v.iter().sum::<f64>() / v.len() as f64
}
fn purity_k(ref_bc_set: &HashSet<i64>, query_map: &HashMap<i64, HashSet<i64>>) -> f64{
let mut max_overlap = 0;
let mut max_overlap_key:i64 = -100000000;
for query_key in query_map.keys(){
let q_cluster_set = query_map.get(query_key).unwrap();
let overlap = ref_bc_set.intersection(q_cluster_set).count();
if overlap > max_overlap{
max_overlap = overlap;
max_overlap_key = *query_key;
}
}
if max_overlap_key == -100000000{
return f64::NAN;
} else{
return max_overlap as f64 / query_map.get(&max_overlap_key).unwrap().len() as f64
}
}
fn run_pairwise_calculation_threaded(experiment_list:&Vec<&ClusterResults>, nthreads:usize) ->Vec<ExperimentResults>{
let pool = rayon::ThreadPoolBuilder::new().num_threads(nthreads).build().unwrap();
let dummy_array: Vec<usize> = (0..experiment_list.len()).collect();
let res: Vec<ExperimentResults> = pool.install(|| dummy_array.into_par_iter()
.map(|i:usize| {
let ref_clust = experiment_list[i];
let mut query_clusts = experiment_list.clone();
query_clusts.remove(i);
return calculate_metrics(ref_clust, &query_clusts)
})
.collect()
);
return res
}
#[pyfunction]
fn pairwise_metric_calculation_fromdisk(file_glob: &str, nthreads:usize) -> Vec<ExperimentResults> {
let test_clusters_objs:Vec<ClusterResults> = glob(file_glob)
.expect("Failed to read glob pattern")
.map(|x|{let file = String::from(x.unwrap().to_str().expect("Failed to unwrap filename"));
return read_cluster_results(&file)}
)
.collect();
if test_clusters_objs.len() == 0{
panic!("The provided glob string did not match any files!!")
}
let test_cluster_refs: Vec<&ClusterResults> = test_clusters_objs.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&test_cluster_refs, nthreads);
return c_res
}
#[pyfunction]
fn pairwise_metric_calculation_frommem(mut cluster_dfs: Vec<HashMap<String, Vec<i64>>>, exp_names:Vec<String>, nthreads:usize) -> Vec<ExperimentResults> {
let clusters_objs_owned = cluster_dfs.into_iter().enumerate().map(|(i, mut x)|{
ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
exp_names[i].clone() )}).collect::<Vec<ClusterResults>>();
let clusters_objs_refs: Vec<&ClusterResults> = clusters_objs_owned.iter().collect();
let c_res :Vec<ExperimentResults> = run_pairwise_calculation_threaded(&clusters_objs_refs, nthreads);
return c_res
}
#[pyfunction]
fn metric_calculation_fromdf(mut ref_df: HashMap<String, Vec<i64>>, query_dfs:Vec<HashMap<String, Vec<i64>>>, exp_name: String)->ExperimentResults{
let ref_cluster = ClusterResults::new(ref_df.remove(&String::from("Barcode")).unwrap(),
ref_df.remove(&String::from("labels")).unwrap(),
exp_name);
let query_clusters_owned = query_dfs.into_iter().map(|mut x|ClusterResults::new(x.remove(&String::from("Barcode")).unwrap(),
x.remove(&String::from("labels")).unwrap(),
String::from("perturbation") )
).collect::<Vec<ClusterResults>>();
let query_clusters_refs = query_clusters_owned.iter().collect::<Vec<&ClusterResults>>();
let res = calculate_metrics(&ref_cluster, &query_clusters_refs);
return res
}
// fn calc_metrics(module: &PyModule) -> PyResult<()> {
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
// module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
// module.add_function(wrap_pyfunction!(oneway_metric_calculation, module)?)?;
// module.add_class::<ExperimentResults>()?;
// Ok(())
// }
#[pymodule]
fn _calc_metrics(py: Python, module: &PyModule) -> PyResult<()> {
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_fromdisk, module)?)?;
module.add_function(wrap_pyfunction!(pairwise_metric_calculation_frommem, module)?)?;
module.add_function(wrap_pyfunction!(metric_calculation_fromdf, module)?)?;
module.add_class::<ExperimentResults>()?;
Ok(())
}
#[test]
fn check_reader(){
let obj = read_cluster_results("test_data/exp-0_resolution-0.4_knn-15_.csv.gz");
assert_eq!(obj.barcodes.len(), obj.labels.len());
}
| { // HashMap.insert returns None when a new key is added
panic!("A duplicate key was added when making a ClusterResults; input data is not sorted by label")
} | conditional_block |
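// Sketch: run_pairwise_calculation_threaded above is a leave-one-out loop over
// experiments. A serial equivalent (for clarity, not performance) looks like:
fn run_pairwise_calculation_serial(experiment_list: &Vec<&ClusterResults>) -> Vec<ExperimentResults> {
    (0..experiment_list.len()).map(|i| {
        let ref_clust = experiment_list[i];
        let mut query_clusts = experiment_list.clone();
        query_clusts.remove(i); // every other experiment becomes a query
        calculate_metrics(ref_clust, &query_clusts)
    }).collect()
}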
vt-3.rs | CString::new("VK_LAYER_KHRONOS_validation").unwrap(),
];
let layers_names_raw: Vec<*const i8> = layer_names
.iter()
.map(|raw_name| raw_name.as_ptr())
.collect();
let extension_names_raw = extension_names();
let debug_utils_create_info = vk::DebugUtilsMessengerCreateInfoEXT {
s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
p_next: ptr::null(),
flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::WARNING
| vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE
// | vk::DebugUtilsMessageSeverityFlagsEXT::INFO
| vk::DebugUtilsMessageSeverityFlagsEXT::ERROR,
message_type: vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
| vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE
| vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION,
pfn_user_callback: Some(vulkan_debug_utils_callback),
p_user_data: ptr::null_mut(),
};
let create_info = vk::InstanceCreateInfo {
s_type: vk::StructureType::INSTANCE_CREATE_INFO,
// setting this to a null pointer also works, but leave it like this to
// be safe i guess?
p_next: &debug_utils_create_info as *const vk::DebugUtilsMessengerCreateInfoEXT
as *const c_void,
flags: vk::InstanceCreateFlags::empty(),
p_application_info: &app_info,
enabled_layer_count: layer_names.len() as u32,
pp_enabled_layer_names: layers_names_raw.as_ptr(),
enabled_extension_count: extension_names_raw.len() as u32,
pp_enabled_extension_names: extension_names_raw.as_ptr(),
};
let entry = Entry::new().unwrap();
let instance = unsafe {
entry
.create_instance(&create_info, None)
.expect("Couldn't create instance")
};
let debug_utils_loader = ash::extensions::ext::DebugUtils::new(&entry, &instance);
let debug_utils_messenger = unsafe {
debug_utils_loader
.create_debug_utils_messenger(&debug_utils_create_info, None)
.expect("Debug Utils Callback")
};
// create surface
let surface =
unsafe { create_surface(&entry, &instance, &window) }.expect("couldn't create surface");
let surface_loader = ash::extensions::khr::Surface::new(&entry, &instance);
// get physical device
let physical_device = {
let phys_devs = unsafe { instance.enumerate_physical_devices() }
.expect("Couldn't enumerate physical devices");
*phys_devs
.iter()
.find(|phys_dev| is_phys_dev_suitable(&instance, phys_dev))
.expect("No suitable physical device found!")
};
// get queue family index
let queue_family_index: u32 = {
let queues =
unsafe { instance.get_physical_device_queue_family_properties(physical_device) };
queues
.iter()
.enumerate()
.find(|(_idx, queue)| queue.queue_flags.contains(vk::QueueFlags::GRAPHICS))
.expect("Couldn't find a graphics queue")
.0
.try_into()
.unwrap()
};
if !unsafe {
surface_loader.get_physical_device_surface_support(
physical_device,
queue_family_index,
surface,
)
} {
panic!("Queue does not have surface support! It's possible that a separate queue with surface support exists, but the current implementation is not capable of finding one.");
}
// get logical device
let device_queue_create_info = vk::DeviceQueueCreateInfo {
s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceQueueCreateFlags::empty(),
queue_family_index,
queue_count: 1,
p_queue_priorities: [1.0].as_ptr(),
};
let device_extensions_raw = get_device_extensions_raw();
let device_create_info = vk::DeviceCreateInfo {
s_type: vk::StructureType::DEVICE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceCreateFlags::empty(),
queue_create_info_count: 1,
p_queue_create_infos: [device_queue_create_info].as_ptr(),
// not used by Vulkan anymore
enabled_layer_count: 0,
pp_enabled_layer_names: ptr::null(),
// these are still used
enabled_extension_count: device_extensions_raw.len() as u32,
pp_enabled_extension_names: device_extensions_raw.as_ptr(),
p_enabled_features: &vk::PhysicalDeviceFeatures::builder().build(),
};
let device = unsafe {
instance
.create_device(physical_device, &device_create_info, None)
.expect("Couldn't create device")
};
// get queue (0 = take first queue)
let queue = unsafe { device.get_device_queue(queue_family_index, 0) };
// check device swapchain capabilities (not just that it has the extension, but
// also supported formats and present modes)
// also returns what dimensions the swapchain should initially be created at
let starting_dims = check_device_swapchain_caps(&surface_loader, physical_device, surface);
// create swapchain
let sc_format = vk::SurfaceFormatKHR {
format: SWAPCHAIN_FORMAT,
color_space: vk::ColorSpaceKHR::default(),
};
let sc_present_mode = vk::PresentModeKHR::IMMEDIATE;
let swapchain_create_info = vk::SwapchainCreateInfoKHR {
s_type: vk::StructureType::SWAPCHAIN_CREATE_INFO_KHR,
p_next: ptr::null(),
flags: vk::SwapchainCreateFlagsKHR::empty(),
surface: surface,
min_image_count: 3,
image_format: SWAPCHAIN_FORMAT,
image_color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,
image_extent: starting_dims,
image_array_layers: 1,
image_usage: vk::ImageUsageFlags::COLOR_ATTACHMENT,
image_sharing_mode: vk::SharingMode::EXCLUSIVE,
queue_family_index_count: 0,
p_queue_family_indices: ptr::null(),
pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY,
composite_alpha: vk::CompositeAlphaFlagsKHR::OPAQUE,
present_mode: vk::PresentModeKHR::IMMEDIATE,
clipped: vk::TRUE,
old_swapchain: vk::SwapchainKHR::null(),
};
let swapchain_creator = Swapchain::new(&instance, &device);
let swapchain = unsafe { swapchain_creator.create_swapchain(&swapchain_create_info, None) }
.expect("Couldn't create swapchain");
let images = unsafe { swapchain_creator.get_swapchain_images(swapchain) }
.expect("Couldn't get swapchain images");
let image_views: Vec<_> = images
.iter()
.map(|image| {
let iv_info = vk::ImageViewCreateInfo {
s_type: vk::StructureType::IMAGE_VIEW_CREATE_INFO,
p_next: ptr::null(),
flags: vk::ImageViewCreateFlags::empty(),
image: *image,
view_type: vk::ImageViewType::TYPE_2D,
format: SWAPCHAIN_FORMAT,
components: vk::ComponentMapping {
r: vk::ComponentSwizzle::IDENTITY,
g: vk::ComponentSwizzle::IDENTITY,
b: vk::ComponentSwizzle::IDENTITY,
a: vk::ComponentSwizzle::IDENTITY,
},
subresource_range: vk::ImageSubresourceRange {
aspect_mask: vk::ImageAspectFlags::COLOR,
base_mip_level: 0,
level_count: 1,
base_array_layer: 0,
layer_count: 1,
},
};
unsafe { device.create_image_view(&iv_info, None) }
.expect("Couldn't create image view")
})
.collect();
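// Sketch (assumed teardown, not shown in this excerpt): every image view
// created above must be destroyed before the logical device, e.g.:
// for view in &image_views { unsafe { device.destroy_image_view(*view, None) }; }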
// shaders
let frag_code = read_shader_code(&relative_path("shaders/vt-3/triangle.frag.spv"));
let vert_code = read_shader_code(&relative_path("shaders/vt-3/triangle.vert.spv"));
let frag_module = create_shader_module(&device, frag_code);
let vert_module = create_shader_module(&device, vert_code);
let entry_point = CString::new("main").unwrap();
let vert_stage_info = vk::PipelineShaderStageCreateInfo {
s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineShaderStageCreateFlags::empty(),
stage: vk::ShaderStageFlags::VERTEX,
module: vert_module,
p_name: entry_point.as_ptr(),
p_specialization_info: &vk::SpecializationInfo::default(),
};
let frag_stage_info = vk::PipelineShaderStageCreateInfo {
s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineShaderStageCreateFlags::empty(),
stage: vk::ShaderStageFlags::FRAGMENT,
module: frag_module,
p_name: entry_point.as_ptr(),
p_specialization_info: &vk::SpecializationInfo::default(),
};
let shader_stages = [vert_stage_info, frag_stage_info];
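// Note (sketch): once the graphics pipeline has been created, the shader
// modules are no longer needed and can be destroyed, e.g.:
// unsafe { device.destroy_shader_module(vert_module, None) };
// unsafe { device.destroy_shader_module(frag_module, None) };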
// fixed-function pipeline settings
// a.k.a vertex format
// we don't really have a format since they are hard-coded into the vertex
// shader for now
let pipeline_vertex_input_info = vk::PipelineVertexInputStateCreateInfo::default();
let pipeline_input_assembly_info = vk::PipelineInputAssemblyStateCreateInfo {
s_type: vk::StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineInputAssemblyStateCreateFlags::empty(),
topology: vk::PrimitiveTopology::TRIANGLE_LIST,
primitive_restart_enable: vk::FALSE,
};
let viewports = [vk::Viewport {
x: 0.0,
y: 0.0,
width: starting_dims.width as f32,
height: starting_dims.height as f32,
min_depth: 0.0,
max_depth: 1.0,
}];
let scissors = [vk::Rect2D {
offset: vk::Offset2D { x: 0, y: 0 },
extent: starting_dims,
}];
let viewport_state = vk::PipelineViewportStateCreateInfo {
s_type: vk::StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineViewportStateCreateFlags::empty(),
viewport_count: viewports.len() as u32,
p_viewports: viewports.as_ptr(),
scissor_count: scissors.len() as u32,
p_scissors: scissors.as_ptr(),
};
let pipeline_rasterization_info = vk::PipelineRasterizationStateCreateInfo {
s_type: vk::StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineRasterizationStateCreateFlags::empty(),
depth_clamp_enable: vk::FALSE,
rasterizer_discard_enable: vk::FALSE,
polygon_mode: vk::PolygonMode::FILL,
cull_mode: vk::CullModeFlags::BACK,
front_face: vk::FrontFace::CLOCKWISE,
depth_bias_enable: vk::FALSE,
depth_bias_constant_factor: 0.0,
depth_bias_clamp: 0.0,
depth_bias_slope_factor: 0.0,
line_width: 1.0,
};
let pipeline_multisample_info = vk::PipelineMultisampleStateCreateInfo {
s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineMultisampleStateCreateFlags::empty(),
rasterization_samples: vk::SampleCountFlags::TYPE_1,
sample_shading_enable: vk::FALSE,
min_sample_shading: 1.0,
p_sample_mask: ptr::null(),
alpha_to_coverage_enable: vk::FALSE,
alpha_to_one_enable: vk::FALSE,
};
// color blending info per framebuffer
let pipeline_color_blend_attachment_infos = [vk::PipelineColorBlendAttachmentState {
blend_enable: vk::FALSE,
// not used because we disabled blending
src_color_blend_factor: vk::BlendFactor::ONE,
dst_color_blend_factor: vk::BlendFactor::ZERO,
color_blend_op: vk::BlendOp::ADD,
src_alpha_blend_factor: vk::BlendFactor::ONE,
dst_alpha_blend_factor: vk::BlendFactor::ZERO,
alpha_blend_op: vk::BlendOp::ADD,
// is used: allow writes to all color components; note these flags must be
// OR'd together -- AND-ing disjoint flags would yield an empty write mask
// and nothing would ever be written to the framebuffer
color_write_mask: vk::ColorComponentFlags::R
| vk::ColorComponentFlags::G
| vk::ColorComponentFlags::B
| vk::ColorComponentFlags::A,
}];
// color blending settings for the whole pipeline
let pipeline_color_blend_info = vk::PipelineColorBlendStateCreateInfo {
s_type: vk::StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineColorBlendStateCreateFlags::empty(),
logic_op_enable: vk::FALSE,
logic_op: vk::LogicOp::COPY, // optional
attachment_count: pipeline_color_blend_attachment_infos.len() as u32,
p_attachments: pipeline_color_blend_attachment_infos.as_ptr(),
blend_constants: [0.0, 0.0, 0.0, 0.0], // optional
};
// we don't use any shader uniforms so we leave it empty
let pipeline_layout_info = vk::PipelineLayoutCreateInfo {
s_type: vk::StructureType::PIPELINE_LAYOUT_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineLayoutCreateFlags::empty(),
set_layout_count: 0,
p_set_layouts: ptr::null(),
push_constant_range_count: 0,
p_push_constant_ranges: ptr::null(),
};
let pipeline_layout = unsafe {
device
.create_pipeline_layout(&pipeline_layout_info, None)
.expect("Couldn't create pipeline layout!")
};
// render pass
let render_pass = create_render_pass(&device);
// pipeline
let pipeline_infos = [vk::GraphicsPipelineCreateInfo {
s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineCreateFlags::empty(),
stage_count: shader_stages.len() as u32,
p_stages: shader_stages.as_ptr(),
p_vertex_input_state: &pipeline_vertex_input_info,
p_input_assembly_state: &pipeline_input_assembly_info,
p_tessellation_state: ptr::null(),
p_viewport_state: &viewport_state,
p_rasterization_state: &pipeline_rasterization_info,
p_multisample_state: &pipeline_multisample_info,
p_depth_stencil_state: ptr::null(),
p_color_blend_state: &pipeline_color_blend_info,
p_dynamic_state: ptr::null(),
layout: pipeline_layout,
render_pass,
subpass: 0,
base_pipeline_handle: vk::Pipeline::null(),
base_pipeline_index: 0,
}];
let pipeline = unsafe {
device.create_graphics_pipelines(vk::PipelineCache::null(), &pipeline_infos, None)
}
.expect("Couldn't create graphics pipeline")[0];
dbg![pipeline];
// shader modules only need to live long enough to create the pipeline
unsafe {
device.destroy_shader_module(frag_module, None);
device.destroy_shader_module(vert_module, None);
}
loop {
let mut exit = false;
events_loop.poll_events(|ev| match ev {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => exit = true,
_ => {}
});
if exit {
break;
}
}
// destroy objects
unsafe {
device.destroy_pipeline(pipeline, None);
device.destroy_pipeline_layout(pipeline_layout, None);
for &image_view in image_views.iter() {
device.destroy_image_view(image_view, None);
}
swapchain_creator.destroy_swapchain(swapchain, None);
device.destroy_render_pass(render_pass, None);
device.destroy_device(None);
surface_loader.destroy_surface(surface, None);
debug_utils_loader.destroy_debug_utils_messenger(debug_utils_messenger, None);
instance.destroy_instance(None);
}
}
fn create_shader_module<D: DeviceV1_0>(device: &D, code: Vec<u8>) -> vk::ShaderModule {
use ash::util::read_spv;
use std::io::Cursor;
let readable_code = read_spv(&mut Cursor::new(&code)).expect("Couldn't read SPV");
let shader_module_create_info = vk::ShaderModuleCreateInfo::builder().code(&readable_code);
unsafe {
device
.create_shader_module(&shader_module_create_info, None)
.expect("Couldn't create shader module")
}
}
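// Note (illustrative, based on ash's documented behavior): `read_spv` checks
// that the byte length is a multiple of 4 and copies the words into a properly
// aligned Vec<u32>; going through a `Cursor` this way avoids transmuting the
// raw `Vec<u8>`, whose allocation is not guaranteed to be 4-byte aligned.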
fn extension_names() -> Vec<*const i8> {
// these are instance extensions
vec![
Surface::name().as_ptr(),
XlibSurface::name().as_ptr(),
DebugUtils::name().as_ptr(),
]
}
unsafe extern "system" fn vulkan_debug_utils_callback(
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
message_type: vk::DebugUtilsMessageTypeFlagsEXT,
p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
_p_user_data: *mut c_void,
) -> vk::Bool32 {
let severity = match message_severity {
vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => "[Verbose]",
vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => "[Warning]",
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => "[Error]",
vk::DebugUtilsMessageSeverityFlagsEXT::INFO => "[Info]",
_ => "[Unknown]",
};
let types = match message_type {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL => "[General]",
vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE => "[Performance]",
vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION => "[Validation]",
_ => "[Unknown]",
};
let message = CStr::from_ptr((*p_callback_data).p_message);
eprintln!("[Debug]{}{}{:?}", severity, types, message);
vk::FALSE
}
fn is_phys_dev_suitable(instance: &ash::Instance, phys_dev: &vk::PhysicalDevice) -> bool {
// gets a list of extensions supported by this device as vulkan strings,
// which don't implement PartialEq
let extension_properties = unsafe { instance.enumerate_device_extension_properties(*phys_dev) }
.expect("Couldn't enumerate device extension properties!");
// Now convert them into rust strings
let available_extension_names: Vec<String> = extension_properties
.iter()
.map(|ext| vk_to_string(&ext.extension_name))
.collect();
// make sure all required device extensions are supported by this device
get_device_extensions().iter().for_each(|name| {
available_extension_names
.iter()
.find(|ext| ext == name)
.expect(&format!("Couldn't find extension {}", name));
});
true
}
fn check_device_swapchain_caps(
surface_loader: &Surface,
physical_device: vk::PhysicalDevice,
surface: vk::SurfaceKHR,
) -> vk::Extent2D {
// returns the current dimensions of the swapchain
let capabilities = unsafe {
surface_loader.get_physical_device_surface_capabilities(physical_device, surface)
}
.expect("Couldn't get physical device surface capabilities");
let formats =
unsafe { surface_loader.get_physical_device_surface_formats(physical_device, surface) }
.expect("Couldn't get physical device surface formats");
let present_modes = unsafe {
surface_loader.get_physical_device_surface_present_modes(physical_device, surface)
}
.expect("Couldn't get physical device surface present modes");
// we request 3 swapchain images so that we can keep rendering to one image
// while another is queued for presentation (triple buffering)
assert!(capabilities.min_image_count <= 3 && capabilities.max_image_count >= 3);
formats
.iter()
.find(|fmt| fmt.format == vk::Format::B8G8R8A8_UNORM)
.expect("Swapchain doesn't support B8G8R8A8_UNORM!");
assert!(present_modes.contains(&vk::PresentModeKHR::IMMEDIATE));
capabilities.current_extent
}
// many of these functions are ripped from https://github.com/bwasty/vulkan-tutorial-rs
// only works on linux
unsafe fn create_surface<E: EntryV1_0, I: InstanceV1_0>(
entry: &E,
instance: &I,
window: &winit::Window,
) -> Result<vk::SurfaceKHR, vk::Result> | {
use winit::os::unix::WindowExt;
let x11_display = window.get_xlib_display().unwrap();
let x11_window = window.get_xlib_window().unwrap();
let x11_create_info = vk::XlibSurfaceCreateInfoKHR {
s_type: vk::StructureType::XLIB_SURFACE_CREATE_INFO_KHR,
p_next: ptr::null(),
flags: Default::default(),
window: x11_window as vk::Window,
dpy: x11_display as *mut vk::Display,
};
let xlib_surface_loader = XlibSurface::new(entry, instance);
xlib_surface_loader.create_xlib_surface(&x11_create_info, None)
} | identifier_body |
|
vt-3.rs | CString::new("VK_LAYER_LUNARG_standard_validation").unwrap(),
CString::new("VK_LAYER_KHRONOS_validation").unwrap(),
];
let layers_names_raw: Vec<*const i8> = layer_names
.iter()
.map(|raw_name| raw_name.as_ptr())
.collect();
let extension_names_raw = extension_names();
let debug_utils_create_info = vk::DebugUtilsMessengerCreateInfoEXT {
s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
p_next: ptr::null(),
flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::WARNING
| vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE
// | vk::DebugUtilsMessageSeverityFlagsEXT::INFO
| vk::DebugUtilsMessageSeverityFlagsEXT::ERROR,
message_type: vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
| vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE
| vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION,
pfn_user_callback: Some(vulkan_debug_utils_callback),
p_user_data: ptr::null_mut(),
};
let create_info = vk::InstanceCreateInfo {
s_type: vk::StructureType::INSTANCE_CREATE_INFO,
// setting this to a null pointer also works, but chaining the debug
// messenger create info here lets validation cover instance creation itself
p_next: &debug_utils_create_info as *const vk::DebugUtilsMessengerCreateInfoEXT
as *const c_void,
flags: vk::InstanceCreateFlags::empty(),
p_application_info: &app_info,
enabled_layer_count: layer_names.len() as u32,
pp_enabled_layer_names: layers_names_raw.as_ptr(),
enabled_extension_count: extension_names_raw.len() as u32,
pp_enabled_extension_names: extension_names_raw.as_ptr(),
};
let entry = Entry::new().unwrap();
let instance = unsafe {
entry
.create_instance(&create_info, None)
.expect("Couldn't create instance")
};
let debug_utils_loader = ash::extensions::ext::DebugUtils::new(&entry, &instance);
let debug_utils_messenger = unsafe {
debug_utils_loader
.create_debug_utils_messenger(&debug_utils_create_info, None)
.expect("Debug Utils Callback")
};
// create surface
let surface =
unsafe { create_surface(&entry, &instance, &window) }.expect("couldn't create surface");
let surface_loader = ash::extensions::khr::Surface::new(&entry, &instance);
// get physical device
let physical_device = {
let phys_devs = unsafe { instance.enumerate_physical_devices() }
.expect("Couldn't enumerate physical devices");
*phys_devs
.iter()
.find(|phys_dev| is_phys_dev_suitable(&instance, phys_dev))
.expect("No suitable physical device found!")
};
// get queue family index
let queue_family_index: u32 = {
let queues =
unsafe { instance.get_physical_device_queue_family_properties(physical_device) };
queues
.iter()
.enumerate()
.find(|(_idx, queue)| queue.queue_flags.contains(vk::QueueFlags::GRAPHICS))
.expect("Couldn't find a graphics queue")
.0
.try_into()
.unwrap()
};
if !unsafe {
surface_loader.get_physical_device_surface_support(
physical_device,
queue_family_index,
surface,
)
} {
panic!("Queue does not have surface support! It's possible that a separate queue with surface support exists, but the current implementation is not capable of finding one.");
}
// get logical device
let device_queue_create_info = vk::DeviceQueueCreateInfo {
s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO,
p_next: ptr::null(), | queue_family_index,
queue_count: 1,
p_queue_priorities: [1.0].as_ptr(),
};
let device_extensions_raw = get_device_extensions_raw();
let device_create_info = vk::DeviceCreateInfo {
s_type: vk::StructureType::DEVICE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceCreateFlags::empty(),
queue_create_info_count: 1,
p_queue_create_infos: [device_queue_create_info].as_ptr(),
// not used by Vulkan anymore
enabled_layer_count: 0,
pp_enabled_layer_names: ptr::null(),
// these are device extensions
enabled_extension_count: device_extensions_raw.len() as u32,
pp_enabled_extension_names: device_extensions_raw.as_ptr(),
p_enabled_features: &vk::PhysicalDeviceFeatures::builder().build(),
};
let device = unsafe {
instance
.create_device(physical_device, &device_create_info, None)
.expect("Couldn't create device")
};
// get queue (0 = take first queue)
let queue = unsafe { device.get_device_queue(queue_family_index, 0) };
// check device swapchain capabilities (not just that it has the extension, but
// also which formats and present modes it supports)
// also returns what dimensions the swapchain should initially be created at
let starting_dims = check_device_swapchain_caps(&surface_loader, physical_device, surface);
// create swapchain
let sc_format = vk::SurfaceFormatKHR {
format: SWAPCHAIN_FORMAT,
color_space: vk::ColorSpaceKHR::default(),
};
let sc_present_mode = vk::PresentModeKHR::IMMEDIATE;
let swapchain_create_info = vk::SwapchainCreateInfoKHR {
s_type: vk::StructureType::SWAPCHAIN_CREATE_INFO_KHR,
p_next: ptr::null(),
flags: vk::SwapchainCreateFlagsKHR::empty(),
surface: surface,
min_image_count: 3,
image_format: SWAPCHAIN_FORMAT,
image_color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,
image_extent: starting_dims,
image_array_layers: 1,
image_usage: vk::ImageUsageFlags::COLOR_ATTACHMENT,
image_sharing_mode: vk::SharingMode::EXCLUSIVE,
queue_family_index_count: 0,
p_queue_family_indices: ptr::null(),
pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY,
composite_alpha: vk::CompositeAlphaFlagsKHR::OPAQUE,
present_mode: vk::PresentModeKHR::IMMEDIATE,
clipped: vk::TRUE,
old_swapchain: vk::SwapchainKHR::null(),
};
let swapchain_creator = Swapchain::new(&instance, &device);
let swapchain = unsafe { swapchain_creator.create_swapchain(&swapchain_create_info, None) }
.expect("Couldn't create swapchain");
let images = unsafe { swapchain_creator.get_swapchain_images(swapchain) }
.expect("Couldn't get swapchain images");
let image_views: Vec<_> = images
.iter()
.map(|image| {
let iv_info = vk::ImageViewCreateInfo {
s_type: vk::StructureType::IMAGE_VIEW_CREATE_INFO,
p_next: ptr::null(),
flags: vk::ImageViewCreateFlags::empty(),
image: *image,
view_type: vk::ImageViewType::TYPE_2D,
format: SWAPCHAIN_FORMAT,
components: vk::ComponentMapping {
r: vk::ComponentSwizzle::IDENTITY,
g: vk::ComponentSwizzle::IDENTITY,
b: vk::ComponentSwizzle::IDENTITY,
a: vk::ComponentSwizzle::IDENTITY,
},
subresource_range: vk::ImageSubresourceRange {
aspect_mask: vk::ImageAspectFlags::COLOR,
base_mip_level: 0,
level_count: 1,
base_array_layer: 0,
layer_count: 1,
},
};
// actually create the image view -- without this call the closure evaluates
// to `()` and `image_views` would just be a useless Vec<()>
unsafe { device.create_image_view(&iv_info, None) }
.expect("Couldn't create image view")
})
.collect();
// shaders
let frag_code = read_shader_code(&relative_path("shaders/vt-3/triangle.frag.spv"));
let vert_code = read_shader_code(&relative_path("shaders/vt-3/triangle.vert.spv"));
let frag_module = create_shader_module(&device, frag_code);
let vert_module = create_shader_module(&device, vert_code);
let entry_point = CString::new("main").unwrap();
let vert_stage_info = vk::PipelineShaderStageCreateInfo {
s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineShaderStageCreateFlags::empty(),
stage: vk::ShaderStageFlags::VERTEX,
module: vert_module,
p_name: entry_point.as_ptr(),
// no specialization constants are used, so a null pointer is fine here
p_specialization_info: ptr::null(),
};
let frag_stage_info = vk::PipelineShaderStageCreateInfo {
s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineShaderStageCreateFlags::empty(),
stage: vk::ShaderStageFlags::FRAGMENT,
module: frag_module,
p_name: entry_point.as_ptr(),
p_specialization_info: ptr::null(),
};
let shader_stages = [vert_stage_info, frag_stage_info];
// fixed-function pipeline settings
// first up: the vertex input state, a.k.a. the vertex format
// we don't really have a format since the vertices are hard-coded into the
// vertex shader for now
let pipeline_vertex_input_info = vk::PipelineVertexInputStateCreateInfo::default();
let pipeline_input_assembly_info = vk::PipelineInputAssemblyStateCreateInfo {
s_type: vk::StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineInputAssemblyStateCreateFlags::empty(),
topology: vk::PrimitiveTopology::TRIANGLE_LIST,
primitive_restart_enable: vk::FALSE,
};
let viewports = [vk::Viewport {
x: 0.0,
y: 0.0,
width: starting_dims.width as f32,
height: starting_dims.height as f32,
min_depth: 0.0,
max_depth: 1.0,
}];
let scissors = [vk::Rect2D {
offset: vk::Offset2D { x: 0, y: 0 },
extent: starting_dims,
}];
let viewport_state = vk::PipelineViewportStateCreateInfo {
s_type: vk::StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineViewportStateCreateFlags::empty(),
viewport_count: viewports.len() as u32,
p_viewports: viewports.as_ptr(),
scissor_count: scissors.len() as u32,
p_scissors: scissors.as_ptr(),
};
let pipeline_rasterization_info = vk::PipelineRasterizationStateCreateInfo {
s_type: vk::StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineRasterizationStateCreateFlags::empty(),
depth_clamp_enable: vk::FALSE,
rasterizer_discard_enable: vk::FALSE,
polygon_mode: vk::PolygonMode::FILL,
cull_mode: vk::CullModeFlags::BACK,
front_face: vk::FrontFace::CLOCKWISE,
depth_bias_enable: vk::FALSE,
depth_bias_constant_factor: 0.0,
depth_bias_clamp: 0.0,
depth_bias_slope_factor: 0.0,
line_width: 1.0,
};
let pipeline_multisample_info = vk::PipelineMultisampleStateCreateInfo {
s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineMultisampleStateCreateFlags::empty(),
rasterization_samples: vk::SampleCountFlags::TYPE_1,
sample_shading_enable: vk::FALSE,
min_sample_shading: 1.0,
p_sample_mask: ptr::null(),
alpha_to_coverage_enable: vk::FALSE,
alpha_to_one_enable: vk::FALSE,
};
// color blending info per framebuffer
let pipeline_color_blend_attachment_infos = [vk::PipelineColorBlendAttachmentState {
blend_enable: vk::FALSE,
// not used because we disabled blending
src_color_blend_factor: vk::BlendFactor::ONE,
dst_color_blend_factor: vk::BlendFactor::ZERO,
color_blend_op: vk::BlendOp::ADD,
src_alpha_blend_factor: vk::BlendFactor::ONE,
dst_alpha_blend_factor: vk::BlendFactor::ZERO,
alpha_blend_op: vk::BlendOp::ADD,
// is used: allow writes to all color components; note these flags must be
// OR'd together -- AND-ing disjoint flags would yield an empty write mask
// and nothing would ever be written to the framebuffer
color_write_mask: vk::ColorComponentFlags::R
| vk::ColorComponentFlags::G
| vk::ColorComponentFlags::B
| vk::ColorComponentFlags::A,
}];
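// For contrast, a hypothetical alpha-blended variant of the attachment state
// above (not used in this tutorial step) would enable blending like so:
//
// vk::PipelineColorBlendAttachmentState {
// blend_enable: vk::TRUE,
// src_color_blend_factor: vk::BlendFactor::SRC_ALPHA,
// dst_color_blend_factor: vk::BlendFactor::ONE_MINUS_SRC_ALPHA,
// color_blend_op: vk::BlendOp::ADD,
// src_alpha_blend_factor: vk::BlendFactor::ONE,
// dst_alpha_blend_factor: vk::BlendFactor::ZERO,
// alpha_blend_op: vk::BlendOp::ADD,
// color_write_mask: /* same RGBA mask as above */,
// }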
// color blending settings for the whole pipeline
let pipeline_color_blend_info = vk::PipelineColorBlendStateCreateInfo {
s_type: vk::StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineColorBlendStateCreateFlags::empty(),
logic_op_enable: vk::FALSE,
logic_op: vk::LogicOp::COPY, // optional
attachment_count: pipeline_color_blend_attachment_infos.len() as u32,
p_attachments: pipeline_color_blend_attachment_infos.as_ptr(),
blend_constants: [0.0, 0.0, 0.0, 0.0], // optional
};
// we don't use any shader uniforms so we leave it empty
let pipeline_layout_info = vk::PipelineLayoutCreateInfo {
s_type: vk::StructureType::PIPELINE_LAYOUT_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineLayoutCreateFlags::empty(),
set_layout_count: 0,
p_set_layouts: ptr::null(),
push_constant_range_count: 0,
p_push_constant_ranges: ptr::null(),
};
let pipeline_layout = unsafe {
device
.create_pipeline_layout(&pipeline_layout_info, None)
.expect("Couldn't create pipeline layout!")
};
// render pass
let render_pass = create_render_pass(&device);
// pipeline
let pipeline_infos = [vk::GraphicsPipelineCreateInfo {
s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineCreateFlags::empty(),
stage_count: shader_stages.len() as u32,
p_stages: shader_stages.as_ptr(),
p_vertex_input_state: &pipeline_vertex_input_info,
p_input_assembly_state: &pipeline_input_assembly_info,
p_tessellation_state: ptr::null(),
p_viewport_state: &viewport_state,
p_rasterization_state: &pipeline_rasterization_info,
p_multisample_state: &pipeline_multisample_info,
p_depth_stencil_state: ptr::null(),
p_color_blend_state: &pipeline_color_blend_info,
p_dynamic_state: ptr::null(),
layout: pipeline_layout,
render_pass,
subpass: 0,
base_pipeline_handle: vk::Pipeline::null(),
base_pipeline_index: 0,
}];
let pipeline = unsafe {
device.create_graphics_pipelines(vk::PipelineCache::null(), &pipeline_infos, None)
}
.expect("Couldn't create graphics pipeline")[0];
dbg![pipeline];
// shader modules only need to live long enough to create the pipeline
unsafe {
device.destroy_shader_module(frag_module, None);
device.destroy_shader_module(vert_module, None);
}
loop {
let mut exit = false;
events_loop.poll_events(|ev| match ev {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => exit = true,
_ => {}
});
if exit {
break;
}
}
// destroy objects
unsafe {
device.destroy_pipeline(pipeline, None);
device.destroy_pipeline_layout(pipeline_layout, None);
for &image_view in image_views.iter() {
device.destroy_image_view(image_view, None);
}
swapchain_creator.destroy_swapchain(swapchain, None);
device.destroy_render_pass(render_pass, None);
device.destroy_device(None);
surface_loader.destroy_surface(surface, None);
debug_utils_loader.destroy_debug_utils_messenger(debug_utils_messenger, None);
instance.destroy_instance(None);
}
}
fn create_shader_module<D: DeviceV1_0>(device: &D, code: Vec<u8>) -> vk::ShaderModule {
use ash::util::read_spv;
use std::io::Cursor;
let readable_code = read_spv(&mut Cursor::new(&code)).expect("Couldn't read SPV");
let shader_module_create_info = vk::ShaderModuleCreateInfo::builder().code(&readable_code);
unsafe {
device
.create_shader_module(&shader_module_create_info, None)
.expect("Couldn't create shader module")
}
}
fn extension_names() -> Vec<*const i8> {
// these are instance extensions
vec![
Surface::name().as_ptr(),
XlibSurface::name().as_ptr(),
DebugUtils::name().as_ptr(),
]
}
unsafe extern "system" fn vulkan_debug_utils_callback(
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
message_type: vk::DebugUtilsMessageTypeFlagsEXT,
p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
_p_user_data: *mut c_void,
) -> vk::Bool32 {
let severity = match message_severity {
vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => "[Verbose]",
vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => "[Warning]",
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => "[Error]",
vk::DebugUtilsMessageSeverityFlagsEXT::INFO => "[Info]",
_ => "[Unknown]",
};
let types = match message_type {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL => "[General]",
vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE => "[Performance]",
vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION => "[Validation]",
_ => "[Unknown]",
};
let message = CStr::from_ptr((*p_callback_data).p_message);
eprintln!("[Debug]{}{}{:?}", severity, types, message);
vk::FALSE
}
fn is_phys_dev_suitable(instance: &ash::Instance, phys_dev: &vk::PhysicalDevice) -> bool {
// gets a list of extensions supported by this device as vulkan strings,
// which don't implement PartialEq
let extension_properties = unsafe { instance.enumerate_device_extension_properties(*phys_dev) }
.expect("Couldn't enumerate device extension properties!");
// Now convert them into rust strings
let available_extension_names: Vec<String> = extension_properties
.iter()
.map(|ext| vk_to_string(&ext.extension_name))
.collect();
// make sure all required device extensions are supported by this device
get_device_extensions().iter().for_each(|name| {
available_extension_names
.iter()
.find(|ext| ext == name)
.expect(&format!("Couldn't find extension {}", name));
});
true
}
fn check_device_swapchain_caps(
surface_loader: &Surface,
physical_device: vk::PhysicalDevice,
surface: vk::SurfaceKHR,
) -> vk::Extent2D {
// returns the current dimensions of the swapchain
let capabilities = unsafe {
surface_loader.get_physical_device_surface_capabilities(physical_device, surface)
}
.expect("Couldn't get physical device surface capabilities");
let formats =
unsafe { surface_loader.get_physical_device_surface_formats(physical_device, surface) }
.expect("Couldn't get physical device surface formats");
let present_modes = unsafe {
surface_loader.get_physical_device_surface_present_modes(physical_device, surface)
}
.expect("Couldn't get physical device surface present modes");
// we request 3 swapchain images so that we can keep rendering to one image
// while another is queued for presentation (triple buffering)
assert!(capabilities.min_image_count <= 3 && capabilities.max_image_count >= 3);
formats
.iter()
.find(|fmt| fmt.format == vk::Format::B8G8R8A8_UNORM)
.expect("Swapchain doesn't support B8G8R8A8_UNORM!");
assert!(present_modes.contains(&vk::PresentModeKHR::IMMEDIATE));
capabilities.current_extent
}
// many of these functions are ripped from https://github.com/bwasty/vulkan-tutorial-rs
// only works on linux
unsafe fn create_surface<E: EntryV1_0, I: InstanceV1_0>(
entry: &E,
instance: &I,
window: &winit::Window,
) -> Result<vk::SurfaceKHR, vk::Result> {
use winit::os::unix::WindowExt;
let x11_display = window.get_xlib_display().unwrap();
let x11_window = window.get_xlib_window().unwrap();
let x11_create_info = vk::XlibSurfaceCreateInfoKHR {
s_type: vk::StructureType::XLIB_SURFACE_CREATE_INFO_KHR,
p_next: ptr::null(),
flags: Default::default(),
window: x11_window as vk::Window,
dpy: x11_display as *mut vk::Display,
};
let xlib_surface_loader = XlibSurface::new(entry, instance);
| flags: vk::DeviceQueueCreateFlags::empty(), | random_line_split |
vt-3.rs | CString::new("VK_LAYER_LUNARG_standard_validation").unwrap(),
CString::new("VK_LAYER_KHRONOS_validation").unwrap(),
];
let layers_names_raw: Vec<*const i8> = layer_names
.iter()
.map(|raw_name| raw_name.as_ptr())
.collect();
let extension_names_raw = extension_names();
let debug_utils_create_info = vk::DebugUtilsMessengerCreateInfoEXT {
s_type: vk::StructureType::DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
p_next: ptr::null(),
flags: vk::DebugUtilsMessengerCreateFlagsEXT::empty(),
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT::WARNING
| vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE
// | vk::DebugUtilsMessageSeverityFlagsEXT::INFO
| vk::DebugUtilsMessageSeverityFlagsEXT::ERROR,
message_type: vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
| vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE
| vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION,
pfn_user_callback: Some(vulkan_debug_utils_callback),
p_user_data: ptr::null_mut(),
};
let create_info = vk::InstanceCreateInfo {
s_type: vk::StructureType::INSTANCE_CREATE_INFO,
// setting this to a null pointer also works, but chaining the debug
// messenger create info here lets validation cover instance creation itself
p_next: &debug_utils_create_info as *const vk::DebugUtilsMessengerCreateInfoEXT
as *const c_void,
flags: vk::InstanceCreateFlags::empty(),
p_application_info: &app_info,
enabled_layer_count: layer_names.len() as u32,
pp_enabled_layer_names: layers_names_raw.as_ptr(),
enabled_extension_count: extension_names_raw.len() as u32,
pp_enabled_extension_names: extension_names_raw.as_ptr(),
};
let entry = Entry::new().unwrap();
let instance = unsafe {
entry
.create_instance(&create_info, None)
.expect("Couldn't create instance")
};
let debug_utils_loader = ash::extensions::ext::DebugUtils::new(&entry, &instance);
let debug_utils_messenger = unsafe {
debug_utils_loader
.create_debug_utils_messenger(&debug_utils_create_info, None)
.expect("Debug Utils Callback")
};
// create surface
let surface =
unsafe { create_surface(&entry, &instance, &window) }.expect("couldn't create surface");
let surface_loader = ash::extensions::khr::Surface::new(&entry, &instance);
// get physical device
let physical_device = {
let phys_devs = unsafe { instance.enumerate_physical_devices() }
.expect("Couldn't enumerate physical devices");
*phys_devs
.iter()
.find(|phys_dev| is_phys_dev_suitable(&instance, phys_dev))
.expect("No suitable physical device found!")
};
// get queue family index
let queue_family_index: u32 = {
let queues =
unsafe { instance.get_physical_device_queue_family_properties(physical_device) };
queues
.iter()
.enumerate()
.find(|(_idx, queue)| queue.queue_flags.contains(vk::QueueFlags::GRAPHICS))
.expect("Couldn't find a graphics queue")
.0
.try_into()
.unwrap()
};
if !unsafe {
surface_loader.get_physical_device_surface_support(
physical_device,
queue_family_index,
surface,
)
} {
panic!("Queue does not have surface support! It's possible that a separate queue with surface support exists, but the current implementation is not capable of finding one.");
}
// get logical device
let device_queue_create_info = vk::DeviceQueueCreateInfo {
s_type: vk::StructureType::DEVICE_QUEUE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceQueueCreateFlags::empty(),
queue_family_index,
queue_count: 1,
p_queue_priorities: [1.0].as_ptr(),
};
let device_extensions_raw = get_device_extensions_raw();
let device_create_info = vk::DeviceCreateInfo {
s_type: vk::StructureType::DEVICE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::DeviceCreateFlags::empty(),
queue_create_info_count: 1,
p_queue_create_infos: [device_queue_create_info].as_ptr(),
// not used by Vulkan anymore
enabled_layer_count: 0,
pp_enabled_layer_names: ptr::null(),
// these are device extensions
enabled_extension_count: device_extensions_raw.len() as u32,
pp_enabled_extension_names: device_extensions_raw.as_ptr(),
p_enabled_features: &vk::PhysicalDeviceFeatures::builder().build(),
};
let device = unsafe {
instance
.create_device(physical_device, &device_create_info, None)
.expect("Couldn't create device")
};
// get queue (0 = take first queue)
let queue = unsafe { device.get_device_queue(queue_family_index, 0) };
// check device swapchain capabilities (not just that it has the extension, but
// also which formats and present modes it supports)
// also returns what dimensions the swapchain should initially be created at
let starting_dims = check_device_swapchain_caps(&surface_loader, physical_device, surface);
// create swapchain
let sc_format = vk::SurfaceFormatKHR {
format: SWAPCHAIN_FORMAT,
color_space: vk::ColorSpaceKHR::default(),
};
let sc_present_mode = vk::PresentModeKHR::IMMEDIATE;
let swapchain_create_info = vk::SwapchainCreateInfoKHR {
s_type: vk::StructureType::SWAPCHAIN_CREATE_INFO_KHR,
p_next: ptr::null(),
flags: vk::SwapchainCreateFlagsKHR::empty(),
surface: surface,
min_image_count: 3,
image_format: SWAPCHAIN_FORMAT,
image_color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,
image_extent: starting_dims,
image_array_layers: 1,
image_usage: vk::ImageUsageFlags::COLOR_ATTACHMENT,
image_sharing_mode: vk::SharingMode::EXCLUSIVE,
queue_family_index_count: 0,
p_queue_family_indices: ptr::null(),
pre_transform: vk::SurfaceTransformFlagsKHR::IDENTITY,
composite_alpha: vk::CompositeAlphaFlagsKHR::OPAQUE,
present_mode: vk::PresentModeKHR::IMMEDIATE,
clipped: vk::TRUE,
old_swapchain: vk::SwapchainKHR::null(),
};
let swapchain_creator = Swapchain::new(&instance, &device);
let swapchain = unsafe { swapchain_creator.create_swapchain(&swapchain_create_info, None) }
.expect("Couldn't create swapchain");
let images = unsafe { swapchain_creator.get_swapchain_images(swapchain) }
.expect("Couldn't get swapchain images");
let image_views: Vec<_> = images
.iter()
.map(|image| {
let iv_info = vk::ImageViewCreateInfo {
s_type: vk::StructureType::IMAGE_VIEW_CREATE_INFO,
p_next: ptr::null(),
flags: vk::ImageViewCreateFlags::empty(),
image: *image,
view_type: vk::ImageViewType::TYPE_2D,
format: SWAPCHAIN_FORMAT,
components: vk::ComponentMapping {
r: vk::ComponentSwizzle::IDENTITY,
g: vk::ComponentSwizzle::IDENTITY,
b: vk::ComponentSwizzle::IDENTITY,
a: vk::ComponentSwizzle::IDENTITY,
},
subresource_range: vk::ImageSubresourceRange {
aspect_mask: vk::ImageAspectFlags::COLOR,
base_mip_level: 0,
level_count: 1,
base_array_layer: 0,
layer_count: 1,
},
};
// actually create the image view -- without this call the closure evaluates
// to `()` and `image_views` would just be a useless Vec<()>
unsafe { device.create_image_view(&iv_info, None) }
.expect("Couldn't create image view")
})
.collect();
// shaders
let frag_code = read_shader_code(&relative_path("shaders/vt-3/triangle.frag.spv"));
let vert_code = read_shader_code(&relative_path("shaders/vt-3/triangle.vert.spv"));
let frag_module = create_shader_module(&device, frag_code);
let vert_module = create_shader_module(&device, vert_code);
let entry_point = CString::new("main").unwrap();
let vert_stage_info = vk::PipelineShaderStageCreateInfo {
s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineShaderStageCreateFlags::empty(),
stage: vk::ShaderStageFlags::VERTEX,
module: vert_module,
p_name: entry_point.as_ptr(),
// no specialization constants are used, so a null pointer is fine here
p_specialization_info: ptr::null(),
};
let frag_stage_info = vk::PipelineShaderStageCreateInfo {
s_type: vk::StructureType::PIPELINE_SHADER_STAGE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineShaderStageCreateFlags::empty(),
stage: vk::ShaderStageFlags::FRAGMENT,
module: frag_module,
p_name: entry_point.as_ptr(),
p_specialization_info: ptr::null(),
};
let shader_stages = [vert_stage_info, frag_stage_info];
// fixed-function pipeline settings
// first up: the vertex input state, a.k.a. the vertex format
// we don't really have a format since the vertices are hard-coded into the
// vertex shader for now
let pipeline_vertex_input_info = vk::PipelineVertexInputStateCreateInfo::default();
let pipeline_input_assembly_info = vk::PipelineInputAssemblyStateCreateInfo {
s_type: vk::StructureType::PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineInputAssemblyStateCreateFlags::empty(),
topology: vk::PrimitiveTopology::TRIANGLE_LIST,
primitive_restart_enable: vk::FALSE,
};
let viewports = [vk::Viewport {
x: 0.0,
y: 0.0,
width: starting_dims.width as f32,
height: starting_dims.height as f32,
min_depth: 0.0,
max_depth: 1.0,
}];
let scissors = [vk::Rect2D {
offset: vk::Offset2D { x: 0, y: 0 },
extent: starting_dims,
}];
let viewport_state = vk::PipelineViewportStateCreateInfo {
s_type: vk::StructureType::PIPELINE_VIEWPORT_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineViewportStateCreateFlags::empty(),
viewport_count: viewports.len() as u32,
p_viewports: viewports.as_ptr(),
scissor_count: scissors.len() as u32,
p_scissors: scissors.as_ptr(),
};
let pipeline_rasterization_info = vk::PipelineRasterizationStateCreateInfo {
s_type: vk::StructureType::PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineRasterizationStateCreateFlags::empty(),
depth_clamp_enable: vk::FALSE,
rasterizer_discard_enable: vk::FALSE,
polygon_mode: vk::PolygonMode::FILL,
cull_mode: vk::CullModeFlags::BACK,
front_face: vk::FrontFace::CLOCKWISE,
depth_bias_enable: vk::FALSE,
depth_bias_constant_factor: 0.0,
depth_bias_clamp: 0.0,
depth_bias_slope_factor: 0.0,
line_width: 1.0,
};
let pipeline_multisample_info = vk::PipelineMultisampleStateCreateInfo {
s_type: vk::StructureType::PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineMultisampleStateCreateFlags::empty(),
rasterization_samples: vk::SampleCountFlags::TYPE_1,
sample_shading_enable: vk::FALSE,
min_sample_shading: 1.0,
p_sample_mask: ptr::null(),
alpha_to_coverage_enable: vk::FALSE,
alpha_to_one_enable: vk::FALSE,
};
// color blending info per framebuffer
let pipeline_color_blend_attachment_infos = [vk::PipelineColorBlendAttachmentState {
blend_enable: vk::FALSE,
// not used because we disabled blending
src_color_blend_factor: vk::BlendFactor::ONE,
dst_color_blend_factor: vk::BlendFactor::ZERO,
color_blend_op: vk::BlendOp::ADD,
src_alpha_blend_factor: vk::BlendFactor::ONE,
dst_alpha_blend_factor: vk::BlendFactor::ZERO,
alpha_blend_op: vk::BlendOp::ADD,
// is used: allow writes to all color components; note these flags must be
// OR'd together -- AND-ing disjoint flags would yield an empty write mask
// and nothing would ever be written to the framebuffer
color_write_mask: vk::ColorComponentFlags::R
| vk::ColorComponentFlags::G
| vk::ColorComponentFlags::B
| vk::ColorComponentFlags::A,
}];
// color blending settings for the whole pipeline
let pipeline_color_blend_info = vk::PipelineColorBlendStateCreateInfo {
s_type: vk::StructureType::PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineColorBlendStateCreateFlags::empty(),
logic_op_enable: vk::FALSE,
logic_op: vk::LogicOp::COPY, // optional
attachment_count: pipeline_color_blend_attachment_infos.len() as u32,
p_attachments: pipeline_color_blend_attachment_infos.as_ptr(),
blend_constants: [0.0, 0.0, 0.0, 0.0], // optional
};
// we don't use any shader uniforms so we leave it empty
let pipeline_layout_info = vk::PipelineLayoutCreateInfo {
s_type: vk::StructureType::PIPELINE_LAYOUT_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineLayoutCreateFlags::empty(),
set_layout_count: 0,
p_set_layouts: ptr::null(),
push_constant_range_count: 0,
p_push_constant_ranges: ptr::null(),
};
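// If per-draw data were needed later (hypothetical, nothing here uses it),
// a push-constant range could be declared and wired into the layout above:
//
// let push_constant_ranges = [vk::PushConstantRange {
// stage_flags: vk::ShaderStageFlags::VERTEX,
// offset: 0,
// size: std::mem::size_of::<[f32; 4]>() as u32,
// }];
// // ... push_constant_range_count: 1, p_push_constant_ranges: push_constant_ranges.as_ptr()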
let pipeline_layout = unsafe {
device
.create_pipeline_layout(&pipeline_layout_info, None)
.expect("Couldn't create pipeline layout!")
};
// render pass
let render_pass = create_render_pass(&device);
// pipeline
let pipeline_infos = [vk::GraphicsPipelineCreateInfo {
s_type: vk::StructureType::GRAPHICS_PIPELINE_CREATE_INFO,
p_next: ptr::null(),
flags: vk::PipelineCreateFlags::empty(),
stage_count: shader_stages.len() as u32,
p_stages: shader_stages.as_ptr(),
p_vertex_input_state: &pipeline_vertex_input_info,
p_input_assembly_state: &pipeline_input_assembly_info,
p_tessellation_state: ptr::null(),
p_viewport_state: &viewport_state,
p_rasterization_state: &pipeline_rasterization_info,
p_multisample_state: &pipeline_multisample_info,
p_depth_stencil_state: ptr::null(),
p_color_blend_state: &pipeline_color_blend_info,
p_dynamic_state: ptr::null(),
layout: pipeline_layout,
render_pass,
subpass: 0,
base_pipeline_handle: vk::Pipeline::null(),
base_pipeline_index: 0,
}];
let pipeline = unsafe {
device.create_graphics_pipelines(vk::PipelineCache::null(), &pipeline_infos, None)
}
.expect("Couldn't create graphics pipeline")[0];
dbg![pipeline];
// shader modules only need to live long enough to create the pipeline
unsafe {
device.destroy_shader_module(frag_module, None);
device.destroy_shader_module(vert_module, None);
}
loop {
let mut exit = false;
events_loop.poll_events(|ev| match ev {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => exit = true,
_ => {}
});
if exit {
break;
}
}
// destroy objects
unsafe {
device.destroy_pipeline(pipeline, None);
device.destroy_pipeline_layout(pipeline_layout, None);
for &image_view in image_views.iter() {
device.destroy_image_view(image_view, None);
}
swapchain_creator.destroy_swapchain(swapchain, None);
device.destroy_render_pass(render_pass, None);
device.destroy_device(None);
surface_loader.destroy_surface(surface, None);
debug_utils_loader.destroy_debug_utils_messenger(debug_utils_messenger, None);
instance.destroy_instance(None);
}
}
fn create_shader_module<D: DeviceV1_0>(device: &D, code: Vec<u8>) -> vk::ShaderModule {
use ash::util::read_spv;
use std::io::Cursor;
let readable_code = read_spv(&mut Cursor::new(&code)).expect("Couldn't read SPV");
let shader_module_create_info = vk::ShaderModuleCreateInfo::builder().code(&readable_code);
unsafe {
device
.create_shader_module(&shader_module_create_info, None)
.expect("Couldn't create shader module")
}
}
fn extension_names() -> Vec<*const i8> {
// these are instance extensions
vec![
Surface::name().as_ptr(),
XlibSurface::name().as_ptr(),
DebugUtils::name().as_ptr(),
]
}
unsafe extern "system" fn | (
message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
message_type: vk::DebugUtilsMessageTypeFlagsEXT,
p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
_p_user_data: *mut c_void,
) -> vk::Bool32 {
let severity = match message_severity {
vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => "[Verbose]",
vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => "[Warning]",
vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => "[Error]",
vk::DebugUtilsMessageSeverityFlagsEXT::INFO => "[Info]",
_ => "[Unknown]",
};
let types = match message_type {
vk::DebugUtilsMessageTypeFlagsEXT::GENERAL => "[General]",
vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE => "[Performance]",
vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION => "[Validation]",
_ => "[Unknown]",
};
let message = CStr::from_ptr((*p_callback_data).p_message);
eprintln!("[Debug]{}{}{:?}", severity, types, message);
vk::FALSE
}
fn is_phys_dev_suitable(instance: &ash::Instance, phys_dev: &vk::PhysicalDevice) -> bool {
// gets a list of extensions supported by this device as vulkan strings,
// which don't implement PartialEq
let extension_properties = unsafe { instance.enumerate_device_extension_properties(*phys_dev) }
.expect("Couldn't enumerate device extension properties!");
// Now convert them into rust strings
let available_extension_names: Vec<String> = extension_properties
.iter()
.map(|ext| vk_to_string(&ext.extension_name))
.collect();
// make sure all required device extensions are supported by this device
get_device_extensions().iter().for_each(|name| {
available_extension_names
.iter()
.find(|ext| ext == name)
.expect(&format!("Couldn't find extension {}", name));
});
true
}
fn check_device_swapchain_caps(
surface_loader: &Surface,
physical_device: vk::PhysicalDevice,
surface: vk::SurfaceKHR,
) -> vk::Extent2D {
// returns the current dimensions of the swapchain
let capabilities = unsafe {
surface_loader.get_physical_device_surface_capabilities(physical_device, surface)
}
.expect("Couldn't get physical device surface capabilities");
let formats =
unsafe { surface_loader.get_physical_device_surface_formats(physical_device, surface) }
.expect("Couldn't get physical device surface formats");
let present_modes = unsafe {
surface_loader.get_physical_device_surface_present_modes(physical_device, surface)
}
.expect("Couldn't get physical device surface present modes");
// we request 3 swapchain images so that we can keep rendering to one image
// while another is queued for presentation (triple buffering)
assert!(capabilities.min_image_count <= 3 && capabilities.max_image_count >= 3);
formats
.iter()
.find(|fmt| fmt.format == vk::Format::B8G8R8A8_UNORM)
.expect("Swapchain doesn't support B8G8R8A8_UNORM!");
assert!(present_modes.contains(&vk::PresentModeKHR::IMMEDIATE));
capabilities.current_extent
}
// many of these functions are ripped from https://github.com/bwasty/vulkan-tutorial-rs
// only works on linux
unsafe fn create_surface<E: EntryV1_0, I: InstanceV1_0>(
entry: &E,
instance: &I,
window: &winit::Window,
) -> Result<vk::SurfaceKHR, vk::Result> {
use winit::os::unix::WindowExt;
let x11_display = window.get_xlib_display().unwrap();
let x11_window = window.get_xlib_window().unwrap();
let x11_create_info = vk::XlibSurfaceCreateInfoKHR {
s_type: vk::StructureType::XLIB_SURFACE_CREATE_INFO_KHR,
p_next: ptr::null(),
flags: Default::default(),
window: x11_window as vk::Window,
dpy: x11_display as *mut vk::Display,
};
let xlib_surface_loader = XlibSurface::new(entry, instance);
| vulkan_debug_utils_callback | identifier_name |
mod.rs | // Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Protocol for fire-and-forget style message delivery to a peer
//!
//! DirectSend protocol takes advantage of [muxers] and [substream negotiation] to build a simple
//! best effort message delivery protocol. Concretely,
//!
//! 1. Every message runs in its own ephemeral substream. The substream is directional in the way
//! that only the dialer sends a message to the listener; no messages or acknowledgements
//! are sent back in the other direction. So the message delivery is best effort and not
//! guaranteed. Because the substreams are independent, there is no guarantee on the ordering
//! of the message delivery either.
//! 2. A DirectSend call negotiates which protocol to speak using [`protocol-select`]. This
//! allows simple versioning of message delivery and negotiation of which message types are
//! supported. In the future, we can potentially support multiple backwards-incompatible
//! versions of any messages.
//! 3. The actual structure of the wire messages is left for higher layers to specify. The
//! DirectSend protocol is only concerned with shipping around opaque blobs. Current xpeer
//! DirectSend clients (consensus, mempool) mostly send protobuf enums around over a single
//! DirectSend protocol, e.g., `/xpeer/consensus/direct_send/0.1.0`.
//!
//! ## Wire Protocol (dialer):
//!
//! To send a message to a remote peer, the dialer
//!
//! 1. Requests a new outbound substream from the muxer.
//! 2. Negotiates the substream using [`protocol-select`] to the protocol they
//! wish to speak, e.g., `/xpeer/mempool/direct_send/0.1.0`.
//! 3. Sends the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! ## Wire Protocol (listener):
//!
//! To receive a message from remote peers, the listener
//!
//! 1. Polls for new inbound substreams on the muxer.
//! 2. Negotiates inbound substreams using [`protocol-select`]. The negotiation
//! must only succeed if the requested protocol is actually supported.
//! 3. Awaits the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! Note: negotiated substreams are currently framed with the
//! [multiformats unsigned varint length-prefix](https://github.com/multiformats/unsigned-varint)
//!
//! [muxers]: ../../../netcore/multiplexing/index.html
//! [substream negotiation]: ../../../netcore/negotiate/index.html
//! [`protocol-select`]: ../../../netcore/negotiate/index.html
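// Illustrative dialer-side sketch of the wire protocol described above. This
// is not part of the module; `Muxer`, `open_outbound` and
// `negotiate_outbound_select` are hypothetical stand-ins for the netcore
// multiplexing/negotiation APIs:
//
// async fn send_one(muxer: &mut Muxer, msg: Bytes) -> Result<(), NetworkError> {
//     let substream = muxer.open_outbound().await?;              // 1. new substream
//     let substream = negotiate_outbound_select(                 // 2. protocol-select
//         substream, b"/xpeer/mempool/direct_send/0.1.0").await?;
//     let mut framed = Framed::new(substream.compat(),           // varint length-prefix
//         UviBytes::<Bytes>::default()).sink_compat();
//     framed.send(msg).await?;                                   // 3. send the message
//     Ok(())                                                     // 4. drop closes it
// }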
use crate::{
counters,
error::NetworkError,
peer_manager::{PeerManagerNotification, PeerManagerRequestSender},
ProtocolId,
};
use bytes::Bytes;
use channel;
use futures::{
compat::Sink01CompatExt,
future::{FutureExt, TryFutureExt},
io::{AsyncRead, AsyncReadExt, AsyncWrite},
sink::SinkExt,
stream::StreamExt,
};
use logger::prelude::*;
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Debug,
};
use tokio::{codec::Framed, runtime::TaskExecutor};
use types::PeerId;
use unsigned_varint::codec::UviBytes;
#[cfg(test)]
mod test;
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendRequest {
/// A request to send out a message.
SendMessage(PeerId, Message),
}
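// Example (illustrative): an upstream actor enqueues a fire-and-forget message
// by sending a request into the DirectSend actor's request channel. The
// channel handle name and the ProtocolId construction below are schematic:
//
// ds_requests_tx.send(DirectSendRequest::SendMessage(
//     peer_id,
//     Message {
//         protocol: protocol_id, // e.g. built from b"/xpeer/mempool/direct_send/0.1.0"
//         mdata: Bytes::from_static(b"serialized protobuf bytes"),
//     },
// )).await?;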
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendNotification {
/// A notification that a DirectSend message is received.
RecvMessage(PeerId, Message),
}
#[derive(Clone, Eq, PartialEq)]
pub struct Message {
/// Message type.
pub protocol: ProtocolId,
/// Serialized message data.
pub mdata: Bytes,
}
impl Debug for Message {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mdata_str = if self.mdata.len() <= 10 {
format!("{:?}", self.mdata)
} else {
format!("{:?}...", self.mdata.slice_to(10))
};
write!(
f,
"Message {{ protocol: {:?}, mdata: {} }}",
self.protocol, mdata_str
)
}
}
/// The DirectSend actor.
pub struct DirectSend<TSubstream> {
/// A handle to a tokio executor.
executor: TaskExecutor,
/// Channel to receive requests from other upstream actors.
ds_requests_rx: channel::Receiver<DirectSendRequest>,
/// Channel to send notifications to upstream actors.
ds_notifs_tx: channel::Sender<DirectSendNotification>,
/// Channel to receive notifications from PeerManager.
peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
/// Channel to send requests to PeerManager.
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
/// Outbound message queues for each (PeerId, ProtocolId) pair.
message_queues: HashMap<(PeerId, ProtocolId), channel::Sender<Bytes>>,
}
impl<TSubstream> DirectSend<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite + Send + Unpin + Debug + 'static,
{
pub fn | (
executor: TaskExecutor,
ds_requests_rx: channel::Receiver<DirectSendRequest>,
ds_notifs_tx: channel::Sender<DirectSendNotification>,
peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
) -> Self {
Self {
executor,
ds_requests_rx,
ds_notifs_tx,
peer_mgr_notifs_rx,
peer_mgr_reqs_tx,
message_queues: HashMap::new(),
}
}
pub async fn start(mut self) {
loop {
futures::select! {
req = self.ds_requests_rx.select_next_some() => {
self.handle_direct_send_request(req).await;
}
notif = self.peer_mgr_notifs_rx.select_next_some() => {
self.handle_peer_mgr_notification(notif);
}
complete => {
crit!("Direct send actor terminated");
break;
}
}
}
}
// Handle PeerManagerNotification, which can only be NewInboundSubstream for now.
fn handle_peer_mgr_notification(&self, notif: PeerManagerNotification<TSubstream>) {
trace!("PeerManagerNotification::{:?}", notif);
match notif {
PeerManagerNotification::NewInboundSubstream(peer_id, substream) => {
self.executor.spawn(
Self::handle_inbound_substream(
peer_id,
substream.protocol,
substream.substream,
self.ds_notifs_tx.clone(),
)
.boxed()
.unit_error()
.compat(),
);
}
_ => unreachable!("Unexpected PeerManagerNotification"),
}
}
// Handle a new inbound substream. Keep forwarding the messages to the NetworkProvider.
async fn handle_inbound_substream(
peer_id: PeerId,
protocol: ProtocolId,
substream: TSubstream,
mut ds_notifs_tx: channel::Sender<DirectSendNotification>,
) {
let mut substream =
Framed::new(substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
while let Some(item) = substream.next().await {
match item {
Ok(data) => {
let notif = DirectSendNotification::RecvMessage(
peer_id,
Message {
protocol: protocol.clone(),
mdata: data.freeze(),
},
);
ds_notifs_tx
.send(notif)
.await
.expect("DirectSendNotification send error");
}
Err(e) => {
warn!(
"DirectSend substream with peer {} receives error {}",
peer_id.short_str(),
e
);
break;
}
}
}
warn!(
"DirectSend inbound substream with peer {} closed",
peer_id.short_str()
);
}
// Create a new message queue and spawn a task to forward the messages from the queue to the
// corresponding substream.
async fn start_message_queue_handler(
executor: TaskExecutor,
mut peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
peer_id: PeerId,
protocol: ProtocolId,
) -> Result<channel::Sender<Bytes>, NetworkError> {
// Create a channel for the (PeerId, ProtocolId) pair.
let (msg_tx, msg_rx) = channel::new::<Bytes>(
1024,
&counters::OP_COUNTERS.peer_gauge(
&counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
&peer_id.short_str(),
),
);
// Open a new substream for the (PeerId, ProtocolId) pair
let raw_substream = peer_mgr_reqs_tx.open_substream(peer_id, protocol).await?;
let substream =
Framed::new(raw_substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
// Spawn a task to forward the messages from the queue to the substream.
let f_substream = async move {
if let Err(e) = msg_rx.map(Ok).forward(substream).await {
warn!(
"Forward messages to peer {} error {:?}",
peer_id.short_str(),
e
);
}
// The messages still in the queue will be dropped
counters::DIRECT_SEND_MESSAGES_DROPPED.inc_by(
counters::OP_COUNTERS
.peer_gauge(
&counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
&peer_id.short_str(),
)
.get(),
);
};
executor.spawn(f_substream.boxed().unit_error().compat());
Ok(msg_tx)
}
// Try to send a message to the message queue.
async fn try_send_msg(
&mut self,
peer_id: PeerId,
msg: Message,
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
) -> Result<(), NetworkError> {
let protocol = msg.protocol.clone();
let substream_queue_tx = match self.message_queues.entry((peer_id, protocol.clone())) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let msg_tx = Self::start_message_queue_handler(
self.executor.clone(),
peer_mgr_reqs_tx,
peer_id,
protocol.clone(),
)
.await?;
entry.insert(msg_tx)
}
};
substream_queue_tx.send(msg.mdata).await.map_err(|e| {
self.message_queues.remove(&(peer_id, protocol));
e.into()
})
}
// Handle DirectSendRequest, which can only be a SendMessage request for now.
async fn handle_direct_send_request(&mut self, req: DirectSendRequest) {
trace!("DirectSendRequest::{:?}", req);
match req {
DirectSendRequest::SendMessage(peer_id, msg) => {
if let Err(e) = self
.try_send_msg(peer_id, msg.clone(), self.peer_mgr_reqs_tx.clone())
.await
{
counters::DIRECT_SEND_MESSAGES_DROPPED.inc();
warn!("DirectSend to peer {} failed: {}", peer_id.short_str(), e);
}
}
}
}
}
| new | identifier_name |
mod.rs | // Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Protocol for fire-and-forget style message delivery to a peer
//!
//! DirectSend protocol takes advantage of [muxers] and [substream negotiation] to build a simple
//! best effort message delivery protocol. Concretely,
//!
//! 1. Every message runs in its own ephemeral substream. The substream is directional in the way | //! that only the dialer sends a message to the listener, but no messages or acknowledgements
//! are sent back in the other direction. So the message delivery is best effort and not
//! guaranteed. Because the substreams are independent, there is no guarantee on the ordering
//! of the message delivery either.
//! 2. A DirectSend call negotiates which protocol to speak using [`protocol-select`]. This
//! allows simple versioning of message delivery and negotiation of which message types are
//! supported. In the future, we can potentially support multiple backwards-incompatible
//! versions of any messages.
//! 3. The actual structure of the wire messages is left for higher layers to specify. The
//! DirectSend protocol is only concerned with shipping around opaque blobs. Current xpeer
//! DirectSend clients (consensus, mempool) mostly send protobuf enums around over a single
//! DirectSend protocol, e.g., `/xpeer/consensus/direct_send/0.1.0`.
//!
//! ## Wire Protocol (dialer):
//!
//! To send a message to a remote peer, the dialer
//!
//! 1. Requests a new outbound substream from the muxer.
//! 2. Negotiates the substream using [`protocol-select`] to the protocol they
//! wish to speak, e.g., `/xpeer/mempool/direct_send/0.1.0`.
//! 3. Sends the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! ## Wire Protocol (listener):
//!
//! To receive a message from remote peers, the listener
//!
//! 1. Polls for new inbound substreams on the muxer.
//! 2. Negotiates inbound substreams using [`protocol-select`]. The negotiation
//! must only succeed if the requested protocol is actually supported.
//! 3. Awaits the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! Note: negotiated substreams are currently framed with the
//! [multiformats unsigned varint length-prefix](https://github.com/multiformats/unsigned-varint)
//!
//! [muxers]: ../../../netcore/multiplexing/index.html
//! [substream negotiation]: ../../../netcore/negotiate/index.html
//! [`protocol-select`]: ../../../netcore/negotiate/index.html
use crate::{
counters,
error::NetworkError,
peer_manager::{PeerManagerNotification, PeerManagerRequestSender},
ProtocolId,
};
use bytes::Bytes;
use channel;
use futures::{
compat::Sink01CompatExt,
future::{FutureExt, TryFutureExt},
io::{AsyncRead, AsyncReadExt, AsyncWrite},
sink::SinkExt,
stream::StreamExt,
};
use logger::prelude::*;
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Debug,
};
use tokio::{codec::Framed, runtime::TaskExecutor};
use types::PeerId;
use unsigned_varint::codec::UviBytes;
#[cfg(test)]
mod test;
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendRequest {
/// A request to send out a message.
SendMessage(PeerId, Message),
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendNotification {
/// A notification that a DirectSend message is received.
RecvMessage(PeerId, Message),
}
#[derive(Clone, Eq, PartialEq)]
pub struct Message {
/// Message type.
pub protocol: ProtocolId,
/// Serialized message data.
pub mdata: Bytes,
}
impl Debug for Message {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mdata_str = if self.mdata.len() <= 10 {
format!("{:?}", self.mdata)
} else {
format!("{:?}...", self.mdata.slice_to(10))
};
write!(
f,
"Message {{ protocol: {:?}, mdata: {} }}",
self.protocol, mdata_str
)
}
}
/// The DirectSend actor.
pub struct DirectSend<TSubstream> {
/// A handle to a tokio executor.
executor: TaskExecutor,
/// Channel to receive requests from other upstream actors.
ds_requests_rx: channel::Receiver<DirectSendRequest>,
/// Channel to send notifications to upstream actors.
ds_notifs_tx: channel::Sender<DirectSendNotification>,
/// Channel to receive notifications from PeerManager.
peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
/// Channel to send requests to PeerManager.
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
/// Outbound message queues for each (PeerId, ProtocolId) pair.
message_queues: HashMap<(PeerId, ProtocolId), channel::Sender<Bytes>>,
}
impl<TSubstream> DirectSend<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite + Send + Unpin + Debug + 'static,
{
pub fn new(
executor: TaskExecutor,
ds_requests_rx: channel::Receiver<DirectSendRequest>,
ds_notifs_tx: channel::Sender<DirectSendNotification>,
peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
) -> Self {
Self {
executor,
ds_requests_rx,
ds_notifs_tx,
peer_mgr_notifs_rx,
peer_mgr_reqs_tx,
message_queues: HashMap::new(),
}
}
pub async fn start(mut self) {
loop {
futures::select! {
req = self.ds_requests_rx.select_next_some() => {
self.handle_direct_send_request(req).await;
}
notif = self.peer_mgr_notifs_rx.select_next_some() => {
self.handle_peer_mgr_notification(notif);
}
complete => {
crit!("Direct send actor terminated");
break;
}
}
}
}
// Handle PeerManagerNotification, which can only be NewInboundSubstream for now.
fn handle_peer_mgr_notification(&self, notif: PeerManagerNotification<TSubstream>) {
trace!("PeerManagerNotification::{:?}", notif);
match notif {
PeerManagerNotification::NewInboundSubstream(peer_id, substream) => {
self.executor.spawn(
Self::handle_inbound_substream(
peer_id,
substream.protocol,
substream.substream,
self.ds_notifs_tx.clone(),
)
.boxed()
.unit_error()
.compat(),
);
}
_ => unreachable!("Unexpected PeerManagerNotification"),
}
}
// Handle a new inbound substream. Keep forwarding the messages to the NetworkProvider.
async fn handle_inbound_substream(
peer_id: PeerId,
protocol: ProtocolId,
substream: TSubstream,
mut ds_notifs_tx: channel::Sender<DirectSendNotification>,
) {
let mut substream =
Framed::new(substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
while let Some(item) = substream.next().await {
match item {
Ok(data) => {
let notif = DirectSendNotification::RecvMessage(
peer_id,
Message {
protocol: protocol.clone(),
mdata: data.freeze(),
},
);
ds_notifs_tx
.send(notif)
.await
.expect("DirectSendNotification send error");
}
Err(e) => {
warn!(
"DirectSend substream with peer {} receives error {}",
peer_id.short_str(),
e
);
break;
}
}
}
warn!(
"DirectSend inbound substream with peer {} closed",
peer_id.short_str()
);
}
// Create a new message queue and spawn a task to forward the messages from the queue to the
// corresponding substream.
async fn start_message_queue_handler(
executor: TaskExecutor,
mut peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
peer_id: PeerId,
protocol: ProtocolId,
) -> Result<channel::Sender<Bytes>, NetworkError> {
// Create a channel for the (PeerId, ProtocolId) pair.
let (msg_tx, msg_rx) = channel::new::<Bytes>(
1024,
&counters::OP_COUNTERS.peer_gauge(
&counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
&peer_id.short_str(),
),
);
// Open a new substream for the (PeerId, ProtocolId) pair
let raw_substream = peer_mgr_reqs_tx.open_substream(peer_id, protocol).await?;
let substream =
Framed::new(raw_substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
// Spawn a task to forward the messages from the queue to the substream.
let f_substream = async move {
if let Err(e) = msg_rx.map(Ok).forward(substream).await {
warn!(
"Forward messages to peer {} error {:?}",
peer_id.short_str(),
e
);
}
// Any messages still in the queue will be dropped
counters::DIRECT_SEND_MESSAGES_DROPPED.inc_by(
counters::OP_COUNTERS
.peer_gauge(
&counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
&peer_id.short_str(),
)
.get(),
);
};
executor.spawn(f_substream.boxed().unit_error().compat());
Ok(msg_tx)
}
// Try to send a message to the message queue.
async fn try_send_msg(
&mut self,
peer_id: PeerId,
msg: Message,
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
) -> Result<(), NetworkError> {
let protocol = msg.protocol.clone();
let substream_queue_tx = match self.message_queues.entry((peer_id, protocol.clone())) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let msg_tx = Self::start_message_queue_handler(
self.executor.clone(),
peer_mgr_reqs_tx,
peer_id,
protocol.clone(),
)
.await?;
entry.insert(msg_tx)
}
};
substream_queue_tx.send(msg.mdata).await.map_err(|e| {
self.message_queues.remove(&(peer_id, protocol));
e.into()
})
}
// Handle DirectSendRequest, which can only be SendMessage request for now.
async fn handle_direct_send_request(&mut self, req: DirectSendRequest) {
trace!("DirectSendRequest::{:?}", req);
match req {
DirectSendRequest::SendMessage(peer_id, msg) => {
if let Err(e) = self
.try_send_msg(peer_id, msg.clone(), self.peer_mgr_reqs_tx.clone())
.await
{
counters::DIRECT_SEND_MESSAGES_DROPPED.inc();
warn!("DirectSend to peer {} failed: {}", peer_id.short_str(), e);
}
}
}
}
} | random_line_split |
|
mod.rs | // Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Protocol for fire-and-forget style message delivery to a peer
//!
//! DirectSend protocol takes advantage of [muxers] and [substream negotiation] to build a simple
//! best effort message delivery protocol. Concretely,
//!
//! 1. Every message runs in its own ephemeral substream. The substream is directional in the way
//! that only the dialer sends a message to the listener, but no messages or acknowledgements
//! sending back on the other direction. So the message delivery is best effort and not
//! guaranteed. Because the substreams are independent, there is no guarantee on the ordering
//! of the message delivery either.
//! 2. A DirectSend call negotiates which protocol to speak using [`protocol-select`]. This
//! allows simple versioning of message delivery and negotiation of which message types are
//! supported. In the future, we can potentially support multiple backwards-incompatible
//! versions of any messages.
//! 3. The actual structure of the wire messages is left for higher layers to specify. The
//! DirectSend protocol is only concerned with shipping around opaque blobs. Current xpeer
//! DirectSend clients (consensus, mempool) mostly send protobuf enums around over a single
//! DirectSend protocol, e.g., `/xpeer/consensus/direct_send/0.1.0`.
//!
//! ## Wire Protocol (dialer):
//!
//! To send a message to a remote peer, the dialer
//!
//! 1. Requests a new outbound substream from the muxer.
//! 2. Negotiates the substream using [`protocol-select`] to the protocol they
//! wish to speak, e.g., `/xpeer/mempool/direct_send/0.1.0`.
//! 3. Sends the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! ## Wire Protocol (listener):
//!
//! To receive a message from remote peers, the listener
//!
//! 1. Polls for new inbound substreams on the muxer.
//! 2. Negotiates inbound substreams using [`protocol-select`]. The negotiation
//! must only succeed if the requested protocol is actually supported.
//! 3. Awaits the serialized message on the newly negotiated substream.
//! 4. Drops the substream.
//!
//! Note: negotiated substreams are currently framed with the
//! [multiformats unsigned varint length-prefix](https://github.com/multiformats/unsigned-varint).
//!
//! [muxers]: ../../../netcore/multiplexing/index.html
//! [substream negotiation]: ../../../netcore/negotiate/index.html
//! [`protocol-select`]: ../../../netcore/negotiate/index.html
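//!
//! ## Example (illustrative only)
//!
//! A minimal listener-side sketch of the four steps above. The `muxer` handle
//! and the `negotiate_inbound` helper are hypothetical names used purely for
//! illustration; only the framing mirrors the actual code in this module.
//!
//! ```ignore
//! // 1. Poll for a new inbound substream on the muxer (assumed API).
//! let substream = muxer.next_inbound().await?;
//! // 2. Negotiate; this must only succeed for protocols we actually support.
//! let (substream, protocol) = negotiate_inbound(substream, SUPPORTED_PROTOCOLS).await?;
//! // 3. Await the single length-prefixed, serialized message.
//! let mut framed = Framed::new(substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
//! let msg = framed.next().await; // Option<Result<BytesMut, io::Error>>
//! // 4. Drop the substream; no acknowledgement is sent back.
//! drop(framed);
//! ```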
use crate::{
counters,
error::NetworkError,
peer_manager::{PeerManagerNotification, PeerManagerRequestSender},
ProtocolId,
};
use bytes::Bytes;
use channel;
use futures::{
compat::Sink01CompatExt,
future::{FutureExt, TryFutureExt},
io::{AsyncRead, AsyncReadExt, AsyncWrite},
sink::SinkExt,
stream::StreamExt,
};
use logger::prelude::*;
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Debug,
};
use tokio::{codec::Framed, runtime::TaskExecutor};
use types::PeerId;
use unsigned_varint::codec::UviBytes;
#[cfg(test)]
mod test;
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendRequest {
/// A request to send out a message.
SendMessage(PeerId, Message),
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DirectSendNotification {
/// A notification that a DirectSend message is received.
RecvMessage(PeerId, Message),
}
#[derive(Clone, Eq, PartialEq)]
pub struct Message {
/// Message type.
pub protocol: ProtocolId,
/// Serialized message data.
pub mdata: Bytes,
}
impl Debug for Message {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mdata_str = if self.mdata.len() <= 10 {
format!("{:?}", self.mdata)
} else | ;
write!(
f,
"Message {{ protocol: {:?}, mdata: {} }}",
self.protocol, mdata_str
)
}
}
/// The DirectSend actor.
pub struct DirectSend<TSubstream> {
/// A handle to a tokio executor.
executor: TaskExecutor,
/// Channel to receive requests from other upstream actors.
ds_requests_rx: channel::Receiver<DirectSendRequest>,
/// Channels to send notifications to upstream actors.
ds_notifs_tx: channel::Sender<DirectSendNotification>,
/// Channel to receive notifications from PeerManager.
peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
/// Channel to send requests to PeerManager.
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
/// Outbound message queues for each (PeerId, ProtocolId) pair.
message_queues: HashMap<(PeerId, ProtocolId), channel::Sender<Bytes>>,
}
impl<TSubstream> DirectSend<TSubstream>
where
TSubstream: AsyncRead + AsyncWrite + Send + Unpin + Debug + 'static,
{
pub fn new(
executor: TaskExecutor,
ds_requests_rx: channel::Receiver<DirectSendRequest>,
ds_notifs_tx: channel::Sender<DirectSendNotification>,
peer_mgr_notifs_rx: channel::Receiver<PeerManagerNotification<TSubstream>>,
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
) -> Self {
Self {
executor,
ds_requests_rx,
ds_notifs_tx,
peer_mgr_notifs_rx,
peer_mgr_reqs_tx,
message_queues: HashMap::new(),
}
}
pub async fn start(mut self) {
loop {
futures::select! {
req = self.ds_requests_rx.select_next_some() => {
self.handle_direct_send_request(req).await;
}
notif = self.peer_mgr_notifs_rx.select_next_some() => {
self.handle_peer_mgr_notification(notif);
}
complete => {
crit!("Direct send actor terminated");
break;
}
}
}
}
// Handle PeerManagerNotification, which can only be NewInboundSubstream for now.
fn handle_peer_mgr_notification(&self, notif: PeerManagerNotification<TSubstream>) {
trace!("PeerManagerNotification::{:?}", notif);
match notif {
PeerManagerNotification::NewInboundSubstream(peer_id, substream) => {
self.executor.spawn(
Self::handle_inbound_substream(
peer_id,
substream.protocol,
substream.substream,
self.ds_notifs_tx.clone(),
)
.boxed()
.unit_error()
.compat(),
);
}
_ => unreachable!("Unexpected PeerManagerNotification"),
}
}
// Handle a new inbound substream. Keep forwarding the messages to the NetworkProvider.
async fn handle_inbound_substream(
peer_id: PeerId,
protocol: ProtocolId,
substream: TSubstream,
mut ds_notifs_tx: channel::Sender<DirectSendNotification>,
) {
let mut substream =
Framed::new(substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
while let Some(item) = substream.next().await {
match item {
Ok(data) => {
let notif = DirectSendNotification::RecvMessage(
peer_id,
Message {
protocol: protocol.clone(),
mdata: data.freeze(),
},
);
ds_notifs_tx
.send(notif)
.await
.expect("DirectSendNotification send error");
}
Err(e) => {
warn!(
"DirectSend substream with peer {} receives error {}",
peer_id.short_str(),
e
);
break;
}
}
}
warn!(
"DirectSend inbound substream with peer {} closed",
peer_id.short_str()
);
}
// Create a new message queue and spawn a task to forward the messages from the queue to the
// corresponding substream.
async fn start_message_queue_handler(
executor: TaskExecutor,
mut peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
peer_id: PeerId,
protocol: ProtocolId,
) -> Result<channel::Sender<Bytes>, NetworkError> {
// Create a channel for the (PeerId, ProtocolId) pair.
let (msg_tx, msg_rx) = channel::new::<Bytes>(
1024,
&counters::OP_COUNTERS.peer_gauge(
&counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
&peer_id.short_str(),
),
);
// Open a new substream for the (PeerId, ProtocolId) pair
let raw_substream = peer_mgr_reqs_tx.open_substream(peer_id, protocol).await?;
let substream =
Framed::new(raw_substream.compat(), UviBytes::<Bytes>::default()).sink_compat();
// Spawn a task to forward the messages from the queue to the substream.
let f_substream = async move {
if let Err(e) = msg_rx.map(Ok).forward(substream).await {
warn!(
"Forward messages to peer {} error {:?}",
peer_id.short_str(),
e
);
}
// Any messages still in the queue will be dropped
counters::DIRECT_SEND_MESSAGES_DROPPED.inc_by(
counters::OP_COUNTERS
.peer_gauge(
&counters::PENDING_DIRECT_SEND_OUTBOUND_MESSAGES,
&peer_id.short_str(),
)
.get(),
);
};
executor.spawn(f_substream.boxed().unit_error().compat());
Ok(msg_tx)
}
// Try to send a message to the message queue.
async fn try_send_msg(
&mut self,
peer_id: PeerId,
msg: Message,
peer_mgr_reqs_tx: PeerManagerRequestSender<TSubstream>,
) -> Result<(), NetworkError> {
let protocol = msg.protocol.clone();
let substream_queue_tx = match self.message_queues.entry((peer_id, protocol.clone())) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let msg_tx = Self::start_message_queue_handler(
self.executor.clone(),
peer_mgr_reqs_tx,
peer_id,
protocol.clone(),
)
.await?;
entry.insert(msg_tx)
}
};
substream_queue_tx.send(msg.mdata).await.map_err(|e| {
self.message_queues.remove(&(peer_id, protocol));
e.into()
})
}
// Handle DirectSendRequest, which can only be SendMessage request for now.
async fn handle_direct_send_request(&mut self, req: DirectSendRequest) {
trace!("DirectSendRequest::{:?}", req);
match req {
DirectSendRequest::SendMessage(peer_id, msg) => {
if let Err(e) = self
.try_send_msg(peer_id, msg.clone(), self.peer_mgr_reqs_tx.clone())
.await
{
counters::DIRECT_SEND_MESSAGES_DROPPED.inc();
warn!("DirectSend to peer {} failed: {}", peer_id.short_str(), e);
}
}
}
}
}
| {
format!("{:?}...", self.mdata.slice_to(10))
} | conditional_block |
test_1.rs | const USAGE: &str = "
Usage: test_1 [--dt N] [--tf N] [--plots]
test_1 --help
A template for DEM spherical particles modelling.
This benchmark is taken from
author: XXXX
paper: XXXX
link: XXXX
Description:
-----------
Describe the test in a few lines.
Method:
--------
What specific DEM model is used to model the current simulation.
Input:
------------
========Any inputs==============
Analysis:
-----------
How is this benchmark validated? What plots are used? What conclusions have been
made?
New features of dem_rust:
-------------------------
Any new features implemented in this example, which can later be used in
other examples?
Options:
--dt N Time step of the simulation [default: 1e-4]
--tf N Runtime of the simulation [default: 1.]
--plots Show the plots
-h, --help Show this message.
";
// -------------------------------------------------
// std imports
use std::f64::consts::PI;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
// external crate imports
use docopt::Docopt;
use gnuplot::*;
use multiphysics::prelude::*;
// -------------------------
// local imports
// -------------------------
// dem granular
pub use dem_rust::basic_equations::*;
pub use dem_rust::dem_3d::*;
pub use dem_rust::wall::*;
pub use dem_rust::prelude::*;
// // rigid body imports
// pub use dem_rust::rb::rb_2d::Rigidbody2D;
// for reading data from file (comma separated)
use crate::read_xy_pairs;
// external crate imports
// use gnuplot::*;
// use simple_shapes::prelude::*;
// -------------------------------------------------
#[derive(Deserialize, Debug)]
pub struct | {
flag_tf: f64,
flag_dt: f64,
flag_plots: bool,
}
pub fn main(args: &[String]) {
// --------------------------------------
// GET THE COMMAND LINE VARIABLES
// --------------------------------------
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.argv(args).deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
// --------------------------------------
// GET THE COMMAND LINE VARIABLES ENDS
// --------------------------------------
// --------------------------------------
// CREATE PARTICLE ARRAYS
// --------------------------------------
let x = vec![0.];
let y = vec![50. * 1e-2];
let z = vec![0.; x.len()];
let radius = vec![10. * 1e-2; x.len()];
let mut sand = DEM3DParticleArray::from_xyz_rad(&x, &y, &z, &radius);
let rho = 2600.;
sand.rho = vec![rho; x.len()];
sand.m = vec![rho * PI * radius[0] * radius[0]; x.len()];
sand.m_inv = vec![1. / sand.m[0]; x.len()];
let inertia = 4. * (2. * radius[0]) * (2. * radius[0]) / 10.;
sand.mi = vec![inertia; x.len()];
sand.mi_inv = vec![1. / sand.mi[0]; x.len()];
let stiffness = 5. * 1e6;
sand.kn = vec![stiffness; x.len()];
sand.kt = vec![stiffness; x.len()];
// set some dummy Young's modulus for linear DEM case; change these values if
// you are updating this example for nonlinear DEM
let yng = 1.;
let nu = 0.2;
let shr = yng / (2. * (1. + nu));
sand.young_mod = vec![yng; x.len()];
sand.poisson = vec![nu; x.len()];
sand.shear_mod = vec![shr; x.len()];
// this is nice step for debugging
sand.validate_particle_array();
// -------------------------
// create an infinite wall
// -------------------------
let x = vec![0.];
let y = vec![0.];
let z = vec![0.; x.len()];
let wall_points = vec![[Vector3::new(-1., 0., 0.), Vector3::new(1., 0., 0.)]];
let nx = vec![0.];
let ny = vec![1.];
let nz = vec![0.; x.len()];
let mut wall =
WallDEMParticleArray::from_xyz_wall_points_normals(&x, &y, &z, &wall_points, &nx, &ny, &nz);
wall.kn = vec![stiffness; x.len()];
wall.kt = vec![stiffness; x.len()];
// --------------------------------------
// CREATE PARTICLE ARRAYS ENDS
// --------------------------------------
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS
// ----------------------------------------
let max_coordinate = 50. * 1e-2;
let max_size = 2. * radius[0];
let mut nbs2d_sand =
NBS2D::from_maximum_and_no_of_particles(max_coordinate, max_size, sand.x.len());
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS ENDS
// ----------------------------------------
// ----------------------------------------
// SOLVER DATA
// ----------------------------------------
let tf = args.flag_tf;
let dt = args.flag_dt;
let total_steps = (tf / dt) as u64;
let mut t = 0.;
let mut step_no = 0;
let total_output_file = 1000;
let pfreq = if total_steps < total_output_file {
1
} else {
total_steps / total_output_file
};
// ----------------------------------------
// SOLVER DATA ENDS
// ----------------------------------------
// ----------------------------------------
// OUTPUT DIRECTORY
// ----------------------------------------
let project_root = env!("CARGO_MANIFEST_DIR");
let dir_name = project_root.to_owned() + "/test_1_output";
let _p = fs::create_dir(&dir_name);
// ----------------------------------------
// OUTPUT DIRECTORY ENDS
// ----------------------------------------
// ----------------------------------------
// SOME CONSTANTS
// ----------------------------------------
let stage1 = 1;
let stage2 = 2;
// coefficient of friction
let mu = 0.5;
let kn = 5. * 1e5;
let kt = 5. * 1e5;
let en = 0.9;
// ----------------------------------------
// SOME CONSTANTS ENDS
// ----------------------------------------
// create a progress bar
let pb = setup_progress_bar(total_steps);
// -------------------------------
// FOR PARAVIEW VISUALIZATION
// -------------------------------
// define variables for automatic visualization
// particle array files
let mut sand_files = "".to_string();
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
let mut wall_files = "".to_string();
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
// neighbours files
let mut nnps_files = "".to_string();
write_nnps_2d_to_vtk!(nbs2d_sand, format!("{}/nnps.vtk", &dir_name));
nnps_files.push_str(&format!("'{}/nnps.vtk'", &dir_name));
// -------------------------------
// FOR PARAVIEW VISUALIZATION ENDS
// -------------------------------
// -------------------------------
// POST PROCESSING VARIABLES
// -------------------------------
// uncomment and initialize the variables
// -------------------------------
// POST PROCESSING VARIABLES ENDS
// -------------------------------
while t < tf {
// ----------------------
// RK2 INITIALIZE
// ----------------------
rk2_initialize_copy_contacts_3d!((sand));
rk2_initialize_dem!((sand));
// ----------------------
// STAGE 1 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage1, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage1, dt);
// ----------------------
// STAGE 1 STEPPER
// ----------------------
rk2_stage1_dem!((sand), dt);
// ----------------------
// STAGE 2 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage2, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage2, dt);
// ----------------------
// STAGE 2 STEPPER
// ----------------------
rk2_stage2_dem!((sand), dt);
update_contacts_pp!(sand, (sand));
// ----------------------
// OUTPUT THE VTK FILE AND WRITE ALL FILES DATA
// ----------------------
if step_no % pfreq == 0 && step_no != 0 {
// ----------------------
// WRITE DATA TO VTK FILE
// ----------------------
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
// ----------------------
// FOR PARAVIEW AUTOMATION
// ----------------------
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
}
step_no += 1;
t += dt;
// progress bar increment
pb.inc(1);
}
pb.finish_with_message("Simulation successfully completed");
// ---------------------------------------
// write an easy paraview visualization file
// ---------------------------------------
// truncate the extra part of the string
sand_files.truncate(sand_files.len() - 2);
write_vis_file(
format!("{}/vis_paraview.py", &dir_name),
vec!["sand", "wall"],
vec![sand_files, wall_files],
vec![true, false],
vec!["nnps"],
vec![nnps_files],
);
// ---------------------------------------
// write an easy paraview visualization file ends
// ---------------------------------------
// ---------------------------------------
// PLOTTING
// ---------------------------------------
// UNCOMMENT AND USE THE PLOTTING FACILITY
// let (incident_angle_experiment_kharaz, rebound_angle_experiment_kharaz) =
// read_xy_pairs(&format!(
// "{}/data/chung_test_4_incident_angle_vs_rebound_angle_experiment_kharaz.txt",
// &project_root
// ));
// let mut fg = Figure::new();
// fg.axes2d()
// .set_x_label("Incident angle (degree)", &[])
// .set_y_label("Rebound angle (degree)", &[])
// //.set_x_range(Fix(0.), Fix(90.))
// //.set_y_range(Fix(-800.), Fix(0.))
// .lines(
// &incident_angle_experiment_kharaz,
// &rebound_angle_experiment_kharaz,
// &[Caption("Kharaz experiment"), Color("black")],
// )
// .lines(
// &incident_angle_paper_simulated,
// &rebound_angle_paper_simulated,
// &[Caption("Paper simulated"), Color("blue")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_alloy,
// &[Caption("Al alloy"), Color("black")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_oxide,
// &[Caption("Al oxide"), Color("blue")],
// );
// let px = 1000;
// fg.save_to_png(
// &format!(
// "{}/chung_test_4_incident_angle_vs_rebound_angle.png",
// &dir_name
// ),
// px,
// px,
// )
// .unwrap();
// if args.flag_plots {
// fg.show().unwrap();
// }
// ---------------------------------------
// PLOTTING ENDS
// ---------------------------------------
}
| Args | identifier_name |
test_1.rs | const USAGE: &str = "
Usage: test_1 [--dt N] [--tf N] [--plots]
test_1 --help
A template for DEM spherical particles modelling.
This benchmark is taken from
author: XXXX
paper: XXXX
link: XXXX
Description:
-----------
Describe the test in a few lines.
Method:
--------
What specific DEM model is used to model the current simulation.
Input:
------------
========Any inputs==============
Analysis:
-----------
How is this benchmark validated? What plots are used? What conclusions have been
made?
New features of dem_rust:
-------------------------
Any new features implemented in this example, which can later be used in
other examples?
Options:
--dt N Time step of the simulation [default: 1e-4]
--tf N Runtime of the simulation [default: 1.]
--plots Show the plots
-h, --help Show this message.
";
// -------------------------------------------------
// std imports
use std::f64::consts::PI;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
// external crate imports
use docopt::Docopt;
use gnuplot::*;
use multiphysics::prelude::*;
// -------------------------
// local imports
// -------------------------
// dem granular
pub use dem_rust::basic_equations::*;
pub use dem_rust::dem_3d::*;
pub use dem_rust::wall::*;
pub use dem_rust::prelude::*;
// // rigid body imports
// pub use dem_rust::rb::rb_2d::Rigidbody2D;
// for reading data from file (comma separated)
use crate::read_xy_pairs;
// external crate imports
// use gnuplot::*;
// use simple_shapes::prelude::*;
// -------------------------------------------------
#[derive(Deserialize, Debug)]
pub struct Args {
flag_tf: f64,
flag_dt: f64,
flag_plots: bool,
}
pub fn main(args: &[String]) | let rho = 2600.;
sand.rho = vec![rho; x.len()];
sand.m = vec![rho * PI * radius[0] * radius[0]; x.len()];
sand.m_inv = vec![1. / sand.m[0]; x.len()];
let inertia = 4. * (2. * radius[0]) * (2. * radius[0]) / 10.;
sand.mi = vec![inertia; x.len()];
sand.mi_inv = vec![1. / sand.mi[0]; x.len()];
let stiffness = 5. * 1e6;
sand.kn = vec![stiffness; x.len()];
sand.kt = vec![stiffness; x.len()];
// set some dummy Young's modulus for linear DEM case; change these values if
// you are updating this example for nonlinear DEM
let yng = 1.;
let nu = 0.2;
let shr = yng / (2. * (1. + nu));
sand.young_mod = vec![yng; x.len()];
sand.poisson = vec![nu; x.len()];
sand.shear_mod = vec![shr; x.len()];
// this is a nice step for debugging
sand.validate_particle_array();
// -------------------------
// create an infinite wall
// -------------------------
let x = vec![0.];
let y = vec![0.];
let z = vec![0.; x.len()];
let wall_points = vec![[Vector3::new(-1., 0., 0.), Vector3::new(1., 0., 0.)]];
let nx = vec![0.];
let ny = vec![1.];
let nz = vec![0.; x.len()];
let mut wall =
WallDEMParticleArray::from_xyz_wall_points_normals(&x, &y, &z, &wall_points, &nx, &ny, &nz);
wall.kn = vec![stiffness; x.len()];
wall.kt = vec![stiffness; x.len()];
// --------------------------------------
// CREATE PARTICLE ARRAYS ENDS
// --------------------------------------
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS
// ----------------------------------------
let max_coordinate = 50. * 1e-2;
let max_size = 2. * radius[0];
let mut nbs2d_sand =
NBS2D::from_maximum_and_no_of_particles(max_coordinate, max_size, sand.x.len());
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS ENDS
// ----------------------------------------
// ----------------------------------------
// SOLVER DATA
// ----------------------------------------
let tf = args.flag_tf;
let dt = args.flag_dt;
let total_steps = (tf / dt) as u64;
let mut t = 0.;
let mut step_no = 0;
let total_output_file = 1000;
let pfreq = if total_steps < total_output_file {
1
} else {
total_steps / total_output_file
};
// ----------------------------------------
// SOLVER DATA ENDS
// ----------------------------------------
// ----------------------------------------
// OUTPUT DIRECTORY
// ----------------------------------------
let project_root = env!("CARGO_MANIFEST_DIR");
let dir_name = project_root.to_owned() + "/test_1_output";
let _p = fs::create_dir(&dir_name);
// ----------------------------------------
// OUTPUT DIRECTORY ENDS
// ----------------------------------------
// ----------------------------------------
// SOME CONSTANTS
// ----------------------------------------
let stage1 = 1;
let stage2 = 2;
// coefficient of friction
let mu = 0.5;
let kn = 5. * 1e5;
let kt = 5. * 1e5;
let en = 0.9;
// ----------------------------------------
// SOME CONSTANTS ENDS
// ----------------------------------------
// create a progress bar
let pb = setup_progress_bar(total_steps);
// -------------------------------
// FOR PARAVIEW VISUALIZATION
// -------------------------------
// define variables for automatic visualization
// particle array files
let mut sand_files = "".to_string();
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
let mut wall_files = "".to_string();
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
// neighbours files
let mut nnps_files = "".to_string();
write_nnps_2d_to_vtk!(nbs2d_sand, format!("{}/nnps.vtk", &dir_name));
nnps_files.push_str(&format!("'{}/nnps.vtk'", &dir_name));
// -------------------------------
// FOR PARAVIEW VISUALIZATION ENDS
// -------------------------------
// -------------------------------
// POST PROCESSING VARIABLES
// -------------------------------
// uncomment and initialize the variables
// -------------------------------
// POST PROCESSING VARIABLES ENDS
// -------------------------------
while t < tf {
// ----------------------
// RK2 INITIALIZE
// ----------------------
rk2_initialize_copy_contacts_3d!((sand));
rk2_initialize_dem!((sand));
// ----------------------
// STAGE 1 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage1, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage1, dt);
// ----------------------
// STAGE 1 STEPPER
// ----------------------
rk2_stage1_dem!((sand), dt);
// ----------------------
// STAGE 2 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage2, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage2, dt);
// ----------------------
// STAGE 2 STEPPER
// ----------------------
rk2_stage2_dem!((sand), dt);
update_contacts_pp!(sand, (sand));
// ----------------------
// OUTPUT THE VTK FILE AND WRITE ALL FILES DATA
// ----------------------
if step_no % pfreq == 0 && step_no != 0 {
// ----------------------
// WRITE DATA TO VTK FILE
// ----------------------
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
// ----------------------
// FOR PARAVIEW AUTOMATION
// ----------------------
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
}
step_no += 1;
t += dt;
// progress bar increment
pb.inc(1);
}
pb.finish_with_message("Simulation successfully completed");
// ---------------------------------------
// write an easy paraview visualization file
// ---------------------------------------
// truncate the extra part of the string
sand_files.truncate(sand_files.len() - 2);
write_vis_file(
format!("{}/vis_paraview.py", &dir_name),
vec!["sand", "wall"],
vec![sand_files, wall_files],
vec![true, false],
vec!["nnps"],
vec![nnps_files],
);
// ---------------------------------------
// write an easy paraview visualization file ends
// ---------------------------------------
// ---------------------------------------
// PLOTTING
// ---------------------------------------
// UNCOMMENT AND USE THE PLOTTING FACILITY
// let (incident_angle_experiment_kharaz, rebound_angle_experiment_kharaz) =
// read_xy_pairs(&format!(
// "{}/data/chung_test_4_incident_angle_vs_rebound_angle_experiment_kharaz.txt",
// &project_root
// ));
// let mut fg = Figure::new();
// fg.axes2d()
// .set_x_label("Incident angle (degree)", &[])
// .set_y_label("Rebound angle (degree)", &[])
// //.set_x_range(Fix(0.), Fix(90.))
// //.set_y_range(Fix(-800.), Fix(0.))
// .lines(
// &incident_angle_experiment_kharaz,
// &rebound_angle_experiment_kharaz,
// &[Caption("Kharaz experiment"), Color("black")],
// )
// .lines(
// &incident_angle_paper_simulated,
// &rebound_angle_paper_simulated,
// &[Caption("Paper simulated"), Color("blue")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_alloy,
// &[Caption("Al alloy"), Color("black")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_oxide,
// &[Caption("Al oxide"), Color("blue")],
// );
// let px = 1000;
// fg.save_to_png(
// &format!(
// "{}/chung_test_4_incident_angle_vs_rebound_angle.png",
// &dir_name
// ),
// px,
// px,
// )
// .unwrap();
// if args.flag_plots {
// fg.show().unwrap();
// }
// ---------------------------------------
// PLOTTING ENDS
// ---------------------------------------
}
| {
// --------------------------------------
// GET THE COMMAND LINE VARIABLES
// --------------------------------------
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.argv(args).deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
// --------------------------------------
// GET THE COMMAND LINE VARIABLES ENDS
// --------------------------------------
// --------------------------------------
// CREATE PARTICLE ARRAYS
// --------------------------------------
let x = vec![0.];
let y = vec![50. * 1e-2];
let z = vec![0.; x.len()];
let radius = vec![10. * 1e-2; x.len()];
let mut sand = DEM3DParticleArray::from_xyz_rad(&x, &y, &z, &radius); | identifier_body |
test_1.rs | const USAGE: &str = "
Usage: test_1 [--dt N] [--tf N] [--plots]
test_1 --help
A template for DEM spherical particles modelling.
This benchmark is taken from
author: XXXX
paper: XXXX
link: XXXX
Description:
-----------
Describe the test in a few lines.
Method:
--------
What specific DEM model is used to model the current simulation.
Input:
------------
========Any inputs==============
Analysis:
-----------
How is this benchmark validated? What plots are used? What conclusions have been
made?
New features of dem_rust:
-------------------------
Any new features implemented in this example, which can later be used in
other examples?
Options:
--dt N Time step of the simulation [default: 1e-4]
--tf N Runtime of the simulation [default: 1.]
--plots Show the plots
-h, --help Show this message.
";
// -------------------------------------------------
// std imports
use std::f64::consts::PI;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
// external crate imports
use docopt::Docopt;
use gnuplot::*;
use multiphysics::prelude::*;
// -------------------------
// local imports
// -------------------------
// dem granular
pub use dem_rust::basic_equations::*;
pub use dem_rust::dem_3d::*;
pub use dem_rust::wall::*;
pub use dem_rust::prelude::*;
// // rigid body imports
// pub use dem_rust::rb::rb_2d::Rigidbody2D;
// for reading data from file (comma separated)
use crate::read_xy_pairs;
// external crate imports
// use gnuplot::*;
// use simple_shapes::prelude::*;
// -------------------------------------------------
#[derive(Deserialize, Debug)]
pub struct Args {
flag_tf: f64,
flag_dt: f64,
flag_plots: bool,
}
pub fn main(args: &[String]) {
// --------------------------------------
// GET THE COMMAND LINE VARIABLES
// --------------------------------------
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.argv(args).deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
// --------------------------------------
// GET THE COMMAND LINE VARIABLES ENDS
// --------------------------------------
// --------------------------------------
// CREATE PARTICLE ARRAYS
// --------------------------------------
let x = vec![0.];
let y = vec![50. * 1e-2];
let z = vec![0.; x.len()];
let radius = vec![10. * 1e-2; x.len()];
let mut sand = DEM3DParticleArray::from_xyz_rad(&x, &y, &z, &radius);
let rho = 2600.;
sand.rho = vec![rho; x.len()];
sand.m = vec![rho * PI * radius[0] * radius[0]; x.len()];
sand.m_inv = vec![1. / sand.m[0]; x.len()];
let inertia = 4. * (2. * radius[0]) * (2. * radius[0]) / 10.;
sand.mi = vec![inertia; x.len()];
sand.mi_inv = vec![1. / sand.mi[0]; x.len()];
let stiffness = 5. * 1e6;
sand.kn = vec![stiffness; x.len()];
sand.kt = vec![stiffness; x.len()];
// set some dummy Young's modulus for linear DEM case; change these values if
// you are updating this example for nonlinear DEM
let yng = 1.;
let nu = 0.2;
let shr = yng / (2. * (1. + nu));
sand.young_mod = vec![yng; x.len()];
sand.poisson = vec![nu; x.len()];
sand.shear_mod = vec![shr; x.len()];
// this is a nice step for debugging
sand.validate_particle_array();
// -------------------------
// create an infinite wall
// -------------------------
let x = vec![0.];
let y = vec![0.];
let z = vec![0.; x.len()];
let wall_points = vec![[Vector3::new(-1., 0., 0.), Vector3::new(1., 0., 0.)]];
let nx = vec![0.];
let ny = vec![1.];
let nz = vec![0.; x.len()];
let mut wall =
WallDEMParticleArray::from_xyz_wall_points_normals(&x, &y, &z, &wall_points, &nx, &ny, &nz);
wall.kn = vec![stiffness; x.len()];
wall.kt = vec![stiffness; x.len()];
// --------------------------------------
// CREATE PARTICLE ARRAYS ENDS
// --------------------------------------
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS
// ----------------------------------------
let max_coordinate = 50. * 1e-2;
let max_size = 2. * radius[0];
let mut nbs2d_sand =
NBS2D::from_maximum_and_no_of_particles(max_coordinate, max_size, sand.x.len());
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS ENDS
// ----------------------------------------
// ----------------------------------------
// SOLVER DATA
// ----------------------------------------
let tf = args.flag_tf;
let dt = args.flag_dt;
let total_steps = (tf / dt) as u64;
let mut t = 0.;
let mut step_no = 0;
let total_output_file = 1000;
let pfreq = if total_steps < total_output_file {
1
} else | ;
// ----------------------------------------
// SOLVER DATA ENDS
// ----------------------------------------
// ----------------------------------------
// OUTPUT DIRECTORY
// ----------------------------------------
let project_root = env!("CARGO_MANIFEST_DIR");
let dir_name = project_root.to_owned() + "/test_1_output";
let _p = fs::create_dir(&dir_name);
// ----------------------------------------
// OUTPUT DIRECTORY ENDS
// ----------------------------------------
// ----------------------------------------
// SOME CONSTANTS
// ----------------------------------------
let stage1 = 1;
let stage2 = 2;
// coefficient of friction
let mu = 0.5;
let kn = 5. * 1e5;
let kt = 5. * 1e5;
let en = 0.9;
// ----------------------------------------
// SOME CONSTANTS ENDS
// ----------------------------------------
// create a progress bar
let pb = setup_progress_bar(total_steps);
// -------------------------------
// FOR PARAVIEW VISUALIZATION
// -------------------------------
// define variables for automatic visualization
// particle array files
let mut sand_files = "".to_string();
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
let mut wall_files = "".to_string();
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
// neighbours files
let mut nnps_files = "".to_string();
write_nnps_2d_to_vtk!(nbs2d_sand, format!("{}/nnps.vtk", &dir_name));
nnps_files.push_str(&format!("'{}/nnps.vtk'", &dir_name));
// -------------------------------
// FOR PARAVIEW VISUALIZATION ENDS
// -------------------------------
// -------------------------------
// POST PROCESSING VARIABLES
// -------------------------------
// uncomment and initialize the variables
// -------------------------------
// POST PROCESSING VARIABLES ENDS
// -------------------------------
while t < tf {
// ----------------------
// RK2 INITIALIZE
// ----------------------
rk2_initialize_copy_contacts_3d!((sand));
rk2_initialize_dem!((sand));
// ----------------------
// STAGE 1 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage1, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage1, dt);
// ----------------------
// STAGE 1 STEPPER
// ----------------------
rk2_stage1_dem!((sand), dt);
// ----------------------
// STAGE 2 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage2, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage2, dt);
// ----------------------
// STAGE 2 STEPPER
// ----------------------
rk2_stage2_dem!((sand), dt);
update_contacts_pp!(sand, (sand));
// ----------------------
// OUTPUT THE VTK FILE AND WRITE ALL FILES DATA
// ----------------------
if step_no % pfreq == 0 && step_no != 0 {
// ----------------------
// WRITE DATA TO VTK FILE
// ----------------------
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
// ----------------------
// FOR PARAVIEW AUTOMATION
// ----------------------
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
}
step_no += 1;
t += dt;
// progress bar increment
pb.inc(1);
}
pb.finish_with_message("Simulation successfully completed");
// ---------------------------------------
// write an easy paraview visualization file
// ---------------------------------------
// truncate the extra part of the string
sand_files.truncate(sand_files.len() - 2);
write_vis_file(
format!("{}/vis_paraview.py", &dir_name),
vec!["sand", "wall"],
vec![sand_files, wall_files],
vec![true, false],
vec!["nnps"],
vec![nnps_files],
);
// ---------------------------------------
// write an easy paraview visualization file ends
// ---------------------------------------
// ---------------------------------------
// PLOTTING
// ---------------------------------------
// UNCOMMENT AND USE THE PLOTTING FACILITY
// let (incident_angle_experiment_kharaz, rebound_angle_experiment_kharaz) =
// read_xy_pairs(&format!(
// "{}/data/chung_test_4_incident_angle_vs_rebound_angle_experiment_kharaz.txt",
// &project_root
// ));
// let mut fg = Figure::new();
// fg.axes2d()
// .set_x_label("Incident angle (degree)", &[])
// .set_y_label("Rebound angle (degree)", &[])
// //.set_x_range(Fix(0.), Fix(90.))
// //.set_y_range(Fix(-800.), Fix(0.))
// .lines(
// &incident_angle_experiment_kharaz,
// &rebound_angle_experiment_kharaz,
// &[Caption("Kharaz experiment"), Color("black")],
// )
// .lines(
// &incident_angle_paper_simulated,
// &rebound_angle_paper_simulated,
// &[Caption("Paper simulated"), Color("blue")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_alloy,
// &[Caption("Al alloy"), Color("black")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_oxide,
// &[Caption("Al oxide"), Color("blue")],
// );
// let px = 1000;
// fg.save_to_png(
// &format!(
// "{}/chung_test_4_incident_angle_vs_rebound_angle.png",
// &dir_name
// ),
// px,
// px,
// )
// .unwrap();
// if args.flag_plots {
// fg.show().unwrap();
// }
// ---------------------------------------
// PLOTTING ENDS
// ---------------------------------------
}
| {
total_steps / total_output_file
} | conditional_block |
test_1.rs | const USAGE: &str = "
Usage: test_1 [--dt N] [--tf N] [--plots]
test_1 --help
A template for DEM spherical particles modelling.
This benchmark is taken from
author: XXXX
paper: XXXX
link: XXXX
Description:
-----------
Describe the test in a few lines.
Method:
--------
What specific DEM model is used to model the current simulation.
Input:
------------
========Any inputs==============
Analysis:
-----------
How is this benchmark validated? What plots are used? What conclusions have been
made?
New features of dem_rust:
-------------------------
Any new features implemented in this example, which can later be used in
other examples?
Options:
--dt N Time step of the simulation [default: 1e-4]
--tf N Runtime of the simulation [default: 1.]
--plots Show the plots
-h, --help Show this message.
";
// -------------------------------------------------
// std imports
use std::f64::consts::PI;
use std::fs;
use std::fs::OpenOptions;
use std::io::Write;
// external crate imports
use docopt::Docopt;
use gnuplot::*;
use multiphysics::prelude::*;
// -------------------------
// local imports
// -------------------------
// dem granular
pub use dem_rust::basic_equations::*;
pub use dem_rust::dem_3d::*;
pub use dem_rust::wall::*;
pub use dem_rust::prelude::*;
// // rigid body imports
// pub use dem_rust::rb::rb_2d::Rigidbody2D;
// for reading data from file (comma separated)
use crate::read_xy_pairs;
// external crate imports
// use gnuplot::*;
// use simple_shapes::prelude::*;
// -------------------------------------------------
#[derive(Deserialize, Debug)]
pub struct Args {
flag_tf: f64,
flag_dt: f64,
flag_plots: bool,
}
pub fn main(args: &[String]) {
// --------------------------------------
// GET THE COMMAND LINE VARIABLES
// --------------------------------------
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.argv(args).deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
// --------------------------------------
// GET THE COMMAND LINE VARIABLES ENDS
// --------------------------------------
// --------------------------------------
// CREATE PARTICLE ARRAYS
// --------------------------------------
let x = vec![0.];
let y = vec![50. * 1e-2];
let z = vec![0.; x.len()];
let radius = vec![10. * 1e-2; x.len()];
let mut sand = DEM3DParticleArray::from_xyz_rad(&x, &y, &z, &radius);
let rho = 2600.;
sand.rho = vec![rho; x.len()];
sand.m = vec![rho * PI * radius[0] * radius[0]; x.len()];
sand.m_inv = vec![1. / sand.m[0]; x.len()];
let inertia = 4. * (2. * radius[0]) * (2. * radius[0]) / 10.;
sand.mi = vec![inertia; x.len()];
sand.mi_inv = vec![1. / sand.mi[0]; x.len()];
let stiffness = 5. * 1e6;
sand.kn = vec![stiffness; x.len()];
sand.kt = vec![stiffness; x.len()];
// set some dummy Young's modulus for linear DEM case; change these values if
// you are updating this example for nonlinear DEM
let yng = 1.;
let nu = 0.2;
let shr = yng / (2. * (1. + nu));
sand.young_mod = vec![yng; x.len()];
sand.poisson = vec![nu; x.len()];
sand.shear_mod = vec![shr; x.len()];
// this is a nice step for debugging
sand.validate_particle_array();
// -------------------------
// create an infinite wall
// -------------------------
let x = vec![0.];
let y = vec![0.];
let z = vec![0.; x.len()];
let wall_points = vec![[Vector3::new(-1., 0., 0.), Vector3::new(1., 0., 0.)]];
let nx = vec![0.];
let ny = vec![1.];
let nz = vec![0.; x.len()];
let mut wall =
WallDEMParticleArray::from_xyz_wall_points_normals(&x, &y, &z, &wall_points, &nx, &ny, &nz);
wall.kn = vec![stiffness; x.len()];
wall.kt = vec![stiffness; x.len()];
// --------------------------------------
// CREATE PARTICLE ARRAYS ENDS
// --------------------------------------
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS
// ----------------------------------------
let max_coordinate = 50. * 1e-2;
let max_size = 2. * radius[0];
let mut nbs2d_sand =
NBS2D::from_maximum_and_no_of_particles(max_coordinate, max_size, sand.x.len());
// ---------------------------------------
// SETUP CORRESPONDING NBS NNPS ENDS
// ----------------------------------------
// ----------------------------------------
// SOLVER DATA
// ----------------------------------------
let tf = args.flag_tf;
let dt = args.flag_dt;
let total_steps = (tf / dt) as u64;
let mut t = 0.;
let mut step_no = 0;
let total_output_file = 1000;
let pfreq = if total_steps < total_output_file {
1
} else {
total_steps / total_output_file
};
// ----------------------------------------
// SOLVER DATA ENDS
// ----------------------------------------
// ----------------------------------------
// OUTPUT DIRECTORY
// ----------------------------------------
let project_root = env!("CARGO_MANIFEST_DIR");
let dir_name = project_root.to_owned() + "/test_1_output";
let _p = fs::create_dir(&dir_name);
// ----------------------------------------
// OUTPUT DIRECTORY ENDS
// ----------------------------------------
// ----------------------------------------
// SOME CONSTANTS
// ----------------------------------------
let stage1 = 1;
let stage2 = 2;
// coefficient of friction
let mu = 0.5;
let kn = 5. * 1e5;
let kt = 5. * 1e5;
let en = 0.9;
// ----------------------------------------
// SOME CONSTANTS ENDS
// ----------------------------------------
// create a progress bar
let pb = setup_progress_bar(total_steps);
// -------------------------------
// FOR PARAVIEW VISUALIZATION
// -------------------------------
// define variables for automatic visualization
// particle array files
let mut sand_files = "".to_string();
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
let mut wall_files = "".to_string();
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
// neighbours files
let mut nnps_files = "".to_string();
write_nnps_2d_to_vtk!(nbs2d_sand, format!("{}/nnps.vtk", &dir_name));
nnps_files.push_str(&format!("'{}/nnps.vtk'", &dir_name));
// -------------------------------
// FOR PARAVIEW VISUALIZATION ENDS
// -------------------------------
// -------------------------------
// POST PROCESSING VARIABLES
// -------------------------------
// uncomment and initialize the variables
// -------------------------------
// POST PROCESSING VARIABLES ENDS
// -------------------------------
while t < tf {
// ----------------------
// RK2 INITIALIZE
// ----------------------
rk2_initialize_copy_contacts_3d!((sand));
rk2_initialize_dem!((sand));
// ----------------------
// STAGE 1 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage1, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage1, dt);
// ----------------------
// STAGE 1 STEPPER
// ----------------------
rk2_stage1_dem!((sand), dt);
// ----------------------
// STAGE 2 EQUATIONS
// ----------------------
nbs2d_sand.register_particles_to_nnps(&sand.x, &sand.y, &sand.z);
make_forces_torques_zero!((sand));
body_force!((sand), 0., -9.81, 0.);
// dem_3d_force_linear_pp!(sand, (sand), (nbs2d_sand), (kn), (kt), (en), (mu), stage2, dt);
dem_3d_force_linear_pw!(sand, (wall), (kn), (kt), (en), (mu), stage2, dt);
// ----------------------
// STAGE 2 STEPPER
// ----------------------
rk2_stage2_dem!((sand), dt);
update_contacts_pp!(sand, (sand));
// ----------------------
// OUTPUT THE VTK FILE AND WRITE ALL FILES DATA
// ----------------------
if step_no % pfreq == 0 && step_no != 0 {
// ----------------------
// WRITE DATA TO VTK FILE
// ----------------------
write_to_vtk!(sand, format!("{}/sand_{}.vtk", &dir_name, step_no));
write_wall_to_vtk!(wall, format!("{}/wall_{}.vtk", &dir_name, step_no));
// ----------------------
// FOR PARAVIEW AUTOMATION
// ----------------------
sand_files.push_str(&format!("'{}/sand_{}.vtk', ", &dir_name, step_no));
wall_files.push_str(&format!("'{}/wall_{}.vtk', ", &dir_name, step_no));
}
step_no += 1;
t += dt;
// progress bar increment
pb.inc(1);
}
pb.finish_with_message("Simulation successfully completed");
// ---------------------------------------
// write an easy paraview visualization file
// ---------------------------------------
// truncate the extra part of the string
sand_files.truncate(sand_files.len() - 2);
write_vis_file(
format!("{}/vis_paraview.py", &dir_name),
vec!["sand", "wall"],
vec![sand_files, wall_files],
vec![true, false],
vec!["nnps"],
vec![nnps_files],
);
// ---------------------------------------
// write an easy paraview visualization file ends
// ---------------------------------------
// ---------------------------------------
// PLOTTING
// ---------------------------------------
// UNCOMMENT AND USE THE PLOTTING FACILITY
// let (incident_angle_experiment_kharaz, rebound_angle_experiment_kharaz) =
// read_xy_pairs(&format!(
// "{}/data/chung_test_4_incident_angle_vs_rebound_angle_experiment_kharaz.txt",
// &project_root
// ));
// let mut fg = Figure::new();
// fg.axes2d()
// .set_x_label("Incident angle (degree)", &[])
// .set_y_label("Rebound angle (degree)", &[])
// //.set_x_range(Fix(0.), Fix(90.))
// //.set_y_range(Fix(-800.), Fix(0.))
// .lines(
// &incident_angle_experiment_kharaz,
// &rebound_angle_experiment_kharaz,
// &[Caption("Kharaz experiment"), Color("black")],
// )
// .lines(
// &incident_angle_paper_simulated,
// &rebound_angle_paper_simulated,
// &[Caption("Paper simulated"), Color("blue")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_alloy,
// &[Caption("Al alloy"), Color("black")],
// )
// .points(
// &incident_angle,
// &rebound_angle_al_oxide, | // let px = 1000;
// fg.save_to_png(
// &format!(
// "{}/chung_test_4_incident_angle_vs_rebound_angle.png",
// &dir_name
// ),
// px,
// px,
// )
//.unwrap();
// if args.flag_plots {
// fg.show().unwrap();
// }
// ---------------------------------------
// PLOTTING ENDS
// ---------------------------------------
} | // &[Caption("Al oxide"), Color("blue")],
// );
| random_line_split |
lib.rs | //! An incomplete wrapper over the WinRT toast API
//!
//! Tested in Windows 10 and 8.1. Untested in Windows 8, might work.
//!
//! Todo:
//!
//! * Add support for Adaptive Content
//! * Add support for Actions
//!
//! Known Issues:
//!
//! * Will not work for Windows 7.
//! * Will not build when targeting the 32-bit gnu toolchain (i686-pc-windows-gnu).
//!
//! Limitations:
//!
//! * Windows 8.1 only supports a single image; the last image set (icon, hero, or image) will be the one shown on the toast
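//!
//! Example (a minimal sketch; the final display call is assumed to be a
//! `show()` method defined further down in this crate):
//!
//! ```ignore
//! Toast::new(Toast::POWERSHELL_APP_ID)
//!     .title("Download complete")
//!     .text1("funny_cat.mp4 has finished downloading")
//!     .duration(Duration::Short)
//!     .show()
//!     .expect("unable to send toast");
//! ```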
/// for xml schema details check out:
///
/// * https://docs.microsoft.com/en-us/uwp/schemas/tiles/toastschema/root-elements
/// * https://docs.microsoft.com/en-us/windows/uwp/controls-and-patterns/tiles-and-notifications-toast-xml-schema
/// * https://docs.microsoft.com/en-us/windows/uwp/controls-and-patterns/tiles-and-notifications-adaptive-interactive-toasts
/// * https://msdn.microsoft.com/library/14a07fce-d631-4bad-ab99-305b703713e6#Sending_toast_notifications_from_desktop_apps
/// for Windows 7 and older support look into Shell_NotifyIcon
/// https://msdn.microsoft.com/en-us/library/windows/desktop/ee330740(v=vs.85).aspx
/// https://softwareengineering.stackexchange.com/questions/222339/using-the-system-tray-notification-area-app-in-windows-7
extern crate windows;
extern crate xml;
#[macro_use]
extern crate strum;
#[allow(dead_code)]
mod bindings {
::windows::include_bindings!();
}
use bindings::{
windows::data::xml::dom::XmlDocument,
windows::ui::notifications::ToastNotification,
windows::ui::notifications::ToastNotificationManager,
windows::HString,
};
use std::fmt;
use std::path::Path;
use xml::escape::escape_str_attribute;
mod windows_check;
pub use windows::Error;
pub struct Toast {
duration: String,
title: String,
line1: String,
line2: String,
images: String,
audio: String,
app_id: String,
}
#[derive(Clone, Copy)]
pub enum Duration {
/// 7 seconds
Short,
/// 25 seconds
Long,
}
#[derive(Debug, EnumString, Clone, Copy)]
pub enum Sound {
Default,
IM,
Mail,
Reminder,
SMS,
/// Play the loopable sound only once
#[strum(disabled)]
Single(LoopableSound),
/// Loop the loopable sound for the entire duration of the toast
#[strum(disabled)]
Loop(LoopableSound),
}
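// Illustrative mapping only (not in the original crate): the `sound()`
// builder method further down turns these variants into the following
// <audio> elements; the ms-winsoundevent names are the standard Windows ones.
//
//   Sound::Default        -> no <audio> element (system default sound)
//   Sound::SMS            -> <audio src="ms-winsoundevent:Notification.SMS" />
//   Sound::Single(Alarm)  -> <audio src="ms-winsoundevent:Notification.Looping.Alarm" />
//   Sound::Loop(Alarm)    -> <audio loop="true" src="ms-winsoundevent:Notification.Looping.Alarm" />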
/// Sounds suitable for Looping
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub enum LoopableSound {
Alarm,
Alarm2,
Alarm3,
Alarm4,
Alarm5,
Alarm6,
Alarm7,
Alarm8, | Call,
Call2,
Call3,
Call4,
Call5,
Call6,
Call7,
Call8,
Call9,
Call10,
}
#[allow(dead_code)]
#[derive(Clone, Copy)]
pub enum IconCrop {
Square,
Circular,
}
#[doc(hidden)]
impl fmt::Display for Sound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
#[doc(hidden)]
impl fmt::Display for LoopableSound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
impl Toast {
/// This can be used if you do not have an AppUserModelID.
///
/// However, the toast will erroneously report its origin as powershell.
pub const POWERSHELL_APP_ID: &'static str = "{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}\
\\WindowsPowerShell\\v1.0\\powershell.exe";
/// Constructor for the toast builder.
///
/// app_id is the running application's [AppUserModelID][1].
///
/// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/dd378459(v=vs.85).aspx
///
/// If the program you are using this in was not installed, use Toast::POWERSHELL_APP_ID for now
#[allow(dead_code)]
pub fn new(app_id: &str) -> Toast {
Toast {
duration: String::new(),
title: String::new(),
line1: String::new(),
line2: String::new(),
images: String::new(),
audio: String::new(),
app_id: app_id.to_string(),
}
}
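// Builder-style usage sketch; it mirrors the test at the bottom of this file,
// and the app id string here is a placeholder, not a real AppUserModelID:
//
//   Toast::new("MyCompany.MyApp")
//       .title("Build finished")
//       .text1("All tests passed")
//       .duration(Duration::Short)
//       .show()
//       .expect("unable to show toast");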
/// Sets the title of the toast.
///
/// Will be white.
/// Supports Unicode ✅
pub fn title(mut self, content: &str) -> Toast {
self.title = format!(r#"<text id="1">{}</text>"#, escape_str_attribute(content));
self
}
/// Add/Sets the first line of text below title.
///
/// Will be grey.
/// Supports Unicode ✅
pub fn text1(mut self, content: &str) -> Toast {
self.line1 = format!(r#"<text id="2">{}</text>"#, escape_str_attribute(content));
self
}
/// Add/Sets the second line of text below title.
///
/// Will be grey.
/// Supports Unicode ✅
pub fn text2(mut self, content: &str) -> Toast {
self.line2 = format!(r#"<text id="3">{}</text>"#, escape_str_attribute(content));
self
}
/// Set the length of time to show the toast
pub fn duration(mut self, duration: Duration) -> Toast {
self.duration = match duration {
Duration::Long => "duration=\"long\"",
Duration::Short => "duration=\"short\"",
}
.to_owned();
self
}
/// Set the icon shown in the upper left of the toast
///
/// The default is determined by your app id.
/// If you are using the powershell workaround, it will be the powershell icon
pub fn icon(mut self, source: &Path, crop: IconCrop, alt_text: &str) -> Toast {
if windows_check::is_newer_than_windows81() {
let crop_type_attr = match crop {
IconCrop::Square => "".to_string(),
IconCrop::Circular => "hint-crop=\"circle\"".to_string(),
};
self.images = format!(
r#"{}<image placement="appLogoOverride" {} src="file:///{}" alt="{}" />"#,
self.images,
crop_type_attr,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
} else {
// Win81 rejects the above xml so we fallback to a simpler call
self.image(source, alt_text)
}
}
/// Add/Set a Hero image for the toast.
///
/// This will be above the toast text and the icon.
pub fn hero(mut self, source: &Path, alt_text: &str) -> Toast {
if windows_check::is_newer_than_windows81() {
self.images = format!(
r#"{}<image placement="Hero" src="file:///{}" alt="{}" />"#,
self.images,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
} else {
// win81 rejects the above xml so we fallback to a simpler call
self.image(source, alt_text)
}
}
/// Add an image to the toast
///
/// May be done many times.
/// Will appear below text.
pub fn image(mut self, source: &Path, alt_text: &str) -> Toast {
if !windows_check::is_newer_than_windows81() {
// win81 cannot have more than 1 image and shows nothing if there is more than that
self.images = "".to_owned();
}
self.images = format!(
r#"{}<image id="1" src="file:///{}" alt="{}" />"#,
self.images,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
}
/// Set the sound for the toast or silence it
///
/// Default is [Sound::IM](enum.Sound.html)
pub fn sound(mut self, src: Option<Sound>) -> Toast {
self.audio = match src {
None => "<audio silent=\"true\" />".to_owned(),
Some(Sound::Default) => "".to_owned(),
Some(Sound::Loop(sound)) => format!(
r#"<audio loop="true" src="ms-winsoundevent:Notification.Looping.{}" />"#,
sound
),
Some(Sound::Single(sound)) => format!(
r#"<audio src="ms-winsoundevent:Notification.Looping.{}" />"#,
sound
),
Some(sound) => format!(r#"<audio src="ms-winsoundevent:Notification.{}" />"#, sound),
};
self
}
/// Display the toast on the screen
pub fn show(&self) -> windows::Result<()> {
//using this to get an instance of XmlDocument
let toast_xml = XmlDocument::new()?;
let template_binding = if windows_check::is_newer_than_windows81() {
"ToastGeneric"
} else
//win8 or win81
{
// Need to do this or an empty placeholder will be shown if no image is set
if self.images == "" {
"ToastText04"
} else {
"ToastImageAndText04"
}
};
toast_xml.load_xml(HString::from(format!(
"<toast {}>
<visual>
<binding template=\"{}\">
{}
{}{}{}
</binding>
</visual>
{}
</toast>",
self.duration,
template_binding,
self.images,
self.title,
self.line1,
self.line2,
self.audio,
)))?;
// Create the toast and attach event listeners
let toast_template = ToastNotification::create_toast_notification(toast_xml)?;
// Show the toast.
let toast_notifier =
ToastNotificationManager::create_toast_notifier_with_id(HString::from(&self.app_id))?;
let result = toast_notifier.show(&toast_template);
std::thread::sleep(std::time::Duration::from_millis(10));
result
}
}
#[cfg(test)]
mod tests {
use crate::*;
use std::path::Path;
#[test]
fn simple_toast() {
let toast = Toast::new(Toast::POWERSHELL_APP_ID);
toast
.hero(
&Path::new(env!("CARGO_MANIFEST_DIR")).join("resources/test/flower.jpeg"),
"flower",
)
.icon(
&Path::new(env!("CARGO_MANIFEST_DIR")).join("resources/test/chick.jpeg"),
IconCrop::Circular,
"chicken",
)
.title("title")
.text1("line1")
.text2("line2")
.duration(Duration::Short)
//.sound(Some(Sound::Loop(LoopableSound::Call)))
//.sound(Some(Sound::SMS))
.sound(None)
.show()
// silently consume errors
.expect("notification failed");
}
} | Alarm9,
Alarm10, | random_line_split |
lib.rs | //! An incomplete wrapper over the WinRT toast api
//!
//! Tested in Windows 10 and 8.1. Untested in Windows 8, might work.
//!
//! Todo:
//!
//! * Add support for Adaptive Content
//! * Add support for Actions
//!
//! Known Issues:
//!
//! * Will not work for Windows 7.
//! * Will not build when targeting the 32-bit gnu toolchain (i686-pc-windows-gnu).
//!
//! Limitations:
//!
//! * Windows 8.1 only supports a single image, the last image (icon, hero, image) will be the one on the toast
/// for xml schema details check out:
///
/// * https://docs.microsoft.com/en-us/uwp/schemas/tiles/toastschema/root-elements
/// * https://docs.microsoft.com/en-us/windows/uwp/controls-and-patterns/tiles-and-notifications-toast-xml-schema
/// * https://docs.microsoft.com/en-us/windows/uwp/controls-and-patterns/tiles-and-notifications-adaptive-interactive-toasts
/// * https://msdn.microsoft.com/library/14a07fce-d631-4bad-ab99-305b703713e6#Sending_toast_notifications_from_desktop_apps
/// for Windows 7 and older support look into Shell_NotifyIcon
/// https://msdn.microsoft.com/en-us/library/windows/desktop/ee330740(v=vs.85).aspx
/// https://softwareengineering.stackexchange.com/questions/222339/using-the-system-tray-notification-area-app-in-windows-7
extern crate windows;
extern crate xml;
#[macro_use]
extern crate strum;
#[allow(dead_code)]
mod bindings {
::windows::include_bindings!();
}
use bindings::{
windows::data::xml::dom::XmlDocument,
windows::ui::notifications::ToastNotification,
windows::ui::notifications::ToastNotificationManager,
windows::HString,
};
use std::fmt;
use std::path::Path;
use xml::escape::escape_str_attribute;
mod windows_check;
pub use windows::Error;
pub struct Toast {
duration: String,
title: String,
line1: String,
line2: String,
images: String,
audio: String,
app_id: String,
}
#[derive(Clone, Copy)]
pub enum | {
/// 7 seconds
Short,
/// 25 seconds
Long,
}
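// Sketch of the mapping applied by `duration()` further down; the timings
// come from the variant docs above and are fixed by Windows, not by this crate:
//
//   Duration::Short -> duration="short"   (~7 seconds)
//   Duration::Long  -> duration="long"    (~25 seconds)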
#[derive(Debug, EnumString, Clone, Copy)]
pub enum Sound {
Default,
IM,
Mail,
Reminder,
SMS,
/// Play the loopable sound only once
#[strum(disabled)]
Single(LoopableSound),
/// Loop the loopable sound for the entire duration of the toast
#[strum(disabled)]
Loop(LoopableSound),
}
/// Sounds suitable for Looping
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub enum LoopableSound {
Alarm,
Alarm2,
Alarm3,
Alarm4,
Alarm5,
Alarm6,
Alarm7,
Alarm8,
Alarm9,
Alarm10,
Call,
Call2,
Call3,
Call4,
Call5,
Call6,
Call7,
Call8,
Call9,
Call10,
}
#[allow(dead_code)]
#[derive(Clone, Copy)]
pub enum IconCrop {
Square,
Circular,
}
#[doc(hidden)]
impl fmt::Display for Sound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
#[doc(hidden)]
impl fmt::Display for LoopableSound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
impl Toast {
/// This can be used if you do not have an AppUserModelID.
///
/// However, the toast will erroneously report its origin as powershell.
pub const POWERSHELL_APP_ID: &'static str = "{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}\
\\WindowsPowerShell\\v1.0\\powershell.exe";
/// Constructor for the toast builder.
///
/// app_id is the running application's [AppUserModelID][1].
///
/// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/dd378459(v=vs.85).aspx
///
/// If the program you are using this in was not installed, use Toast::POWERSHELL_APP_ID for now
#[allow(dead_code)]
pub fn new(app_id: &str) -> Toast {
Toast {
duration: String::new(),
title: String::new(),
line1: String::new(),
line2: String::new(),
images: String::new(),
audio: String::new(),
app_id: app_id.to_string(),
}
}
/// Sets the title of the toast.
///
/// Will be white.
/// Supports Unicode ✅
pub fn title(mut self, content: &str) -> Toast {
self.title = format!(r#"<text id="1">{}</text>"#, escape_str_attribute(content));
self
}
/// Add/Sets the first line of text below title.
///
/// Will be grey.
/// Supports Unicode ✅
pub fn text1(mut self, content: &str) -> Toast {
self.line1 = format!(r#"<text id="2">{}</text>"#, escape_str_attribute(content));
self
}
/// Add/Sets the second line of text below title.
///
/// Will be grey.
/// Supports Unicode ✅
pub fn text2(mut self, content: &str) -> Toast {
self.line2 = format!(r#"<text id="3">{}</text>"#, escape_str_attribute(content));
self
}
/// Set the length of time to show the toast
pub fn duration(mut self, duration: Duration) -> Toast {
self.duration = match duration {
Duration::Long => "duration=\"long\"",
Duration::Short => "duration=\"short\"",
}
.to_owned();
self
}
/// Set the icon shown in the upper left of the toast
///
/// The default is determined by your app id.
/// If you are using the powershell workaround, it will be the powershell icon
pub fn icon(mut self, source: &Path, crop: IconCrop, alt_text: &str) -> Toast {
if windows_check::is_newer_than_windows81() {
let crop_type_attr = match crop {
IconCrop::Square => "".to_string(),
IconCrop::Circular => "hint-crop=\"circle\"".to_string(),
};
self.images = format!(
r#"{}<image placement="appLogoOverride" {} src="file:///{}" alt="{}" />"#,
self.images,
crop_type_attr,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
} else {
// Win81 rejects the above xml so we fallback to a simpler call
self.image(source, alt_text)
}
}
/// Add/Set a Hero image for the toast.
///
/// This will be above the toast text and the icon.
pub fn hero(mut self, source: &Path, alt_text: &str) -> Toast {
if windows_check::is_newer_than_windows81() {
self.images = format!(
r#"{}<image placement="Hero" src="file:///{}" alt="{}" />"#,
self.images,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
} else {
// win81 rejects the above xml so we fallback to a simpler call
self.image(source, alt_text)
}
}
/// Add an image to the toast
///
/// May be done many times.
/// Will appear below text.
pub fn image(mut self, source: &Path, alt_text: &str) -> Toast {
if !windows_check::is_newer_than_windows81() {
// win81 cannot have more than 1 image and shows nothing if there is more than that
self.images = "".to_owned();
}
self.images = format!(
r#"{}<image id="1" src="file:///{}" alt="{}" />"#,
self.images,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
}
/// Set the sound for the toast or silence it
///
/// Default is [Sound::IM](enum.Sound.html)
pub fn sound(mut self, src: Option<Sound>) -> Toast {
self.audio = match src {
None => "<audio silent=\"true\" />".to_owned(),
Some(Sound::Default) => "".to_owned(),
Some(Sound::Loop(sound)) => format!(
r#"<audio loop="true" src="ms-winsoundevent:Notification.Looping.{}" />"#,
sound
),
Some(Sound::Single(sound)) => format!(
r#"<audio src="ms-winsoundevent:Notification.Looping.{}" />"#,
sound
),
Some(sound) => format!(r#"<audio src="ms-winsoundevent:Notification.{}" />"#, sound),
};
self
}
/// Display the toast on the screen
pub fn show(&self) -> windows::Result<()> {
//using this to get an instance of XmlDocument
let toast_xml = XmlDocument::new()?;
let template_binding = if windows_check::is_newer_than_windows81() {
"ToastGeneric"
} else
//win8 or win81
{
// Need to do this or an empty placeholder will be shown if no image is set
if self.images == "" {
"ToastText04"
} else {
"ToastImageAndText04"
}
};
toast_xml.load_xml(HString::from(format!(
"<toast {}>
<visual>
<binding template=\"{}\">
{}
{}{}{}
</binding>
</visual>
{}
</toast>",
self.duration,
template_binding,
self.images,
self.title,
self.line1,
self.line2,
self.audio,
)))?;
// Create the toast and attach event listeners
let toast_template = ToastNotification::create_toast_notification(toast_xml)?;
// Show the toast.
let toast_notifier =
ToastNotificationManager::create_toast_notifier_with_id(HString::from(&self.app_id))?;
let result = toast_notifier.show(&toast_template);
std::thread::sleep(std::time::Duration::from_millis(10));
result
}
}
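// For orientation (a sketch, not output captured from a real run): a toast
// built with `Toast::new(id).title("Hi").text1("There")` on Windows 10 feeds
// `load_xml` a document shaped roughly like this:
//
//   <toast >
//     <visual>
//       <binding template="ToastGeneric">
//         <text id="1">Hi</text>
//         <text id="2">There</text>
//       </binding>
//     </visual>
//   </toast>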
#[cfg(test)]
mod tests {
use crate::*;
use std::path::Path;
#[test]
fn simple_toast() {
let toast = Toast::new(Toast::POWERSHELL_APP_ID);
toast
.hero(
&Path::new(env!("CARGO_MANIFEST_DIR")).join("resources/test/flower.jpeg"),
"flower",
)
.icon(
&Path::new(env!("CARGO_MANIFEST_DIR")).join("resources/test/chick.jpeg"),
IconCrop::Circular,
"chicken",
)
.title("title")
.text1("line1")
.text2("line2")
.duration(Duration::Short)
//.sound(Some(Sound::Loop(LoopableSound::Call)))
//.sound(Some(Sound::SMS))
.sound(None)
.show()
// silently consume errors
.expect("notification failed");
}
}
| Duration | identifier_name |
lib.rs | //! An incomplete wrapper over the WinRT toast api
//!
//! Tested in Windows 10 and 8.1. Untested in Windows 8, might work.
//!
//! Todo:
//!
//! * Add support for Adaptive Content
//! * Add support for Actions
//!
//! Known Issues:
//!
//! * Will not work for Windows 7.
//! * Will not build when targeting the 32-bit gnu toolchain (i686-pc-windows-gnu).
//!
//! Limitations:
//!
//! * Windows 8.1 only supports a single image, the last image (icon, hero, image) will be the one on the toast
/// for xml schema details check out:
///
/// * https://docs.microsoft.com/en-us/uwp/schemas/tiles/toastschema/root-elements
/// * https://docs.microsoft.com/en-us/windows/uwp/controls-and-patterns/tiles-and-notifications-toast-xml-schema
/// * https://docs.microsoft.com/en-us/windows/uwp/controls-and-patterns/tiles-and-notifications-adaptive-interactive-toasts
/// * https://msdn.microsoft.com/library/14a07fce-d631-4bad-ab99-305b703713e6#Sending_toast_notifications_from_desktop_apps
/// for Windows 7 and older support look into Shell_NotifyIcon
/// https://msdn.microsoft.com/en-us/library/windows/desktop/ee330740(v=vs.85).aspx
/// https://softwareengineering.stackexchange.com/questions/222339/using-the-system-tray-notification-area-app-in-windows-7
extern crate windows;
extern crate xml;
#[macro_use]
extern crate strum;
#[allow(dead_code)]
mod bindings {
::windows::include_bindings!();
}
use bindings::{
windows::data::xml::dom::XmlDocument,
windows::ui::notifications::ToastNotification,
windows::ui::notifications::ToastNotificationManager,
windows::HString,
};
use std::fmt;
use std::path::Path;
use xml::escape::escape_str_attribute;
mod windows_check;
pub use windows::Error;
pub struct Toast {
duration: String,
title: String,
line1: String,
line2: String,
images: String,
audio: String,
app_id: String,
}
#[derive(Clone, Copy)]
pub enum Duration {
/// 7 seconds
Short,
/// 25 seconds
Long,
}
#[derive(Debug, EnumString, Clone, Copy)]
pub enum Sound {
Default,
IM,
Mail,
Reminder,
SMS,
/// Play the loopable sound only once
#[strum(disabled)]
Single(LoopableSound),
/// Loop the loopable sound for the entire duration of the toast
#[strum(disabled)]
Loop(LoopableSound),
}
/// Sounds suitable for Looping
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
pub enum LoopableSound {
Alarm,
Alarm2,
Alarm3,
Alarm4,
Alarm5,
Alarm6,
Alarm7,
Alarm8,
Alarm9,
Alarm10,
Call,
Call2,
Call3,
Call4,
Call5,
Call6,
Call7,
Call8,
Call9,
Call10,
}
#[allow(dead_code)]
#[derive(Clone, Copy)]
pub enum IconCrop {
Square,
Circular,
}
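// Sketch of what each crop adds to the <image> element in `icon()` below;
// `hint-crop` is only understood by Windows 10 and newer:
//
//   IconCrop::Square   -> no extra attribute
//   IconCrop::Circular -> hint-crop="circle"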
#[doc(hidden)]
impl fmt::Display for Sound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
#[doc(hidden)]
impl fmt::Display for LoopableSound {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self, f)
}
}
impl Toast {
/// This can be used if you do not have an AppUserModelID.
///
/// However, the toast will erroneously report its origin as powershell.
pub const POWERSHELL_APP_ID: &'static str = "{1AC14E77-02E7-4E5D-B744-2EB1AE5198B7}\
\\WindowsPowerShell\\v1.0\\powershell.exe";
/// Constructor for the toast builder.
///
/// app_id is the running application's [AppUserModelID][1].
///
/// [1]: https://msdn.microsoft.com/en-us/library/windows/desktop/dd378459(v=vs.85).aspx
///
/// If the program you are using this in was not installed, use Toast::POWERSHELL_APP_ID for now
#[allow(dead_code)]
pub fn new(app_id: &str) -> Toast {
Toast {
duration: String::new(),
title: String::new(),
line1: String::new(),
line2: String::new(),
images: String::new(),
audio: String::new(),
app_id: app_id.to_string(),
}
}
/// Sets the title of the toast.
///
/// Will be white.
/// Supports Unicode ✅
pub fn title(mut self, content: &str) -> Toast {
self.title = format!(r#"<text id="1">{}</text>"#, escape_str_attribute(content));
self
}
/// Add/Sets the first line of text below title.
///
/// Will be grey.
/// Supports Unicode ✅
pub fn text1(mut self, content: &str) -> Toast {
self.line1 = format!(r#"<text id="2">{}</text>"#, escape_str_attribute(content));
self
}
/// Add/Sets the second line of text below title.
///
/// Will be grey.
/// Supports Unicode ✅
pub fn text2(mut self, content: &str) -> Toast {
self.line2 = format!(r#"<text id="3">{}</text>"#, escape_str_attribute(content));
self
}
/// Set the length of time to show the toast
pub fn duration(mut self, duration: Duration) -> Toast {
self.duration = match duration {
Duration::Long => "duration=\"long\"",
Duration::Short => "duration=\"short\"",
}
.to_owned();
self
}
/// Set the icon shown in the upper left of the toast
///
/// The default is determined by your app id.
/// If you are using the powershell workaround, it will be the powershell icon
pub fn icon(mut self, source: &Path, crop: IconCrop, alt_text: &str) -> Toast {
if windows_check::is_newer_than_windows81() {
let crop_type_attr = match crop {
IconCrop::Square => "".to_string(),
IconCrop::Circular => "hint-crop=\"circle\"".to_string(),
};
self.images = format!(
r#"{}<image placement="appLogoOverride" {} src="file:///{}" alt="{}" />"#,
self.images,
crop_type_attr,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
} else {
// Win81 rejects the above xml so we fallback to a simpler call
self.image(source, alt_text)
}
}
/// Add/Set a Hero image for the toast.
///
/// This will be above the toast text and the icon.
pub fn hero(mut self, source: &Path, alt_text: &str) -> Toast {
if windows_check::is_newer_than_windows81() {
self.images = format!(
r#"{}<image placement="Hero" src="file:///{}" alt="{}" />"#,
self.images,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
} else {
// win81 rejects the above xml so we fallback to a simpler call
self.image(source, alt_text)
}
}
/// Add an image to the toast
///
/// May be done many times.
/// Will appear below text.
pub fn image(mut self, source: &Path, alt_text: &str) -> Toast {
if !windows_check::is_newer_than_windows81() {
// win81 cannot have more than 1 image and shows nothing if there is more than that
self.images = "".to_owned();
}
self.images = format!(
r#"{}<image id="1" src="file:///{}" alt="{}" />"#,
self.images,
escape_str_attribute(&source.display().to_string()),
escape_str_attribute(alt_text)
);
self
}
/// Set the sound for the toast or silence it
///
/// Default is [Sound::IM](enum.Sound.html)
pub fn sound(mut self, src: Option<Sound>) -> Toast {
self.audio = match src {
None => "<audio silent=\"true\" />".to_owned(),
Some(Sound::Default) => "".to_owned(),
Some(Sound::Loop(sound)) => format!(
r#"<audio loop="true" src="ms-winsoundevent:Notification.Looping.{}" />"#,
sound
),
Some(Sound::Single(sound)) => format!(
r#"<audio src="ms-winsoundevent:Notification.Looping.{}" />"#,
sound
),
Some(sound) => format!(r#"<audio src="ms-winsoundevent:Notification.{}" />"#, sound),
};
self
}
/// Display the toast on the screen
pub fn show(&self) -> windows::Result<()> {
| {}
{}{}{}
</binding>
</visual>
{}
</toast>",
self.duration,
template_binding,
self.images,
self.title,
self.line1,
self.line2,
self.audio,
)))?;
// Create the toast and attach event listeners
let toast_template = ToastNotification::create_toast_notification(toast_xml)?;
// Show the toast.
let toast_notifier =
ToastNotificationManager::create_toast_notifier_with_id(HString::from(&self.app_id))?;
let result = toast_notifier.show(&toast_template);
std::thread::sleep(std::time::Duration::from_millis(10));
result
}
}
#[
cfg(test)]
mod tests {
use crate::*;
use std::path::Path;
#[test]
fn simple_toast() {
let toast = Toast::new(Toast::POWERSHELL_APP_ID);
toast
.hero(
&Path::new(env!("CARGO_MANIFEST_DIR")).join("resources/test/flower.jpeg"),
"flower",
)
.icon(
&Path::new(env!("CARGO_MANIFEST_DIR")).join("resources/test/chick.jpeg"),
IconCrop::Circular,
"chicken",
)
.title("title")
.text1("line1")
.text2("line2")
.duration(Duration::Short)
//.sound(Some(Sound::Loop(LoopableSound::Call)))
//.sound(Some(Sound::SMS))
.sound(None)
.show()
// silently consume errors
.expect("notification failed");
}
}
| //using this to get an instance of XmlDocument
let toast_xml = XmlDocument::new()?;
let template_binding = if windows_check::is_newer_than_windows81() {
"ToastGeneric"
} else
//win8 or win81
{
// Need to do this or an empty placeholder will be shown if no image is set
if self.images == "" {
"ToastText04"
} else {
"ToastImageAndText04"
}
};
toast_xml.load_xml(HString::from(format!(
"<toast {}>
<visual>
<binding template=\"{}\"> | identifier_body |
authorization.rs |
use rocket::{Request, Outcome};
use rocket::response::{Redirect, Flash};
use rocket::request::{FromRequest, FromForm, FormItems, FormItem};
use rocket::http::{Cookie, Cookies};
use std::collections::HashMap;
use std::marker::Sized;
use sanitization::*;
#[derive(Debug, Clone)]
pub struct UserQuery {
pub user: String,
}
#[derive(Debug, Clone)]
pub struct AuthCont<T: AuthorizeCookie> {
pub cookie: T,
}
#[derive(Debug, Clone, FromForm)]
pub struct AuthFail {
pub user: String,
pub msg: String,
}
impl AuthFail {
pub fn new(user: String, msg: String) -> AuthFail {
AuthFail {
user,
msg,
}
}
}
#[derive(Debug, Clone)]
pub struct LoginCont<T: AuthorizeForm> {
pub form: T,
}
impl<T: AuthorizeForm + Clone> LoginCont<T> {
pub fn form(&self) -> T {
self.form.clone()
}
}
/// The CookieId trait contains a single method, `cookie_id()`.
/// The `cookie_id()` function returns the name or id of the cookie.
/// Note: if a secured cookie of the same name already exists (say one created
/// by running the tls_example and then the database_example), login will not
/// work. This is because a cookie marked secured is only usable over tls, so
/// if you attempt to login without tls while such a cookie exists, the cookie
/// cannot be set correctly and login will fail.
pub trait CookieId {
/// Ensure `cookie_id()` does not conflict with other cookies that
/// may be set using secured when not using tls. Secured cookies
/// will only work using tls, and cookies of the same name could
/// create problems.
fn | <'a>() -> &'a str {
"sid"
}
}
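// Minimal sketch of overriding the default id; `PortalCookie` is a
// hypothetical cookie data type, not something defined in this crate:
//
//   struct PortalCookie;
//   impl CookieId for PortalCookie {
//       fn cookie_id<'a>() -> &'a str { "psid" }
//   }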
/// ## Cookie Data
/// The AuthorizeCookie trait is used with a custom data structure that
/// will contain the data in the cookie. This trait provides methods
/// to store and retrieve a data structure from a cookie's string contents.
///
/// Using a request guard a route can easily check whether the user is
/// a valid Administrator or any custom user type.
///
/// ### Example
///
/// ```
///
/// use rocket::{Request, Outcome};
/// use rocket::request::FromRequest;
/// use auth::authorization::*;
/// // Define a custom data type that hold the cookie information
/// pub struct AdministratorCookie {
/// pub userid: u32,
/// pub username: String,
/// pub display: Option<String>,
/// }
///
/// // Implement CookieId for AdministratorCookie
/// impl CookieId for AdministratorCookie {
/// // Tell the trait which id string to use for this cookie
/// fn cookie_id<'a>() -> &'a str {
/// "asid"
/// }
/// }
///
/// // Implement AuthorizeCookie for the AdministratorCookie
/// // This code can be changed to use other serialization formats
/// impl AuthorizeCookie for AdministratorCookie {
/// fn store_cookie(&self) -> String {
/// ::serde_json::to_string(self).expect("Could not serialize structure")
/// }
/// fn retrieve_cookie(string: String) -> Option<Self> {
/// let mut des_buf = string.clone();
/// let des: Result<AdministratorCookie, _> = ::serde_json::from_str(&mut des_buf);
/// if let Ok(cooky) = des {
/// Some(cooky)
/// } else {
/// None
/// }
/// }
/// }
///
/// // Implement FromRequest for the Cookie type to allow direct
/// // use of the type in routes, instead of through AuthCont
/// //
/// // The only part that needs to be changed is the impl and
/// // function return type; the type should match your struct
/// impl<'a, 'r> FromRequest<'a, 'r> for AdministratorCookie {
/// type Error = ();
/// // Change the return type to match your type
/// fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AdministratorCookie,Self::Error>{
/// let cid = AdministratorCookie::cookie_id();
/// let mut cookies = request.cookies();
///
/// match cookies.get_private(cid) {
/// Some(cookie) => {
/// if let Some(cookie_deserialized) = AdministratorCookie::retrieve_cookie(cookie.value().to_string()) {
/// Outcome::Success(
/// cookie_deserialized
/// )
/// } else {
/// Outcome::Forward(())
/// }
/// },
/// None => Outcome::Forward(())
/// }
/// }
/// }
///
/// // In your route use the AdministratorCookie request guard to ensure
/// // that only verified administrators can reach a page
/// #[get("/administrator", rank=1)]
/// fn admin_page(admin: AdministratorCookie) -> Html<String> {
/// // Show the display field in AdministratorCookie as defined above
/// Html( format!("Welcome administrator {}!", admin.display) )
/// }
/// #[get("/administrator", rank=2)]
/// fn admin_login_form() -> Html<String> {
/// // Html form here, see the example directory for a complete example
/// }
///
/// fn main() {
/// rocket::ignite().mount("/", routes![admin_page, admin_login_form]).launch();
/// }
///
/// ```
///
pub trait AuthorizeCookie : CookieId {
/// Serialize the cookie data type - must be implemented by cookie data type
fn store_cookie(&self) -> String;
/// Deserialize the cookie data type - must be implemented by cookie data type
fn retrieve_cookie(String) -> Option<Self> where Self: Sized;
/// Deletes a cookie. This does not need to be implemented, it defaults to removing
/// the private key with the name specified by the cookie_id() method.
fn delete_cookie(cookies: &mut Cookies) {
cookies.remove_private(
Cookie::named( Self::cookie_id() )
);
}
}
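// Hedged sketch of a logout route built on the default `delete_cookie()`;
// the route path and the `AdministratorCookie` type are assumptions carried
// over from the doc example above:
//
//   #[get("/logout")]
//   fn logout(mut cookies: Cookies) -> Redirect {
//       AdministratorCookie::delete_cookie(&mut cookies);
//       Redirect::to("/".to_string())
//   }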
/// ## Form Data
/// The AuthorizeForm trait handles collecting a submitted login form into a
/// data structure and authenticating the credentials inside. It also contains
/// default methods to process the login and conditionally redirecting the user
/// to the correct page depending upon successful authentication or failure.
///
/// ### Authentication Failure
/// Upon failure the user is redirected to a page with a query string specified
/// by the `fail_url()` method. This allows the specified username to persist
/// across attempts.
///
/// ### Flash Message
/// The `flash_redirect()` method redirects the user but also adds a cookie
/// called a flash message that once read is deleted immediately. This is used
/// to indicate why the authentication failed. If the user refreshes the page
/// after failing to login the message that appears above the login indicating
/// why it failed will disappear. To redirect without a flash message use the
/// `redirect()` method instead of `flash_redirect()`.
///
/// ## Example
/// ```
///
/// use rocket::{Request, Outcome};
/// use std::collections::HashMap;
/// use auth::authorization::*;
/// // Create the structure that will contain the login form data
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct AdministratorForm {
/// pub username: String,
/// pub password: String,
/// }
///
/// // Implement CookieId for the form structure
/// impl CookieId for AdministratorForm {
/// fn cookie_id<'a>() -> &'a str {
/// "acid"
/// }
/// }
///
/// // Implement the AuthorizeForm for the form structure
/// impl AuthorizeForm for AdministratorForm {
/// type CookieType = AdministratorCookie;
///
/// /// Authenticate the credentials inside the login form
/// fn authenticate(&self) -> Result<Self::CookieType, AuthFail> {
/// // The code in this function should be replace with whatever
/// // you use to authenticate users.
/// println!("Authenticating {} with password: {}", &self.username, &self.password);
/// if &self.username == "administrator" && &self.password != "" {
/// Ok(
/// AdministratorCookie {
/// userid: 1,
/// username: "administrator".to_string(),
/// display: Some("Administrator".to_string()),
/// }
/// )
/// } else {
/// Err(
/// AuthFail::new(self.username.to_string(), "Incorrect username".to_string())
/// )
/// }
/// }
///
/// /// Create a new login form instance
/// fn new_form(user: &str, pass: &str, _extras: Option<HashMap<String, String>>) -> Self {
/// AdministratorForm {
/// username: user.to_string(),
/// password: pass.to_string(),
/// }
/// }
/// }
///
/// # fn main() {}
///
/// ```
///
/// # Example Code
/// For more detailed example please see the example directory.
/// The example directory contains a fully working example of processing
/// and checking login information.
///
pub trait AuthorizeForm : CookieId {
type CookieType: AuthorizeCookie;
/// Determine whether the login form structure contains
/// valid credentials, otherwise send back the username and
/// a message indicating why it failed in the `AuthFail` struct
///
/// Must be implemented on the login form structure
fn authenticate(&self) -> Result<Self::CookieType, AuthFail>;
/// Create a new login form Structure with
/// the specified username and password.
/// The first parameter is the username, then password,
/// and then optionally a HashMap containing any extra fields.
///
/// Must be implemented on the login form structure
///
// /// The password is a u8 slice, allowing passwords to be stored without
// /// being converted to hex. The slice is sufficient because new_form()
// /// is called within the from_form() function, so when the password is
// /// collected as a vector of bytes the reference to those bytes are sent
// /// to the new_form() method.
fn new_form(&str, &str, Option<HashMap<String, String>>) -> Self;
/// The `fail_url()` method is used to create a url that the user is sent
/// to when the authentication fails. The default implementation
/// redirects the user to the /page?user=<attempted_username>
/// which enables the form to display the username that was attempted
/// and unlike FlashMessages it will persist across refreshes
fn fail_url(user: &str) -> String {
let mut output = String::with_capacity(user.len() + 10);
output.push_str("?user=");
output.push_str(user);
output
}
/// Sanitizes the username before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
/// `sanitization` module. This can be overriden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_username(string: &str) -> String {
sanitize(string)
}
/// Sanitizes the password before storing in the login form structure.
/// The default implementation uses the `sanitize_password()` function in the
/// `sanitization` module. This can be overriden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_password(string: &str) -> String {
sanitize_password(string)
}
/// Sanitizes any extra variables before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
/// `sanitization` module. This can be overriden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_extras(string: &str) -> String {
sanitize(string)
}
/// Redirect the user to one page on successful authentication or
/// another page (with a `FlashMessage` indicating why) if authentication fails.
///
/// `FlashMessage` is used to indicate why the authentication failed
/// this is so that the user can see why it failed but when they refresh
/// it will disappear, enabling a clean start, but with the user name
/// from the url's query string (determined by `fail_url()`)
fn flash_redirect(&self, ok_redir: impl Into<String>, err_redir: impl Into<String>, cookies: &mut Cookies) -> Result<Redirect, Flash<Redirect>> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.into()))
},
Err(fail) => {
let mut furl = err_redir.into();
if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Flash::error(Redirect::to(furl), &fail.msg) )
},
}
}
/// Redirect the user to one page on successful authentication or
/// another page if authentication fails.
fn redirect(&self, ok_redir: &str, err_redir: &str, cookies: &mut Cookies) -> Result<Redirect, Redirect> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.to_string()))
},
Err(fail) => {
let mut furl = String::from(err_redir);
if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Redirect::to(furl) )
},
}
}
}
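// Sketch of the login-processing route these defaults are written for; the
// form type, paths, and `Form` wrapper are assumptions based on the doc
// examples above, not code from this module:
//
//   #[post("/login", data = "<form>")]
//   fn process_login(form: Form<LoginCont<AdministratorForm>>,
//                    mut cookies: Cookies) -> Result<Redirect, Flash<Redirect>> {
//       form.into_inner().form.flash_redirect("/admin", "/login", &mut cookies)
//   }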
impl<T: AuthorizeCookie + Clone> AuthCont<T> {
pub fn cookie_data(&self) -> T {
// Todo: change the signature from &self to self
// and remove the.clone() method call
self.cookie.clone()
}
}
/// # Request Guard
/// Request guard for the AuthCont (Authentication Container).
/// This allows a route to call a user type like:
///
/// ```rust,no_run
///
/// use auth::authorization::*;
/// # use administration::*;
/// use rocket;
/// #[get("/protected")]
/// fn protected(container: AuthCont<AdministratorCookie>) -> Html<String> {
/// let admin = container.cookie;
/// String::new()
/// }
///
/// # fn main() {
/// # rocket::ignite().mount("/", routes![]).launch();
/// # }
///
/// ```
///
impl<'a, 'r, T: AuthorizeCookie> FromRequest<'a, 'r> for AuthCont<T> {
type Error = ();
fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AuthCont<T>,Self::Error>{
let cid = T::cookie_id();
let mut cookies = request.cookies();
match cookies.get_private(cid) {
Some(cookie) => {
if let Some(cookie_deserialized) = T::retrieve_cookie(cookie.value().to_string()) {
Outcome::Success(
AuthCont {
cookie: cookie_deserialized,
}
)
} else {
Outcome::Forward(())
}
},
None => Outcome::Forward(())
}
}
}
/// #Collecting Login Form Data
/// If your login form requires more than just a username and password the
/// extras parameter, in `AuthorizeForm::new_form(user, pass, extras)`, holds
/// all other fields in a `HashMap<String, String>` to allow processing any
/// field that was submitted. The username and password are separate because
/// those are universal fields.
///
/// ## Custom Username/Password Field Names
/// By default the function will look for a username and a password field.
/// If your form does not use those particular names you can always use the
/// extras `HashMap` to retrieve the username and password when using different
/// input box names. The function will return `Ok()` even if no username or
/// password was entered, this is to allow custom field names to be accessed
/// and authenticated by the `authenticate()` method.
impl<'f, A: AuthorizeForm> FromForm<'f> for LoginCont<A> {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
let mut user: String = String::new();
let mut pass: String = String::new();
let mut extras: HashMap<String, String> = HashMap::new();
for FormItem { key, value,.. } in form_items {
match key.as_str(){
"username" => {
user = A::clean_username(&value.url_decode().unwrap_or(String::new()));
},
"password" => {
pass = A::clean_password(&value.url_decode().unwrap_or(String::new()));
},
// _ => {},
a => {
// extras.insert( a.to_string(), A::clean_extras( &value.url_decode().unwrap_or(String::new()) ) );
extras.insert( a.to_string(), value.url_decode().unwrap_or(String::new()) );
},
}
}
// Do not need to check for username / password here,
// if the authentication method requires them it will
// fail at that point.
Ok(
LoginCont {
form: if extras.len() == 0 {
A::new_form(&user, &pass, None)
} else {
A::new_form(&user, &pass, Some(extras))
},
}
)
}
}
impl<'f> FromForm<'f> for UserQuery {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<UserQuery, Self::Error> {
let mut name: String = String::new();
for FormItem { key, value,.. } in form_items {
match key.as_str() {
"user" => { name = sanitize( &value.url_decode().unwrap_or(String::new()) ); },
_ => {},
}
}
Ok(UserQuery { user: name })
}
}
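// `UserQuery` above is what lets a login page read back the ?user=<name>
// query produced by `fail_url()`; a sketch of a consuming route (the route
// shape is an assumption, not part of this module):
//
//   #[get("/login?<user>")]
//   fn login_page(user: Option<UserQuery>) -> String {
//       format!("last attempt: {:?}", user.map(|u| u.user))
//   }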
| cookie_id | identifier_name |
authorization.rs | use rocket::{Request, Outcome};
use rocket::response::{Redirect, Flash};
use rocket::request::{FromRequest, FromForm, FormItems, FormItem};
use rocket::http::{Cookie, Cookies};
use std::collections::HashMap;
use std::marker::Sized;
use sanitization::*;
#[derive(Debug, Clone)]
pub struct UserQuery {
pub user: String,
}
#[derive(Debug, Clone)]
pub struct AuthCont<T: AuthorizeCookie> {
pub cookie: T,
}
#[derive(Debug, Clone, FromForm)]
pub struct AuthFail {
pub user: String,
pub msg: String,
}
impl AuthFail {
pub fn new(user: String, msg: String) -> AuthFail {
AuthFail {
user,
msg,
}
}
}
#[derive(Debug, Clone)]
pub struct LoginCont<T: AuthorizeForm> {
pub form: T,
}
impl<T: AuthorizeForm + Clone> LoginCont<T> {
pub fn form(&self) -> T {
self.form.clone()
}
}
/// The CookieId trait contains a single method, `cookie_id()`.
/// The `cookie_id()` function returns the name or id of the cookie.
/// Note: if a secured cookie of the same name already exists (say one created
/// by running the tls_example and then the database_example), login will not
/// work. This is because a cookie marked secured is only usable over tls, so
/// if you attempt to login without tls while such a cookie exists, the cookie
/// cannot be set correctly and login will fail.
pub trait CookieId {
/// Ensure `cookie_id()` does not conflict with other cookies that
/// may be set using secured when not using tls. Secured cookies
/// will only work using tls, and cookies of the same name could
/// create problems.
fn cookie_id<'a>() -> &'a str {
"sid"
}
}
/// ## Cookie Data
/// The AuthorizeCookie trait is used with a custom data structure that
/// will contain the data in the cookie. This trait provides methods
/// to store and retrieve a data structure from a cookie's string contents.
///
/// Using a request guard a route can easily check whether the user is
/// a valid Administrator or any custom user type.
///
/// ### Example
///
/// ```
///
/// use rocket::{Request, Outcome};
/// use rocket::request::FromRequest;
/// use auth::authorization::*;
/// // Define a custom data type that hold the cookie information
/// pub struct AdministratorCookie {
/// pub userid: u32,
/// pub username: String,
/// pub display: Option<String>,
/// }
///
/// // Implement CookieId for AdministratorCookie
/// impl CookieId for AdministratorCookie {
/// // Tell the trait which id string to use for this cookie
/// fn cookie_id<'a>() -> &'a str {
/// "asid"
/// }
/// }
///
/// // Implement AuthorizeCookie for the AdministratorCookie
/// // This code can be changed to use other serialization formats
/// impl AuthorizeCookie for AdministratorCookie {
/// fn store_cookie(&self) -> String {
/// ::serde_json::to_string(self).expect("Could not serialize structure")
/// }
/// fn retrieve_cookie(string: String) -> Option<Self> {
/// let mut des_buf = string.clone();
/// let des: Result<AdministratorCookie, _> = ::serde_json::from_str(&mut des_buf);
/// if let Ok(cooky) = des {
/// Some(cooky)
/// } else {
/// None
/// }
/// }
/// }
///
/// // Implement FromRequest for the Cookie type to allow direct
/// // use of the type in routes, instead of through AuthCont
/// //
/// // The only part that needs to be changed is the impl and
/// // function return type; the type should match your struct
/// impl<'a, 'r> FromRequest<'a, 'r> for AdministratorCookie { | /// // Change the return type to match your type
/// fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AdministratorCookie,Self::Error>{
/// let cid = AdministratorCookie::cookie_id();
/// let mut cookies = request.cookies();
///
/// match cookies.get_private(cid) {
/// Some(cookie) => {
/// if let Some(cookie_deserialized) = AdministratorCookie::retrieve_cookie(cookie.value().to_string()) {
/// Outcome::Success(
/// cookie_deserialized
/// )
/// } else {
/// Outcome::Forward(())
/// }
/// },
/// None => Outcome::Forward(())
/// }
/// }
/// }
///
/// // In your route use the AdministratorCookie request guard to ensure
/// // that only verified administrators can reach a page
/// #[get("/administrator", rank=1)]
/// fn admin_page(admin: AdministratorCookie) -> Html<String> {
/// // Show the display field in AdministratorCookie as defined above
/// Html( format!("Welcome administrator {}!", admin.display) )
/// }
/// #[get("/administrator", rank=2)]
/// fn admin_login_form() -> Html<String> {
/// // Html form here, see the example directory for a complete example
/// }
///
/// fn main() {
/// rocket::ignite().mount("/", routes![admin_page, admin_login_form]).launch();
/// }
///
/// ```
///
pub trait AuthorizeCookie : CookieId {
/// Serialize the cookie data type - must be implemented by cookie data type
fn store_cookie(&self) -> String;
/// Deserialize the cookie data type - must be implemented by cookie data type
fn retrieve_cookie(String) -> Option<Self> where Self: Sized;
/// Deletes a cookie. This does not need to be implemented, it defaults to removing
/// the private key with the name specified by the cookie_id() method.
fn delete_cookie(cookies: &mut Cookies) {
cookies.remove_private(
Cookie::named( Self::cookie_id() )
);
}
}
/// ## Form Data
/// The AuthorizeForm trait handles collecting a submitted login form into a
/// data structure and authenticating the credentials inside. It also contains
/// default methods to process the login and conditionally redirecting the user
/// to the correct page depending upon successful authentication or failure.
///
/// ### Authentication Failure
/// Upon failure the user is redirected to a page with a query string specified
/// by the `fail_url()` method. This allows the specified username to persist
/// across attempts.
///
/// ### Flash Message
/// The `flash_redirect()` method redirects the user but also adds a cookie
/// called a flash message that once read is deleted immediately. This is used
/// to indicate why the authentication failed. If the user refreshes the page
/// after failing to login the message that appears above the login indicating
/// why it failed will disappear. To redirect without a flash message use the
/// `redirect()` method instead of `flash_redirect()`.
///
/// ## Example
/// ```
///
/// use rocket::{Request, Outcome};
/// use std::collections::HashMap;
/// use auth::authorization::*;
/// // Create the structure that will contain the login form data
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct AdministratorForm {
/// pub username: String,
/// pub password: String,
/// }
///
/// // Implement CookieId for the form structure
/// impl CookieId for AdministratorForm {
/// fn cookie_id<'a>() -> &'a str {
/// "acid"
/// }
/// }
///
/// // Implement the AuthorizeForm for the form structure
/// impl AuthorizeForm for AdministratorForm {
/// type CookieType = AdministratorCookie;
///
/// /// Authenticate the credentials inside the login form
/// fn authenticate(&self) -> Result<Self::CookieType, AuthFail> {
/// // The code in this function should be replace with whatever
/// // you use to authenticate users.
/// println!("Authenticating {} with password: {}", &self.username, &self.password);
/// if &self.username == "administrator" && &self.password != "" {
/// Ok(
/// AdministratorCookie {
/// userid: 1,
/// username: "administrator".to_string(),
/// display: Some("Administrator".to_string()),
/// }
/// )
/// } else {
/// Err(
/// AuthFail::new(self.username.to_string(), "Incorrect username".to_string())
/// )
/// }
/// }
///
/// /// Create a new login form instance
/// fn new_form(user: &str, pass: &str, _extras: Option<HashMap<String, String>>) -> Self {
/// AdministratorForm {
/// username: user.to_string(),
/// password: pass.to_string(),
/// }
/// }
/// }
///
/// # fn main() {}
///
/// ```
///
/// # Example Code
/// For more detailed example please see the example directory.
/// The example directory contains a fully working example of processing
/// and checking login information.
///
pub trait AuthorizeForm : CookieId {
type CookieType: AuthorizeCookie;
/// Determine whether the login form structure contains
/// valid credentials, otherwise send back the username and
/// a message indicating why it failed in the `AuthFail` struct
///
/// Must be implemented on the login form structure
fn authenticate(&self) -> Result<Self::CookieType, AuthFail>;
/// Create a new login form Structure with
/// the specified username and password.
/// The first parameter is the username, then password,
/// and then optionally a HashMap containing any extra fields.
///
/// Must be implemented on the login form structure
///
// /// The password is a u8 slice, allowing passwords to be stored without
// /// being converted to hex. The slice is sufficient because new_form()
// /// is called within the from_form() function, so when the password is
// /// collected as a vector of bytes the reference to those bytes are sent
// /// to the new_form() method.
fn new_form(&str, &str, Option<HashMap<String, String>>) -> Self;
/// The `fail_url()` method is used to create a url that the user is sent
/// to when the authentication fails. The default implementation
/// redirects the user to the /page?user=<attempted_username>
/// which enables the form to display the username that was attempted
/// and unlike FlashMessages it will persist across refreshes
fn fail_url(user: &str) -> String {
let mut output = String::with_capacity(user.len() + 10);
output.push_str("?user=");
output.push_str(user);
output
}
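// Worked example of the default behaviour defined just above (names are
// illustrative): `AdministratorForm::fail_url("alice")` returns "?user=alice",
// so a failed login redirects to e.g. /login?user=alice and the form can be
// re-rendered with the attempted username filled in.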
/// Sanitizes the username before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
/// `sanitization` module. This can be overriden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_username(string: &str) -> String {
sanitize(string)
}
/// Sanitizes the password before storing in the login form structure.
/// The default implementation uses the `sanitize_password()` function in the
/// `sanitization` module. This can be overriden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_password(string: &str) -> String {
sanitize_password(string)
}
/// Sanitizes any extra variables before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
/// `sanitization` module. This can be overriden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_extras(string: &str) -> String {
sanitize(string)
}
/// Redirect the user to one page on successful authentication or
/// another page (with a `FlashMessage` indicating why) if authentication fails.
///
/// `FlashMessage` is used to indicate why the authentication failed
/// this is so that the user can see why it failed but when they refresh
/// it will disappear, enabling a clean start, but with the user name
/// from the url's query string (determined by `fail_url()`)
fn flash_redirect(&self, ok_redir: impl Into<String>, err_redir: impl Into<String>, cookies: &mut Cookies) -> Result<Redirect, Flash<Redirect>> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.into()))
},
Err(fail) => {
let mut furl = err_redir.into();
if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Flash::error(Redirect::to(furl), &fail.msg) )
},
}
}
/// Redirect the user to one page on successful authentication or
/// another page if authentication fails.
fn redirect(&self, ok_redir: &str, err_redir: &str, cookies: &mut Cookies) -> Result<Redirect, Redirect> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.to_string()))
},
Err(fail) => {
let mut furl = String::from(err_redir);
if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Redirect::to(furl) )
},
}
}
}
impl<T: AuthorizeCookie + Clone> AuthCont<T> {
pub fn cookie_data(&self) -> T {
// Todo: change the signature from &self to self
// and remove the.clone() method call
self.cookie.clone()
}
}
/// # Request Guard
/// Request guard for the AuthCont (Authentication Container).
/// This allows a route to call a user type like:
///
/// ```rust,no_run
///
/// use auth::authorization::*;
/// # use administration::*;
/// use rocket;
/// #[get("/protected")]
/// fn protected(container: AuthCont<AdministratorCookie>) -> Html<String> {
/// let admin = container.cookie;
/// String::new()
/// }
///
/// # fn main() {
/// # rocket::ignite().mount("/", routes![]).launch();
/// # }
///
/// ```
///
impl<'a, 'r, T: AuthorizeCookie> FromRequest<'a, 'r> for AuthCont<T> {
type Error = ();
fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AuthCont<T>,Self::Error>{
let cid = T::cookie_id();
let mut cookies = request.cookies();
match cookies.get_private(cid) {
Some(cookie) => {
if let Some(cookie_deserialized) = T::retrieve_cookie(cookie.value().to_string()) {
Outcome::Success(
AuthCont {
cookie: cookie_deserialized,
}
)
} else {
Outcome::Forward(())
}
},
None => Outcome::Forward(())
}
}
}
/// #Collecting Login Form Data
/// If your login form requires more than just a username and password the
/// extras parameter, in `AuthorizeForm::new_form(user, pass, extras)`, holds
/// all other fields in a `HashMap<String, String>` to allow processing any
/// field that was submitted. The username and password are separate because
/// those are universal fields.
///
/// ## Custom Username/Password Field Names
/// By default the function will look for a username and a password field.
/// If your form does not use those particular names you can always use the
/// extras `HashMap` to retrieve the username and password when using different
/// input box names. The function will return `Ok()` even if no username or
/// password was entered, this is to allow custom field names to be accessed
/// and authenticated by the `authenticate()` method.
impl<'f, A: AuthorizeForm> FromForm<'f> for LoginCont<A> {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
let mut user: String = String::new();
let mut pass: String = String::new();
let mut extras: HashMap<String, String> = HashMap::new();
for FormItem { key, value,.. } in form_items {
match key.as_str(){
"username" => {
user = A::clean_username(&value.url_decode().unwrap_or(String::new()));
},
"password" => {
pass = A::clean_password(&value.url_decode().unwrap_or(String::new()));
},
// _ => {},
a => {
// extras.insert( a.to_string(), A::clean_extras( &value.url_decode().unwrap_or(String::new()) ) );
extras.insert( a.to_string(), value.url_decode().unwrap_or(String::new()) );
},
}
}
// Do not need to check for username / password here,
// if the authentication method requires them it will
// fail at that point.
Ok(
LoginCont {
form: if extras.len() == 0 {
A::new_form(&user, &pass, None)
} else {
A::new_form(&user, &pass, Some(extras))
},
}
)
}
}
impl<'f> FromForm<'f> for UserQuery {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<UserQuery, Self::Error> {
let mut name: String = String::new();
for FormItem { key, value,.. } in form_items {
match key.as_str() {
"user" => { name = sanitize( &value.url_decode().unwrap_or(String::new()) ); },
_ => {},
}
}
Ok(UserQuery { user: name })
}
} | /// type Error = (); | random_line_split |
authorization.rs |
use rocket::{Request, Outcome};
use rocket::response::{Redirect, Flash};
use rocket::request::{FromRequest, FromForm, FormItems, FormItem};
use rocket::http::{Cookie, Cookies};
use std::collections::HashMap;
use std::marker::Sized;
use sanitization::*;
#[derive(Debug, Clone)]
pub struct UserQuery {
pub user: String,
}
#[derive(Debug, Clone)]
pub struct AuthCont<T: AuthorizeCookie> {
pub cookie: T,
}
#[derive(Debug, Clone, FromForm)]
pub struct AuthFail {
pub user: String,
pub msg: String,
}
impl AuthFail {
pub fn new(user: String, msg: String) -> AuthFail {
AuthFail {
user,
msg,
}
}
}
#[derive(Debug, Clone)]
pub struct LoginCont<T: AuthorizeForm> {
pub form: T,
}
impl<T: AuthorizeForm + Clone> LoginCont<T> {
pub fn form(&self) -> T {
self.form.clone()
}
}
/// The CookieId trait contains a single method, `cookie_id()`.
/// The `cookie_id()` function returns the name or id of the cookie.
/// Note: if a secured cookie of the same name already exists (say one created
/// by running the tls_example and then the database_example), login will not
/// work. This is because a cookie marked secured is only usable over tls, so
/// if you attempt to login without tls while such a cookie exists, the cookie
/// cannot be set correctly and login will fail.
pub trait CookieId {
/// Ensure `cookie_id()` does not conflict with other cookies that
/// may be set using secured when not using tls. Secured cookies
/// will only work using tls, and cookies of the same name could
/// create problems.
fn cookie_id<'a>() -> &'a str {
"sid"
}
}
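// A second sketch with two user types kept on distinct ids so a stale secured
// cookie from one cannot shadow the other; both type names are hypothetical:
//
//   struct AdminCookie;
//   impl CookieId for AdminCookie { fn cookie_id<'a>() -> &'a str { "asid" } }
//
//   struct UserCookie;
//   impl CookieId for UserCookie  { fn cookie_id<'a>() -> &'a str { "usid" } }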
/// ## Cookie Data
/// The AuthorizeCookie trait is used with a custom data structure that
/// will contain the data in the cookie. This trait provides methods
/// to store and retrieve a data structure from a cookie's string contents.
///
/// Using a request guard a route can easily check whether the user is
/// a valid Administrator or any custom user type.
///
/// ### Example
///
/// ```
///
/// use rocket::{Request, Outcome};
/// use rocket::request::FromRequest;
/// use auth::authorization::*;
/// // Define a custom data type that holds the cookie information
/// pub struct AdministratorCookie {
/// pub userid: u32,
/// pub username: String,
/// pub display: Option<String>,
/// }
///
/// // Implement CookieId for AdministratorCookie
/// impl CookieId for AdministratorCookie {
///     // Return the name or id of the cookie
/// fn cookie_id<'a>() -> &'a str {
/// "asid"
/// }
/// }
///
/// // Implement AuthorizeCookie for the AdministratorCookie
/// // This code can be changed to use other serialization formats
/// impl AuthorizeCookie for AdministratorCookie {
/// fn store_cookie(&self) -> String {
/// ::serde_json::to_string(self).expect("Could not serialize structure")
/// }
/// fn retrieve_cookie(string: String) -> Option<Self> {
/// let mut des_buf = string.clone();
/// let des: Result<AdministratorCookie, _> = ::serde_json::from_str(&mut des_buf);
/// if let Ok(cooky) = des {
/// Some(cooky)
/// } else {
/// None
/// }
/// }
/// }
///
/// // Implement FromRequest for the Cookie type to allow direct
/// // use of the type in routes, instead of through AuthCont
/// //
/// // The only part that needs to be changed is the impl and
/// // function return type; the type should match your struct
/// impl<'a, 'r> FromRequest<'a, 'r> for AdministratorCookie {
/// type Error = ();
/// // Change the return type to match your type
/// fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AdministratorCookie,Self::Error>{
/// let cid = AdministratorCookie::cookie_id();
/// let mut cookies = request.cookies();
///
/// match cookies.get_private(cid) {
/// Some(cookie) => {
/// if let Some(cookie_deserialized) = AdministratorCookie::retrieve_cookie(cookie.value().to_string()) {
/// Outcome::Success(
/// cookie_deserialized
/// )
/// } else {
/// Outcome::Forward(())
/// }
/// },
/// None => Outcome::Forward(())
/// }
/// }
/// }
///
/// // In your route use the AdministratorCookie request guard to ensure
/// // that only verified administrators can reach a page
/// #[get("/administrator", rank=1)]
/// fn admin_page(admin: AdministratorCookie) -> Html<String> {
///     // Show the display field in AdministratorCookie as defined above
///     Html( format!("Welcome administrator {}!", admin.display.unwrap_or_default()) )
/// }
/// #[get("/administrator", rank=2)]
/// fn admin_login_form() -> Html<String> {
/// // Html form here, see the example directory for a complete example
/// }
///
/// fn main() {
///     rocket::ignite().mount("/", routes![admin_page, admin_login_form]).launch();
/// }
///
/// ```
///
pub trait AuthorizeCookie : CookieId {
/// Serialize the cookie data type - must be implemented by cookie data type
fn store_cookie(&self) -> String;
/// Deserialize the cookie data type - must be implemented by cookie data type
    fn retrieve_cookie(string: String) -> Option<Self> where Self: Sized;
    /// Deletes a cookie. This does not need to be implemented; it defaults to removing
    /// the private cookie with the name specified by the `cookie_id()` method.
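    ///
    /// For instance, a logout route might call it like this (the route shape
    /// is illustrative; `AdministratorCookie` is the example type above):
    /// ```rust,ignore
    /// #[get("/logout")]
    /// fn logout(mut cookies: Cookies) -> Redirect {
    ///     AdministratorCookie::delete_cookie(&mut cookies);
    ///     Redirect::to("/")
    /// }
    /// ```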
fn delete_cookie(cookies: &mut Cookies) {
cookies.remove_private(
Cookie::named( Self::cookie_id() )
);
}
}
/// ## Form Data
/// The AuthorizeForm trait handles collecting a submitted login form into a
/// data structure and authenticating the credentials inside. It also contains
/// default methods to process the login and conditionally redirect the user
/// to the correct page depending upon successful authentication or failure.
///
/// ### Authentication Failure
/// Upon failure the user is redirected to a page with a query string specified
/// by the `fail_url()` method. This allows the specified username to persist
/// across attempts.
///
/// ### Flash Message
/// The `flash_redirect()` method redirects the user but also adds a cookie,
/// called a flash message, that is deleted as soon as it is read. This is used
/// to indicate why the authentication failed. If the user refreshes the page
/// after failing to log in, the message that appeared above the login form
/// indicating why it failed will disappear. To redirect without a flash
/// message use the `redirect()` method instead of `flash_redirect()`.
///
/// ## Example
/// ```
///
/// use rocket::{Request, Outcome};
/// use std::collections::HashMap;
/// use auth::authorization::*;
/// // Create the structure that will contain the login form data
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct AdministratorForm {
/// pub username: String,
/// pub password: String,
/// }
///
/// // Implement CookieId for the form structure
/// impl CookieId for AdministratorForm {
/// fn cookie_id<'a>() -> &'a str {
/// "acid"
/// }
/// }
///
/// // Implement the AuthorizeForm for the form structure
/// impl AuthorizeForm for AdministratorForm {
/// type CookieType = AdministratorCookie;
///
/// /// Authenticate the credentials inside the login form
/// fn authenticate(&self) -> Result<Self::CookieType, AuthFail> {
///         // The code in this function should be replaced with whatever
///         // you use to authenticate users.
/// println!("Authenticating {} with password: {}", &self.username, &self.password);
///         if &self.username == "administrator" && &self.password != "" {
/// Ok(
/// AdministratorCookie {
/// userid: 1,
/// username: "administrator".to_string(),
/// display: Some("Administrator".to_string()),
/// }
/// )
/// } else {
/// Err(
/// AuthFail::new(self.username.to_string(), "Incorrect username".to_string())
/// )
/// }
/// }
///
/// /// Create a new login form instance
/// fn new_form(user: &str, pass: &str, _extras: Option<HashMap<String, String>>) -> Self {
/// AdministratorForm {
/// username: user.to_string(),
/// password: pass.to_string(),
/// }
/// }
/// }
///
/// # fn main() {}
///
/// ```
///
/// # Example Code
/// For a more detailed example, please see the example directory.
/// The example directory contains a fully working example of processing
/// and checking login information.
///
pub trait AuthorizeForm : CookieId {
type CookieType: AuthorizeCookie;
    /// Determine whether the login form structure contains
/// valid credentials, otherwise send back the username and
/// a message indicating why it failed in the `AuthFail` struct
///
/// Must be implemented on the login form structure
fn authenticate(&self) -> Result<Self::CookieType, AuthFail>;
    /// Create a new login form structure with
/// the specified username and password.
/// The first parameter is the username, then password,
/// and then optionally a HashMap containing any extra fields.
///
/// Must be implemented on the login form structure
///
// /// The password is a u8 slice, allowing passwords to be stored without
// /// being converted to hex. The slice is sufficient because new_form()
// /// is called within the from_form() function, so when the password is
// /// collected as a vector of bytes the reference to those bytes are sent
// /// to the new_form() method.
    fn new_form(user: &str, pass: &str, extras: Option<HashMap<String, String>>) -> Self;
    /// The `fail_url()` method is used to create the url that the user is sent
    /// to when authentication fails. The default implementation
    /// redirects the user to /page?user=<attempted_username>,
    /// which enables the form to display the username that was attempted;
    /// unlike FlashMessages it will persist across refreshes.
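    /// For example, `Self::fail_url("bob")` returns `"?user=bob"`.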
fn fail_url(user: &str) -> String {
let mut output = String::with_capacity(user.len() + 10);
output.push_str("?user=");
output.push_str(user);
output
}
/// Sanitizes the username before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
    /// `sanitization` module. This can be overridden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_username(string: &str) -> String {
sanitize(string)
}
/// Sanitizes the password before storing in the login form structure.
    /// The default implementation uses the `sanitize_password()` function in the
    /// `sanitization` module. This can be overridden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_password(string: &str) -> String {
sanitize_password(string)
}
/// Sanitizes any extra variables before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
    /// `sanitization` module. This can be overridden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_extras(string: &str) -> String {
sanitize(string)
}
/// Redirect the user to one page on successful authentication or
/// another page (with a `FlashMessage` indicating why) if authentication fails.
///
    /// `FlashMessage` is used to indicate why the authentication failed,
    /// so that the user can see why it failed; when they refresh,
    /// the message will disappear, enabling a clean start, but with the user name
    /// preserved in the url's query string (determined by `fail_url()`).
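    ///
    /// A sketch of a login route wired to this method (the route, paths, and
    /// `Form` wrapper are illustrative):
    /// ```rust,ignore
    /// #[post("/login", data = "<form>")]
    /// fn process_login(form: Form<LoginCont<AdministratorForm>>,
    ///                  mut cookies: Cookies) -> Result<Redirect, Flash<Redirect>> {
    ///     form.into_inner().form.flash_redirect("/admin", "/login", &mut cookies)
    /// }
    /// ```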
fn flash_redirect(&self, ok_redir: impl Into<String>, err_redir: impl Into<String>, cookies: &mut Cookies) -> Result<Redirect, Flash<Redirect>> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.into()))
},
Err(fail) => {
let mut furl = err_redir.into();
                if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Flash::error(Redirect::to(furl), &fail.msg) )
},
}
}
/// Redirect the user to one page on successful authentication or
/// another page if authentication fails.
fn redirect(&self, ok_redir: &str, err_redir: &str, cookies: &mut Cookies) -> Result<Redirect, Redirect> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.to_string()))
},
Err(fail) => {
let mut furl = String::from(err_redir);
                if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Redirect::to(furl) )
},
}
}
}
impl<T: AuthorizeCookie + Clone> AuthCont<T> {
pub fn cookie_data(&self) -> T {
// Todo: change the signature from &self to self
        // and remove the .clone() method call
self.cookie.clone()
}
}
/// # Request Guard
/// Request guard for the AuthCont (Authentication Container).
/// This allows a route to call a user type like:
///
/// ```rust,no_run
///
/// use auth::authorization::*;
/// # use administration::*;
/// use rocket;
/// #[get("/protected")]
/// fn protected(container: AuthCont<AdministratorCookie>) -> Html<String> {
/// let admin = container.cookie;
///     Html(String::new())
/// }
///
/// # fn main() {
/// # rocket::ignite().mount("/", routes![]).launch();
/// # }
///
/// ```
///
impl<'a, 'r, T: AuthorizeCookie> FromRequest<'a, 'r> for AuthCont<T> {
type Error = ();
fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AuthCont<T>,Self::Error>{
let cid = T::cookie_id();
let mut cookies = request.cookies();
match cookies.get_private(cid) {
Some(cookie) => {
if let Some(cookie_deserialized) = T::retrieve_cookie(cookie.value().to_string()) {
Outcome::Success(
AuthCont {
cookie: cookie_deserialized,
}
)
} else {
Outcome::Forward(())
}
},
None => Outcome::Forward(())
}
}
}
/// # Collecting Login Form Data
/// If your login form requires more than just a username and password the
/// extras parameter, in `AuthorizeForm::new_form(user, pass, extras)`, holds
/// all other fields in a `HashMap<String, String>` to allow processing any
/// field that was submitted. The username and password are separate because
/// those are universal fields.
///
/// ## Custom Username/Password Field Names
/// By default the function will look for a username and a password field.
/// If your form does not use those particular names you can always use the
/// extras `HashMap` to retrieve the username and password when using different
/// input box names. The function will return `Ok()` even if no username or
/// password was entered; this allows custom field names to be accessed
/// and authenticated by the `authenticate()` method.
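///
/// A sketch of a `new_form()` that pulls a custom field out of `extras`
/// (the `email` field and `MemberForm` type are illustrative):
/// ```rust,ignore
/// fn new_form(user: &str, pass: &str, extras: Option<HashMap<String, String>>) -> Self {
///     let email = extras
///         .and_then(|mut e| e.remove("email"))
///         .unwrap_or_default();
///     MemberForm { username: user.to_string(), password: pass.to_string(), email }
/// }
/// ```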
impl<'f, A: AuthorizeForm> FromForm<'f> for LoginCont<A> {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
let mut user: String = String::new();
let mut pass: String = String::new();
let mut extras: HashMap<String, String> = HashMap::new();
        for FormItem { key, value, .. } in form_items {
            match key.as_str() {
"username" => {
user = A::clean_username(&value.url_decode().unwrap_or(String::new()));
},
"password" => | ,
// _ => {},
a => {
// extras.insert( a.to_string(), A::clean_extras( &value.url_decode().unwrap_or(String::new()) ) );
extras.insert( a.to_string(), value.url_decode().unwrap_or(String::new()) );
},
}
}
// Do not need to check for username / password here,
// if the authentication method requires them it will
// fail at that point.
Ok(
LoginCont {
                form: if extras.is_empty() {
A::new_form(&user, &pass, None)
} else {
A::new_form(&user, &pass, Some(extras))
},
}
)
}
}
impl<'f> FromForm<'f> for UserQuery {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<UserQuery, Self::Error> {
let mut name: String = String::new();
        for FormItem { key, value, .. } in form_items {
match key.as_str() {
"user" => { name = sanitize( &value.url_decode().unwrap_or(String::new()) ); },
_ => {},
}
}
Ok(UserQuery { user: name })
}
}
| {
pass = A::clean_password(&value.url_decode().unwrap_or(String::new()));
} | conditional_block |
authorization.rs |
use rocket::{Request, Outcome};
use rocket::response::{Redirect, Flash};
use rocket::request::{FromRequest, FromForm, FormItems, FormItem};
use rocket::http::{Cookie, Cookies};
use std::collections::HashMap;
use std::marker::Sized;
use sanitization::*;
#[derive(Debug, Clone)]
pub struct UserQuery {
pub user: String,
}
#[derive(Debug, Clone)]
pub struct AuthCont<T: AuthorizeCookie> {
pub cookie: T,
}
#[derive(Debug, Clone, FromForm)]
pub struct AuthFail {
pub user: String,
pub msg: String,
}
impl AuthFail {
pub fn new(user: String, msg: String) -> AuthFail {
AuthFail {
user,
msg,
}
}
}
#[derive(Debug, Clone)]
pub struct LoginCont<T: AuthorizeForm> {
pub form: T,
}
impl<T: AuthorizeForm + Clone> LoginCont<T> {
pub fn form(&self) -> T {
self.form.clone()
}
}
/// The CookieId trait contains a single method, `cookie_id()`.
/// The `cookie_id()` function returns the name or id of the cookie.
/// Note: if a secured cookie of the same name already exists (say, one
/// created by running the tls_example and then the database_example),
/// login will not work. This is because if the existing cookie is set to
/// secured and you attempt to login without using tls, the cookie will
/// not work correctly and login will fail.
pub trait CookieId {
/// Ensure `cookie_id()` does not conflict with other cookies that
    /// may be set as secured when not using tls. Secured cookies
    /// will only work using tls, and cookies of the same name could
/// create problems.
fn cookie_id<'a>() -> &'a str {
"sid"
}
}
/// ## Cookie Data
/// The AuthorizeCookie trait is used with a custom data structure that
/// will contain the data in the cookie. This trait provides methods
/// to store and retrieve a data structure from a cookie's string contents.
///
/// Using a request guard a route can easily check whether the user is
/// a valid Administrator or any custom user type.
///
/// ### Example
///
/// ```
///
/// use rocket::{Request, Outcome};
/// use rocket::request::FromRequest;
/// use auth::authorization::*;
/// // Define a custom data type that holds the cookie information
/// pub struct AdministratorCookie {
/// pub userid: u32,
/// pub username: String,
/// pub display: Option<String>,
/// }
///
/// // Implement CookieId for AdministratorCookie
/// impl CookieId for AdministratorCookie {
///     // Return the name or id of the cookie
/// fn cookie_id<'a>() -> &'a str {
/// "asid"
/// }
/// }
///
/// // Implement AuthorizeCookie for the AdministratorCookie
/// // This code can be changed to use other serialization formats
/// impl AuthorizeCookie for AdministratorCookie {
/// fn store_cookie(&self) -> String {
/// ::serde_json::to_string(self).expect("Could not serialize structure")
/// }
/// fn retrieve_cookie(string: String) -> Option<Self> {
/// let mut des_buf = string.clone();
/// let des: Result<AdministratorCookie, _> = ::serde_json::from_str(&mut des_buf);
/// if let Ok(cooky) = des {
/// Some(cooky)
/// } else {
/// None
/// }
/// }
/// }
///
/// // Implement FromRequest for the Cookie type to allow direct
/// // use of the type in routes, instead of through AuthCont
/// //
/// // The only part that needs to be changed is the impl and
/// // function return type; the type should match your struct
/// impl<'a, 'r> FromRequest<'a, 'r> for AdministratorCookie {
/// type Error = ();
/// // Change the return type to match your type
/// fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AdministratorCookie,Self::Error>{
/// let cid = AdministratorCookie::cookie_id();
/// let mut cookies = request.cookies();
///
/// match cookies.get_private(cid) {
/// Some(cookie) => {
/// if let Some(cookie_deserialized) = AdministratorCookie::retrieve_cookie(cookie.value().to_string()) {
/// Outcome::Success(
/// cookie_deserialized
/// )
/// } else {
/// Outcome::Forward(())
/// }
/// },
/// None => Outcome::Forward(())
/// }
/// }
/// }
///
/// // In your route use the AdministratorCookie request guard to ensure
/// // that only verified administrators can reach a page
/// #[get("/administrator", rank=1)]
/// fn admin_page(admin: AdministratorCookie) -> Html<String> {
///     // Show the display field in AdministratorCookie as defined above
///     Html( format!("Welcome administrator {}!", admin.display.unwrap_or_default()) )
/// }
/// #[get("/administrator", rank=2)]
/// fn admin_login_form() -> Html<String> {
/// // Html form here, see the example directory for a complete example
/// }
///
/// fn main() {
///     rocket::ignite().mount("/", routes![admin_page, admin_login_form]).launch();
/// }
///
/// ```
///
pub trait AuthorizeCookie : CookieId {
/// Serialize the cookie data type - must be implemented by cookie data type
fn store_cookie(&self) -> String;
/// Deserialize the cookie data type - must be implemented by cookie data type
    fn retrieve_cookie(string: String) -> Option<Self> where Self: Sized;
    /// Deletes a cookie. This does not need to be implemented; it defaults to removing
    /// the private cookie with the name specified by the `cookie_id()` method.
fn delete_cookie(cookies: &mut Cookies) |
}
/// ## Form Data
/// The AuthorizeForm trait handles collecting a submitted login form into a
/// data structure and authenticating the credentials inside. It also contains
/// default methods to process the login and conditionally redirect the user
/// to the correct page depending upon successful authentication or failure.
///
/// ### Authentication Failure
/// Upon failure the user is redirected to a page with a query string specified
/// by the `fail_url()` method. This allows the specified username to persist
/// across attempts.
///
/// ### Flash Message
/// The `flash_redirect()` method redirects the user but also adds a cookie,
/// called a flash message, that is deleted as soon as it is read. This is used
/// to indicate why the authentication failed. If the user refreshes the page
/// after failing to log in, the message that appeared above the login form
/// indicating why it failed will disappear. To redirect without a flash
/// message use the `redirect()` method instead of `flash_redirect()`.
///
/// ## Example
/// ```
///
/// use rocket::{Request, Outcome};
/// use std::collections::HashMap;
/// use auth::authorization::*;
/// // Create the structure that will contain the login form data
/// #[derive(Debug, Clone, Serialize, Deserialize)]
/// pub struct AdministratorForm {
/// pub username: String,
/// pub password: String,
/// }
///
/// // Implement CookieId for the form structure
/// impl CookieId for AdministratorForm {
/// fn cookie_id<'a>() -> &'a str {
/// "acid"
/// }
/// }
///
/// // Implement the AuthorizeForm for the form structure
/// impl AuthorizeForm for AdministratorForm {
/// type CookieType = AdministratorCookie;
///
/// /// Authenticate the credentials inside the login form
/// fn authenticate(&self) -> Result<Self::CookieType, AuthFail> {
///         // The code in this function should be replaced with whatever
///         // you use to authenticate users.
/// println!("Authenticating {} with password: {}", &self.username, &self.password);
///         if &self.username == "administrator" && &self.password != "" {
/// Ok(
/// AdministratorCookie {
/// userid: 1,
/// username: "administrator".to_string(),
/// display: Some("Administrator".to_string()),
/// }
/// )
/// } else {
/// Err(
/// AuthFail::new(self.username.to_string(), "Incorrect username".to_string())
/// )
/// }
/// }
///
/// /// Create a new login form instance
/// fn new_form(user: &str, pass: &str, _extras: Option<HashMap<String, String>>) -> Self {
/// AdministratorForm {
/// username: user.to_string(),
/// password: pass.to_string(),
/// }
/// }
/// }
///
/// # fn main() {}
///
/// ```
///
/// # Example Code
/// For a more detailed example, please see the example directory.
/// The example directory contains a fully working example of processing
/// and checking login information.
///
pub trait AuthorizeForm : CookieId {
type CookieType: AuthorizeCookie;
    /// Determine whether the login form structure contains
/// valid credentials, otherwise send back the username and
/// a message indicating why it failed in the `AuthFail` struct
///
/// Must be implemented on the login form structure
fn authenticate(&self) -> Result<Self::CookieType, AuthFail>;
    /// Create a new login form structure with
/// the specified username and password.
/// The first parameter is the username, then password,
/// and then optionally a HashMap containing any extra fields.
///
/// Must be implemented on the login form structure
///
// /// The password is a u8 slice, allowing passwords to be stored without
// /// being converted to hex. The slice is sufficient because new_form()
// /// is called within the from_form() function, so when the password is
// /// collected as a vector of bytes the reference to those bytes are sent
// /// to the new_form() method.
    fn new_form(user: &str, pass: &str, extras: Option<HashMap<String, String>>) -> Self;
    /// The `fail_url()` method is used to create the url that the user is sent
    /// to when authentication fails. The default implementation
    /// redirects the user to /page?user=<attempted_username>,
    /// which enables the form to display the username that was attempted;
    /// unlike FlashMessages it will persist across refreshes.
fn fail_url(user: &str) -> String {
let mut output = String::with_capacity(user.len() + 10);
output.push_str("?user=");
output.push_str(user);
output
}
/// Sanitizes the username before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
    /// `sanitization` module. This can be overridden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
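    ///
    /// A sketch of an override that also lowercases the name before
    /// sanitizing (purely illustrative):
    /// ```rust,ignore
    /// fn clean_username(string: &str) -> String {
    ///     sanitize(&string.to_lowercase())
    /// }
    /// ```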
fn clean_username(string: &str) -> String {
sanitize(string)
}
/// Sanitizes the password before storing in the login form structure.
    /// The default implementation uses the `sanitize_password()` function in the
    /// `sanitization` module. This can be overridden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_password(string: &str) -> String {
sanitize_password(string)
}
/// Sanitizes any extra variables before storing in the login form structure.
/// The default implementation uses the `sanitize()` function in the
    /// `sanitization` module. This can be overridden in your
/// `impl AuthorizeForm for {login structure}` implementation to
/// customize how the text is cleaned/sanitized.
fn clean_extras(string: &str) -> String {
sanitize(string)
}
/// Redirect the user to one page on successful authentication or
/// another page (with a `FlashMessage` indicating why) if authentication fails.
///
    /// `FlashMessage` is used to indicate why the authentication failed,
    /// so that the user can see why it failed; when they refresh,
    /// the message will disappear, enabling a clean start, but with the user name
    /// preserved in the url's query string (determined by `fail_url()`).
fn flash_redirect(&self, ok_redir: impl Into<String>, err_redir: impl Into<String>, cookies: &mut Cookies) -> Result<Redirect, Flash<Redirect>> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.into()))
},
Err(fail) => {
let mut furl = err_redir.into();
                if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Flash::error(Redirect::to(furl), &fail.msg) )
},
}
}
/// Redirect the user to one page on successful authentication or
/// another page if authentication fails.
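    ///
    /// A sketch of a login route using this non-flash variant (the route,
    /// paths, and `Form` wrapper are illustrative):
    /// ```rust,ignore
    /// #[post("/signin", data = "<form>")]
    /// fn signin(form: Form<LoginCont<AdministratorForm>>,
    ///           mut cookies: Cookies) -> Result<Redirect, Redirect> {
    ///     form.into_inner().form.redirect("/member", "/signin", &mut cookies)
    /// }
    /// ```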
fn redirect(&self, ok_redir: &str, err_redir: &str, cookies: &mut Cookies) -> Result<Redirect, Redirect> {
match self.authenticate() {
Ok(cooky) => {
let cid = Self::cookie_id();
let contents = cooky.store_cookie();
cookies.add_private(Cookie::new(cid, contents));
Ok(Redirect::to(ok_redir.to_string()))
},
Err(fail) => {
let mut furl = String::from(err_redir);
                if &fail.user != "" {
let furl_qrystr = Self::fail_url(&fail.user);
furl.push_str(&furl_qrystr);
}
Err( Redirect::to(furl) )
},
}
}
}
impl<T: AuthorizeCookie + Clone> AuthCont<T> {
pub fn cookie_data(&self) -> T {
// Todo: change the signature from &self to self
        // and remove the .clone() method call
self.cookie.clone()
}
}
/// # Request Guard
/// Request guard for the AuthCont (Authentication Container).
/// This allows a route to call a user type like:
///
/// ```rust,no_run
///
/// use auth::authorization::*;
/// # use administration::*;
/// use rocket;
/// #[get("/protected")]
/// fn protected(container: AuthCont<AdministratorCookie>) -> Html<String> {
/// let admin = container.cookie;
///     Html(String::new())
/// }
///
/// # fn main() {
/// # rocket::ignite().mount("/", routes![]).launch();
/// # }
///
/// ```
///
impl<'a, 'r, T: AuthorizeCookie> FromRequest<'a, 'r> for AuthCont<T> {
type Error = ();
fn from_request(request: &'a Request<'r>) -> ::rocket::request::Outcome<AuthCont<T>,Self::Error>{
let cid = T::cookie_id();
let mut cookies = request.cookies();
match cookies.get_private(cid) {
Some(cookie) => {
if let Some(cookie_deserialized) = T::retrieve_cookie(cookie.value().to_string()) {
Outcome::Success(
AuthCont {
cookie: cookie_deserialized,
}
)
} else {
Outcome::Forward(())
}
},
None => Outcome::Forward(())
}
}
}
/// # Collecting Login Form Data
/// If your login form requires more than just a username and password the
/// extras parameter, in `AuthorizeForm::new_form(user, pass, extras)`, holds
/// all other fields in a `HashMap<String, String>` to allow processing any
/// field that was submitted. The username and password are separate because
/// those are universal fields.
///
/// ## Custom Username/Password Field Names
/// By default the function will look for a username and a password field.
/// If your form does not use those particular names you can always use the
/// extras `HashMap` to retrieve the username and password when using different
/// input box names. The function will return `Ok()` even if no username or
/// password was entered; this allows custom field names to be accessed
/// and authenticated by the `authenticate()` method.
impl<'f, A: AuthorizeForm> FromForm<'f> for LoginCont<A> {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<Self, Self::Error> {
let mut user: String = String::new();
let mut pass: String = String::new();
let mut extras: HashMap<String, String> = HashMap::new();
        for FormItem { key, value, .. } in form_items {
            match key.as_str() {
"username" => {
user = A::clean_username(&value.url_decode().unwrap_or(String::new()));
},
"password" => {
pass = A::clean_password(&value.url_decode().unwrap_or(String::new()));
},
// _ => {},
a => {
// extras.insert( a.to_string(), A::clean_extras( &value.url_decode().unwrap_or(String::new()) ) );
extras.insert( a.to_string(), value.url_decode().unwrap_or(String::new()) );
},
}
}
// Do not need to check for username / password here,
// if the authentication method requires them it will
// fail at that point.
Ok(
LoginCont {
                form: if extras.is_empty() {
A::new_form(&user, &pass, None)
} else {
A::new_form(&user, &pass, Some(extras))
},
}
)
}
}
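/// Collects the `user` field of a submitted query string or form
/// (e.g. `?user=bob`) into a `UserQuery`, sanitizing the value first.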
impl<'f> FromForm<'f> for UserQuery {
type Error = &'static str;
fn from_form(form_items: &mut FormItems<'f>, _strict: bool) -> Result<UserQuery, Self::Error> {
let mut name: String = String::new();
        for FormItem { key, value, .. } in form_items {
match key.as_str() {
"user" => { name = sanitize( &value.url_decode().unwrap_or(String::new()) ); },
_ => {},
}
}
Ok(UserQuery { user: name })
}
}
| {
cookies.remove_private(
Cookie::named( Self::cookie_id() )
);
} | identifier_body |
binder.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
capability::{
CapabilityProvider, CapabilitySource, ComponentCapability, InternalCapability,
OptionalTask,
},
channel,
model::{
component::{BindReason, WeakComponentInstance},
error::ModelError,
hooks::{Event, EventPayload, EventType, Hook, HooksRegistration},
model::Model,
routing::report_routing_failure,
},
},
async_trait::async_trait,
cm_rust::{CapabilityName, CapabilityPath, ProtocolDecl},
fuchsia_async as fasync, fuchsia_zircon as zx,
lazy_static::lazy_static,
moniker::{AbsoluteMoniker, AbsoluteMonikerBase, ExtendedMoniker},
std::{
path::PathBuf,
sync::{Arc, Weak},
},
};
lazy_static! {
pub static ref BINDER_SERVICE: CapabilityName = "fuchsia.component.Binder".into();
pub static ref BINDER_CAPABILITY: ComponentCapability =
ComponentCapability::Protocol(ProtocolDecl {
name: BINDER_SERVICE.clone(),
source_path: Some(CapabilityPath {
basename: "fuchsia.component.Binder".into(),
dirname: "svc".into()
}),
});
}
/// Implementation of `fuchsia.component.Binder` FIDL protocol.
pub struct BinderCapabilityProvider {
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
}
impl BinderCapabilityProvider {
pub fn new(
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
) -> Self {
Self { source, target, host }
}
}
#[async_trait]
impl CapabilityProvider for BinderCapabilityProvider {
async fn open(
self: Box<Self>,
_flags: u32,
_open_mode: u32,
_relative_path: PathBuf,
server_end: &mut zx::Channel,
) -> Result<OptionalTask, ModelError> {
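        // Spawn a task that binds the source component; if binding fails,
        // the routing failure is reported to the client over the retained
        // server endpoint on behalf of the target component.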
let host = self.host.clone();
let target = self.target.clone();
let source = self.source.clone();
let server_end = channel::take_channel(server_end);
Ok(fasync::Task::spawn(async move {
if let Err(err) = host.bind(source).await {
let res = target.upgrade().map_err(|e| ModelError::from(e));
match res {
Ok(target) => {
report_routing_failure(&target, &*BINDER_CAPABILITY, &err, server_end)
.await;
}
Err(err) => {
log::warn!("failed to upgrade reference to {}: {}", target.moniker, err);
}
}
}
})
.into())
}
}
// A `Hook` that serves the `fuchsia.component.Binder` FIDL protocol.
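// It installs a `BinderCapabilityProvider` whenever routing resolves the
// `fuchsia.component.Binder` framework capability.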
#[derive(Clone)]
pub struct BinderCapabilityHost {
model: Weak<Model>,
}
impl BinderCapabilityHost {
pub fn new(model: Weak<Model>) -> Self {
Self { model }
}
pub fn hooks(self: &Arc<Self>) -> Vec<HooksRegistration> {
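        // Subscribe to CapabilityRouted events so that requests for
        // `fuchsia.component.Binder` can be intercepted by this host.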
vec![HooksRegistration::new(
"BinderCapabilityHost",
vec![EventType::CapabilityRouted],
Arc::downgrade(self) as Weak<dyn Hook>,
)]
}
pub async fn bind(&self, source: WeakComponentInstance) -> Result<(), ModelError> {
let source = source.upgrade().map_err(|e| ModelError::from(e))?;
source.bind(&BindReason::Binder).await?;
Ok(())
}
async fn on_scoped_framework_capability_routed_async<'a>(
self: Arc<Self>,
source: WeakComponentInstance,
target_moniker: AbsoluteMoniker,
capability: &'a InternalCapability,
capability_provider: Option<Box<dyn CapabilityProvider>>,
) -> Result<Option<Box<dyn CapabilityProvider>>, ModelError> {
// If some other capability has already been installed, then there's nothing to
// do here.
if capability_provider.is_none() && capability.matches_protocol(&BINDER_SERVICE) {
let model = self.model.upgrade().ok_or(ModelError::ModelNotAvailable)?;
let target =
WeakComponentInstance::new(&model.look_up(&target_moniker.to_partial()).await?);
Ok(Some(Box::new(BinderCapabilityProvider::new(source, target, self.clone()))
as Box<dyn CapabilityProvider>))
} else {
Ok(capability_provider)
}
}
}
#[async_trait]
impl Hook for BinderCapabilityHost {
async fn on(self: Arc<Self>, event: &Event) -> Result<(), ModelError> {
if let Ok(EventPayload::CapabilityRouted {
source: CapabilitySource::Framework { capability, component },
capability_provider,
}) = &event.result
{
let target_moniker = match &event.target_moniker {
ExtendedMoniker::ComponentManager => {
Err(ModelError::UnexpectedComponentManagerMoniker)
}
ExtendedMoniker::ComponentInstance(moniker) => Ok(moniker),
}?;
let mut capability_provider = capability_provider.lock().await;
*capability_provider = self
.on_scoped_framework_capability_routed_async(
component.clone(),
target_moniker.clone(),
&capability,
capability_provider.take(),
)
.await?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
builtin_environment::BuiltinEnvironment,
capability::CapabilityProvider,
model::{
events::{source::EventSource, stream::EventStream},
testing::test_helpers::*,
},
},
cm_rust::{self, CapabilityName, ComponentDecl, EventMode},
cm_rust_testing::*,
fidl::{client::Client, handle::AsyncChannel},
fuchsia_zircon as zx,
futures::{lock::Mutex, StreamExt},
matches::assert_matches,
moniker::AbsoluteMoniker,
std::path::PathBuf,
};
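    // Test fixture that spins up a complete component model and hands out
    // `BinderCapabilityProvider`s for arbitrary source/target monikers.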
struct BinderCapabilityTestFixture {
builtin_environment: Arc<Mutex<BuiltinEnvironment>>,
}
impl BinderCapabilityTestFixture {
async fn new(components: Vec<(&'static str, ComponentDecl)>) -> Self {
let TestModelResult { builtin_environment,.. } =
TestEnvironmentBuilder::new().set_components(components).build().await;
BinderCapabilityTestFixture { builtin_environment }
}
async fn new_event_stream(
&self,
events: Vec<CapabilityName>,
mode: EventMode,
) -> (EventSource, EventStream) {
new_event_stream(self.builtin_environment.clone(), events, mode).await
}
async fn provider(
&self,
source: AbsoluteMoniker,
target: AbsoluteMoniker,
) -> Box<BinderCapabilityProvider> {
let builtin_environment = self.builtin_environment.lock().await;
let host = builtin_environment.binder_capability_host.clone();
let source = builtin_environment
.model
.look_up(&source.to_partial())
.await
.expect("failed to look up source moniker");
let target = builtin_environment
.model
.look_up(&target.to_partial())
.await
.expect("failed to look up target moniker");
Box::new(BinderCapabilityProvider::new(
WeakComponentInstance::new(&source),
WeakComponentInstance::new(&target),
host,
))
}
}
#[fuchsia::test]
async fn component_starts_on_open() {
let fixture = BinderCapabilityTestFixture::new(vec![
(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("source")
.add_lazy_child("target")
.build(),
),
("source", component_decl_with_test_runner()),
("target", component_decl_with_test_runner()),
])
.await;
let (_event_source, mut event_stream) = fixture
.new_event_stream(
vec![EventType::Resolved.into(), EventType::Started.into()],
EventMode::Async,
)
.await;
let (_client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = vec!["source:0"].into();
let () = fixture
.provider(moniker.clone(), vec!["target:0"].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
assert!(event_stream.wait_until(EventType::Resolved, moniker.clone()).await.is_some());
assert!(event_stream.wait_until(EventType::Started, moniker.clone()).await.is_some());
}
// TODO(yaneury): Figure out a way to test this behavior.
#[ignore]
#[fuchsia::test]
async fn channel_is_closed_if_component_does_not_exist() | .expect("task is empty")
.await;
let client_end =
            AsyncChannel::from_channel(client_end).expect("failed to create AsyncChannel");
let client = Client::new(client_end, "binder_service");
let mut event_receiver = client.take_event_receiver();
assert_matches!(
event_receiver.next().await,
Some(Err(fidl::Error::ClientChannelClosed {
status: zx::Status::NOT_FOUND,
protocol_name: "binder_service"
}))
);
assert_matches!(event_receiver.next().await, None);
}
}
| {
let fixture = BinderCapabilityTestFixture::new(vec![(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("target")
.add_lazy_child("unresolvable")
.build(),
)])
.await;
let (client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = AbsoluteMoniker::from(vec!["foo:0"]);
let () = fixture
.provider(moniker, vec![].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take() | identifier_body |
binder.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
capability::{
CapabilityProvider, CapabilitySource, ComponentCapability, InternalCapability,
OptionalTask,
},
channel,
model::{
component::{BindReason, WeakComponentInstance},
error::ModelError,
hooks::{Event, EventPayload, EventType, Hook, HooksRegistration},
model::Model,
routing::report_routing_failure,
},
},
async_trait::async_trait,
cm_rust::{CapabilityName, CapabilityPath, ProtocolDecl},
fuchsia_async as fasync, fuchsia_zircon as zx,
lazy_static::lazy_static,
moniker::{AbsoluteMoniker, AbsoluteMonikerBase, ExtendedMoniker},
std::{
path::PathBuf,
sync::{Arc, Weak},
},
};
lazy_static! {
pub static ref BINDER_SERVICE: CapabilityName = "fuchsia.component.Binder".into();
pub static ref BINDER_CAPABILITY: ComponentCapability =
ComponentCapability::Protocol(ProtocolDecl {
name: BINDER_SERVICE.clone(),
source_path: Some(CapabilityPath {
basename: "fuchsia.component.Binder".into(),
dirname: "svc".into()
}),
});
}
/// Implementation of `fuchsia.component.Binder` FIDL protocol.
pub struct BinderCapabilityProvider {
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
}
impl BinderCapabilityProvider {
pub fn new(
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
) -> Self {
Self { source, target, host }
} |
#[async_trait]
impl CapabilityProvider for BinderCapabilityProvider {
async fn open(
self: Box<Self>,
_flags: u32,
_open_mode: u32,
_relative_path: PathBuf,
server_end: &mut zx::Channel,
) -> Result<OptionalTask, ModelError> {
let host = self.host.clone();
let target = self.target.clone();
let source = self.source.clone();
let server_end = channel::take_channel(server_end);
Ok(fasync::Task::spawn(async move {
if let Err(err) = host.bind(source).await {
let res = target.upgrade().map_err(|e| ModelError::from(e));
match res {
Ok(target) => {
report_routing_failure(&target, &*BINDER_CAPABILITY, &err, server_end)
.await;
}
Err(err) => {
log::warn!("failed to upgrade reference to {}: {}", target.moniker, err);
}
}
}
})
.into())
}
}
// A `Hook` that serves the `fuchsia.component.Binder` FIDL protocol.
#[derive(Clone)]
pub struct BinderCapabilityHost {
model: Weak<Model>,
}
impl BinderCapabilityHost {
pub fn new(model: Weak<Model>) -> Self {
Self { model }
}
pub fn hooks(self: &Arc<Self>) -> Vec<HooksRegistration> {
vec![HooksRegistration::new(
"BinderCapabilityHost",
vec![EventType::CapabilityRouted],
Arc::downgrade(self) as Weak<dyn Hook>,
)]
}
pub async fn bind(&self, source: WeakComponentInstance) -> Result<(), ModelError> {
let source = source.upgrade().map_err(|e| ModelError::from(e))?;
source.bind(&BindReason::Binder).await?;
Ok(())
}
async fn on_scoped_framework_capability_routed_async<'a>(
self: Arc<Self>,
source: WeakComponentInstance,
target_moniker: AbsoluteMoniker,
capability: &'a InternalCapability,
capability_provider: Option<Box<dyn CapabilityProvider>>,
) -> Result<Option<Box<dyn CapabilityProvider>>, ModelError> {
// If some other capability has already been installed, then there's nothing to
// do here.
if capability_provider.is_none() && capability.matches_protocol(&BINDER_SERVICE) {
let model = self.model.upgrade().ok_or(ModelError::ModelNotAvailable)?;
let target =
WeakComponentInstance::new(&model.look_up(&target_moniker.to_partial()).await?);
Ok(Some(Box::new(BinderCapabilityProvider::new(source, target, self.clone()))
as Box<dyn CapabilityProvider>))
} else {
Ok(capability_provider)
}
}
}
#[async_trait]
impl Hook for BinderCapabilityHost {
async fn on(self: Arc<Self>, event: &Event) -> Result<(), ModelError> {
if let Ok(EventPayload::CapabilityRouted {
source: CapabilitySource::Framework { capability, component },
capability_provider,
}) = &event.result
{
let target_moniker = match &event.target_moniker {
ExtendedMoniker::ComponentManager => {
Err(ModelError::UnexpectedComponentManagerMoniker)
}
ExtendedMoniker::ComponentInstance(moniker) => Ok(moniker),
}?;
let mut capability_provider = capability_provider.lock().await;
*capability_provider = self
.on_scoped_framework_capability_routed_async(
component.clone(),
target_moniker.clone(),
&capability,
capability_provider.take(),
)
.await?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
builtin_environment::BuiltinEnvironment,
capability::CapabilityProvider,
model::{
events::{source::EventSource, stream::EventStream},
testing::test_helpers::*,
},
},
cm_rust::{self, CapabilityName, ComponentDecl, EventMode},
cm_rust_testing::*,
fidl::{client::Client, handle::AsyncChannel},
fuchsia_zircon as zx,
futures::{lock::Mutex, StreamExt},
matches::assert_matches,
moniker::AbsoluteMoniker,
std::path::PathBuf,
};
struct BinderCapabilityTestFixture {
builtin_environment: Arc<Mutex<BuiltinEnvironment>>,
}
impl BinderCapabilityTestFixture {
async fn new(components: Vec<(&'static str, ComponentDecl)>) -> Self {
let TestModelResult { builtin_environment,.. } =
TestEnvironmentBuilder::new().set_components(components).build().await;
BinderCapabilityTestFixture { builtin_environment }
}
async fn new_event_stream(
&self,
events: Vec<CapabilityName>,
mode: EventMode,
) -> (EventSource, EventStream) {
new_event_stream(self.builtin_environment.clone(), events, mode).await
}
async fn provider(
&self,
source: AbsoluteMoniker,
target: AbsoluteMoniker,
) -> Box<BinderCapabilityProvider> {
let builtin_environment = self.builtin_environment.lock().await;
let host = builtin_environment.binder_capability_host.clone();
let source = builtin_environment
.model
.look_up(&source.to_partial())
.await
.expect("failed to look up source moniker");
let target = builtin_environment
.model
.look_up(&target.to_partial())
.await
.expect("failed to look up target moniker");
Box::new(BinderCapabilityProvider::new(
WeakComponentInstance::new(&source),
WeakComponentInstance::new(&target),
host,
))
}
}
#[fuchsia::test]
async fn component_starts_on_open() {
let fixture = BinderCapabilityTestFixture::new(vec![
(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("source")
.add_lazy_child("target")
.build(),
),
("source", component_decl_with_test_runner()),
("target", component_decl_with_test_runner()),
])
.await;
let (_event_source, mut event_stream) = fixture
.new_event_stream(
vec![EventType::Resolved.into(), EventType::Started.into()],
EventMode::Async,
)
.await;
let (_client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = vec!["source:0"].into();
let () = fixture
.provider(moniker.clone(), vec!["target:0"].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
assert!(event_stream.wait_until(EventType::Resolved, moniker.clone()).await.is_some());
assert!(event_stream.wait_until(EventType::Started, moniker.clone()).await.is_some());
}
// TODO(yaneury): Figure out a way to test this behavior.
#[ignore]
#[fuchsia::test]
async fn channel_is_closed_if_component_does_not_exist() {
let fixture = BinderCapabilityTestFixture::new(vec![(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("target")
.add_lazy_child("unresolvable")
.build(),
)])
.await;
let (client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = AbsoluteMoniker::from(vec!["foo:0"]);
let () = fixture
.provider(moniker, vec![].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
let client_end =
            AsyncChannel::from_channel(client_end).expect("failed to create AsyncChannel");
let client = Client::new(client_end, "binder_service");
let mut event_receiver = client.take_event_receiver();
assert_matches!(
event_receiver.next().await,
Some(Err(fidl::Error::ClientChannelClosed {
status: zx::Status::NOT_FOUND,
protocol_name: "binder_service"
}))
);
assert_matches!(event_receiver.next().await, None);
}
} | } | random_line_split |
binder.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
capability::{
CapabilityProvider, CapabilitySource, ComponentCapability, InternalCapability,
OptionalTask,
},
channel,
model::{
component::{BindReason, WeakComponentInstance},
error::ModelError,
hooks::{Event, EventPayload, EventType, Hook, HooksRegistration},
model::Model,
routing::report_routing_failure,
},
},
async_trait::async_trait,
cm_rust::{CapabilityName, CapabilityPath, ProtocolDecl},
fuchsia_async as fasync, fuchsia_zircon as zx,
lazy_static::lazy_static,
moniker::{AbsoluteMoniker, AbsoluteMonikerBase, ExtendedMoniker},
std::{
path::PathBuf,
sync::{Arc, Weak},
},
};
lazy_static! {
pub static ref BINDER_SERVICE: CapabilityName = "fuchsia.component.Binder".into();
pub static ref BINDER_CAPABILITY: ComponentCapability =
ComponentCapability::Protocol(ProtocolDecl {
name: BINDER_SERVICE.clone(),
source_path: Some(CapabilityPath {
basename: "fuchsia.component.Binder".into(),
dirname: "svc".into()
}),
});
}
/// Implementation of `fuchsia.component.Binder` FIDL protocol.
pub struct BinderCapabilityProvider {
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
}
impl BinderCapabilityProvider {
pub fn new(
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
) -> Self {
Self { source, target, host }
}
}
#[async_trait]
impl CapabilityProvider for BinderCapabilityProvider {
async fn open(
self: Box<Self>,
_flags: u32,
_open_mode: u32,
_relative_path: PathBuf,
server_end: &mut zx::Channel,
) -> Result<OptionalTask, ModelError> {
let host = self.host.clone();
let target = self.target.clone();
let source = self.source.clone();
let server_end = channel::take_channel(server_end);
Ok(fasync::Task::spawn(async move {
if let Err(err) = host.bind(source).await {
let res = target.upgrade().map_err(|e| ModelError::from(e));
match res {
Ok(target) => {
report_routing_failure(&target, &*BINDER_CAPABILITY, &err, server_end)
.await;
}
Err(err) => {
log::warn!("failed to upgrade reference to {}: {}", target.moniker, err);
}
}
}
})
.into())
}
}
// A `Hook` that serves the `fuchsia.component.Binder` FIDL protocol.
#[derive(Clone)]
pub struct BinderCapabilityHost {
model: Weak<Model>,
}
impl BinderCapabilityHost {
pub fn new(model: Weak<Model>) -> Self {
Self { model }
}
pub fn hooks(self: &Arc<Self>) -> Vec<HooksRegistration> {
vec![HooksRegistration::new(
"BinderCapabilityHost",
vec![EventType::CapabilityRouted],
Arc::downgrade(self) as Weak<dyn Hook>,
)]
}
pub async fn bind(&self, source: WeakComponentInstance) -> Result<(), ModelError> {
let source = source.upgrade().map_err(|e| ModelError::from(e))?;
source.bind(&BindReason::Binder).await?;
Ok(())
}
async fn on_scoped_framework_capability_routed_async<'a>(
self: Arc<Self>,
source: WeakComponentInstance,
target_moniker: AbsoluteMoniker,
capability: &'a InternalCapability,
capability_provider: Option<Box<dyn CapabilityProvider>>,
) -> Result<Option<Box<dyn CapabilityProvider>>, ModelError> {
// If some other capability has already been installed, then there's nothing to
// do here.
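        // Otherwise, install a provider scoped to this source component and
        // the routing target.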
if capability_provider.is_none() && capability.matches_protocol(&BINDER_SERVICE) | else {
Ok(capability_provider)
}
}
}
#[async_trait]
impl Hook for BinderCapabilityHost {
async fn on(self: Arc<Self>, event: &Event) -> Result<(), ModelError> {
if let Ok(EventPayload::CapabilityRouted {
source: CapabilitySource::Framework { capability, component },
capability_provider,
}) = &event.result
{
let target_moniker = match &event.target_moniker {
ExtendedMoniker::ComponentManager => {
Err(ModelError::UnexpectedComponentManagerMoniker)
}
ExtendedMoniker::ComponentInstance(moniker) => Ok(moniker),
}?;
let mut capability_provider = capability_provider.lock().await;
*capability_provider = self
.on_scoped_framework_capability_routed_async(
component.clone(),
target_moniker.clone(),
&capability,
capability_provider.take(),
)
.await?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
builtin_environment::BuiltinEnvironment,
capability::CapabilityProvider,
model::{
events::{source::EventSource, stream::EventStream},
testing::test_helpers::*,
},
},
cm_rust::{self, CapabilityName, ComponentDecl, EventMode},
cm_rust_testing::*,
fidl::{client::Client, handle::AsyncChannel},
fuchsia_zircon as zx,
futures::{lock::Mutex, StreamExt},
matches::assert_matches,
moniker::AbsoluteMoniker,
std::path::PathBuf,
};
struct BinderCapabilityTestFixture {
builtin_environment: Arc<Mutex<BuiltinEnvironment>>,
}
impl BinderCapabilityTestFixture {
async fn new(components: Vec<(&'static str, ComponentDecl)>) -> Self {
let TestModelResult { builtin_environment,.. } =
TestEnvironmentBuilder::new().set_components(components).build().await;
BinderCapabilityTestFixture { builtin_environment }
}
async fn new_event_stream(
&self,
events: Vec<CapabilityName>,
mode: EventMode,
) -> (EventSource, EventStream) {
new_event_stream(self.builtin_environment.clone(), events, mode).await
}
async fn provider(
&self,
source: AbsoluteMoniker,
target: AbsoluteMoniker,
) -> Box<BinderCapabilityProvider> {
let builtin_environment = self.builtin_environment.lock().await;
let host = builtin_environment.binder_capability_host.clone();
let source = builtin_environment
.model
.look_up(&source.to_partial())
.await
.expect("failed to look up source moniker");
let target = builtin_environment
.model
.look_up(&target.to_partial())
.await
.expect("failed to look up target moniker");
Box::new(BinderCapabilityProvider::new(
WeakComponentInstance::new(&source),
WeakComponentInstance::new(&target),
host,
))
}
}
#[fuchsia::test]
async fn component_starts_on_open() {
let fixture = BinderCapabilityTestFixture::new(vec![
(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("source")
.add_lazy_child("target")
.build(),
),
("source", component_decl_with_test_runner()),
("target", component_decl_with_test_runner()),
])
.await;
let (_event_source, mut event_stream) = fixture
.new_event_stream(
vec![EventType::Resolved.into(), EventType::Started.into()],
EventMode::Async,
)
.await;
let (_client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = vec!["source:0"].into();
let () = fixture
.provider(moniker.clone(), vec!["target:0"].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
assert!(event_stream.wait_until(EventType::Resolved, moniker.clone()).await.is_some());
assert!(event_stream.wait_until(EventType::Started, moniker.clone()).await.is_some());
}
// TODO(yaneury): Figure out a way to test this behavior.
#[ignore]
#[fuchsia::test]
async fn channel_is_closed_if_component_does_not_exist() {
let fixture = BinderCapabilityTestFixture::new(vec![(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("target")
.add_lazy_child("unresolvable")
.build(),
)])
.await;
let (client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = AbsoluteMoniker::from(vec!["foo:0"]);
let () = fixture
.provider(moniker, vec![].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
let client_end =
            AsyncChannel::from_channel(client_end).expect("failed to create AsyncChannel");
let client = Client::new(client_end, "binder_service");
let mut event_receiver = client.take_event_receiver();
assert_matches!(
event_receiver.next().await,
Some(Err(fidl::Error::ClientChannelClosed {
status: zx::Status::NOT_FOUND,
protocol_name: "binder_service"
}))
);
assert_matches!(event_receiver.next().await, None);
}
}
| {
let model = self.model.upgrade().ok_or(ModelError::ModelNotAvailable)?;
let target =
WeakComponentInstance::new(&model.look_up(&target_moniker.to_partial()).await?);
Ok(Some(Box::new(BinderCapabilityProvider::new(source, target, self.clone()))
as Box<dyn CapabilityProvider>))
} | conditional_block |
binder.rs | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
crate::{
capability::{
CapabilityProvider, CapabilitySource, ComponentCapability, InternalCapability,
OptionalTask,
},
channel,
model::{
component::{BindReason, WeakComponentInstance},
error::ModelError,
hooks::{Event, EventPayload, EventType, Hook, HooksRegistration},
model::Model,
routing::report_routing_failure,
},
},
async_trait::async_trait,
cm_rust::{CapabilityName, CapabilityPath, ProtocolDecl},
fuchsia_async as fasync, fuchsia_zircon as zx,
lazy_static::lazy_static,
moniker::{AbsoluteMoniker, AbsoluteMonikerBase, ExtendedMoniker},
std::{
path::PathBuf,
sync::{Arc, Weak},
},
};
lazy_static! {
pub static ref BINDER_SERVICE: CapabilityName = "fuchsia.component.Binder".into();
pub static ref BINDER_CAPABILITY: ComponentCapability =
ComponentCapability::Protocol(ProtocolDecl {
name: BINDER_SERVICE.clone(),
source_path: Some(CapabilityPath {
basename: "fuchsia.component.Binder".into(),
dirname: "svc".into()
}),
});
}
/// Implementation of `fuchsia.component.Binder` FIDL protocol.
pub struct BinderCapabilityProvider {
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
}
impl BinderCapabilityProvider {
pub fn new(
source: WeakComponentInstance,
target: WeakComponentInstance,
host: Arc<BinderCapabilityHost>,
) -> Self {
Self { source, target, host }
}
}
#[async_trait]
impl CapabilityProvider for BinderCapabilityProvider {
async fn open(
self: Box<Self>,
_flags: u32,
_open_mode: u32,
_relative_path: PathBuf,
server_end: &mut zx::Channel,
) -> Result<OptionalTask, ModelError> {
let host = self.host.clone();
let target = self.target.clone();
let source = self.source.clone();
let server_end = channel::take_channel(server_end);
Ok(fasync::Task::spawn(async move {
if let Err(err) = host.bind(source).await {
let res = target.upgrade().map_err(|e| ModelError::from(e));
match res {
Ok(target) => {
report_routing_failure(&target, &*BINDER_CAPABILITY, &err, server_end)
.await;
}
Err(err) => {
log::warn!("failed to upgrade reference to {}: {}", target.moniker, err);
}
}
}
})
.into())
}
}
// A `Hook` that serves the `fuchsia.component.Binder` FIDL protocol.
#[derive(Clone)]
pub struct BinderCapabilityHost {
model: Weak<Model>,
}
impl BinderCapabilityHost {
pub fn new(model: Weak<Model>) -> Self {
Self { model }
}
pub fn hooks(self: &Arc<Self>) -> Vec<HooksRegistration> {
vec![HooksRegistration::new(
"BinderCapabilityHost",
vec![EventType::CapabilityRouted],
Arc::downgrade(self) as Weak<dyn Hook>,
)]
}
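    // Upgrades the weak reference and starts the component, attributing the
    // bind to `BindReason::Binder`.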
pub async fn bind(&self, source: WeakComponentInstance) -> Result<(), ModelError> {
let source = source.upgrade().map_err(|e| ModelError::from(e))?;
source.bind(&BindReason::Binder).await?;
Ok(())
}
async fn on_scoped_framework_capability_routed_async<'a>(
self: Arc<Self>,
source: WeakComponentInstance,
target_moniker: AbsoluteMoniker,
capability: &'a InternalCapability,
capability_provider: Option<Box<dyn CapabilityProvider>>,
) -> Result<Option<Box<dyn CapabilityProvider>>, ModelError> {
// If some other capability has already been installed, then there's nothing to
// do here.
if capability_provider.is_none() && capability.matches_protocol(&BINDER_SERVICE) {
let model = self.model.upgrade().ok_or(ModelError::ModelNotAvailable)?;
let target =
WeakComponentInstance::new(&model.look_up(&target_moniker.to_partial()).await?);
Ok(Some(Box::new(BinderCapabilityProvider::new(source, target, self.clone()))
as Box<dyn CapabilityProvider>))
} else {
Ok(capability_provider)
}
}
}
#[async_trait]
impl Hook for BinderCapabilityHost {
async fn on(self: Arc<Self>, event: &Event) -> Result<(), ModelError> {
if let Ok(EventPayload::CapabilityRouted {
source: CapabilitySource::Framework { capability, component },
capability_provider,
}) = &event.result
{
let target_moniker = match &event.target_moniker {
ExtendedMoniker::ComponentManager => {
Err(ModelError::UnexpectedComponentManagerMoniker)
}
ExtendedMoniker::ComponentInstance(moniker) => Ok(moniker),
}?;
let mut capability_provider = capability_provider.lock().await;
*capability_provider = self
.on_scoped_framework_capability_routed_async(
component.clone(),
target_moniker.clone(),
&capability,
capability_provider.take(),
)
.await?;
}
Ok(())
}
}
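// Rough routing flow, sketched from the code above (not part of the original
// source):
//
//   CapabilityRouted event for the framework protocol "fuchsia.component.Binder"
//     -> Hook::on
//        -> on_scoped_framework_capability_routed_async
//           -> installs a BinderCapabilityProvider(source, target)
//              -> open() spawns a task that binds (starts) the source component,
//                 reporting any routing failure over the provided server channel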
#[cfg(test)]
mod tests {
use {
super::*,
crate::{
builtin_environment::BuiltinEnvironment,
capability::CapabilityProvider,
model::{
events::{source::EventSource, stream::EventStream},
testing::test_helpers::*,
},
},
cm_rust::{self, CapabilityName, ComponentDecl, EventMode},
cm_rust_testing::*,
fidl::{client::Client, handle::AsyncChannel},
fuchsia_zircon as zx,
futures::{lock::Mutex, StreamExt},
matches::assert_matches,
moniker::AbsoluteMoniker,
std::path::PathBuf,
};
struct BinderCapabilityTestFixture {
builtin_environment: Arc<Mutex<BuiltinEnvironment>>,
}
impl BinderCapabilityTestFixture {
async fn | (components: Vec<(&'static str, ComponentDecl)>) -> Self {
            let TestModelResult { builtin_environment, .. } =
TestEnvironmentBuilder::new().set_components(components).build().await;
BinderCapabilityTestFixture { builtin_environment }
}
async fn new_event_stream(
&self,
events: Vec<CapabilityName>,
mode: EventMode,
) -> (EventSource, EventStream) {
new_event_stream(self.builtin_environment.clone(), events, mode).await
}
async fn provider(
&self,
source: AbsoluteMoniker,
target: AbsoluteMoniker,
) -> Box<BinderCapabilityProvider> {
let builtin_environment = self.builtin_environment.lock().await;
let host = builtin_environment.binder_capability_host.clone();
let source = builtin_environment
.model
.look_up(&source.to_partial())
.await
.expect("failed to look up source moniker");
let target = builtin_environment
.model
.look_up(&target.to_partial())
.await
.expect("failed to look up target moniker");
Box::new(BinderCapabilityProvider::new(
WeakComponentInstance::new(&source),
WeakComponentInstance::new(&target),
host,
))
}
}
#[fuchsia::test]
async fn component_starts_on_open() {
let fixture = BinderCapabilityTestFixture::new(vec![
(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("source")
.add_lazy_child("target")
.build(),
),
("source", component_decl_with_test_runner()),
("target", component_decl_with_test_runner()),
])
.await;
let (_event_source, mut event_stream) = fixture
.new_event_stream(
vec![EventType::Resolved.into(), EventType::Started.into()],
EventMode::Async,
)
.await;
let (_client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = vec!["source:0"].into();
let () = fixture
.provider(moniker.clone(), vec!["target:0"].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
assert!(event_stream.wait_until(EventType::Resolved, moniker.clone()).await.is_some());
assert!(event_stream.wait_until(EventType::Started, moniker.clone()).await.is_some());
}
// TODO(yaneury): Figure out a way to test this behavior.
#[ignore]
#[fuchsia::test]
async fn channel_is_closed_if_component_does_not_exist() {
let fixture = BinderCapabilityTestFixture::new(vec![(
"root",
ComponentDeclBuilder::new()
.add_lazy_child("target")
.add_lazy_child("unresolvable")
.build(),
)])
.await;
let (client_end, mut server_end) =
zx::Channel::create().expect("failed to create channels");
let moniker: AbsoluteMoniker = AbsoluteMoniker::from(vec!["foo:0"]);
let () = fixture
.provider(moniker, vec![].into())
.await
.open(0, 0, PathBuf::new(), &mut server_end)
.await
.expect("failed to call open()")
.take()
.expect("task is empty")
.await;
let client_end =
            AsyncChannel::from_channel(client_end).expect("failed to create AsyncChannel");
let client = Client::new(client_end, "binder_service");
let mut event_receiver = client.take_event_receiver();
assert_matches!(
event_receiver.next().await,
Some(Err(fidl::Error::ClientChannelClosed {
status: zx::Status::NOT_FOUND,
protocol_name: "binder_service"
}))
);
assert_matches!(event_receiver.next().await, None);
}
}
| new | identifier_name |
lib.rs | (arg: *mut #name) {
unsafe {
assert!(!arg.is_null());
&*arg;
}
}
};
let default_name = swig_fn(&name, "default");
        // TODO: Add more derive capabilities
        // Extracting the derived methods from `#[swig_derive(...)]`.
        // We need to emit the SWIG code here ourselves, since we can't
        // attach the `#[swiggen(Foo)]` attribute to the derived methods.
let derivs = get_derives(&self.attrs);
let new_toks = derivs.iter().filter_map(|w| {
match w.as_str() {
"Default" => {
Some(quote! {
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn #default_name() -> *mut #name {
Box::into_raw(Box::new(#name::default()))
}
})
},
_ => None
}
});
tokens.append_all(new_toks);
tokens
}
}
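// Illustrative expansion (a sketch, not from the original source): with
// `#[swig_derive(Default)]` on a hypothetical `struct Foo`, the filter above
// emits roughly the following, assuming `SwigTag::SwigInject` renders as the
// `__SWIG_INJECT_` prefix seen in the doc-comment examples later in this file:
//
//     #[allow(non_snake_case)]
//     #[no_mangle]
//     pub extern "C" fn __SWIG_INJECT_default_Foo() -> *mut Foo {
//         Box::into_raw(Box::new(Foo::default()))
//     }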
/// A method definition inside an impl block has an additional
/// `base` variable corresponding to the name of the type.
struct InternalFn<'a> {
base: &'a Option<syn::Ident>,
fn_def: &'a syn::ItemFn,
}
/// Convenience method to use cbindgen to convert types into C-compat types.
/// e.g. "input: u32" -> `cbindgen_write((input, u32))` might output `uint32 input`.
fn cbindgen_write<S: Source>(s: &S) -> String {
let mut buf = Vec::new();
{
let cfg = cbindgen::Config::default();
let mut sw = SourceWriter::new(&mut buf, &cfg);
s.write(&cfg, &mut sw);
}
String::from_utf8(buf).unwrap().replace("str", "char")
}
/// Hacky method to take a `&self` or `self` function argument and produce
/// something compatible with `extern "C"` method. Since we can't use `self`,
/// we coerce this to a pointer, and call the arg `wrapped_self`.
fn convert_self_type(arg: &syn::FnArg, base: &Option<syn::Ident>) -> syn::FnArg {
let base = base.clone().expect("Cannot convert `self` arg without provided base name.
Try: `#[swiggen(Foo)]` in macro");
let mut arg = arg.clone().into_token_stream().to_string();
arg = if arg.starts_with('&') {
arg.replace("&", "*const ")
} else {
"*mut ".to_string() + &arg
};
arg = format!("wrapped_self: {}", arg.replace("self", &base.to_string()));
syn::parse_str(&arg).unwrap()
}
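// Sketch of the resulting signatures, assuming `base = Some(Foo)` for a
// hypothetical type `Foo` (illustrative, not from the original source):
//
//     `&self` -> `wrapped_self: *const Foo`
//     `self`  -> `wrapped_self: *mut Foo`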
/// For inputs, if the type is a primitive (as defined by cbindgen), we don't
/// do anything. Otherwise, assume we will take it in as a pointer.
fn convert_arg_type(syn::ArgCaptured { ref pat, ref ty, .. }: &syn::ArgCaptured) -> syn::FnArg {
if ty.clone().into_token_stream().to_string().ends_with("str") {
parse_quote!(#pat: *const c_char)
} else {
if needs_ref(ty) {
parse_quote!(#pat: *const #ty)
} else {
parse_quote!(#pat: #ty)
}
}
}
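// Illustrative mappings (a sketch; `Config` stands for any non-primitive
// path type and is not a name from the original source):
//
//     `x: u32`      -> `x: u32`               (primitive, unchanged)
//     `name: &str`  -> `name: *const c_char`
//     `cfg: Config` -> `cfg: *const Config`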
/// Similar to above, make sure that we return primitives when
/// recognised
fn convert_ret_type(rty: &syn::ReturnType, base: &Option<syn::Ident>) -> syn::ReturnType {
match rty {
syn::ReturnType::Default => syn::ReturnType::Default,
syn::ReturnType::Type(_, ty) => {
if needs_ref(ty) {
if ty.clone().into_token_stream().to_string() == "Self" {
let base = base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro");
parse_quote!(-> *mut #base)
} else if ty.clone().into_token_stream().to_string() == "String" {
parse_quote!(-> *mut c_char)
} else {
parse_quote!(-> *mut #ty)
}
} else {
parse_quote!(-> #ty)
}
}
}
}
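// Illustrative mappings (a sketch; `Foo`/`Config` are hypothetical types):
//
//     `-> u32`    -> `-> u32`            (primitive, unchanged)
//     `-> String` -> `-> *mut c_char`
//     `-> Self`   -> `-> *mut Foo`       (with `base = Some(Foo)`)
//     `-> Config` -> `-> *mut Config`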
/// For paths, assume we can convert to an opaque pointer.
fn needs_ref(ty: &syn::Type) -> bool {
match ty::Type::load(ty) {
Ok(Some(ty::Type::Primitive(_))) => false,
        Ok(Some(ty::Type::Path(_))) => true,
_ => false,
}
}
impl<'a> AsExtern for InternalFn<'a> {
fn as_extern(&self) -> TokenStream {
// Messy blob of code to convert function name, arguments, types,
// return type and generate appropriate code.
// Should be extracted out into smaller functions.
let name = &self.fn_def.ident;
let ext_name = swig_fn(&name, "ffi");
let mut args = Vec::<TokenStream>::new();
let mut caller = Vec::<syn::Ident>::new();
let mut caller_ref = Vec::<TokenStream>::new();
self.fn_def.decl.inputs.iter().for_each(|ref arg| {
match arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
// For self methods, we do some extra work to wrap the
// function so that `impl Foo { fn bar(&self); }`
// becomes `Foo_bar(wrapped_self: *const Foo)`.
let wrapped_self = convert_self_type(&arg, self.base);
args.push(wrapped_self.into_token_stream());
let ws = syn::Ident::new("wrapped_self", Span::call_site());
caller.push(ws.clone());
caller_ref.push(quote!{@ref #ws});
}
syn::FnArg::Captured(ref ac) => {
let id = match &ac.pat {
syn::Pat::Ident(pi) => {
&pi.ident
},
_ => unimplemented!(),
};
args.push(convert_arg_type(ac).into_token_stream());
caller.push(id.clone());
// this later calls the appropriate macro function as to
// whether we need to do some pointer/box stuff
if ac.ty.clone().into_token_stream().to_string().ends_with("str") {
caller_ref.push(quote!{@str #id});
} else if let syn::Type::Reference(_) = ac.ty {
caller_ref.push(quote!{@ref #id});
} else {
caller_ref.push(quote!{@prim #id});
}
},
_ => ()
}
});
let base = self.base;
let out = convert_ret_type(&self.fn_def.decl.output, self.base);
// Similar to the above, this later calls the appropriate macro function
// as to whether we need to do some pointer/box stuff
let res_ref = if let syn::ReturnType::Type(_, ref ty) = self.fn_def.decl.output {
if ty.clone().into_token_stream().to_string() == "String" {
quote!{@str res}
} else if needs_ref(&ty) {
quote!{res}
} else {
quote!{@prim res}
}
} else {
quote!{@prim res}
};
        // Generate the function. We also inject some macro
        // definitions to help with converting pointers into types and types
        // into pointers.
let tokens = quote! {
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn #ext_name(#(#args),*) #out {
#(ffi_ref!(#caller_ref);)*
let res = #base::#name(#(#caller),*);
box_ptr!(#res_ref)
}
};
tokens
}
}
/// Helper function to define the exported/mangled names.
fn swig_fn(name: &syn::Ident, fn_name: &str) -> syn::Ident {
syn::Ident::new(&format!("{}{}_{}", SwigTag::SwigInject, fn_name, name), Span::call_site())
}
fn swig_free(name: &syn::Ident) -> syn::Ident {
swig_fn(name, "free")
}
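// Naming sketch (assuming `SwigTag::SwigInject` renders as `__SWIG_INJECT_`,
// as the doc-comment examples below suggest; illustrative only):
//
//     swig_fn(Foo, "ffi")  -> __SWIG_INJECT_ffi_Foo
//     swig_free(Foo)       -> __SWIG_INJECT_free_Foo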
impl ToSwig for syn::DeriveInput {
fn to_swig(&self) -> String {
        // Generate the SWIG wrapper code as a string.
        // Basically, a class for the Rust struct `Foo` is just a wrapper
        // class called `Foo` which contains a pointer to the actual Rust
        // object.
// prefix with tag
let mut swigged = SwigTag::CodeStart.to_string();
let mut swigged_h = SwigTag::HdrStart.to_string();
let name = &self.ident;
match &self.data {
syn::Data::Struct(ref _ds) => {
// simple wrapper definition to wrap opaque pointer.
// methods get added elsewhere
swigged.push_str(&format!("\
// Wrapper for Rust class {name}
class {name} {{
public:
ffi::{name} *self;
{name}(ffi::{name} *ptr) {{
self = ptr;
}};
~{name}(){{
ffi::{free_name}(self);
self = NULL;
}};
", name=name, free_name=swig_free(&name))
);
swigged_h.push_str(&format!("\
// Wrapper for Rust class {name}
class {name} {{
ffi::{name} *self;
public:
~{name}();
", name=name)
);
// pull out any derive implementations we want to wrap
// TODO: do this in a less ad-hoc way
get_derives(&self.attrs).iter().for_each(|w| {
match w.as_str() {
"Default" => {
swigged.push_str(&format!(
"{name}() {{ self = {def_name}(); }};\n",
name=name, def_name=swig_fn(&name, "default")
));
swigged_h.push_str(&format!("{}();\n",name));
},
_ => (),
}
});
swigged.push_str("};\n");
swigged_h.push_str("};\n");
},
_ => unimplemented!(),
}
swigged.push_str(&SwigTag::CodeEnd.to_str());
swigged_h.push_str(&SwigTag::HdrEnd.to_str());
swigged.push_str(&swigged_h);
swigged
}
}
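// Illustrative output (a sketch, not from the original source): for a
// hypothetical `struct Foo`, the generated SWIG class body looks roughly like:
//
//     // Wrapper for Rust class Foo
//     class Foo {
//       public:
//         ffi::Foo *self;
//         Foo(ffi::Foo *ptr) { self = ptr; };
//         ~Foo(){ ffi::__SWIG_INJECT_free_Foo(self); self = NULL; };
//     };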
impl<'a> ToSwig for InternalFn<'a> {
fn to_swig(&self) -> String {
// Generate SWIG wrapper for methods.
// Main complication is making sure that namespaces are correct since
// we are basically overwriting names.
// Also a bit of magic to take an impl method, and add it back into
// being a class method.
// prefix with tag
let mut swigged = SwigTag::CodeStart.to_string();
let mut swigged_h = SwigTag::HdrStart.to_string();
let name = &self.fn_def.ident;
let cb_fn = cbindgen::ir::Function::load(name.to_string(),
&self.fn_def.decl,
true,
&[],
&None).unwrap();
let mut args = String::new();
let mut caller = String::new();
// Convert function arguments
cb_fn.args.iter().for_each(|arg| {
if args.len() > 0 {
args += ", ";
}
if caller.len() > 0 {
caller += ", ";
}
if arg.0 == "self" {
caller += "$self->self";
} else {
args += &cbindgen_write(arg);
caller += &arg.0;
}
});
// Convert return type
let mut out = cbindgen_write(&cb_fn.ret);
if out == "Self" {
out = self.base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro").to_string();
} else if out == "String" {
out = "char *".to_string()
}
let mut ret_out = out.clone();
// Convert function name.
let name = if name.to_string() == "new" {
// Custom format for new functions
ret_out = "".to_string();
out = "new PKG_NAME::".to_string() + &out;
self.base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro").to_string()
} else {
name.to_string()
};
// Get the mangled name exported by Rust
let ext_name = swig_fn(&self.fn_def.ident, "ffi");
// The following code generates the function definitions and the header
// Code needed for SWIG to generate bindings.
if self.base.is_none() {
swigged.push_str(&format!("\
{ret_out} {name}({args}) {{
return ({out})(ffi::{ext_name}({caller}));
}}"
, name=name, ext_name=ext_name, out=out, ret_out=ret_out, args=args, caller=caller));
}
if let Some(base) = self.base {
// Note the %extend is used by SWIG to make this a class method for
// `base`.
swigged_h.push_str(&format!("
%extend {base_name} {{
{ret_out} {name}({args}) {{
return ({out})(ffi::{ext_name}({caller}));
}}
}};\n"
,name=name, ext_name=ext_name, base_name=base, ret_out=ret_out, out=out, args=args, caller=caller));
} else {
swigged_h.push_str(&format!("\
{out} {name}({args});"
, name=name, out=out, args=args));
}
swigged.push_str(&SwigTag::CodeEnd.to_str());
swigged_h.push_str(&SwigTag::HdrEnd.to_str());
swigged.push_str(&swigged_h);
swigged
}
}
/// Generate extern and SWIG code for a `#[derive(Swig)]` annotated item.
pub fn impl_extern_it(ast: &syn::DeriveInput) -> TokenStream {
let comment = ast.to_swig();
let comment = format!("#[doc=\"{}\"] #[allow(non_camel_case_types)] struct {}{};", comment, SwigTag::SwigInject, ast.ident);
let doc_comment: syn::ItemStruct = syn::parse_str(&comment).expect("failed to generate SWIG code correctly");
let mut tokens: TokenStream = doc_comment.into_token_stream();
tokens.append_all(ast.as_extern().into_iter());
tokens
}
/// Generate extern and SWIG code for a `#[swiggen]` annotated method.
pub fn impl_extern_fn(base_name: &Option<syn::Ident>, ast: &syn::ItemFn) -> TokenStream {
let ifn = InternalFn {
base: base_name,
fn_def: ast,
};
let tok = ifn.as_extern();
let comment = ifn.to_swig();
let hidden = swig_fn(&ast.ident, "hidden_ffi");
quote! {
#[allow(non_snake_case)]
#[doc=#comment]
fn #hidden(){}
#tok
}
}
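// Expansion sketch (illustrative, not from the original source): for
// `#[swiggen(Foo)]` applied to `fn bar(&self) -> u32`, this produces roughly:
//
//     #[allow(non_snake_case)]
//     #[doc = "...generated SWIG text..."]
//     fn __SWIG_INJECT_hidden_ffi_bar() {}
//
//     #[allow(non_snake_case)]
//     #[no_mangle]
//     pub extern "C" fn __SWIG_INJECT_ffi_bar(wrapped_self: *const Foo) -> u32 {
//         ffi_ref!(@ref wrapped_self);
//         let res = Foo::bar(wrapped_self);
//         box_ptr!(@prim res)
//     }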
/// Write the swig code (injected via doc comments) into `swig.i`.
/// This parses expanded Rust code, and writes the SWIG code to a file.
pub fn gen_swig(pkg_name: &str, src: &str) {
let mut tmp_file = File::create("swig.i").unwrap();
tmp_file.write_all(format!("\
%module {name}
#define PKG_NAME {name}
%include <std_vector.i>
%include <stdint.i>
%include <std_string.i>
%typemap(newfree) char * \"free_string($1);\";
%{{
namespace ffi {{
#include \"bindings.h\"
}}
using namespace ffi;
namespace {name} {{
", name=pkg_name).as_bytes()).unwrap();
let syntax = syn::parse_file(&src).expect("Unable to parse file");
trace!("Syntax: {:#?}", syntax);
let mut hdr = String::new();
// SWIG code is inside doc comments:
// #[doc = "<swig code here>"]
// struct __SWIG_INJECT_Foo;
//
// So we extract this out.
syntax.items.iter().flat_map(|i| {
// Extract out all of the attributes which are attached to structs/functions
// starting with "__SWIG_INJECT"
match i {
syn::Item::Impl(ii) => {
ii.items.iter().fold(Vec::new(), |mut acc, ref ii| {
match ii {
syn::ImplItem::Method(iim) => { | }
acc
},
_ => Vec::new(),
}
})
},
            syn::Item::Struct(syn::ItemStruct { attrs, ident, .. }) |
            syn::Item::Fn(syn::ItemFn { attrs, ident, .. }) => {
if ident.to_string().starts_with(SwigTag::SwigInject.to_str()) {
debug!("{:#?}", | debug!("{:#?}", iim);
if iim.sig.ident.to_string().starts_with(SwigTag::SwigInject.to_str()) {
acc.extend_from_slice(&iim.attrs[..]); | random_line_split |
lib.rs | arg: *mut #name) {
unsafe {
assert!(!arg.is_null());
&*arg;
}
}
};
let default_name = swig_fn(&name, "default");
        // TODO: Add more derive capabilities
        // Extracting the derived methods from `#[swig_derive(...)]`.
        // We need to emit the SWIG code here ourselves, since we can't
        // attach the `#[swiggen(Foo)]` attribute to the derived methods.
let derivs = get_derives(&self.attrs);
let new_toks = derivs.iter().filter_map(|w| {
match w.as_str() {
"Default" => {
Some(quote! {
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn #default_name() -> *mut #name {
Box::into_raw(Box::new(#name::default()))
}
})
},
_ => None
}
});
tokens.append_all(new_toks);
tokens
}
}
/// A method definition inside an impl block has an additional
/// `base` variable corresponding to the name of the type.
struct InternalFn<'a> {
base: &'a Option<syn::Ident>,
fn_def: &'a syn::ItemFn,
}
/// Convenience method to use cbindgen to convert types into C-compat types.
/// e.g. "input: u32" -> `cbindgen_write((input, u32))` might output `uint32 input`.
fn cbindgen_write<S: Source>(s: &S) -> String {
let mut buf = Vec::new();
{
let cfg = cbindgen::Config::default();
let mut sw = SourceWriter::new(&mut buf, &cfg);
s.write(&cfg, &mut sw);
}
String::from_utf8(buf).unwrap().replace("str", "char")
}
/// Hacky method to take a `&self` or `self` function argument and produce
/// something compatible with `extern "C"` method. Since we can't use `self`,
/// we coerce this to a pointer, and call the arg `wrapped_self`.
fn convert_self_type(arg: &syn::FnArg, base: &Option<syn::Ident>) -> syn::FnArg {
let base = base.clone().expect("Cannot convert `self` arg without provided base name.
Try: `#[swiggen(Foo)]` in macro");
let mut arg = arg.clone().into_token_stream().to_string();
arg = if arg.starts_with('&') {
arg.replace("&", "*const ")
} else {
"*mut ".to_string() + &arg
};
arg = format!("wrapped_self: {}", arg.replace("self", &base.to_string()));
syn::parse_str(&arg).unwrap()
}
/// For inputs, if the type is a primitive (as defined by cbindgen), we don't
/// do anything. Otherwise, assume we will take it in as a pointer.
fn convert_arg_type(syn::ArgCaptured { ref pat, ref ty, .. }: &syn::ArgCaptured) -> syn::FnArg {
if ty.clone().into_token_stream().to_string().ends_with("str") {
parse_quote!(#pat: *const c_char)
} else {
if needs_ref(ty) {
parse_quote!(#pat: *const #ty)
} else {
parse_quote!(#pat: #ty)
}
}
}
/// Similar to above, make sure that we return primitives when
/// recognised
fn convert_ret_type(rty: &syn::ReturnType, base: &Option<syn::Ident>) -> syn::ReturnType {
match rty {
syn::ReturnType::Default => syn::ReturnType::Default,
syn::ReturnType::Type(_, ty) => {
if needs_ref(ty) {
if ty.clone().into_token_stream().to_string() == "Self" {
let base = base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro");
parse_quote!(-> *mut #base)
} else if ty.clone().into_token_stream().to_string() == "String" {
parse_quote!(-> *mut c_char)
} else {
parse_quote!(-> *mut #ty)
}
} else {
parse_quote!(-> #ty)
}
}
}
}
/// For paths, assume we can convert to an opaque pointer.
fn needs_ref(ty: &syn::Type) -> bool {
match ty::Type::load(ty) {
Ok(Some(ty::Type::Primitive(_))) => false,
        Ok(Some(ty::Type::Path(_))) => true,
_ => false,
}
}
impl<'a> AsExtern for InternalFn<'a> {
fn as_extern(&self) -> TokenStream {
// Messy blob of code to convert function name, arguments, types,
// return type and generate appropriate code.
// Should be extracted out into smaller functions.
let name = &self.fn_def.ident;
let ext_name = swig_fn(&name, "ffi");
let mut args = Vec::<TokenStream>::new();
let mut caller = Vec::<syn::Ident>::new();
let mut caller_ref = Vec::<TokenStream>::new();
self.fn_def.decl.inputs.iter().for_each(|ref arg| {
match arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
// For self methods, we do some extra work to wrap the
// function so that `impl Foo { fn bar(&self); }`
// becomes `Foo_bar(wrapped_self: *const Foo)`.
let wrapped_self = convert_self_type(&arg, self.base);
args.push(wrapped_self.into_token_stream());
let ws = syn::Ident::new("wrapped_self", Span::call_site());
caller.push(ws.clone());
caller_ref.push(quote!{@ref #ws});
}
syn::FnArg::Captured(ref ac) => {
let id = match &ac.pat {
syn::Pat::Ident(pi) => {
&pi.ident
},
_ => unimplemented!(),
};
args.push(convert_arg_type(ac).into_token_stream());
caller.push(id.clone());
// this later calls the appropriate macro function as to
// whether we need to do some pointer/box stuff
if ac.ty.clone().into_token_stream().to_string().ends_with("str") {
caller_ref.push(quote!{@str #id});
} else if let syn::Type::Reference(_) = ac.ty {
caller_ref.push(quote!{@ref #id});
} else {
caller_ref.push(quote!{@prim #id});
}
},
_ => ()
}
});
let base = self.base;
let out = convert_ret_type(&self.fn_def.decl.output, self.base);
// Similar to the above, this later calls the appropriate macro function
// as to whether we need to do some pointer/box stuff
let res_ref = if let syn::ReturnType::Type(_, ref ty) = self.fn_def.decl.output {
if ty.clone().into_token_stream().to_string() == "String" {
quote!{@str res}
} else if needs_ref(&ty) {
quote!{res}
} else {
quote!{@prim res}
}
} else {
quote!{@prim res}
};
        // Generate the function. We also inject some macro
        // definitions to help with converting pointers into types and types
        // into pointers.
let tokens = quote! {
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn #ext_name(#(#args),*) #out {
#(ffi_ref!(#caller_ref);)*
let res = #base::#name(#(#caller),*);
box_ptr!(#res_ref)
}
};
tokens
}
}
/// Helper function to define the exported/mangled names.
fn swig_fn(name: &syn::Ident, fn_name: &str) -> syn::Ident {
syn::Ident::new(&format!("{}{}_{}", SwigTag::SwigInject, fn_name, name), Span::call_site())
}
fn swig_free(name: &syn::Ident) -> syn::Ident {
swig_fn(name, "free")
}
impl ToSwig for syn::DeriveInput {
fn to_swig(&self) -> String {
        // Generate the SWIG wrapper code as a string.
        // Basically, a class for the Rust struct `Foo` is just a wrapper
        // class called `Foo` which contains a pointer to the actual Rust
        // object.
// prefix with tag
let mut swigged = SwigTag::CodeStart.to_string();
let mut swigged_h = SwigTag::HdrStart.to_string();
let name = &self.ident;
match &self.data {
syn::Data::Struct(ref _ds) => {
// simple wrapper definition to wrap opaque pointer.
// methods get added elsewhere
swigged.push_str(&format!("\
// Wrapper for Rust class {name}
class {name} {{
public:
ffi::{name} *self;
{name}(ffi::{name} *ptr) {{
self = ptr;
}};
~{name}(){{
ffi::{free_name}(self);
self = NULL;
}};
", name=name, free_name=swig_free(&name))
);
swigged_h.push_str(&format!("\
// Wrapper for Rust class {name}
class {name} {{
ffi::{name} *self;
public:
~{name}();
", name=name)
);
// pull out any derive implementations we want to wrap
// TODO: do this in a less ad-hoc way
get_derives(&self.attrs).iter().for_each(|w| {
match w.as_str() {
"Default" => {
swigged.push_str(&format!(
"{name}() {{ self = {def_name}(); }};\n",
name=name, def_name=swig_fn(&name, "default")
));
swigged_h.push_str(&format!("{}();\n",name));
},
_ => (),
}
});
swigged.push_str("};\n");
swigged_h.push_str("};\n");
},
_ => unimplemented!(),
}
swigged.push_str(&SwigTag::CodeEnd.to_str());
swigged_h.push_str(&SwigTag::HdrEnd.to_str());
swigged.push_str(&swigged_h);
swigged
}
}
impl<'a> ToSwig for InternalFn<'a> {
fn to_swig(&self) -> String {
// Generate SWIG wrapper for methods.
// Main complication is making sure that namespaces are correct since
// we are basically overwriting names.
// Also a bit of magic to take an impl method, and add it back into
// being a class method.
// prefix with tag
let mut swigged = SwigTag::CodeStart.to_string();
let mut swigged_h = SwigTag::HdrStart.to_string();
let name = &self.fn_def.ident;
let cb_fn = cbindgen::ir::Function::load(name.to_string(),
&self.fn_def.decl,
true,
&[],
&None).unwrap();
let mut args = String::new();
let mut caller = String::new();
// Convert function arguments
cb_fn.args.iter().for_each(|arg| {
if args.len() > 0 {
args += ", ";
}
if caller.len() > 0 {
caller += ", ";
}
if arg.0 == "self" {
caller += "$self->self";
} else {
args += &cbindgen_write(arg);
caller += &arg.0;
}
});
// Convert return type
let mut out = cbindgen_write(&cb_fn.ret);
if out == "Self" {
out = self.base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro").to_string();
} else if out == "String" {
out = "char *".to_string()
}
let mut ret_out = out.clone();
// Convert function name.
let name = if name.to_string() == "new" {
// Custom format for new functions
ret_out = "".to_string();
out = "new PKG_NAME::".to_string() + &out;
self.base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro").to_string()
} else {
name.to_string()
};
// Get the mangled name exported by Rust
let ext_name = swig_fn(&self.fn_def.ident, "ffi");
// The following code generates the function definitions and the header
// Code needed for SWIG to generate bindings.
if self.base.is_none() {
swigged.push_str(&format!("\
{ret_out} {name}({args}) {{
return ({out})(ffi::{ext_name}({caller}));
}}"
, name=name, ext_name=ext_name, out=out, ret_out=ret_out, args=args, caller=caller));
}
if let Some(base) = self.base {
// Note the %extend is used by SWIG to make this a class method for
// `base`.
swigged_h.push_str(&format!("
%extend {base_name} {{
{ret_out} {name}({args}) {{
return ({out})(ffi::{ext_name}({caller}));
}}
}};\n"
,name=name, ext_name=ext_name, base_name=base, ret_out=ret_out, out=out, args=args, caller=caller));
} else {
swigged_h.push_str(&format!("\
{out} {name}({args});"
, name=name, out=out, args=args));
}
swigged.push_str(&SwigTag::CodeEnd.to_str());
swigged_h.push_str(&SwigTag::HdrEnd.to_str());
swigged.push_str(&swigged_h);
swigged
}
}
/// Generate extern and SWIG code for a `#[derive(Swig)]` annotated item.
pub fn impl_extern_it(ast: &syn::DeriveInput) -> TokenStream {
let comment = ast.to_swig();
let comment = format!("#[doc=\"{}\"] #[allow(non_camel_case_types)] struct {}{};", comment, SwigTag::SwigInject, ast.ident);
let doc_comment: syn::ItemStruct = syn::parse_str(&comment).expect("failed to generate SWIG code correctly");
let mut tokens: TokenStream = doc_comment.into_token_stream();
tokens.append_all(ast.as_extern().into_iter());
tokens
}
/// Generate extern and SWIG code for a `#[swiggen]` annotated method.
pub fn impl_extern_fn(base_name: &Option<syn::Ident>, ast: &syn::ItemFn) -> TokenStream |
/// Write the swig code (injected via doc comments) into `swig.i`.
/// This parses expanded Rust code, and writes the SWIG code to a file.
pub fn gen_swig(pkg_name: &str, src: &str) {
let mut tmp_file = File::create("swig.i").unwrap();
tmp_file.write_all(format!("\
%module {name}
#define PKG_NAME {name}
%include <std_vector.i>
%include <stdint.i>
%include <std_string.i>
%typemap(newfree) char * \"free_string($1);\";
%{{
namespace ffi {{
#include \"bindings.h\"
}}
using namespace ffi;
namespace {name} {{
", name=pkg_name).as_bytes()).unwrap();
let syntax = syn::parse_file(&src).expect("Unable to parse file");
trace!("Syntax: {:#?}", syntax);
let mut hdr = String::new();
// SWIG code is inside doc comments:
// #[doc = "<swig code here>"]
// struct __SWIG_INJECT_Foo;
//
// So we extract this out.
syntax.items.iter().flat_map(|i| {
// Extract out all of the attributes which are attached to structs/functions
// starting with "__SWIG_INJECT"
match i {
syn::Item::Impl(ii) => {
ii.items.iter().fold(Vec::new(), |mut acc, ref ii| {
match ii {
syn::ImplItem::Method(iim) => {
debug!("{:#?}", iim);
if iim.sig.ident.to_string().starts_with(SwigTag::SwigInject.to_str()) {
acc.extend_from_slice(&iim.attrs[..]);
}
acc
},
_ => Vec::new(),
}
})
},
            syn::Item::Struct(syn::ItemStruct { attrs, ident, .. }) |
            syn::Item::Fn(syn::ItemFn { attrs, ident, .. }) => {
if ident.to_string().starts_with(SwigTag::SwigInject.to_str()) {
debug!("{ | {
let ifn = InternalFn {
base: base_name,
fn_def: ast,
};
let tok = ifn.as_extern();
let comment = ifn.to_swig();
let hidden = swig_fn(&ast.ident, "hidden_ffi");
quote! {
#[allow(non_snake_case)]
#[doc=#comment]
fn #hidden(){}
#tok
}
} | identifier_body |
lib.rs | #ty)
}
}
}
/// Similar to above, make sure that we return primitives when
/// recognised
fn convert_ret_type(rty: &syn::ReturnType, base: &Option<syn::Ident>) -> syn::ReturnType {
match rty {
syn::ReturnType::Default => syn::ReturnType::Default,
syn::ReturnType::Type(_, ty) => {
if needs_ref(ty) {
if ty.clone().into_token_stream().to_string() == "Self" {
let base = base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro");
parse_quote!(-> *mut #base)
} else if ty.clone().into_token_stream().to_string() == "String" {
parse_quote!(-> *mut c_char)
} else {
parse_quote!(-> *mut #ty)
}
} else {
parse_quote!(-> #ty)
}
}
}
}
/// For paths, assume we can convert to an opaque pointer.
fn needs_ref(ty: &syn::Type) -> bool {
match ty::Type::load(ty) {
Ok(Some(ty::Type::Primitive(_))) => false,
        Ok(Some(ty::Type::Path(_))) => true,
_ => false,
}
}
impl<'a> AsExtern for InternalFn<'a> {
fn as_extern(&self) -> TokenStream {
// Messy blob of code to convert function name, arguments, types,
// return type and generate appropriate code.
// Should be extracted out into smaller functions.
let name = &self.fn_def.ident;
let ext_name = swig_fn(&name, "ffi");
let mut args = Vec::<TokenStream>::new();
let mut caller = Vec::<syn::Ident>::new();
let mut caller_ref = Vec::<TokenStream>::new();
self.fn_def.decl.inputs.iter().for_each(|ref arg| {
match arg {
syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => {
// For self methods, we do some extra work to wrap the
// function so that `impl Foo { fn bar(&self); }`
// becomes `Foo_bar(wrapped_self: *const Foo)`.
let wrapped_self = convert_self_type(&arg, self.base);
args.push(wrapped_self.into_token_stream());
let ws = syn::Ident::new("wrapped_self", Span::call_site());
caller.push(ws.clone());
caller_ref.push(quote!{@ref #ws});
}
syn::FnArg::Captured(ref ac) => {
let id = match &ac.pat {
syn::Pat::Ident(pi) => {
&pi.ident
},
_ => unimplemented!(),
};
args.push(convert_arg_type(ac).into_token_stream());
caller.push(id.clone());
// this later calls the appropriate macro function as to
// whether we need to do some pointer/box stuff
if ac.ty.clone().into_token_stream().to_string().ends_with("str") {
caller_ref.push(quote!{@str #id});
} else if let syn::Type::Reference(_) = ac.ty {
caller_ref.push(quote!{@ref #id});
} else {
caller_ref.push(quote!{@prim #id});
}
},
_ => ()
}
});
let base = self.base;
let out = convert_ret_type(&self.fn_def.decl.output, self.base);
// Similar to the above, this later calls the appropriate macro function
// as to whether we need to do some pointer/box stuff
let res_ref = if let syn::ReturnType::Type(_, ref ty) = self.fn_def.decl.output {
if ty.clone().into_token_stream().to_string() == "String" {
quote!{@str res}
} else if needs_ref(&ty) {
quote!{res}
} else {
quote!{@prim res}
}
} else {
quote!{@prim res}
};
        // Generate the function. We also inject some macro
        // definitions to help with converting pointers into types and types
        // into pointers.
let tokens = quote! {
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn #ext_name(#(#args),*) #out {
#(ffi_ref!(#caller_ref);)*
let res = #base::#name(#(#caller),*);
box_ptr!(#res_ref)
}
};
tokens
}
}
/// Helper function to define the exported/mangled names.
fn swig_fn(name: &syn::Ident, fn_name: &str) -> syn::Ident {
syn::Ident::new(&format!("{}{}_{}", SwigTag::SwigInject, fn_name, name), Span::call_site())
}
fn swig_free(name: &syn::Ident) -> syn::Ident {
swig_fn(name, "free")
}
impl ToSwig for syn::DeriveInput {
fn to_swig(&self) -> String {
        // Generate the SWIG wrapper code as a string.
        // Basically, a class for the Rust struct `Foo` is just a wrapper
        // class called `Foo` which contains a pointer to the actual Rust
        // object.
// prefix with tag
let mut swigged = SwigTag::CodeStart.to_string();
let mut swigged_h = SwigTag::HdrStart.to_string();
let name = &self.ident;
match &self.data {
syn::Data::Struct(ref _ds) => {
// simple wrapper definition to wrap opaque pointer.
// methods get added elsewhere
swigged.push_str(&format!("\
// Wrapper for Rust class {name}
class {name} {{
public:
ffi::{name} *self;
{name}(ffi::{name} *ptr) {{
self = ptr;
}};
~{name}(){{
ffi::{free_name}(self);
self = NULL;
}};
", name=name, free_name=swig_free(&name))
);
swigged_h.push_str(&format!("\
// Wrapper for Rust class {name}
class {name} {{
ffi::{name} *self;
public:
~{name}();
", name=name)
);
// pull out any derive implementations we want to wrap
// TODO: do this in a less ad-hoc way
get_derives(&self.attrs).iter().for_each(|w| {
match w.as_str() {
"Default" => {
swigged.push_str(&format!(
"{name}() {{ self = {def_name}(); }};\n",
name=name, def_name=swig_fn(&name, "default")
));
swigged_h.push_str(&format!("{}();\n",name));
},
_ => (),
}
});
swigged.push_str("};\n");
swigged_h.push_str("};\n");
},
_ => unimplemented!(),
}
swigged.push_str(&SwigTag::CodeEnd.to_str());
swigged_h.push_str(&SwigTag::HdrEnd.to_str());
swigged.push_str(&swigged_h);
swigged
}
}
impl<'a> ToSwig for InternalFn<'a> {
fn to_swig(&self) -> String {
// Generate SWIG wrapper for methods.
// Main complication is making sure that namespaces are correct since
// we are basically overwriting names.
// Also a bit of magic to take an impl method, and add it back into
// being a class method.
// prefix with tag
let mut swigged = SwigTag::CodeStart.to_string();
let mut swigged_h = SwigTag::HdrStart.to_string();
let name = &self.fn_def.ident;
let cb_fn = cbindgen::ir::Function::load(name.to_string(),
&self.fn_def.decl,
true,
&[],
&None).unwrap();
let mut args = String::new();
let mut caller = String::new();
// Convert function arguments
cb_fn.args.iter().for_each(|arg| {
if args.len() > 0 {
args += ", ";
}
if caller.len() > 0 {
caller += ", ";
}
if arg.0 == "self" {
caller += "$self->self";
} else {
args += &cbindgen_write(arg);
caller += &arg.0;
}
});
// Convert return type
let mut out = cbindgen_write(&cb_fn.ret);
if out == "Self" {
out = self.base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro").to_string();
} else if out == "String" {
out = "char *".to_string()
}
let mut ret_out = out.clone();
// Convert function name.
let name = if name.to_string() == "new" {
// Custom format for new functions
ret_out = "".to_string();
out = "new PKG_NAME::".to_string() + &out;
self.base.clone().expect("Cannot convert `Self` return type without provided base name.
Try: `#[swiggen(Foo)]` in macro").to_string()
} else {
name.to_string()
};
// Get the mangled name exported by Rust
let ext_name = swig_fn(&self.fn_def.ident, "ffi");
// The following code generates the function definitions and the header
// Code needed for SWIG to generate bindings.
if self.base.is_none() {
swigged.push_str(&format!("\
{ret_out} {name}({args}) {{
return ({out})(ffi::{ext_name}({caller}));
}}"
, name=name, ext_name=ext_name, out=out, ret_out=ret_out, args=args, caller=caller));
}
if let Some(base) = self.base {
// Note the %extend is used by SWIG to make this a class method for
// `base`.
swigged_h.push_str(&format!("
%extend {base_name} {{
{ret_out} {name}({args}) {{
return ({out})(ffi::{ext_name}({caller}));
}}
}};\n"
,name=name, ext_name=ext_name, base_name=base, ret_out=ret_out, out=out, args=args, caller=caller));
} else {
swigged_h.push_str(&format!("\
{out} {name}({args});"
, name=name, out=out, args=args));
}
swigged.push_str(&SwigTag::CodeEnd.to_str());
swigged_h.push_str(&SwigTag::HdrEnd.to_str());
swigged.push_str(&swigged_h);
swigged
}
}
/// Generate extern and SWIG code for a `#[derive(Swig)]` annotated item.
pub fn impl_extern_it(ast: &syn::DeriveInput) -> TokenStream {
let comment = ast.to_swig();
let comment = format!("#[doc=\"{}\"] #[allow(non_camel_case_types)] struct {}{};", comment, SwigTag::SwigInject, ast.ident);
let doc_comment: syn::ItemStruct = syn::parse_str(&comment).expect("failed to generate SWIG code correctly");
let mut tokens: TokenStream = doc_comment.into_token_stream();
tokens.append_all(ast.as_extern().into_iter());
tokens
}
/// Generate extern and SWIG code for a `#[swiggen]` annotated method.
pub fn impl_extern_fn(base_name: &Option<syn::Ident>, ast: &syn::ItemFn) -> TokenStream {
let ifn = InternalFn {
base: base_name,
fn_def: ast,
};
let tok = ifn.as_extern();
let comment = ifn.to_swig();
let hidden = swig_fn(&ast.ident, "hidden_ffi");
quote! {
#[allow(non_snake_case)]
#[doc=#comment]
fn #hidden(){}
#tok
}
}
/// Write the swig code (injected via doc comments) into `swig.i`.
/// This parses expanded Rust code, and writes the SWIG code to a file.
pub fn gen_swig(pkg_name: &str, src: &str) {
let mut tmp_file = File::create("swig.i").unwrap();
tmp_file.write_all(format!("\
%module {name}
#define PKG_NAME {name}
%include <std_vector.i>
%include <stdint.i>
%include <std_string.i>
%typemap(newfree) char * \"free_string($1);\";
%{{
namespace ffi {{
#include \"bindings.h\"
}}
using namespace ffi;
namespace {name} {{
", name=pkg_name).as_bytes()).unwrap();
let syntax = syn::parse_file(&src).expect("Unable to parse file");
trace!("Syntax: {:#?}", syntax);
let mut hdr = String::new();
// SWIG code is inside doc comments:
// #[doc = "<swig code here>"]
// struct __SWIG_INJECT_Foo;
//
// So we extract this out.
syntax.items.iter().flat_map(|i| {
// Extract out all of the attributes which are attached to structs/functions
// starting with "__SWIG_INJECT"
match i {
syn::Item::Impl(ii) => {
ii.items.iter().fold(Vec::new(), |mut acc, ref ii| {
match ii {
syn::ImplItem::Method(iim) => {
debug!("{:#?}", iim);
if iim.sig.ident.to_string().starts_with(SwigTag::SwigInject.to_str()) {
acc.extend_from_slice(&iim.attrs[..]);
}
acc
},
_ => Vec::new(),
}
})
},
            syn::Item::Struct(syn::ItemStruct { attrs, ident, .. }) |
            syn::Item::Fn(syn::ItemFn { attrs, ident, .. }) => {
if ident.to_string().starts_with(SwigTag::SwigInject.to_str()) {
debug!("{:#?}", attrs);
attrs.clone()
} else {
Vec::new()
}
},
_ => Vec::new()
}
}).for_each(|ref attr| {
match attr.interpret_meta() {
Some(syn::Meta::NameValue(ref mnv)) if &mnv.ident.to_string() == "doc" => {
// Extract out the doc comment for these attributes
if let syn::Lit::Str(ref ls) = mnv.lit {
let mut swig_class = ls.value().replace("\\n", "\n");
let prefix_offset = swig_class.find(SwigTag::CodeStart.to_str()).expect("no code prefix") + SwigTag::CodeStart.len();
let suffix_offset = swig_class.find(SwigTag::CodeEnd.to_str()).expect("no code suffix");
let final_class = &swig_class[prefix_offset..suffix_offset];
let prefix_offset = swig_class.find(SwigTag::HdrStart.to_str()).expect("no header prefix") + SwigTag::HdrStart.len();
let suffix_offset = swig_class.find(SwigTag::HdrEnd.to_str()).expect("no header suffix");
let final_hdr = &swig_class[prefix_offset..suffix_offset];
tmp_file.write_all(&final_class.replace("\\n", "\n").as_bytes()).unwrap();
hdr += &final_hdr.replace("\\n", "\n");
debug!("{}", final_hdr);
debug!("{}", final_class);
}
},
_ => ()
}
});
tmp_file.write_all(format!("\
}}
%}}
namespace {name} {{
{header}
}}
%ignore {inject};
%include \"bindings.h\";
", name=pkg_name, header=hdr, inject=SwigTag::SwigInject).as_bytes()).unwrap();
}
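// Shape of the generated `swig.i` (a sketch for a hypothetical crate named
// `mycrate`; not from the original source):
//
//     %module mycrate
//     #define PKG_NAME mycrate
//     %include <std_vector.i>
//     ...
//     %{
//     namespace ffi {
//     #include "bindings.h"
//     }
//     using namespace ffi;
//     namespace mycrate {
//         /* wrapper classes and function definitions extracted above */
//     }
//     %}
//     namespace mycrate {
//         /* header declarations accumulated in `hdr` */
//     }
//     %ignore __SWIG_INJECT_;
//     %include "bindings.h";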
/// Extract out any `derive(Foo)` attributes.
fn get_derives(attrs: &[syn::Attribute]) -> Vec<String> {
attrs.iter().filter_map(|a| a.interpret_meta())
.filter_map(|a| {
if let syn::Meta::List(ml) = a {
Some(ml)
} else {
None
}
}).filter(|ml| ml.ident.to_string() == "swig_derive")
.flat_map(|ml| ml.nested)
.filter_map(|nm| {
if let syn::NestedMeta::Meta(m) = nm {
if let syn::Meta::Word(w) = m {
Some(w.to_string())
} else {
None
}
} else {
None
}
}).collect()
}
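// Example (illustrative): `#[swig_derive(Default, Clone)]` on an item yields
// `vec!["Default".to_string(), "Clone".to_string()]`.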
/// Parse a Rust file to extract any extern "C" functions or
/// `#[swiggen]`-annotated methods and move these out of the impl block.
pub fn | split_out_externs | identifier_name |
|
park.rs | use std::{
cell::Cell,
sync::atomic::{AtomicBool, Ordering},
thread::{self, Thread},
};
use conquer_util::BackOff;
use crate::{
cell::{Block, Unblock},
state::{
AtomicOnceState, BlockedState,
OnceState::{Ready, Uninit, WouldBlock},
},
POISON_PANIC_MSG,
};
use self::internal::ParkThread;
#[cfg(any(test, feature = "std"))]
/// A type for lazy initialization of e.g. global static variables, which
/// provides the same functionality as the `lazy_static!` macro.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the API of the generic
/// [`Lazy`](crate::doc::Lazy) type.
///
/// # Examples
///
/// ```
/// use std::sync::Mutex;
///
/// # #[cfg(feature = "std")]
/// use conquer_once::Lazy;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Lazy;
///
/// static MUTEX: Lazy<Mutex<Vec<i32>>> = Lazy::new(Mutex::default);
///
/// let mut lock = MUTEX.lock().unwrap();
///
/// lock.push(1);
/// lock.push(2);
/// lock.push(3);
///
/// assert_eq!(lock.as_slice(), &[1, 2, 3]);
/// ```
///
/// The associated [`new`](crate::lazy::Lazy::new) function can be used with any
/// function or closure that implements `Fn() -> T`.
///
/// ```
/// use std::collections::HashMap;
///
/// # #[cfg(feature = "std")]
/// use conquer_once::Lazy;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Lazy;
///
/// static CAPITALS: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
/// let mut map = HashMap::new();
/// map.insert("Norway", "Oslo");
/// map.insert("Belgium", "Brussels");
/// map.insert("Latvia", "Riga");
/// map
/// });
///
/// assert_eq!(CAPITALS.get(&"Norway"), Some(&"Oslo"));
/// assert_eq!(CAPITALS.get(&"Belgium"), Some(&"Brussels"));
/// assert_eq!(CAPITALS.get(&"Latvia"), Some(&"Riga"));
/// ```
pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, ParkThread, F>;
#[cfg(any(test, feature = "std"))]
/// An interior mutability cell type which allows synchronized one-time
/// initialization and read-only access exclusively after initialization.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the generic
/// [`OnceCell`](crate::doc::OnceCell) type.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "std")]
/// use conquer_once::OnceCell;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::OnceCell;
///
/// #[derive(Copy, Clone)]
/// struct Configuration {
/// mode: i32,
/// threshold: u64,
/// msg: &'static str,
/// }
///
/// static CONFIG: OnceCell<Configuration> = OnceCell::uninit();
///
/// // producer thread
/// CONFIG.init_once(|| Configuration {
/// mode: 2,
/// threshold: 128,
/// msg: "..."
/// });
///
/// // consumer thread
/// let res = CONFIG.get().copied();
/// if let Some(config) = res {
/// assert_eq!(config.mode, 2);
/// assert_eq!(config.threshold, 128);
/// }
/// ```
pub type OnceCell<T> = crate::cell::OnceCell<T, ParkThread>;
#[cfg(any(test, feature = "std"))]
/// A synchronization primitive which can be used to run a one-time global
/// initialization.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the generic
/// [`OnceCell`](crate::doc::OnceCell) type.
/// This is a specialization with `T = ()`.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "std")]
/// use conquer_once::Once;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Once;
///
/// static mut GLOBAL: usize = 0;
/// static INIT: Once = Once::uninit();
///
/// fn get_global() -> usize {
/// // SAFETY: this is safe because the `Once` ensures the `static mut` is
/// // assigned by only one thread and without data races.
/// unsafe {
/// INIT.init_once(|| {
/// GLOBAL = expensive_computation();
/// });
/// # assert_eq!(GLOBAL, 1);
/// GLOBAL
/// }
/// }
///
/// fn expensive_computation() -> usize {
/// //...
/// # 1
/// }
/// ```
pub type Once = OnceCell<()>;
mod internal {
/// Blocking strategy using low-level and OS-reliant parking and un-parking
/// mechanisms.
#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct ParkThread;
}
impl ParkThread {
#[inline]
pub(crate) fn try_block_spinning(
state: &AtomicOnceState,
back_off: &BackOff,
) -> Result<(), BlockedState> {
loop {
// (wait:1) this acquire load syncs-with the release swaps (guard:2)
// and the acq-rel CAS (wait:2)
match state.load(Ordering::Acquire).expect(POISON_PANIC_MSG) {
Ready => return Ok(()),
WouldBlock(blocked) if back_off.advise_yield() => {
back_off.reset();
return Err(blocked);
}
_ => {}
}
back_off.spin();
}
}
}
impl Unblock for ParkThread {
/// Unblocks all blocked waiting threads.
#[inline]
unsafe fn on_unblock(state: BlockedState) {
let mut curr = state.as_ptr() as *const StackWaiter;
        while !curr.is_null() {
let thread = {
// SAFETY: no mutable references to a stack waiter can exist
// and the waiter struct is ensured to live while its thread is
// parked, so the pointer can be safely dereferenced
#[allow(unused_unsafe)]
let waiter = unsafe { &*curr };
curr = waiter.next.get();
                // there can be no data race when mutating the thread-cell, as only the
                // unblocking thread will access it; the stack waiter can be dropped as soon
                // as the following store becomes visible, so the thread MUST be taken out first
let thread = waiter.thread.take().unwrap();
// (ready:2) this release store syncs-with the acquire load (ready:1)
waiter.ready.store(true, Ordering::Release);
thread
};
thread.unpark();
}
}
}
unsafe impl Block for ParkThread {
/// Blocks (parks) the current thread until it is woken up by the thread
/// with permission to initialize the `OnceCell`.
#[inline]
fn | (state: &AtomicOnceState) {
// spin a little before parking the thread in case the state is
// quickly unlocked again
let back_off = BackOff::new();
let blocked = match Self::try_block_spinning(state, &back_off) {
Ok(_) => return,
Err(blocked) => blocked,
};
// create a linked list node on the current thread's stack, which is
// guaranteed to stay alive while the thread is parked.
let waiter = StackWaiter {
ready: AtomicBool::new(false),
thread: Cell::new(Some(thread::current())),
next: Cell::new(blocked.as_ptr() as *const StackWaiter),
};
let mut curr = blocked;
let head = BlockedState::from(&waiter as *const _);
// SAFETY: `head` is a valid pointer to a `StackWaiter` that will live
// for the duration of this function, which in turn will only return
// when no other thread can still observe any pointer to it
// (wait:2) this acq-rel CAS syncs-with itself and the acq load (wait:1)
while let Err(err) = unsafe { state.try_enqueue_waiter(curr, head, Ordering::AcqRel) } {
match err {
// another parked thread succeeded in placing itself at the queue's front
WouldBlock(queue) => {
// the waiter hasn't been shared yet, so it's still safe to
// mutate the next pointer
curr = queue;
waiter.next.set(queue.as_ptr() as *const StackWaiter);
back_off.spin();
}
// acquire-release is required here to enforce acquire ordering in the failure case,
// which guarantees that any (non-atomic) stores to the cell's inner state preceding
// (guard:2) have become visible, if the function returns;
// (alternatively an explicit acquire fence could be placed into this path)
Ready => return,
                Uninit => unreachable!("cell state cannot become `UNINIT` again"),
}
}
// park the thread until it is woken up by the thread that first set the state to blocked.
// the loop guards against spurious wake ups
// (ready:1) this acquire load syncs-with the release store (ready:2)
        while !waiter.ready.load(Ordering::Acquire) {
thread::park();
}
// SAFETY: propagates poisoning as required by the trait
// (wait:3) this acquire load syncs-with the acq-rel swap (guard:2)
assert_eq!(state.load(Ordering::Acquire).expect(POISON_PANIC_MSG), Ready);
}
}
/// A linked list node that lives on the stack of a parked thread.
#[repr(align(4))]
pub(crate) struct StackWaiter {
/// The flag marking the waiter as either blocked or ready to proceed.
///
/// This is read by the owning thread and is set by the thread that gets to
/// run the initialization closure and responsible for unparking all blocked
/// threads, which may be either the same or any other thread.
ready: AtomicBool,
/// The handle for the parked thread that is used to unpark it, once the
/// initialization is complete.
///
/// This field is in fact mutated by a thread that is potentially not the
/// same as the owning thread, but exclusively in the case where the
/// mutating thread has exclusive access to this field.
thread: Cell<Option<Thread>>,
/// The pointer to the next blocked thread.
///
/// This field is mutated exclusively by **either** the owning thread
/// **before** the waiter becomes visible to other threads or by the thread
/// responsible for unparking all waiting threads.
next: Cell<*const StackWaiter>,
}
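// Illustrative picture (not from the original source): each parked thread owns
// one `StackWaiter` on its own stack, and the cell's state word points at the
// most recently enqueued waiter, forming an intrusive LIFO list:
//
//     state -> StackWaiter(t3) -> StackWaiter(t2) -> StackWaiter(t1) -> null
//
// The unblocking thread walks the list, takes each `thread` handle, sets
// `ready`, and unparks the owner; a waiter's stack frame may be reclaimed as
// soon as its `ready` flag is set, which is why the handle is taken out first.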
#[cfg(test)]
mod tests {
generate_tests_non_blocking!();
generate_tests!();
}
| block | identifier_name |
park.rs | use std::{
cell::Cell,
sync::atomic::{AtomicBool, Ordering},
thread::{self, Thread},
};
use conquer_util::BackOff;
use crate::{
cell::{Block, Unblock},
state::{
AtomicOnceState, BlockedState,
OnceState::{Ready, Uninit, WouldBlock},
},
POISON_PANIC_MSG,
};
use self::internal::ParkThread;
#[cfg(any(test, feature = "std"))]
/// A type for lazy initialization of e.g. global static variables, which
/// provides the same functionality as the `lazy_static!` macro.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the API of the generic
/// [`Lazy`](crate::doc::Lazy) type.
///
/// # Examples
///
/// ```
/// use std::sync::Mutex;
///
/// # #[cfg(feature = "std")]
/// use conquer_once::Lazy;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Lazy;
///
/// static MUTEX: Lazy<Mutex<Vec<i32>>> = Lazy::new(Mutex::default);
///
/// let mut lock = MUTEX.lock().unwrap();
///
/// lock.push(1);
/// lock.push(2);
/// lock.push(3);
///
/// assert_eq!(lock.as_slice(), &[1, 2, 3]);
/// ```
///
/// The associated [`new`](crate::lazy::Lazy::new) function can be used with any
/// function or closure that implements `Fn() -> T`.
///
/// ```
/// use std::collections::HashMap;
///
/// # #[cfg(feature = "std")]
/// use conquer_once::Lazy;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Lazy;
///
/// static CAPITALS: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
/// let mut map = HashMap::new();
/// map.insert("Norway", "Oslo");
/// map.insert("Belgium", "Brussels");
/// map.insert("Latvia", "Riga");
/// map
/// });
///
/// assert_eq!(CAPITALS.get(&"Norway"), Some(&"Oslo"));
/// assert_eq!(CAPITALS.get(&"Belgium"), Some(&"Brussels"));
/// assert_eq!(CAPITALS.get(&"Latvia"), Some(&"Riga"));
/// ```
pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, ParkThread, F>;
#[cfg(any(test, feature = "std"))]
/// An interior mutability cell type which allows synchronized one-time
/// initialization and read-only access exclusively after initialization.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the generic
/// [`OnceCell`](crate::doc::OnceCell) type.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "std")]
/// use conquer_once::OnceCell;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::OnceCell;
///
/// #[derive(Copy, Clone)]
/// struct Configuration {
/// mode: i32,
/// threshold: u64,
/// msg: &'static str,
/// }
///
/// static CONFIG: OnceCell<Configuration> = OnceCell::uninit();
///
/// // producer thread
/// CONFIG.init_once(|| Configuration {
/// mode: 2,
/// threshold: 128,
/// msg: "..."
/// });
///
/// // consumer thread
/// let res = CONFIG.get().copied();
/// if let Some(config) = res {
/// assert_eq!(config.mode, 2);
/// assert_eq!(config.threshold, 128);
/// }
/// ```
pub type OnceCell<T> = crate::cell::OnceCell<T, ParkThread>;
#[cfg(any(test, feature = "std"))]
/// A synchronization primitive which can be used to run a one-time global
/// initialization.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the generic
/// [`OnceCell`](crate::doc::OnceCell) type.
/// This is a specialization with `T = ()`.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "std")]
/// use conquer_once::Once;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Once;
///
/// static mut GLOBAL: usize = 0;
/// static INIT: Once = Once::uninit();
///
/// fn get_global() -> usize {
/// // SAFETY: this is safe because the `Once` ensures the `static mut` is
/// // assigned by only one thread and without data races.
/// unsafe {
/// INIT.init_once(|| {
/// GLOBAL = expensive_computation();
/// });
/// # assert_eq!(GLOBAL, 1);
/// GLOBAL
/// }
/// }
///
/// fn expensive_computation() -> usize {
/// //...
/// # 1
/// }
/// ```
pub type Once = OnceCell<()>;
mod internal {
/// Blocking strategy using low-level and OS-reliant parking and un-parking
/// mechanisms.
#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct ParkThread;
}
impl ParkThread {
#[inline]
pub(crate) fn try_block_spinning(
state: &AtomicOnceState,
back_off: &BackOff,
) -> Result<(), BlockedState> {
loop {
// (wait:1) this acquire load syncs-with the release swaps (guard:2)
// and the acq-rel CAS (wait:2)
match state.load(Ordering::Acquire).expect(POISON_PANIC_MSG) {
Ready => return Ok(()),
WouldBlock(blocked) if back_off.advise_yield() => {
back_off.reset();
return Err(blocked);
}
_ => {}
}
back_off.spin();
}
}
}
impl Unblock for ParkThread {
/// Unblocks all blocked waiting threads.
#[inline]
unsafe fn on_unblock(state: BlockedState) {
let mut curr = state.as_ptr() as *const StackWaiter;
        while !curr.is_null() {
let thread = {
// SAFETY: no mutable references to a stack waiter can exist
// and the waiter struct is ensured to live while its thread is
// parked, so the pointer can be safely dereferenced
#[allow(unused_unsafe)]
let waiter = unsafe { &*curr };
curr = waiter.next.get();
                // there can be no data race when mutating the thread-cell, as only the
                // unblocking thread will access it; the stack waiter can be dropped as soon
                // as the following store becomes visible, so the thread MUST be taken out first
let thread = waiter.thread.take().unwrap();
// (ready:2) this release store syncs-with the acquire load (ready:1)
waiter.ready.store(true, Ordering::Release);
thread
};
thread.unpark();
}
}
}
unsafe impl Block for ParkThread {
/// Blocks (parks) the current thread until it is woken up by the thread
/// with permission to initialize the `OnceCell`.
#[inline]
fn block(state: &AtomicOnceState) | // SAFETY: `head` is a valid pointer to a `StackWaiter` that will live
// for the duration of this function, which in turn will only return
// when no other thread can still observe any pointer to it
// (wait:2) this acq-rel CAS syncs-with itself and the acq load (wait:1)
while let Err(err) = unsafe { state.try_enqueue_waiter(curr, head, Ordering::AcqRel) } {
match err {
// another parked thread succeeded in placing itself at the queue's front
WouldBlock(queue) => {
// the waiter hasn't been shared yet, so it's still safe to
// mutate the next pointer
curr = queue;
waiter.next.set(queue.as_ptr() as *const StackWaiter);
back_off.spin();
}
// acquire-release is required here to enforce acquire ordering in the failure case,
// which guarantees that any (non-atomic) stores to the cell's inner state preceding
// (guard:2) have become visible, if the function returns;
// (alternatively an explicit acquire fence could be placed into this path)
Ready => return,
Uninit => unreachable!("cell state cannot become `UNINIT` again"),
}
}
// park the thread until it is woken up by the thread that first set the state to blocked.
// the loop guards against spurious wake ups
// (ready:1) this acquire load syncs-with the release store (ready:2)
while !waiter.ready.load(Ordering::Acquire) {
thread::park();
}
// SAFETY: propagates poisoning as required by the trait
// (wait:3) this acquire load syncs-with the acq-rel swap (guard:2)
assert_eq!(state.load(Ordering::Acquire).expect(POISON_PANIC_MSG), Ready);
}
}
/// A linked list node that lives on the stack of a parked thread.
#[repr(align(4))]
pub(crate) struct StackWaiter {
/// The flag marking the waiter as either blocked or ready to proceed.
///
/// This is read by the owning thread and is set by the thread that gets to
/// run the initialization closure and is responsible for unparking all blocked
/// threads, which may be either the same or any other thread.
ready: AtomicBool,
/// The handle for the parked thread that is used to unpark it, once the
/// initialization is complete.
///
/// This field is in fact mutated by a thread that is potentially not the
/// same as the owning thread, but exclusively in the case where the
/// mutating thread has exclusive access to this field.
thread: Cell<Option<Thread>>,
/// The pointer to the next blocked thread.
///
/// This field is mutated exclusively by **either** the owning thread
/// **before** the waiter becomes visible to other threads or by the thread
/// responsible for unparking all waiting threads.
next: Cell<*const StackWaiter>,
}
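// Illustrative sketch (added comment, not part of the original source): each
// blocked thread owns one `StackWaiter` on its own stack and links to the
// previously enqueued waiter, so the queue head stored in the cell's state
// points at an intrusive singly linked list threaded through the parked
// threads' stacks:
//
//   state -> waiter (thread C) -> waiter (thread B) -> waiter (thread A) -> null
//
// The `#[repr(align(4))]` above presumably keeps the low bits of every waiter
// pointer clear so the state word can tag them; treat that as an assumption.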
#[cfg(test)]
mod tests {
generate_tests_non_blocking!();
generate_tests!();
}
| {
// spin a little before parking the thread in case the state is
// quickly unlocked again
let back_off = BackOff::new();
let blocked = match Self::try_block_spinning(state, &back_off) {
Ok(_) => return,
Err(blocked) => blocked,
};
// create a linked list node on the current thread's stack, which is
// guaranteed to stay alive while the thread is parked.
let waiter = StackWaiter {
ready: AtomicBool::new(false),
thread: Cell::new(Some(thread::current())),
next: Cell::new(blocked.as_ptr() as *const StackWaiter),
};
let mut curr = blocked;
let head = BlockedState::from(&waiter as *const _);
| identifier_body |
park.rs | use std::{
cell::Cell,
sync::atomic::{AtomicBool, Ordering},
thread::{self, Thread},
};
use conquer_util::BackOff;
use crate::{
cell::{Block, Unblock},
state::{
AtomicOnceState, BlockedState,
OnceState::{Ready, Uninit, WouldBlock},
},
POISON_PANIC_MSG,
};
use self::internal::ParkThread;
#[cfg(any(test, feature = "std"))]
/// A type for lazy initialization of e.g. global static variables, which
/// provides the same functionality as the `lazy_static!` macro.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the API of the generic
/// [`Lazy`](crate::doc::Lazy) type.
///
/// # Examples
///
/// ```
/// use std::sync::Mutex;
///
/// # #[cfg(feature = "std")]
/// use conquer_once::Lazy;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Lazy;
///
/// static MUTEX: Lazy<Mutex<Vec<i32>>> = Lazy::new(Mutex::default);
///
/// let mut lock = MUTEX.lock().unwrap();
///
/// lock.push(1);
/// lock.push(2);
/// lock.push(3);
///
/// assert_eq!(lock.as_slice(), &[1, 2, 3]);
/// ```
///
/// The associated [`new`](crate::lazy::Lazy::new) function can be used with any
/// function or closure that implements `Fn() -> T`.
///
/// ```
/// use std::collections::HashMap;
///
/// # #[cfg(feature = "std")]
/// use conquer_once::Lazy;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Lazy;
///
/// static CAPITALS: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
/// let mut map = HashMap::new();
/// map.insert("Norway", "Oslo");
/// map.insert("Belgium", "Brussels");
/// map.insert("Latvia", "Riga");
/// map
/// });
///
/// assert_eq!(CAPITALS.get(&"Norway"), Some(&"Oslo"));
/// assert_eq!(CAPITALS.get(&"Belgium"), Some(&"Brussels"));
/// assert_eq!(CAPITALS.get(&"Latvia"), Some(&"Riga"));
/// ```
pub type Lazy<T, F = fn() -> T> = crate::lazy::Lazy<T, ParkThread, F>;
#[cfg(any(test, feature = "std"))]
/// An interior mutability cell type which allows synchronized one-time
/// initialization and read-only access exclusively after initialization.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the generic
/// [`OnceCell`](crate::doc::OnceCell) type.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "std")]
/// use conquer_once::OnceCell;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::OnceCell;
///
/// #[derive(Copy, Clone)]
/// struct Configuration {
/// mode: i32,
/// threshold: u64,
/// msg: &'static str,
/// }
///
/// static CONFIG: OnceCell<Configuration> = OnceCell::uninit();
///
/// // producer thread
/// CONFIG.init_once(|| Configuration {
/// mode: 2,
/// threshold: 128,
/// msg: "..."
/// });
///
/// // consumer thread
/// let res = CONFIG.get().copied();
/// if let Some(config) = res {
/// assert_eq!(config.mode, 2);
/// assert_eq!(config.threshold, 128);
/// }
/// ```
pub type OnceCell<T> = crate::cell::OnceCell<T, ParkThread>;
#[cfg(any(test, feature = "std"))]
/// A synchronization primitive which can be used to run a one-time global
/// initialization.
///
/// This type uses the blocking synchronization mechanism provided by the
/// underlying operating system.
///
/// For the API of this type alias, see the generic
/// [`OnceCell`](crate::doc::OnceCell) type.
/// This is a specialization with `T = ()`.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "std")]
/// use conquer_once::Once;
/// # #[cfg(not(feature = "std"))]
/// # use conquer_once::spin::Once;
///
/// static mut GLOBAL: usize = 0;
/// static INIT: Once = Once::uninit();
///
/// fn get_global() -> usize {
/// // SAFETY: this is safe because the `Once` ensures the `static mut` is
/// // assigned by only one thread and without data races.
/// unsafe {
/// INIT.init_once(|| {
/// GLOBAL = expensive_computation();
/// });
/// # assert_eq!(GLOBAL, 1);
/// GLOBAL
/// }
/// }
///
/// fn expensive_computation() -> usize {
/// // ...
/// # 1
/// }
/// ```
pub type Once = OnceCell<()>;
mod internal {
/// Blocking strategy using low-level and OS-reliant parking and un-parking
/// mechanisms.
#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub struct ParkThread;
}
impl ParkThread {
#[inline]
pub(crate) fn try_block_spinning(
state: &AtomicOnceState,
back_off: &BackOff,
) -> Result<(), BlockedState> {
loop {
// (wait:1) this acquire load syncs-with the release swaps (guard:2)
// and the acq-rel CAS (wait:2)
match state.load(Ordering::Acquire).expect(POISON_PANIC_MSG) {
Ready => return Ok(()),
WouldBlock(blocked) if back_off.advise_yield() => {
back_off.reset();
return Err(blocked);
}
_ => {}
}
| }
}
}
impl Unblock for ParkThread {
/// Unblocks all blocked waiting threads.
#[inline]
unsafe fn on_unblock(state: BlockedState) {
let mut curr = state.as_ptr() as *const StackWaiter;
while !curr.is_null() {
let thread = {
// SAFETY: no mutable references to a stack waiter can exist
// and the waiter struct is ensured to live while its thread is
// parked, so the pointer can be safely dereferenced
#[allow(unused_unsafe)]
let waiter = unsafe { &*curr };
curr = waiter.next.get();
// there can be no data race when mutating the thread-cell, as only the unblocking
// thread will access it; the stack waiter can be dropped as soon as the following
// store becomes visible, so the thread MUST be taken out first
let thread = waiter.thread.take().unwrap();
// (ready:2) this release store syncs-with the acquire load (ready:1)
waiter.ready.store(true, Ordering::Release);
thread
};
thread.unpark();
}
}
}
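// Illustrative timeline (added comment, not part of the original source): a
// blocked thread enqueues its `StackWaiter` with a CAS and parks until `ready`
// flips to true; the unblocking thread walks the queue, takes each `Thread`
// handle out *before* the release-store to `ready` (after which the waiter's
// stack slot may disappear), and only then unparks the thread.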
unsafe impl Block for ParkThread {
/// Blocks (parks) the current thread until it is woken up by the thread
/// with permission to initialize the `OnceCell`.
#[inline]
fn block(state: &AtomicOnceState) {
// spin a little before parking the thread in case the state is
// quickly unlocked again
let back_off = BackOff::new();
let blocked = match Self::try_block_spinning(state, &back_off) {
Ok(_) => return,
Err(blocked) => blocked,
};
// create a linked list node on the current thread's stack, which is
// guaranteed to stay alive while the thread is parked.
let waiter = StackWaiter {
ready: AtomicBool::new(false),
thread: Cell::new(Some(thread::current())),
next: Cell::new(blocked.as_ptr() as *const StackWaiter),
};
let mut curr = blocked;
let head = BlockedState::from(&waiter as *const _);
// SAFETY: `head` is a valid pointer to a `StackWaiter` that will live
// for the duration of this function, which in turn will only return
// when no other thread can still observe any pointer to it
// (wait:2) this acq-rel CAS syncs-with itself and the acq load (wait:1)
while let Err(err) = unsafe { state.try_enqueue_waiter(curr, head, Ordering::AcqRel) } {
match err {
// another parked thread succeeded in placing itself at the queue's front
WouldBlock(queue) => {
// the waiter hasn't been shared yet, so it's still safe to
// mutate the next pointer
curr = queue;
waiter.next.set(queue.as_ptr() as *const StackWaiter);
back_off.spin();
}
// acquire-release is required here to enforce acquire ordering in the failure case,
// which guarantees that any (non-atomic) stores to the cell's inner state preceding
// (guard:2) have become visible, if the function returns;
// (alternatively an explicit acquire fence could be placed into this path)
Ready => return,
Uninit => unreachable!("cell state cannot become `UNINIT` again"),
}
}
// park the thread until it is woken up by the thread that first set the state to blocked.
// the loop guards against spurious wake ups
// (ready:1) this acquire load syncs-with the release store (ready:2)
while !waiter.ready.load(Ordering::Acquire) {
thread::park();
}
// SAFETY: propagates poisoning as required by the trait
// (wait:3) this acquire load syncs-with the acq-rel swap (guard:2)
assert_eq!(state.load(Ordering::Acquire).expect(POISON_PANIC_MSG), Ready);
}
}
/// A linked list node that lives on the stack of a parked thread.
#[repr(align(4))]
pub(crate) struct StackWaiter {
/// The flag marking the waiter as either blocked or ready to proceed.
///
/// This is read by the owning thread and is set by the thread that gets to
/// run the initialization closure and is responsible for unparking all blocked
/// threads, which may be either the same or any other thread.
ready: AtomicBool,
/// The handle for the parked thread that is used to unpark it, once the
/// initialization is complete.
///
/// This field is in fact mutated by a thread that is potentially not the
/// same as the owning thread, but exclusively in the case where the
/// mutating thread has exclusive access to this field.
thread: Cell<Option<Thread>>,
/// The pointer to the next blocked thread.
///
/// This field is mutated exclusively by **either** the owning thread
/// **before** the waiter becomes visible to other threads or by the thread
/// responsible for unparking all waiting threads.
next: Cell<*const StackWaiter>,
}
#[cfg(test)]
mod tests {
generate_tests_non_blocking!();
generate_tests!();
} | back_off.spin(); | random_line_split |
proof.rs | use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use ring::digest::Algorithm;
use crate::hashutils::HashUtils;
use crate::tree::Tree;
/// An inclusion proof represents the fact that a `value` is a member
/// of a `MerkleTree` with root hash `root_hash`, and hash function `algorithm`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct Proof<T> {
/// The hashing algorithm used in the original `MerkleTree`
#[cfg_attr(feature = "serialization-serde", serde(with = "algorithm_serde"))]
pub algorithm: &'static Algorithm,
/// The hash of the root of the original `MerkleTree`
pub root_hash: Vec<u8>,
/// The first `Lemma` of the `Proof`
pub lemma: Lemma,
/// The value concerned by this `Proof`
pub value: T,
}
#[cfg(feature = "serialization-serde")]
mod algorithm_serde {
use ring::digest::{self, Algorithm};
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S: Serializer>(
algorithm: &'static Algorithm,
se: S,
) -> Result<S::Ok, S::Error> {
// The `Debug` implementation of `Algorithm` prints its ID.
format!("{:?}", algorithm).serialize(se)
}
pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> Result<&'static Algorithm, D::Error> {
let alg_str: String = Deserialize::deserialize(de)?;
match &*alg_str {
"SHA1" => Ok(&digest::SHA1_FOR_LEGACY_USE_ONLY),
"SHA256" => Ok(&digest::SHA256),
"SHA384" => Ok(&digest::SHA384),
"SHA512" => Ok(&digest::SHA512),
"SHA512_256" => Ok(&digest::SHA512_256),
_ => Err(D::Error::custom("unknown hash algorithm")),
}
}
#[cfg(test)]
mod test {
use super::*;
use ring::digest::{
SHA1_FOR_LEGACY_USE_ONLY as sha1, SHA256 as sha256, SHA384 as sha384, SHA512 as sha512,
SHA512_256 as sha512_256,
};
static SHA1: &Algorithm = &sha1;
static SHA256: &Algorithm = &sha256;
static SHA384: &Algorithm = &sha384;
static SHA512: &Algorithm = &sha512;
static SHA512_256: &Algorithm = &sha512_256;
#[test]
fn test_serialize_known_algorithms() {
extern crate serde_json;
for alg in &[SHA1, SHA256, SHA384, SHA512, SHA512_256] {
let mut serializer = serde_json::Serializer::with_formatter(
vec![],
serde_json::ser::PrettyFormatter::new(),
);
serialize(alg, &mut serializer).unwrap_or_else(|_| panic!("{:?}", alg));
let alg_ = deserialize(&mut serde_json::Deserializer::from_slice(
&serializer.into_inner()[..],
))
.unwrap_or_else(|_| panic!("{:?}", alg));
assert_eq!(*alg, alg_);
}
}
#[test]
#[should_panic(expected = "unknown hash algorithm")]
fn test_serialize_unknown_algorithm() {
extern crate serde_json;
{
let alg_str = "\"BLAKE2b\"";
let mut deserializer = serde_json::Deserializer::from_str(alg_str);
let _ = deserialize(&mut deserializer)
.unwrap_or_else(|_| panic!("unknown hash algorithm {:?}", alg_str));
}
}
}
}
impl<T: PartialEq> PartialEq for Proof<T> {
fn eq(&self, other: &Proof<T>) -> bool {
self.root_hash == other.root_hash && self.lemma == other.lemma && self.value == other.value
}
}
impl<T: Eq> Eq for Proof<T> {}
impl<T: Ord> PartialOrd for Proof<T> {
fn partial_cmp(&self, other: &Proof<T>) -> Option<Ordering> |
}
impl<T: Ord> Ord for Proof<T> {
fn cmp(&self, other: &Proof<T>) -> Ordering {
self.root_hash
.cmp(&other.root_hash)
.then(self.value.cmp(&other.value))
.then_with(|| self.lemma.cmp(&other.lemma))
}
}
impl<T: Hash> Hash for Proof<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.root_hash.hash(state);
self.lemma.hash(state);
self.value.hash(state);
}
}
impl<T> Proof<T> {
/// Constructs a new `Proof`
pub fn new(algorithm: &'static Algorithm, root_hash: Vec<u8>, lemma: Lemma, value: T) -> Self {
Proof {
algorithm,
root_hash,
lemma,
value,
}
}
/// Checks whether this inclusion proof is well-formed,
/// and whether its root hash matches the given `root_hash`.
pub fn validate(&self, root_hash: &[u8]) -> bool {
if self.root_hash != root_hash || self.lemma.node_hash != root_hash {
return false;
}
self.lemma.validate(self.algorithm)
}
/// Returns the index of this proof's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the proof is malformed. Call `validate` first.
pub fn index(&self, count: usize) -> usize {
self.lemma.index(count)
}
}
/// A `Lemma` holds the hash of a node, the hash of its sibling node,
/// and a sub lemma, whose `node_hash`, when combined with this `sibling_hash`
/// must be equal to this `node_hash`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Lemma {
pub node_hash: Vec<u8>,
pub sibling_hash: Option<Positioned<Vec<u8>>>,
pub sub_lemma: Option<Box<Lemma>>,
}
impl Lemma {
/// Attempts to generate a proof that a value with hash `needle` is a
/// member of the given `tree`.
pub fn new<T>(tree: &Tree<T>, needle: &[u8]) -> Option<Lemma> {
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf { ref hash, .. } => Lemma::new_leaf_proof(hash, needle),
Tree::Node {
ref hash,
ref left,
ref right,
} => Lemma::new_tree_proof(hash, needle, left, right),
}
}
/// Attempts to generate a proof that the `idx`-th leaf is a member of
/// the given tree. The `count` must equal the number of leaves in the
/// `tree`. If `idx >= count`, `None` is returned. Otherwise it returns
/// the new `Lemma` and the `idx`-th value.
pub fn new_by_index<T>(tree: &Tree<T>, idx: usize, count: usize) -> Option<(Lemma, &T)> {
if idx >= count {
return None;
}
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf {
ref hash,
ref value,
..
} => {
if count != 1 {
return None;
}
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: None,
sub_lemma: None,
};
Some((lemma, value))
}
Tree::Node {
ref hash,
ref left,
ref right,
} => {
let left_count = count.next_power_of_two() / 2;
let (sub_lem_val, sibling_hash);
if idx < left_count {
sub_lem_val = Lemma::new_by_index(left, idx, left_count);
sibling_hash = Positioned::Right(right.hash().clone());
} else {
sub_lem_val = Lemma::new_by_index(right, idx - left_count, count - left_count);
sibling_hash = Positioned::Left(left.hash().clone());
}
sub_lem_val.map(|(sub_lemma, value)| {
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: Some(sibling_hash),
sub_lemma: Some(Box::new(sub_lemma)),
};
(lemma, value)
})
}
}
}
/// Returns the index of this lemma's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the lemma is malformed. Call `validate_lemma` first.
pub fn index(&self, count: usize) -> usize {
let left_count = count.next_power_of_two() / 2;
match (self.sub_lemma.as_ref(), self.sibling_hash.as_ref()) {
(None, None) => 0,
(Some(l), Some(&Positioned::Left(_))) => left_count + l.index(count - left_count),
(Some(l), Some(&Positioned::Right(_))) => l.index(left_count),
(None, Some(_)) | (Some(_), None) => panic!("malformed lemma"),
}
}
fn new_leaf_proof(hash: &[u8], needle: &[u8]) -> Option<Lemma> {
if *hash == *needle {
Some(Lemma {
node_hash: hash.into(),
sibling_hash: None,
sub_lemma: None,
})
} else {
None
}
}
fn new_tree_proof<T>(
hash: &[u8],
needle: &[u8],
left: &Tree<T>,
right: &Tree<T>,
) -> Option<Lemma> {
Lemma::new(left, needle)
.map(|lemma| {
let right_hash = right.hash().clone();
let sub_lemma = Some(Positioned::Right(right_hash));
(lemma, sub_lemma)
})
.or_else(|| {
let sub_lemma = Lemma::new(right, needle);
sub_lemma.map(|lemma| {
let left_hash = left.hash().clone();
let sub_lemma = Some(Positioned::Left(left_hash));
(lemma, sub_lemma)
})
})
.map(|(sub_lemma, sibling_hash)| Lemma {
node_hash: hash.into(),
sibling_hash,
sub_lemma: Some(Box::new(sub_lemma)),
})
}
fn validate(&self, algorithm: &'static Algorithm) -> bool {
match self.sub_lemma {
None => self.sibling_hash.is_none(),
Some(ref sub) => match self.sibling_hash {
None => false,
Some(Positioned::Left(ref hash)) => {
let combined = algorithm.hash_nodes(hash, &sub.node_hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
Some(Positioned::Right(ref hash)) => {
let combined = algorithm.hash_nodes(&sub.node_hash, hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
},
}
}
}
/// Tags a value so that we know from which branch of a `Tree` (if any) it was found.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Positioned<T> {
/// The value was found in the left branch
Left(T),
/// The value was found in the right branch
Right(T),
}
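#[cfg(test)]
mod lemma_validation_sketch {
    // Illustrative sketch, not part of the original crate: hand-builds the
    // lemma chain proving the left leaf of a two-leaf tree and checks that
    // `validate` recombines the hashes in left/right order. The leaf hashes
    // are arbitrary placeholder bytes; only the parent recombination matters.
    use super::*;
    use ring::digest::SHA256;

    #[test]
    fn two_leaf_lemma_validates() {
        let algorithm: &'static Algorithm = &SHA256;
        let left_hash = vec![1u8; 32];
        let right_hash = vec![2u8; 32];
        // The root commits to hash_nodes(left, right).
        let root = algorithm.hash_nodes(&left_hash, &right_hash);
        let lemma = Lemma {
            node_hash: root.as_ref().to_vec(),
            // The proven leaf sits on the left, so its sibling is on the right.
            sibling_hash: Some(Positioned::Right(right_hash)),
            sub_lemma: Some(Box::new(Lemma {
                node_hash: left_hash,
                sibling_hash: None,
                sub_lemma: None,
            })),
        };
        assert!(lemma.validate(algorithm));
    }
}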
| {
Some(self.cmp(other))
} | identifier_body |
proof.rs | use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use ring::digest::Algorithm;
use crate::hashutils::HashUtils;
use crate::tree::Tree;
/// An inclusion proof represents the fact that a `value` is a member
/// of a `MerkleTree` with root hash `root_hash`, and hash function `algorithm`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct Proof<T> {
/// The hashing algorithm used in the original `MerkleTree`
#[cfg_attr(feature = "serialization-serde", serde(with = "algorithm_serde"))]
pub algorithm: &'static Algorithm,
/// The hash of the root of the original `MerkleTree`
pub root_hash: Vec<u8>,
/// The first `Lemma` of the `Proof`
pub lemma: Lemma,
/// The value concerned by this `Proof`
pub value: T,
}
#[cfg(feature = "serialization-serde")]
mod algorithm_serde {
use ring::digest::{self, Algorithm};
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S: Serializer>(
algorithm: &'static Algorithm,
se: S,
) -> Result<S::Ok, S::Error> {
// The `Debug` implementation of `Algorithm` prints its ID.
format!("{:?}", algorithm).serialize(se)
}
pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> Result<&'static Algorithm, D::Error> {
let alg_str: String = Deserialize::deserialize(de)?;
match &*alg_str {
"SHA1" => Ok(&digest::SHA1_FOR_LEGACY_USE_ONLY),
"SHA256" => Ok(&digest::SHA256),
"SHA384" => Ok(&digest::SHA384),
"SHA512" => Ok(&digest::SHA512),
"SHA512_256" => Ok(&digest::SHA512_256),
_ => Err(D::Error::custom("unknown hash algorithm")),
}
}
#[cfg(test)]
mod test {
use super::*;
use ring::digest::{
SHA1_FOR_LEGACY_USE_ONLY as sha1, SHA256 as sha256, SHA384 as sha384, SHA512 as sha512,
SHA512_256 as sha512_256,
};
static SHA1: &Algorithm = &sha1;
static SHA256: &Algorithm = &sha256;
static SHA384: &Algorithm = &sha384;
static SHA512: &Algorithm = &sha512;
static SHA512_256: &Algorithm = &sha512_256;
#[test]
fn test_serialize_known_algorithms() {
extern crate serde_json;
for alg in &[SHA1, SHA256, SHA384, SHA512, SHA512_256] {
let mut serializer = serde_json::Serializer::with_formatter(
vec![],
serde_json::ser::PrettyFormatter::new(),
);
serialize(alg, &mut serializer).unwrap_or_else(|_| panic!("{:?}", alg));
let alg_ = deserialize(&mut serde_json::Deserializer::from_slice(
&serializer.into_inner()[..],
))
.unwrap_or_else(|_| panic!("{:?}", alg));
assert_eq!(*alg, alg_);
}
}
#[test]
#[should_panic(expected = "unknown hash algorithm")]
fn test_serialize_unknown_algorithm() {
extern crate serde_json;
{
let alg_str = "\"BLAKE2b\"";
let mut deserializer = serde_json::Deserializer::from_str(alg_str);
let _ = deserialize(&mut deserializer)
.unwrap_or_else(|_| panic!("unknown hash algorithm {:?}", alg_str));
}
}
}
}
impl<T: PartialEq> PartialEq for Proof<T> {
fn eq(&self, other: &Proof<T>) -> bool {
self.root_hash == other.root_hash && self.lemma == other.lemma && self.value == other.value
}
}
impl<T: Eq> Eq for Proof<T> {}
impl<T: Ord> PartialOrd for Proof<T> {
fn partial_cmp(&self, other: &Proof<T>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: Ord> Ord for Proof<T> {
fn cmp(&self, other: &Proof<T>) -> Ordering {
self.root_hash
.cmp(&other.root_hash)
.then(self.value.cmp(&other.value))
.then_with(|| self.lemma.cmp(&other.lemma))
}
}
impl<T: Hash> Hash for Proof<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.root_hash.hash(state);
self.lemma.hash(state);
self.value.hash(state);
}
}
impl<T> Proof<T> {
/// Constructs a new `Proof`
pub fn new(algorithm: &'static Algorithm, root_hash: Vec<u8>, lemma: Lemma, value: T) -> Self {
Proof {
algorithm,
root_hash,
lemma,
value,
}
}
/// Checks whether this inclusion proof is well-formed,
/// and whether its root hash matches the given `root_hash`.
pub fn validate(&self, root_hash: &[u8]) -> bool {
if self.root_hash != root_hash || self.lemma.node_hash != root_hash {
return false;
}
self.lemma.validate(self.algorithm)
}
/// Returns the index of this proof's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the proof is malformed. Call `validate` first.
pub fn index(&self, count: usize) -> usize {
self.lemma.index(count)
}
}
/// A `Lemma` holds the hash of a node, the hash of its sibling node,
/// and a sub lemma, whose `node_hash`, when combined with this `sibling_hash`
/// must be equal to this `node_hash`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Lemma {
pub node_hash: Vec<u8>,
pub sibling_hash: Option<Positioned<Vec<u8>>>,
pub sub_lemma: Option<Box<Lemma>>,
}
impl Lemma {
/// Attempts to generate a proof that a value with hash `needle` is a
/// member of the given `tree`.
pub fn new<T>(tree: &Tree<T>, needle: &[u8]) -> Option<Lemma> {
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf { ref hash, .. } => Lemma::new_leaf_proof(hash, needle),
Tree::Node {
ref hash,
ref left,
ref right,
} => Lemma::new_tree_proof(hash, needle, left, right),
}
}
/// Attempts to generate a proof that the `idx`-th leaf is a member of
/// the given tree. The `count` must equal the number of leaves in the
/// `tree`. If `idx >= count`, `None` is returned. Otherwise it returns
/// the new `Lemma` and the `idx`-th value.
pub fn new_by_index<T>(tree: &Tree<T>, idx: usize, count: usize) -> Option<(Lemma, &T)> {
if idx >= count {
return None;
}
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf {
ref hash,
ref value,
..
} => {
if count != 1 {
return None;
}
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: None,
sub_lemma: None,
};
Some((lemma, value))
}
Tree::Node {
ref hash,
ref left,
ref right,
} => {
let left_count = count.next_power_of_two() / 2;
let (sub_lem_val, sibling_hash);
if idx < left_count {
sub_lem_val = Lemma::new_by_index(left, idx, left_count);
sibling_hash = Positioned::Right(right.hash().clone());
} else {
sub_lem_val = Lemma::new_by_index(right, idx - left_count, count - left_count);
sibling_hash = Positioned::Left(left.hash().clone());
}
sub_lem_val.map(|(sub_lemma, value)| {
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: Some(sibling_hash),
sub_lemma: Some(Box::new(sub_lemma)),
};
(lemma, value)
})
}
}
}
/// Returns the index of this lemma's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the lemma is malformed. Call `validate_lemma` first.
pub fn index(&self, count: usize) -> usize {
let left_count = count.next_power_of_two() / 2;
match (self.sub_lemma.as_ref(), self.sibling_hash.as_ref()) {
(None, None) => 0,
(Some(l), Some(&Positioned::Left(_))) => left_count + l.index(count - left_count),
(Some(l), Some(&Positioned::Right(_))) => l.index(left_count),
(None, Some(_)) | (Some(_), None) => panic!("malformed lemma"),
}
}
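    // Worked example (added comment, not part of the original source): for
    // `count = 6`, `left_count = 6.next_power_of_two() / 2 = 4`. A lemma whose
    // sibling is `Positioned::Left(_)` proves a leaf in the right subtree, so
    // its index is `4 + sub.index(6 - 4)`; a `Positioned::Right(_)` sibling
    // keeps the search in the left subtree with `sub.index(4)`.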
fn new_leaf_proof(hash: &[u8], needle: &[u8]) -> Option<Lemma> {
if *hash == *needle | else {
None
}
}
fn new_tree_proof<T>(
hash: &[u8],
needle: &[u8],
left: &Tree<T>,
right: &Tree<T>,
) -> Option<Lemma> {
Lemma::new(left, needle)
.map(|lemma| {
let right_hash = right.hash().clone();
let sub_lemma = Some(Positioned::Right(right_hash));
(lemma, sub_lemma)
})
.or_else(|| {
let sub_lemma = Lemma::new(right, needle);
sub_lemma.map(|lemma| {
let left_hash = left.hash().clone();
let sub_lemma = Some(Positioned::Left(left_hash));
(lemma, sub_lemma)
})
})
.map(|(sub_lemma, sibling_hash)| Lemma {
node_hash: hash.into(),
sibling_hash,
sub_lemma: Some(Box::new(sub_lemma)),
})
}
fn validate(&self, algorithm: &'static Algorithm) -> bool {
match self.sub_lemma {
None => self.sibling_hash.is_none(),
Some(ref sub) => match self.sibling_hash {
None => false,
Some(Positioned::Left(ref hash)) => {
let combined = algorithm.hash_nodes(hash, &sub.node_hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
Some(Positioned::Right(ref hash)) => {
let combined = algorithm.hash_nodes(&sub.node_hash, hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
},
}
}
}
/// Tags a value so that we know from which branch of a `Tree` (if any) it was found.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Positioned<T> {
/// The value was found in the left branch
Left(T),
/// The value was found in the right branch
Right(T),
}
| {
Some(Lemma {
node_hash: hash.into(),
sibling_hash: None,
sub_lemma: None,
})
} | conditional_block |
proof.rs | use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use ring::digest::Algorithm;
use crate::hashutils::HashUtils;
use crate::tree::Tree;
/// An inclusion proof represents the fact that a `value` is a member
/// of a `MerkleTree` with root hash `root_hash`, and hash function `algorithm`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct Proof<T> {
/// The hashing algorithm used in the original `MerkleTree`
#[cfg_attr(feature = "serialization-serde", serde(with = "algorithm_serde"))]
pub algorithm: &'static Algorithm,
/// The hash of the root of the original `MerkleTree`
pub root_hash: Vec<u8>,
/// The first `Lemma` of the `Proof`
pub lemma: Lemma,
/// The value concerned by this `Proof`
pub value: T,
}
#[cfg(feature = "serialization-serde")]
mod algorithm_serde {
use ring::digest::{self, Algorithm};
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S: Serializer>(
algorithm: &'static Algorithm,
se: S,
) -> Result<S::Ok, S::Error> {
// The `Debug` implementation of `Algorithm` prints its ID.
format!("{:?}", algorithm).serialize(se)
}
pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> Result<&'static Algorithm, D::Error> {
let alg_str: String = Deserialize::deserialize(de)?;
match &*alg_str {
"SHA1" => Ok(&digest::SHA1_FOR_LEGACY_USE_ONLY),
"SHA256" => Ok(&digest::SHA256),
"SHA384" => Ok(&digest::SHA384),
"SHA512" => Ok(&digest::SHA512),
"SHA512_256" => Ok(&digest::SHA512_256),
_ => Err(D::Error::custom("unknown hash algorithm")),
}
}
#[cfg(test)]
mod test {
use super::*;
use ring::digest::{
SHA1_FOR_LEGACY_USE_ONLY as sha1, SHA256 as sha256, SHA384 as sha384, SHA512 as sha512,
SHA512_256 as sha512_256,
};
static SHA1: &Algorithm = &sha1;
static SHA256: &Algorithm = &sha256;
static SHA384: &Algorithm = &sha384;
static SHA512: &Algorithm = &sha512;
static SHA512_256: &Algorithm = &sha512_256;
#[test]
fn test_serialize_known_algorithms() {
extern crate serde_json;
for alg in &[SHA1, SHA256, SHA384, SHA512, SHA512_256] {
let mut serializer = serde_json::Serializer::with_formatter(
vec![],
serde_json::ser::PrettyFormatter::new(),
);
serialize(alg, &mut serializer).unwrap_or_else(|_| panic!("{:?}", alg));
let alg_ = deserialize(&mut serde_json::Deserializer::from_slice(
&serializer.into_inner()[..],
))
.unwrap_or_else(|_| panic!("{:?}", alg));
assert_eq!(*alg, alg_);
}
}
#[test]
#[should_panic(expected = "unknown hash algorithm")]
fn test_serialize_unknown_algorithm() {
extern crate serde_json;
{
let alg_str = "\"BLAKE2b\"";
let mut deserializer = serde_json::Deserializer::from_str(alg_str);
let _ = deserialize(&mut deserializer)
.unwrap_or_else(|_| panic!("unknown hash algorithm {:?}", alg_str));
}
}
}
}
impl<T: PartialEq> PartialEq for Proof<T> {
fn eq(&self, other: &Proof<T>) -> bool {
self.root_hash == other.root_hash && self.lemma == other.lemma && self.value == other.value
}
}
impl<T: Eq> Eq for Proof<T> {}
impl<T: Ord> PartialOrd for Proof<T> {
fn partial_cmp(&self, other: &Proof<T>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: Ord> Ord for Proof<T> {
fn cmp(&self, other: &Proof<T>) -> Ordering {
self.root_hash
.cmp(&other.root_hash)
.then(self.value.cmp(&other.value))
.then_with(|| self.lemma.cmp(&other.lemma))
}
}
impl<T: Hash> Hash for Proof<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.root_hash.hash(state);
self.lemma.hash(state);
self.value.hash(state);
}
}
impl<T> Proof<T> {
/// Constructs a new `Proof`
pub fn new(algorithm: &'static Algorithm, root_hash: Vec<u8>, lemma: Lemma, value: T) -> Self {
Proof {
algorithm,
root_hash,
lemma,
value,
}
}
/// Checks whether this inclusion proof is well-formed,
/// and whether its root hash matches the given `root_hash`.
pub fn validate(&self, root_hash: &[u8]) -> bool {
if self.root_hash != root_hash || self.lemma.node_hash != root_hash {
return false;
}
self.lemma.validate(self.algorithm)
}
/// Returns the index of this proof's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the proof is malformed. Call `validate` first.
pub fn index(&self, count: usize) -> usize {
self.lemma.index(count)
}
}
/// A `Lemma` holds the hash of a node, the hash of its sibling node,
/// and a sub lemma, whose `node_hash`, when combined with this `sibling_hash`
/// must be equal to this `node_hash`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Lemma {
pub node_hash: Vec<u8>,
pub sibling_hash: Option<Positioned<Vec<u8>>>,
pub sub_lemma: Option<Box<Lemma>>,
}
impl Lemma {
/// Attempts to generate a proof that a value with hash `needle` is a
/// member of the given `tree`.
pub fn new<T>(tree: &Tree<T>, needle: &[u8]) -> Option<Lemma> {
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf { ref hash, .. } => Lemma::new_leaf_proof(hash, needle),
Tree::Node {
ref hash,
ref left,
ref right,
} => Lemma::new_tree_proof(hash, needle, left, right),
}
}
/// Attempts to generate a proof that the `idx`-th leaf is a member of
/// the given tree. The `count` must equal the number of leaves in the
/// `tree`. If `idx >= count`, `None` is returned. Otherwise it returns
/// the new `Lemma` and the `idx`-th value.
pub fn new_by_index<T>(tree: &Tree<T>, idx: usize, count: usize) -> Option<(Lemma, &T)> {
if idx >= count {
return None;
}
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf {
ref hash,
ref value,
..
} => {
if count != 1 {
return None;
}
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: None,
sub_lemma: None,
};
Some((lemma, value))
}
Tree::Node {
ref hash,
ref left,
ref right,
} => {
let left_count = count.next_power_of_two() / 2;
let (sub_lem_val, sibling_hash);
if idx < left_count {
sub_lem_val = Lemma::new_by_index(left, idx, left_count);
sibling_hash = Positioned::Right(right.hash().clone());
} else {
sub_lem_val = Lemma::new_by_index(right, idx - left_count, count - left_count);
sibling_hash = Positioned::Left(left.hash().clone());
}
sub_lem_val.map(|(sub_lemma, value)| {
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: Some(sibling_hash),
sub_lemma: Some(Box::new(sub_lemma)),
};
(lemma, value)
})
}
}
}
/// Returns the index of this lemma's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the lemma is malformed. Call `validate_lemma` first.
pub fn index(&self, count: usize) -> usize {
let left_count = count.next_power_of_two() / 2;
match (self.sub_lemma.as_ref(), self.sibling_hash.as_ref()) {
(None, None) => 0,
(Some(l), Some(&Positioned::Left(_))) => left_count + l.index(count - left_count),
(Some(l), Some(&Positioned::Right(_))) => l.index(left_count),
(None, Some(_)) | (Some(_), None) => panic!("malformed lemma"),
}
}
fn new_leaf_proof(hash: &[u8], needle: &[u8]) -> Option<Lemma> {
if *hash == *needle {
Some(Lemma {
node_hash: hash.into(),
sibling_hash: None,
sub_lemma: None,
})
} else {
None
}
}
fn | <T>(
hash: &[u8],
needle: &[u8],
left: &Tree<T>,
right: &Tree<T>,
) -> Option<Lemma> {
Lemma::new(left, needle)
.map(|lemma| {
let right_hash = right.hash().clone();
let sub_lemma = Some(Positioned::Right(right_hash));
(lemma, sub_lemma)
})
.or_else(|| {
let sub_lemma = Lemma::new(right, needle);
sub_lemma.map(|lemma| {
let left_hash = left.hash().clone();
let sub_lemma = Some(Positioned::Left(left_hash));
(lemma, sub_lemma)
})
})
.map(|(sub_lemma, sibling_hash)| Lemma {
node_hash: hash.into(),
sibling_hash,
sub_lemma: Some(Box::new(sub_lemma)),
})
}
fn validate(&self, algorithm: &'static Algorithm) -> bool {
match self.sub_lemma {
None => self.sibling_hash.is_none(),
Some(ref sub) => match self.sibling_hash {
None => false,
Some(Positioned::Left(ref hash)) => {
let combined = algorithm.hash_nodes(hash, &sub.node_hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
Some(Positioned::Right(ref hash)) => {
let combined = algorithm.hash_nodes(&sub.node_hash, hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
},
}
}
}
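// Note on hash ordering (added comment, not part of the original source):
// validation recomputes each parent from its child and sibling while
// preserving left/right order:
//   Positioned::Left(s)  => parent == hash_nodes(s, child)
//   Positioned::Right(s) => parent == hash_nodes(child, s)
// so a sibling hash cannot be replayed on the wrong side of the node.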
/// Tags a value so that we know from which branch of a `Tree` (if any) it was found.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Positioned<T> {
/// The value was found in the left branch
Left(T),
/// The value was found in the right branch
Right(T),
}
| new_tree_proof | identifier_name |
proof.rs | use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use ring::digest::Algorithm;
use crate::hashutils::HashUtils;
use crate::tree::Tree;
/// An inclusion proof represents the fact that a `value` is a member
/// of a `MerkleTree` with root hash `root_hash`, and hash function `algorithm`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
pub struct Proof<T> {
/// The hashing algorithm used in the original `MerkleTree`
#[cfg_attr(feature = "serialization-serde", serde(with = "algorithm_serde"))]
pub algorithm: &'static Algorithm,
/// The hash of the root of the original `MerkleTree`
pub root_hash: Vec<u8>,
/// The first `Lemma` of the `Proof`
pub lemma: Lemma,
/// The value concerned by this `Proof`
pub value: T,
}
#[cfg(feature = "serialization-serde")]
mod algorithm_serde {
use ring::digest::{self, Algorithm};
use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S: Serializer>(
algorithm: &'static Algorithm,
se: S,
) -> Result<S::Ok, S::Error> {
// The `Debug` implementation of `Algorithm` prints its ID.
format!("{:?}", algorithm).serialize(se)
}
pub fn deserialize<'de, D: Deserializer<'de>>(de: D) -> Result<&'static Algorithm, D::Error> {
let alg_str: String = Deserialize::deserialize(de)?;
match &*alg_str {
"SHA1" => Ok(&digest::SHA1_FOR_LEGACY_USE_ONLY),
"SHA256" => Ok(&digest::SHA256),
"SHA384" => Ok(&digest::SHA384),
"SHA512" => Ok(&digest::SHA512),
"SHA512_256" => Ok(&digest::SHA512_256),
_ => Err(D::Error::custom("unknown hash algorithm")),
}
}
#[cfg(test)]
mod test {
use super::*;
use ring::digest::{
SHA1_FOR_LEGACY_USE_ONLY as sha1, SHA256 as sha256, SHA384 as sha384, SHA512 as sha512,
SHA512_256 as sha512_256,
};
static SHA1: &Algorithm = &sha1;
static SHA256: &Algorithm = &sha256;
static SHA384: &Algorithm = &sha384;
static SHA512: &Algorithm = &sha512;
static SHA512_256: &Algorithm = &sha512_256;
#[test]
fn test_serialize_known_algorithms() {
extern crate serde_json;
for alg in &[SHA1, SHA256, SHA384, SHA512, SHA512_256] {
let mut serializer = serde_json::Serializer::with_formatter(
vec![],
serde_json::ser::PrettyFormatter::new(),
);
serialize(alg, &mut serializer).unwrap_or_else(|_| panic!("{:?}", alg));
let alg_ = deserialize(&mut serde_json::Deserializer::from_slice(
&serializer.into_inner()[..],
))
.unwrap_or_else(|_| panic!("{:?}", alg));
assert_eq!(*alg, alg_);
}
}
#[test]
#[should_panic(expected = "unknown hash algorithm")]
fn test_serialize_unknown_algorithm() {
extern crate serde_json;
{
let alg_str = "\"BLAKE2b\"";
let mut deserializer = serde_json::Deserializer::from_str(alg_str);
let _ = deserialize(&mut deserializer)
.unwrap_or_else(|_| panic!("unknown hash algorithm {:?}", alg_str));
}
}
}
}
impl<T: PartialEq> PartialEq for Proof<T> {
fn eq(&self, other: &Proof<T>) -> bool {
self.root_hash == other.root_hash && self.lemma == other.lemma && self.value == other.value
}
}
impl<T: Eq> Eq for Proof<T> {}
impl<T: Ord> PartialOrd for Proof<T> {
fn partial_cmp(&self, other: &Proof<T>) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T: Ord> Ord for Proof<T> {
fn cmp(&self, other: &Proof<T>) -> Ordering {
self.root_hash
.cmp(&other.root_hash)
.then(self.value.cmp(&other.value))
.then_with(|| self.lemma.cmp(&other.lemma))
}
}
impl<T: Hash> Hash for Proof<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.root_hash.hash(state);
self.lemma.hash(state);
self.value.hash(state);
}
}
impl<T> Proof<T> {
/// Constructs a new `Proof`
pub fn new(algorithm: &'static Algorithm, root_hash: Vec<u8>, lemma: Lemma, value: T) -> Self {
Proof {
algorithm,
root_hash,
lemma,
value,
}
}
/// Checks whether this inclusion proof is well-formed,
/// and whether its root hash matches the given `root_hash`.
pub fn validate(&self, root_hash: &[u8]) -> bool {
if self.root_hash != root_hash || self.lemma.node_hash != root_hash {
return false;
}
self.lemma.validate(self.algorithm)
}
/// Returns the index of this proof's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the proof is malformed. Call `validate` first.
pub fn index(&self, count: usize) -> usize {
self.lemma.index(count)
}
}
/// A `Lemma` holds the hash of a node, the hash of its sibling node,
/// and a sub lemma, whose `node_hash`, when combined with this `sibling_hash`
/// must be equal to this `node_hash`.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Lemma {
pub node_hash: Vec<u8>,
pub sibling_hash: Option<Positioned<Vec<u8>>>,
pub sub_lemma: Option<Box<Lemma>>,
}
impl Lemma {
/// Attempts to generate a proof that a value with hash `needle` is a
/// member of the given `tree`.
pub fn new<T>(tree: &Tree<T>, needle: &[u8]) -> Option<Lemma> {
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf { ref hash, .. } => Lemma::new_leaf_proof(hash, needle),
Tree::Node {
ref hash,
ref left,
ref right,
} => Lemma::new_tree_proof(hash, needle, left, right),
}
}
/// Attempts to generate a proof that the `idx`-th leaf is a member of
/// the given tree. The `count` must equal the number of leaves in the
/// `tree`. If `idx >= count`, `None` is returned. Otherwise it returns
/// the new `Lemma` and the `idx`-th value.
pub fn new_by_index<T>(tree: &Tree<T>, idx: usize, count: usize) -> Option<(Lemma, &T)> {
if idx >= count {
return None;
}
match *tree {
Tree::Empty { .. } => None,
Tree::Leaf { | ref hash,
ref value,
..
} => {
if count != 1 {
return None;
}
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: None,
sub_lemma: None,
};
Some((lemma, value))
}
Tree::Node {
ref hash,
ref left,
ref right,
} => {
let left_count = count.next_power_of_two() / 2;
let (sub_lem_val, sibling_hash);
if idx < left_count {
sub_lem_val = Lemma::new_by_index(left, idx, left_count);
sibling_hash = Positioned::Right(right.hash().clone());
} else {
sub_lem_val = Lemma::new_by_index(right, idx - left_count, count - left_count);
sibling_hash = Positioned::Left(left.hash().clone());
}
sub_lem_val.map(|(sub_lemma, value)| {
let lemma = Lemma {
node_hash: hash.clone(),
sibling_hash: Some(sibling_hash),
sub_lemma: Some(Box::new(sub_lemma)),
};
(lemma, value)
})
}
}
}
/// Returns the index of this lemma's value, given the total number of items in the tree.
///
/// # Panics
///
/// Panics if the lemma is malformed. Call `validate_lemma` first.
pub fn index(&self, count: usize) -> usize {
let left_count = count.next_power_of_two() / 2;
match (self.sub_lemma.as_ref(), self.sibling_hash.as_ref()) {
(None, None) => 0,
(Some(l), Some(&Positioned::Left(_))) => left_count + l.index(count - left_count),
(Some(l), Some(&Positioned::Right(_))) => l.index(left_count),
(None, Some(_)) | (Some(_), None) => panic!("malformed lemma"),
}
}
fn new_leaf_proof(hash: &[u8], needle: &[u8]) -> Option<Lemma> {
if *hash == *needle {
Some(Lemma {
node_hash: hash.into(),
sibling_hash: None,
sub_lemma: None,
})
} else {
None
}
}
fn new_tree_proof<T>(
hash: &[u8],
needle: &[u8],
left: &Tree<T>,
right: &Tree<T>,
) -> Option<Lemma> {
Lemma::new(left, needle)
.map(|lemma| {
let right_hash = right.hash().clone();
let sub_lemma = Some(Positioned::Right(right_hash));
(lemma, sub_lemma)
})
.or_else(|| {
let sub_lemma = Lemma::new(right, needle);
sub_lemma.map(|lemma| {
let left_hash = left.hash().clone();
let sub_lemma = Some(Positioned::Left(left_hash));
(lemma, sub_lemma)
})
})
.map(|(sub_lemma, sibling_hash)| Lemma {
node_hash: hash.into(),
sibling_hash,
sub_lemma: Some(Box::new(sub_lemma)),
})
}
fn validate(&self, algorithm: &'static Algorithm) -> bool {
match self.sub_lemma {
None => self.sibling_hash.is_none(),
Some(ref sub) => match self.sibling_hash {
None => false,
Some(Positioned::Left(ref hash)) => {
let combined = algorithm.hash_nodes(hash, &sub.node_hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
Some(Positioned::Right(ref hash)) => {
let combined = algorithm.hash_nodes(&sub.node_hash, hash);
let hashes_match = combined.as_ref() == self.node_hash.as_slice();
hashes_match && sub.validate(algorithm)
}
},
}
}
}
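#[cfg(test)]
mod index_sketch {
    // Illustrative sketch, not part of the original crate: a lemma with
    // neither sibling nor sub-lemma describes the only leaf of a single-leaf
    // tree, so `index` places it at position 0.
    use super::*;

    #[test]
    fn single_leaf_lemma_has_index_zero() {
        let lemma = Lemma {
            node_hash: vec![0u8; 32],
            sibling_hash: None,
            sub_lemma: None,
        };
        assert_eq!(lemma.index(1), 0);
    }
}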
/// Tags a value so that we know from which branch of a `Tree` (if any) it was found.
#[cfg_attr(feature = "serialization-serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Positioned<T> {
/// The value was found in the left branch
Left(T),
/// The value was found in the right branch
Right(T),
} | random_line_split |
|
main.rs | mod capture;
mod d3d;
mod displays;
mod hotkey;
mod media;
mod resolution;
mod video;
use std::{path::Path, time::Duration};
use clap::{App, Arg, SubCommand};
use hotkey::HotKey;
use windows::{
core::{Result, RuntimeName},
Foundation::Metadata::ApiInformation,
Graphics::{
Capture::{GraphicsCaptureItem, GraphicsCaptureSession},
SizeInt32,
},
Storage::{
CreationCollisionOption, FileAccessMode, StorageFolder, Streams::IRandomAccessStream,
},
Win32::{
Foundation::{HWND, MAX_PATH, PWSTR},
Graphics::Direct3D11::ID3D11Device,
Media::MediaFoundation::{MFStartup, MFSTARTUP_FULL},
Storage::FileSystem::GetFullPathNameW,
System::{
Diagnostics::Debug::{DebugBreak, IsDebuggerPresent},
Threading::GetCurrentProcessId,
WinRT::{RoInitialize, RO_INIT_MULTITHREADED},
},
UI::{
Input::KeyboardAndMouse::{MOD_CONTROL, MOD_SHIFT},
WindowsAndMessaging::{DispatchMessageW, GetMessageW, MSG, WM_HOTKEY},
},
},
};
use crate::{
capture::create_capture_item_for_monitor,
d3d::create_d3d_device,
displays::get_display_handle_from_index,
media::MF_VERSION,
resolution::Resolution,
video::{encoder_device::VideoEncoderDevice, encoding_session::VideoEncodingSession},
};
fn run(
display_index: usize,
output_path: &str,
bit_rate: u32,
frame_rate: u32,
resolution: Resolution,
encoder_index: usize,
verbose: bool,
wait_for_debugger: bool,
console_mode: bool,
) -> Result<()> {
unsafe {
RoInitialize(RO_INIT_MULTITHREADED)?;
}
unsafe { MFStartup(MF_VERSION, MFSTARTUP_FULL)? }
if wait_for_debugger {
let pid = unsafe { GetCurrentProcessId() };
println!("Waiting for a debugger to attach (PID: {})...", pid);
loop {
if unsafe { IsDebuggerPresent().into() } {
break;
}
std::thread::sleep(Duration::from_secs(1));
}
unsafe {
DebugBreak();
}
}
// Check to make sure Windows.Graphics.Capture is available |
if verbose {
println!(
"Using index \"{}\" and path \"{}\".",
display_index, output_path
);
}
// Get the display handle using the provided index
let display_handle = get_display_handle_from_index(display_index)
.expect("The provided display index was out of bounds!");
let item = create_capture_item_for_monitor(display_handle)?;
// Resolve encoding settings
let resolution = if let Some(resolution) = resolution.get_size() {
resolution
} else {
item.Size()?
};
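// Convert the CLI bit rate from Mbps (see the `bitRate` argument) to bits per second.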
let bit_rate = bit_rate * 1000000;
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
if verbose {
println!("Encoders ({}):", encoder_devices.len());
for encoder_device in &encoder_devices {
println!(" {}", encoder_device.display_name());
}
}
let encoder_device = if let Some(encoder_device) = encoder_devices.get(encoder_index) {
encoder_device
} else {
exit_with_error("Encoder index is out of bounds!");
};
if verbose {
println!("Using: {}", encoder_device.display_name());
}
// Create our file
let path = unsafe {
let mut output_path: Vec<u16> = output_path.encode_utf16().collect();
output_path.push(0);
let mut new_path = vec![0u16; MAX_PATH as usize];
let length = GetFullPathNameW(
PWSTR(output_path.as_mut_ptr()),
new_path.len() as u32,
PWSTR(new_path.as_mut_ptr()),
std::ptr::null_mut(),
);
new_path.resize(length as usize, 0);
String::from_utf16(&new_path).unwrap()
};
let path = Path::new(&path);
let parent_folder_path = path.parent().unwrap();
let parent_folder =
StorageFolder::GetFolderFromPathAsync(parent_folder_path.as_os_str().to_str().unwrap())?
.get()?;
let file_name = path.file_name().unwrap();
let file = parent_folder
.CreateFileAsync(
file_name.to_str().unwrap(),
CreationCollisionOption::ReplaceExisting,
)?
.get()?;
// Start the recording
{
let stream = file.OpenAsync(FileAccessMode::ReadWrite)?.get()?;
let d3d_device = create_d3d_device()?;
let mut session = create_encoding_session(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
)?;
if !console_mode {
let mut is_recording = false;
pump_messages(|| -> Result<bool> {
Ok(if !is_recording {
is_recording = true;
println!("Starting recording...");
session.start()?;
false
} else {
true
})
})?;
println!("Stopping recording...");
} else {
session.start()?;
pause();
}
session.stop()?;
}
Ok(())
}
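// Example invocation (illustrative; binary name and values are assumptions):
//
//   displayrecorder.exe -d 0 -b 18 -f 60 -r 1080p recording.mp4
//
// records display 0 at 18 Mbps and 60 fps, scaled to 1080p, into
// recording.mp4; outside of console mode, SHIFT+CTRL+R starts and then stops
// the recording.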
fn main() {
let mut app = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(
Arg::with_name("display")
.short("d")
.long("display")
.value_name("display index")
.help("The index of the display you'd like to record.")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("bitRate")
.short("b")
.long("bitRate")
.value_name("bit rate (in Mbps)")
.help("The bit rate you would like to encode at (in Mbps).")
.takes_value(true)
.default_value("18")
.required(false),
)
.arg(
Arg::with_name("frameRate")
.short("f")
.long("frameRate")
.value_name("frame rate")
.help("The frame rate you would like to encode at.")
.takes_value(true)
.default_value("60")
.required(false),
)
.arg(
Arg::with_name("resolution")
.short("r")
.long("resolution")
.value_name("resolution enum")
.help("The resolution you would like to encode at: native, 720p, 1080p, 2160p, or 4320p.")
.takes_value(true)
.default_value("native")
.required(false),
)
.arg(
Arg::with_name("encoder")
.short("e")
.long("encoder")
.value_name("encoder index")
.help("The index of the encoder you'd like to use to record (use enum-encoders command for a list of encoders and their indices).")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("verbose")
.short("v")
.help("Enables verbose (debug) output.")
.required(false),
)
.arg(
Arg::with_name("waitForDebugger")
.long("waitForDebugger")
.help("The program will wait for a debugger to attach before starting.")
.required(false),
)
.arg(
Arg::with_name("consoleMode")
.long("consoleMode")
.help("Recording immediately starts. End the recording through console input.")
.required(false),
)
.arg(
Arg::with_name("OUTPUT FILE")
.help("The output file that will contain the recording.")
.default_value("recording.mp4")
.required(false),
)
.subcommand(
SubCommand::with_name("enum-encoders")
.about("Lists the available hardware H264 encoders.")
);
// Handle /?
let args: Vec<_> = std::env::args().collect();
if args.contains(&"/?".to_owned()) {
app.print_help().unwrap();
std::process::exit(0);
}
let matches = app.get_matches();
if let Some(name) = matches.subcommand_name() {
if name == "enum-encoders" {
enum_encoders().unwrap();
return;
}
}
let monitor_index: usize = matches
.value_of("display")
.unwrap()
.parse()
.expect("Invalid diplay index value!");
let output_path = matches.value_of("OUTPUT FILE").unwrap();
let verbose = matches.is_present("verbose");
let wait_for_debugger = matches.is_present("waitForDebugger");
let console_mode = matches.is_present("consoleMode");
let bit_rate: u32 = matches
.value_of("bitRate")
.unwrap()
.parse()
.expect("Invalid bit rate value!");
let frame_rate: u32 = matches
.value_of("frameRate")
.unwrap()
.parse()
.expect("Invalid frame rate value!");
let resolution: Resolution = matches
.value_of("resolution")
.unwrap()
.parse()
.expect("Invalid resolution value! Expecting: native, 720p, 1080p, 2160p, or 4320p.");
let encoder_index: usize = matches
.value_of("encoder")
.unwrap()
.parse()
.expect("Invalid encoder index value!");
// Validate some of the params
if !validate_path(output_path) {
exit_with_error("Invalid path specified!");
}
let result = run(
monitor_index,
&output_path,
bit_rate,
frame_rate,
resolution,
encoder_index,
verbose | wait_for_debugger,
wait_for_debugger,
console_mode,
);
// We do this for nicer HRESULT printing when errors occur.
if let Err(error) = result {
error.code().unwrap();
}
}
fn pause() {
println!("Press ENTER to stop recording...");
std::io::Read::read(&mut std::io::stdin(), &mut [0]).unwrap();
}
fn enum_encoders() -> Result<()> {
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
println!("Encoders ({}):", encoder_devices.len());
for (i, encoder_device) in encoder_devices.iter().enumerate() {
println!(" {} - {}", i, encoder_device.display_name());
}
Ok(())
}
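// Sample output (illustrative; encoder names vary by GPU and driver):
//
//   Encoders (1):
//     0 - <vendor> H264 Encoder MFT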
fn create_encoding_session(
d3d_device: ID3D11Device,
item: GraphicsCaptureItem,
encoder_device: &VideoEncoderDevice,
resolution: SizeInt32,
bit_rate: u32,
frame_rate: u32,
stream: IRandomAccessStream,
) -> Result<VideoEncodingSession> {
let result = VideoEncodingSession::new(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
);
if result.is_err() {
println!("Error during encoder setup, try another set of encoding settings.");
}
result
}
fn validate_path<P: AsRef<Path>>(path: P) -> bool {
let path = path.as_ref();
let mut valid = true;
if let Some(extension) = path.extension() {
if extension != "mp4" {
valid = false;
}
} else {
valid = false;
}
valid
}
fn exit_with_error(message: &str) -> ! {
println!("{}", message);
std::process::exit(1);
}
fn win32_programmatic_capture_supported() -> Result<bool> {
ApiInformation::IsApiContractPresentByMajor("Windows.Foundation.UniversalApiContract", 8)
}
fn required_capture_features_supported() -> Result<bool> {
let result = ApiInformation::IsTypePresent(GraphicsCaptureSession::NAME)? && // Windows.Graphics.Capture is present
GraphicsCaptureSession::IsSupported()? && // The CaptureService is available
win32_programmatic_capture_supported()?;
Ok(result)
}
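// Added note (assumption, inferred from the error message in `run`): major
// version 8 of "Windows.Foundation.UniversalApiContract" corresponds to
// Windows 10 Version 1903 (build 18362), which this tool treats as the
// minimum for programmatic Win32 screen capture.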
fn pump_messages<F: FnMut() -> Result<bool>>(mut hot_key_callback: F) -> Result<()> {
let _hot_key = HotKey::new(MOD_SHIFT | MOD_CONTROL, 0x52 /* R */)?;
println!("Press SHIFT+CTRL+R to start/stop the recording...");
unsafe {
let mut message = MSG::default();
while GetMessageW(&mut message, HWND(0), 0, 0).into() {
if message.message == WM_HOTKEY {
if hot_key_callback()? {
break;
}
}
DispatchMessageW(&mut message);
}
}
Ok(())
}
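// Usage sketch (illustrative, mirrors the call in `run`): the callback fires
// once per SHIFT+CTRL+R press; returning Ok(false) keeps pumping messages,
// returning Ok(true) breaks out of the loop.
//
//   let mut is_recording = false;
//   pump_messages(|| {
//       let stop = is_recording; // first press starts, second press stops
//       is_recording = true;
//       Ok(stop)
//   })?;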
#[cfg(test)]
mod tests {
use crate::validate_path;
#[test]
fn path_parsing_test() {
assert!(validate_path("something.mp4"));
assert!(validate_path("somedir/something.mp4"));
assert!(validate_path("somedir\\something.mp4"));
assert!(validate_path("../something.mp4"));
assert!(!validate_path("."));
assert!(!validate_path("*"));
assert!(!validate_path("something"));
assert!(!validate_path(".mp4"));
assert!(!validate_path("mp4"));
assert!(!validate_path("something.avi"));
}
} | if !required_capture_features_supported()? {
exit_with_error("The required screen capture features are not supported on this device for this release of Windows!\nPlease update your operating system (minimum: Windows 10 Version 1903, Build 18362).");
} | random_line_split |
main.rs | mod capture;
mod d3d;
mod displays;
mod hotkey;
mod media;
mod resolution;
mod video;
use std::{path::Path, time::Duration};
use clap::{App, Arg, SubCommand};
use hotkey::HotKey;
use windows::{
core::{Result, RuntimeName},
Foundation::Metadata::ApiInformation,
Graphics::{
Capture::{GraphicsCaptureItem, GraphicsCaptureSession},
SizeInt32,
},
Storage::{
CreationCollisionOption, FileAccessMode, StorageFolder, Streams::IRandomAccessStream,
},
Win32::{
Foundation::{HWND, MAX_PATH, PWSTR},
Graphics::Direct3D11::ID3D11Device,
Media::MediaFoundation::{MFStartup, MFSTARTUP_FULL},
Storage::FileSystem::GetFullPathNameW,
System::{
Diagnostics::Debug::{DebugBreak, IsDebuggerPresent},
Threading::GetCurrentProcessId,
WinRT::{RoInitialize, RO_INIT_MULTITHREADED},
},
UI::{
Input::KeyboardAndMouse::{MOD_CONTROL, MOD_SHIFT},
WindowsAndMessaging::{DispatchMessageW, GetMessageW, MSG, WM_HOTKEY},
},
},
};
use crate::{
capture::create_capture_item_for_monitor,
d3d::create_d3d_device,
displays::get_display_handle_from_index,
media::MF_VERSION,
resolution::Resolution,
video::{encoder_device::VideoEncoderDevice, encoding_session::VideoEncodingSession},
};
fn run(
display_index: usize,
output_path: &str,
bit_rate: u32,
frame_rate: u32,
resolution: Resolution,
encoder_index: usize,
verbose: bool,
wait_for_debugger: bool,
console_mode: bool,
) -> Result<()> {
unsafe {
RoInitialize(RO_INIT_MULTITHREADED)?;
}
unsafe { MFStartup(MF_VERSION, MFSTARTUP_FULL)? }
if wait_for_debugger {
let pid = unsafe { GetCurrentProcessId() };
println!("Waiting for a debugger to attach (PID: {})...", pid);
loop {
if unsafe { IsDebuggerPresent().into() } {
break;
}
std::thread::sleep(Duration::from_secs(1));
}
unsafe {
DebugBreak();
}
}
// Check to make sure Windows.Graphics.Capture is available
if !required_capture_features_supported()? {
exit_with_error("The required screen capture features are not supported on this device for this release of Windows!\nPlease update your operating system (minimum: Windows 10 Version 1903, Build 18362).");
}
if verbose {
println!(
"Using index \"{}\" and path \"{}\".",
display_index, output_path
);
}
// Get the display handle using the provided index
let display_handle = get_display_handle_from_index(display_index)
.expect("The provided display index was out of bounds!");
let item = create_capture_item_for_monitor(display_handle)?;
// Resolve encoding settings
let resolution = if let Some(resolution) = resolution.get_size() {
resolution
} else {
item.Size()?
};
let bit_rate = bit_rate * 1000000;
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
if verbose {
println!("Encoders ({}):", encoder_devices.len());
for encoder_device in &encoder_devices {
println!(" {}", encoder_device.display_name());
}
}
let encoder_device = if let Some(encoder_device) = encoder_devices.get(encoder_index) {
encoder_device
} else {
exit_with_error("Encoder index is out of bounds!");
};
if verbose {
println!("Using: {}", encoder_device.display_name());
}
// Create our file
let path = unsafe {
let mut output_path: Vec<u16> = output_path.encode_utf16().collect();
output_path.push(0);
let mut new_path = vec![0u16; MAX_PATH as usize];
let length = GetFullPathNameW(
PWSTR(output_path.as_mut_ptr()),
new_path.len() as u32,
PWSTR(new_path.as_mut_ptr()),
std::ptr::null_mut(),
);
new_path.resize(length as usize, 0);
String::from_utf16(&new_path).unwrap()
};
let path = Path::new(&path);
let parent_folder_path = path.parent().unwrap();
let parent_folder =
StorageFolder::GetFolderFromPathAsync(parent_folder_path.as_os_str().to_str().unwrap())?
.get()?;
let file_name = path.file_name().unwrap();
let file = parent_folder
.CreateFileAsync(
file_name.to_str().unwrap(),
CreationCollisionOption::ReplaceExisting,
)?
.get()?;
// Start the recording
{
let stream = file.OpenAsync(FileAccessMode::ReadWrite)?.get()?;
let d3d_device = create_d3d_device()?;
let mut session = create_encoding_session(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
)?;
if !console_mode {
let mut is_recording = false;
pump_messages(|| -> Result<bool> {
Ok(if !is_recording {
is_recording = true;
println!("Starting recording...");
session.start()?;
false
} else {
true
})
})?;
println!("Stopping recording...");
} else {
session.start()?;
pause();
}
session.stop()?;
}
Ok(())
}
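// --- Editor's sketch (std only) of the UTF-16 round trip used in run() above
// for GetFullPathNameW: encode a &str into a NUL-terminated wide buffer, then
// decode a wide buffer back. Function names are illustrative, not part of
// this crate.
#[allow(dead_code)]
fn to_wide(s: &str) -> Vec<u16> {
    let mut buf: Vec<u16> = s.encode_utf16().collect();
    buf.push(0); // Win32 wide-string APIs expect a trailing NUL
    buf
}
#[allow(dead_code)]
fn from_wide(buf: &[u16]) -> String {
    // Stop at the first NUL, mirroring the resize-to-length step above.
    let len = buf.iter().position(|&c| c == 0).unwrap_or(buf.len());
    String::from_utf16(&buf[..len]).expect("valid UTF-16")
}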
fn main() {
let mut app = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(
Arg::with_name("display")
.short("d")
.long("display")
.value_name("display index")
.help("The index of the display you'd like to record.")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("bitRate")
.short("b")
.long("bitRate")
.value_name("bit rate (in Mbps)")
.help("The bit rate you would like to encode at (in Mbps).")
.takes_value(true)
.default_value("18")
.required(false),
)
.arg(
Arg::with_name("frameRate")
.short("f")
.long("frameRate")
.value_name("frame rate")
.help("The frame rate you would like to encode at.")
.takes_value(true)
.default_value("60")
.required(false),
)
.arg(
Arg::with_name("resolution")
.short("r")
.long("resolution")
.value_name("resolution enum")
.help("The resolution you would like to encode at: native, 720p, 1080p, 2160p, or 4320p.")
.takes_value(true)
.default_value("native")
.required(false),
)
.arg(
Arg::with_name("encoder")
.short("e")
.long("encoder")
.value_name("encoder index")
.help("The index of the encoder you'd like to use to record (use enum-encoders command for a list of encoders and their indices).")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("verbose")
.short("v")
.help("Enables verbose (debug) output.")
.required(false),
)
.arg(
Arg::with_name("waitForDebugger")
.long("waitForDebugger")
.help("The program will wait for a debugger to attach before starting.")
.required(false),
)
.arg(
Arg::with_name("consoleMode")
.long("consoleMode")
.help("Recording immediately starts. End the recording through console input.")
.required(false),
)
.arg(
Arg::with_name("OUTPUT FILE")
.help("The output file that will contain the recording.")
.default_value("recording.mp4")
.required(false),
)
.subcommand(
SubCommand::with_name("enum-encoders")
.about("Lists the available hardware H264 encoders.")
);
// Handle /?
let args: Vec<_> = std::env::args().collect();
if args.contains(&"/?".to_owned()) {
app.print_help().unwrap();
std::process::exit(0);
}
let matches = app.get_matches();
if let Some(name) = matches.subcommand_name() {
if name == "enum-encoders" {
enum_encoders().unwrap();
return;
}
}
let monitor_index: usize = matches
.value_of("display")
.unwrap()
.parse()
.expect("Invalid diplay index value!");
let output_path = matches.value_of("OUTPUT FILE").unwrap();
let verbose = matches.is_present("verbose");
let wait_for_debugger = matches.is_present("waitForDebugger");
let console_mode = matches.is_present("consoleMode");
let bit_rate: u32 = matches
.value_of("bitRate")
.unwrap()
.parse()
.expect("Invalid bit rate value!");
let frame_rate: u32 = matches
.value_of("frameRate")
.unwrap()
.parse()
.expect("Invalid frame rate value!");
let resolution: Resolution = matches
.value_of("resolution")
.unwrap()
.parse()
.expect("Invalid resolution value! Expecting: native, 720p, 1080p, 2160p, or 4320p.");
let encoder_index: usize = matches
.value_of("encoder")
.unwrap()
.parse()
.expect("Invalid encoder index value!");
// Validate some of the params
if !validate_path(output_path) {
exit_with_error("Invalid path specified!");
}
let result = run(
monitor_index,
&output_path,
bit_rate,
frame_rate,
resolution,
encoder_index,
verbose | wait_for_debugger,
wait_for_debugger,
console_mode,
);
// We do this for nicer HRESULT printing when errors occur.
if let Err(error) = result {
error.code().unwrap();
}
}
fn pause() {
println!("Press ENTER to stop recording...");
std::io::Read::read(&mut std::io::stdin(), &mut [0]).unwrap();
}
fn enum_encoders() -> Result<()> {
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
println!("Encoders ({}):", encoder_devices.len());
for (i, encoder_device) in encoder_devices.iter().enumerate() {
println!(" {} - {}", i, encoder_device.display_name());
}
Ok(())
}
fn create_encoding_session(
d3d_device: ID3D11Device,
item: GraphicsCaptureItem,
encoder_device: &VideoEncoderDevice,
resolution: SizeInt32,
bit_rate: u32,
frame_rate: u32,
stream: IRandomAccessStream,
) -> Result<VideoEncodingSession> {
let result = VideoEncodingSession::new(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
);
if result.is_err() {
println!("Error during encoder setup, try another set of encoding settings.");
}
result
}
fn validate_path<P: AsRef<Path>>(path: P) -> bool {
let path = path.as_ref();
let mut valid = true;
if let Some(extension) = path.extension() {
if extension != "mp4" {
valid = false;
}
} else {
valid = false;
}
valid
}
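// Note: the extension check above compares case-sensitively, so "clip.MP4"
// would be rejected. A case-insensitive variant would look like this sketch
// (std only, illustrative name):
#[allow(dead_code)]
fn has_mp4_extension(path: &Path) -> bool {
    path.extension()
        .and_then(|ext| ext.to_str())
        .map(|ext| ext.eq_ignore_ascii_case("mp4"))
        .unwrap_or(false)
}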
fn exit_with_error(message: &str) -> ! {
println!("{}", message);
std::process::exit(1);
}
fn win32_programmatic_capture_supported() -> Result<bool> |
fn required_capture_features_supported() -> Result<bool> {
let result = ApiInformation::IsTypePresent(GraphicsCaptureSession::NAME)? && // Windows.Graphics.Capture is present
GraphicsCaptureSession::IsSupported()? && // The CaptureService is available
win32_programmatic_capture_supported()?;
Ok(result)
}
fn pump_messages<F: FnMut() -> Result<bool>>(mut hot_key_callback: F) -> Result<()> {
let _hot_key = HotKey::new(MOD_SHIFT | MOD_CONTROL, 0x52 /* R */)?;
println!("Press SHIFT+CTRL+R to start/stop the recording...");
unsafe {
let mut message = MSG::default();
while GetMessageW(&mut message, HWND(0), 0, 0).into() {
if message.message == WM_HOTKEY {
if hot_key_callback()? {
break;
}
}
DispatchMessageW(&mut message);
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use crate::validate_path;
#[test]
fn path_parsing_test() {
assert!(validate_path("something.mp4"));
assert!(validate_path("somedir/something.mp4"));
assert!(validate_path("somedir\\something.mp4"));
assert!(validate_path("../something.mp4"));
assert!(!validate_path("."));
assert!(!validate_path("*"));
assert!(!validate_path("something"));
assert!(!validate_path(".mp4"));
assert!(!validate_path("mp4"));
assert!(!validate_path("something.avi"));
}
}
| {
ApiInformation::IsApiContractPresentByMajor("Windows.Foundation.UniversalApiContract", 8)
} | identifier_body |
main.rs | mod capture;
mod d3d;
mod displays;
mod hotkey;
mod media;
mod resolution;
mod video;
use std::{path::Path, time::Duration};
use clap::{App, Arg, SubCommand};
use hotkey::HotKey;
use windows::{
core::{Result, RuntimeName},
Foundation::Metadata::ApiInformation,
Graphics::{
Capture::{GraphicsCaptureItem, GraphicsCaptureSession},
SizeInt32,
},
Storage::{
CreationCollisionOption, FileAccessMode, StorageFolder, Streams::IRandomAccessStream,
},
Win32::{
Foundation::{HWND, MAX_PATH, PWSTR},
Graphics::Direct3D11::ID3D11Device,
Media::MediaFoundation::{MFStartup, MFSTARTUP_FULL},
Storage::FileSystem::GetFullPathNameW,
System::{
Diagnostics::Debug::{DebugBreak, IsDebuggerPresent},
Threading::GetCurrentProcessId,
WinRT::{RoInitialize, RO_INIT_MULTITHREADED},
},
UI::{
Input::KeyboardAndMouse::{MOD_CONTROL, MOD_SHIFT},
WindowsAndMessaging::{DispatchMessageW, GetMessageW, MSG, WM_HOTKEY},
},
},
};
use crate::{
capture::create_capture_item_for_monitor,
d3d::create_d3d_device,
displays::get_display_handle_from_index,
media::MF_VERSION,
resolution::Resolution,
video::{encoder_device::VideoEncoderDevice, encoding_session::VideoEncodingSession},
};
fn run(
display_index: usize,
output_path: &str,
bit_rate: u32,
frame_rate: u32,
resolution: Resolution,
encoder_index: usize,
verbose: bool,
wait_for_debugger: bool,
console_mode: bool,
) -> Result<()> {
unsafe {
RoInitialize(RO_INIT_MULTITHREADED)?;
}
unsafe { MFStartup(MF_VERSION, MFSTARTUP_FULL)? }
if wait_for_debugger {
let pid = unsafe { GetCurrentProcessId() };
println!("Waiting for a debugger to attach (PID: {})...", pid);
loop {
if unsafe { IsDebuggerPresent().into() } {
break;
}
std::thread::sleep(Duration::from_secs(1));
}
unsafe {
DebugBreak();
}
}
// Check to make sure Windows.Graphics.Capture is available
if!required_capture_features_supported()? |
if verbose {
println!(
"Using index \"{}\" and path \"{}\".",
display_index, output_path
);
}
// Get the display handle using the provided index
let display_handle = get_display_handle_from_index(display_index)
.expect("The provided display index was out of bounds!");
let item = create_capture_item_for_monitor(display_handle)?;
// Resolve encoding settings
let resolution = if let Some(resolution) = resolution.get_size() {
resolution
} else {
item.Size()?
};
let bit_rate = bit_rate * 1000000;
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
if verbose {
println!("Encoders ({}):", encoder_devices.len());
for encoder_device in &encoder_devices {
println!(" {}", encoder_device.display_name());
}
}
let encoder_device = if let Some(encoder_device) = encoder_devices.get(encoder_index) {
encoder_device
} else {
exit_with_error("Encoder index is out of bounds!");
};
if verbose {
println!("Using: {}", encoder_device.display_name());
}
// Create our file
let path = unsafe {
let mut output_path: Vec<u16> = output_path.encode_utf16().collect();
output_path.push(0);
let mut new_path = vec![0u16; MAX_PATH as usize];
let length = GetFullPathNameW(
PWSTR(output_path.as_mut_ptr()),
new_path.len() as u32,
PWSTR(new_path.as_mut_ptr()),
std::ptr::null_mut(),
);
new_path.resize(length as usize, 0);
String::from_utf16(&new_path).unwrap()
};
let path = Path::new(&path);
let parent_folder_path = path.parent().unwrap();
let parent_folder =
StorageFolder::GetFolderFromPathAsync(parent_folder_path.as_os_str().to_str().unwrap())?
.get()?;
let file_name = path.file_name().unwrap();
let file = parent_folder
.CreateFileAsync(
file_name.to_str().unwrap(),
CreationCollisionOption::ReplaceExisting,
)?
.get()?;
// Start the recording
{
let stream = file.OpenAsync(FileAccessMode::ReadWrite)?.get()?;
let d3d_device = create_d3d_device()?;
let mut session = create_encoding_session(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
)?;
if !console_mode {
let mut is_recording = false;
pump_messages(|| -> Result<bool> {
Ok(if !is_recording {
is_recording = true;
println!("Starting recording...");
session.start()?;
false
} else {
true
})
})?;
println!("Stopping recording...");
} else {
session.start()?;
pause();
}
session.stop()?;
}
Ok(())
}
fn main() {
let mut app = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(
Arg::with_name("display")
.short("d")
.long("display")
.value_name("display index")
.help("The index of the display you'd like to record.")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("bitRate")
.short("b")
.long("bitRate")
.value_name("bit rate (in Mbps)")
.help("The bit rate you would like to encode at (in Mbps).")
.takes_value(true)
.default_value("18")
.required(false),
)
.arg(
Arg::with_name("frameRate")
.short("f")
.long("frameRate")
.value_name("frame rate")
.help("The frame rate you would like to encode at.")
.takes_value(true)
.default_value("60")
.required(false),
)
.arg(
Arg::with_name("resolution")
.short("r")
.long("resolution")
.value_name("resolution enum")
.help("The resolution you would like to encode at: native, 720p, 1080p, 2160p, or 4320p.")
.takes_value(true)
.default_value("native")
.required(false),
)
.arg(
Arg::with_name("encoder")
.short("e")
.long("encoder")
.value_name("encoder index")
.help("The index of the encoder you'd like to use to record (use enum-encoders command for a list of encoders and their indices).")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("verbose")
.short("v")
.help("Enables verbose (debug) output.")
.required(false),
)
.arg(
Arg::with_name("waitForDebugger")
.long("waitForDebugger")
.help("The program will wait for a debugger to attach before starting.")
.required(false),
)
.arg(
Arg::with_name("consoleMode")
.long("consoleMode")
.help("Recording immediately starts. End the recording through console input.")
.required(false),
)
.arg(
Arg::with_name("OUTPUT FILE")
.help("The output file that will contain the recording.")
.default_value("recording.mp4")
.required(false),
)
.subcommand(
SubCommand::with_name("enum-encoders")
.about("Lists the available hardware H264 encoders.")
);
// Handle /?
let args: Vec<_> = std::env::args().collect();
if args.contains(&"/?".to_owned()) {
app.print_help().unwrap();
std::process::exit(0);
}
let matches = app.get_matches();
if let Some(name) = matches.subcommand_name() {
if name == "enum-encoders" {
enum_encoders().unwrap();
return;
}
}
let monitor_index: usize = matches
.value_of("display")
.unwrap()
.parse()
.expect("Invalid diplay index value!");
let output_path = matches.value_of("OUTPUT FILE").unwrap();
let verbose = matches.is_present("verbose");
let wait_for_debugger = matches.is_present("waitForDebugger");
let console_mode = matches.is_present("consoleMode");
let bit_rate: u32 = matches
.value_of("bitRate")
.unwrap()
.parse()
.expect("Invalid bit rate value!");
let frame_rate: u32 = matches
.value_of("frameRate")
.unwrap()
.parse()
.expect("Invalid frame rate value!");
let resolution: Resolution = matches
.value_of("resolution")
.unwrap()
.parse()
.expect("Invalid resolution value! Expecting: native, 720p, 1080p, 2160p, or 4320p.");
let encoder_index: usize = matches
.value_of("encoder")
.unwrap()
.parse()
.expect("Invalid encoder index value!");
// Validate some of the params
if !validate_path(output_path) {
exit_with_error("Invalid path specified!");
}
let result = run(
monitor_index,
&output_path,
bit_rate,
frame_rate,
resolution,
encoder_index,
verbose | wait_for_debugger,
wait_for_debugger,
console_mode,
);
// We do this for nicer HRESULT printing when errors occur.
if let Err(error) = result {
error.code().unwrap();
}
}
fn pause() {
println!("Press ENTER to stop recording...");
std::io::Read::read(&mut std::io::stdin(), &mut [0]).unwrap();
}
fn enum_encoders() -> Result<()> {
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
println!("Encoders ({}):", encoder_devices.len());
for (i, encoder_device) in encoder_devices.iter().enumerate() {
println!(" {} - {}", i, encoder_device.display_name());
}
Ok(())
}
fn create_encoding_session(
d3d_device: ID3D11Device,
item: GraphicsCaptureItem,
encoder_device: &VideoEncoderDevice,
resolution: SizeInt32,
bit_rate: u32,
frame_rate: u32,
stream: IRandomAccessStream,
) -> Result<VideoEncodingSession> {
let result = VideoEncodingSession::new(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
);
if result.is_err() {
println!("Error during encoder setup, try another set of encoding settings.");
}
result
}
fn validate_path<P: AsRef<Path>>(path: P) -> bool {
let path = path.as_ref();
let mut valid = true;
if let Some(extension) = path.extension() {
if extension != "mp4" {
valid = false;
}
} else {
valid = false;
}
valid
}
fn exit_with_error(message: &str) -> ! {
println!("{}", message);
std::process::exit(1);
}
fn win32_programmatic_capture_supported() -> Result<bool> {
ApiInformation::IsApiContractPresentByMajor("Windows.Foundation.UniversalApiContract", 8)
}
fn required_capture_features_supported() -> Result<bool> {
let result = ApiInformation::IsTypePresent(GraphicsCaptureSession::NAME)? && // Windows.Graphics.Capture is present
GraphicsCaptureSession::IsSupported()? && // The CaptureService is available
win32_programmatic_capture_supported()?;
Ok(result)
}
fn pump_messages<F: FnMut() -> Result<bool>>(mut hot_key_callback: F) -> Result<()> {
let _hot_key = HotKey::new(MOD_SHIFT | MOD_CONTROL, 0x52 /* R */)?;
println!("Press SHIFT+CTRL+R to start/stop the recording...");
unsafe {
let mut message = MSG::default();
while GetMessageW(&mut message, HWND(0), 0, 0).into() {
if message.message == WM_HOTKEY {
if hot_key_callback()? {
break;
}
}
DispatchMessageW(&mut message);
}
}
Ok(())
}
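// Usage sketch for pump_messages: the callback runs once per hotkey press and
// returns Ok(true) to leave the loop. The start/stop toggle in run() above is
// equivalent to:
//
// let mut recording = false;
// pump_messages(|| -> Result<bool> {
//     recording = !recording;
//     Ok(!recording) // first press starts (continue), second press exits
// })?;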
#[cfg(test)]
mod tests {
use crate::validate_path;
#[test]
fn path_parsing_test() {
assert!(validate_path("something.mp4"));
assert!(validate_path("somedir/something.mp4"));
assert!(validate_path("somedir\\something.mp4"));
assert!(validate_path("../something.mp4"));
assert!(!validate_path("."));
assert!(!validate_path("*"));
assert!(!validate_path("something"));
assert!(!validate_path(".mp4"));
assert!(!validate_path("mp4"));
assert!(!validate_path("something.avi"));
}
}
| {
exit_with_error("The required screen capture features are not supported on this device for this release of Windows!\nPlease update your operating system (minimum: Windows 10 Version 1903, Build 18362).");
} | conditional_block |
main.rs | mod capture;
mod d3d;
mod displays;
mod hotkey;
mod media;
mod resolution;
mod video;
use std::{path::Path, time::Duration};
use clap::{App, Arg, SubCommand};
use hotkey::HotKey;
use windows::{
core::{Result, RuntimeName},
Foundation::Metadata::ApiInformation,
Graphics::{
Capture::{GraphicsCaptureItem, GraphicsCaptureSession},
SizeInt32,
},
Storage::{
CreationCollisionOption, FileAccessMode, StorageFolder, Streams::IRandomAccessStream,
},
Win32::{
Foundation::{HWND, MAX_PATH, PWSTR},
Graphics::Direct3D11::ID3D11Device,
Media::MediaFoundation::{MFStartup, MFSTARTUP_FULL},
Storage::FileSystem::GetFullPathNameW,
System::{
Diagnostics::Debug::{DebugBreak, IsDebuggerPresent},
Threading::GetCurrentProcessId,
WinRT::{RoInitialize, RO_INIT_MULTITHREADED},
},
UI::{
Input::KeyboardAndMouse::{MOD_CONTROL, MOD_SHIFT},
WindowsAndMessaging::{DispatchMessageW, GetMessageW, MSG, WM_HOTKEY},
},
},
};
use crate::{
capture::create_capture_item_for_monitor,
d3d::create_d3d_device,
displays::get_display_handle_from_index,
media::MF_VERSION,
resolution::Resolution,
video::{encoder_device::VideoEncoderDevice, encoding_session::VideoEncodingSession},
};
fn | (
display_index: usize,
output_path: &str,
bit_rate: u32,
frame_rate: u32,
resolution: Resolution,
encoder_index: usize,
verbose: bool,
wait_for_debugger: bool,
console_mode: bool,
) -> Result<()> {
unsafe {
RoInitialize(RO_INIT_MULTITHREADED)?;
}
unsafe { MFStartup(MF_VERSION, MFSTARTUP_FULL)? }
if wait_for_debugger {
let pid = unsafe { GetCurrentProcessId() };
println!("Waiting for a debugger to attach (PID: {})...", pid);
loop {
if unsafe { IsDebuggerPresent().into() } {
break;
}
std::thread::sleep(Duration::from_secs(1));
}
unsafe {
DebugBreak();
}
}
// Check to make sure Windows.Graphics.Capture is available
if !required_capture_features_supported()? {
exit_with_error("The required screen capture features are not supported on this device for this release of Windows!\nPlease update your operating system (minimum: Windows 10 Version 1903, Build 18362).");
}
if verbose {
println!(
"Using index \"{}\" and path \"{}\".",
display_index, output_path
);
}
// Get the display handle using the provided index
let display_handle = get_display_handle_from_index(display_index)
.expect("The provided display index was out of bounds!");
let item = create_capture_item_for_monitor(display_handle)?;
// Resolve encoding settings
let resolution = if let Some(resolution) = resolution.get_size() {
resolution
} else {
item.Size()?
};
let bit_rate = bit_rate * 1000000;
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
if verbose {
println!("Encoders ({}):", encoder_devices.len());
for encoder_device in &encoder_devices {
println!(" {}", encoder_device.display_name());
}
}
let encoder_device = if let Some(encoder_device) = encoder_devices.get(encoder_index) {
encoder_device
} else {
exit_with_error("Encoder index is out of bounds!");
};
if verbose {
println!("Using: {}", encoder_device.display_name());
}
// Create our file
let path = unsafe {
let mut output_path: Vec<u16> = output_path.encode_utf16().collect();
output_path.push(0);
let mut new_path = vec![0u16; MAX_PATH as usize];
let length = GetFullPathNameW(
PWSTR(output_path.as_mut_ptr()),
new_path.len() as u32,
PWSTR(new_path.as_mut_ptr()),
std::ptr::null_mut(),
);
new_path.resize(length as usize, 0);
String::from_utf16(&new_path).unwrap()
};
let path = Path::new(&path);
let parent_folder_path = path.parent().unwrap();
let parent_folder =
StorageFolder::GetFolderFromPathAsync(parent_folder_path.as_os_str().to_str().unwrap())?
.get()?;
let file_name = path.file_name().unwrap();
let file = parent_folder
.CreateFileAsync(
file_name.to_str().unwrap(),
CreationCollisionOption::ReplaceExisting,
)?
.get()?;
// Start the recording
{
let stream = file.OpenAsync(FileAccessMode::ReadWrite)?.get()?;
let d3d_device = create_d3d_device()?;
let mut session = create_encoding_session(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
)?;
if !console_mode {
let mut is_recording = false;
pump_messages(|| -> Result<bool> {
Ok(if !is_recording {
is_recording = true;
println!("Starting recording...");
session.start()?;
false
} else {
true
})
})?;
println!("Stopping recording...");
} else {
session.start()?;
pause();
}
session.stop()?;
}
Ok(())
}
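// Unit note for run() above: the CLI takes the bit rate in Mbps while the
// encoder expects bits per second, hence bit_rate * 1000000 (the default
// "18" becomes 18_000_000 bps). A u32 keeps this safe up to roughly
// 4_294 Mbps before overflow.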
fn main() {
let mut app = App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author(env!("CARGO_PKG_AUTHORS"))
.about(env!("CARGO_PKG_DESCRIPTION"))
.arg(
Arg::with_name("display")
.short("d")
.long("display")
.value_name("display index")
.help("The index of the display you'd like to record.")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("bitRate")
.short("b")
.long("bitRate")
.value_name("bit rate (in Mbps)")
.help("The bit rate you would like to encode at (in Mbps).")
.takes_value(true)
.default_value("18")
.required(false),
)
.arg(
Arg::with_name("frameRate")
.short("f")
.long("frameRate")
.value_name("frame rate")
.help("The frame rate you would like to encode at.")
.takes_value(true)
.default_value("60")
.required(false),
)
.arg(
Arg::with_name("resolution")
.short("r")
.long("resolution")
.value_name("resolution enum")
.help("The resolution you would like to encode at: native, 720p, 1080p, 2160p, or 4320p.")
.takes_value(true)
.default_value("native")
.required(false),
)
.arg(
Arg::with_name("encoder")
.short("e")
.long("encoder")
.value_name("encoder index")
.help("The index of the encoder you'd like to use to record (use enum-encoders command for a list of encoders and their indices).")
.takes_value(true)
.default_value("0")
.required(false),
)
.arg(
Arg::with_name("verbose")
.short("v")
.help("Enables verbose (debug) output.")
.required(false),
)
.arg(
Arg::with_name("waitForDebugger")
.long("waitForDebugger")
.help("The program will wait for a debugger to attach before starting.")
.required(false),
)
.arg(
Arg::with_name("consoleMode")
.long("consoleMode")
.help("Recording immediately starts. End the recording through console input.")
.required(false),
)
.arg(
Arg::with_name("OUTPUT FILE")
.help("The output file that will contain the recording.")
.default_value("recording.mp4")
.required(false),
)
.subcommand(
SubCommand::with_name("enum-encoders")
.about("Lists the available hardware H264 encoders.")
);
// Handle /?
let args: Vec<_> = std::env::args().collect();
if args.contains(&"/?".to_owned()) {
app.print_help().unwrap();
std::process::exit(0);
}
let matches = app.get_matches();
if let Some(name) = matches.subcommand_name() {
if name == "enum-encoders" {
enum_encoders().unwrap();
return;
}
}
let monitor_index: usize = matches
.value_of("display")
.unwrap()
.parse()
.expect("Invalid diplay index value!");
let output_path = matches.value_of("OUTPUT FILE").unwrap();
let verbose = matches.is_present("verbose");
let wait_for_debugger = matches.is_present("waitForDebugger");
let console_mode = matches.is_present("consoleMode");
let bit_rate: u32 = matches
.value_of("bitRate")
.unwrap()
.parse()
.expect("Invalid bit rate value!");
let frame_rate: u32 = matches
.value_of("frameRate")
.unwrap()
.parse()
.expect("Invalid frame rate value!");
let resolution: Resolution = matches
.value_of("resolution")
.unwrap()
.parse()
.expect("Invalid resolution value! Expecting: native, 720p, 1080p, 2160p, or 4320p.");
let encoder_index: usize = matches
.value_of("encoder")
.unwrap()
.parse()
.expect("Invalid encoder index value!");
// Validate some of the params
if !validate_path(output_path) {
exit_with_error("Invalid path specified!");
}
let result = run(
monitor_index,
&output_path,
bit_rate,
frame_rate,
resolution,
encoder_index,
verbose | wait_for_debugger,
wait_for_debugger,
console_mode,
);
// We do this for nicer HRESULT printing when errors occur.
if let Err(error) = result {
error.code().unwrap();
}
}
fn pause() {
println!("Press ENTER to stop recording...");
std::io::Read::read(&mut std::io::stdin(), &mut [0]).unwrap();
}
fn enum_encoders() -> Result<()> {
let encoder_devices = VideoEncoderDevice::enumerate()?;
if encoder_devices.is_empty() {
exit_with_error("No hardware H264 encoders found!");
}
println!("Encoders ({}):", encoder_devices.len());
for (i, encoder_device) in encoder_devices.iter().enumerate() {
println!(" {} - {}", i, encoder_device.display_name());
}
Ok(())
}
fn create_encoding_session(
d3d_device: ID3D11Device,
item: GraphicsCaptureItem,
encoder_device: &VideoEncoderDevice,
resolution: SizeInt32,
bit_rate: u32,
frame_rate: u32,
stream: IRandomAccessStream,
) -> Result<VideoEncodingSession> {
let result = VideoEncodingSession::new(
d3d_device,
item,
encoder_device,
resolution,
bit_rate,
frame_rate,
stream,
);
if result.is_err() {
println!("Error during encoder setup, try another set of encoding settings.");
}
result
}
fn validate_path<P: AsRef<Path>>(path: P) -> bool {
let path = path.as_ref();
let mut valid = true;
if let Some(extension) = path.extension() {
if extension != "mp4" {
valid = false;
}
} else {
valid = false;
}
valid
}
fn exit_with_error(message: &str) -> ! {
println!("{}", message);
std::process::exit(1);
}
fn win32_programmatic_capture_supported() -> Result<bool> {
ApiInformation::IsApiContractPresentByMajor("Windows.Foundation.UniversalApiContract", 8)
}
fn required_capture_features_supported() -> Result<bool> {
let result = ApiInformation::IsTypePresent(GraphicsCaptureSession::NAME)? && // Windows.Graphics.Capture is present
GraphicsCaptureSession::IsSupported()? && // The CaptureService is available
win32_programmatic_capture_supported()?;
Ok(result)
}
fn pump_messages<F: FnMut() -> Result<bool>>(mut hot_key_callback: F) -> Result<()> {
let _hot_key = HotKey::new(MOD_SHIFT | MOD_CONTROL, 0x52 /* R */)?;
println!("Press SHIFT+CTRL+R to start/stop the recording...");
unsafe {
let mut message = MSG::default();
while GetMessageW(&mut message, HWND(0), 0, 0).into() {
if message.message == WM_HOTKEY {
if hot_key_callback()? {
break;
}
}
DispatchMessageW(&mut message);
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use crate::validate_path;
#[test]
fn path_parsing_test() {
assert!(validate_path("something.mp4"));
assert!(validate_path("somedir/something.mp4"));
assert!(validate_path("somedir\\something.mp4"));
assert!(validate_path("../something.mp4"));
assert!(!validate_path("."));
assert!(!validate_path("*"));
assert!(!validate_path("something"));
assert!(!validate_path(".mp4"));
assert!(!validate_path("mp4"));
assert!(!validate_path("something.avi"));
}
}
| run | identifier_name |
receiver.rs | (&mut self) -> Pin<&mut T> {
Pin::new(&mut self.sock)
}
fn ack_interval(&mut self) -> Pin<&mut Interval> {
Pin::new(&mut self.ack_interval)
}
fn nak_interval(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.nak_interval)
}
fn release_delay(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.release_delay)
}
fn send_to_remote(&mut self, cx: &mut Context, packet: Packet) -> Result<(), Error> {
self.send_wrapper
.send(&mut self.sock, (packet, self.settings.remote), cx)
}
fn reset_timeout(&mut self) {
self.timeout_timer.reset(time::Instant::from_std(
Instant::now() + self.listen_timeout,
))
}
fn on_ack_event(&mut self, cx: &mut Context) -> Result<(), Error> {
// get largest inclusive received packet number
let ack_number = match self.loss_list.first() {
// There is an element in the loss list
Some(i) => i.seq_num,
// No elements, use lrsn, as it's already exclusive
None => self.lrsn,
};
// 2) If (a) the ACK number equals to the largest ACK number ever
// acknowledged by ACK2
if ack_number == self.lr_ack_acked.1 {
// stop (do not send this ACK).
return Ok(());
}
// make sure this ACK number is greater or equal to a one sent previously
if let Some(w) = self.ack_history_window.last() {
assert!(w.ack_number <= ack_number);
}
trace!(
"Sending ACK; ack_num={:?}, lr_ack_acked={:?}",
ack_number,
self.lr_ack_acked.1
);
if let Some(&AckHistoryEntry {
ack_number: last_ack_number,
timestamp: last_timestamp,
..
}) = self.ack_history_window.first()
{
// or, (b) it is equal to the ACK number in the
// last ACK
if last_ack_number == ack_number &&
// and the time interval between this two ACK packets is
// less than 2 RTTs,
(self.get_timestamp_now() - last_timestamp) < (self.rtt * 2)
{
// stop (do not send this ACK).
return Ok(());
}
}
// 3) Assign this ACK a unique increasing ACK sequence number.
let ack_seq_num = self.next_ack;
self.next_ack += 1;
// 4) Calculate the packet arrival speed according to the following
// algorithm:
let packet_recv_rate = {
if self.packet_history_window.len() < 16 {
0
} else {
// Calculate the median value of the last 16 packet arrival
// intervals (AI) using the values stored in PKT History Window.
let mut last_16: Vec<_> = self.packet_history_window
[self.packet_history_window.len() - 16..]
.iter()
.map(|&(_, ts)| ts)
.collect();
last_16.sort();
// the median timestamp
let ai = last_16[last_16.len() / 2];
// In these 16 values, remove those either greater than AI*8 or
// less than AI/8.
let filtered: Vec<i32> = last_16
.iter()
.filter(|&&n| n / 8 < ai && n > ai / 8)
.cloned()
.collect();
// If more than 8 values are left, calculate the
// average of the left values AI', and the packet arrival speed is
// 1/AI' (number of packets per second). Otherwise, return 0.
if filtered.len() > 8 {
(filtered.iter().fold(0i64, |sum, &val| sum + i64::from(val))
/ (filtered.len() as i64)) as i32
} else {
0
}
}
};
// 5) Calculate the estimated link capacity according to the following algorithm:
let est_link_cap = {
if self.packet_pair_window.len() < 16 {
0
} else {
// Calculate the median value of the last 16 packet pair
// intervals (PI) using the values in Packet Pair Window, and the
// link capacity is 1/PI (number of packets per second).
let pi = {
let mut last_16: Vec<_> = self.packet_pair_window
[self.packet_pair_window.len() - 16..]
.iter()
.map(|&(_, time)| time)
.collect();
last_16.sort();
last_16[last_16.len() / 2]
};
// Multiply by 1M because pi is in microseconds
// pi is in us/packet
(1.0e6 / (pi as f32)) as i32
}
};
// Pack the ACK packet with RTT, RTT Variance, and flow window size (available
// receiver buffer size).
let ack = self.make_control_packet(ControlTypes::Ack {
ack_seq_num,
ack_number,
rtt: Some(self.rtt),
rtt_variance: Some(self.rtt_variance),
buffer_available: None, // TODO: add this
packet_recv_rate: Some(packet_recv_rate),
est_link_cap: Some(est_link_cap),
});
// add it to the ack history
let now = self.get_timestamp_now();
self.ack_history_window.push(AckHistoryEntry {
ack_number,
ack_seq_num,
timestamp: now,
});
self.send_to_remote(cx, ack)?;
Ok(())
}
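// Editor's sketch of the arrival-speed estimate computed above, as a pure
// function over inter-arrival intervals in microseconds (illustrative name
// and types): take the median of the last 16 intervals, drop outliers
// outside (median/8, median*8), and average the survivors if more than 8
// remain. Like the code above, it returns the mean interval AI'; 1 / AI'
// would be the rate in packets per second.
#[allow(dead_code)]
fn mean_arrival_interval(intervals_us: &[i32]) -> i32 {
    if intervals_us.len() < 16 {
        return 0;
    }
    let mut last16 = intervals_us[intervals_us.len() - 16..].to_vec();
    last16.sort();
    let median = last16[last16.len() / 2];
    let kept: Vec<i32> = last16
        .iter()
        .copied()
        .filter(|&n| n / 8 < median && n > median / 8)
        .collect();
    if kept.len() > 8 {
        (kept.iter().map(|&v| i64::from(v)).sum::<i64>() / kept.len() as i64) as i32
    } else {
        0
    }
}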
fn on_nak_event(&mut self, cx: &mut Context) -> Result<(), Error> {
// reset NAK timer, rtt and variance are in us, so convert to ns
// NAK is used to trigger a negative acknowledgement (NAK). Its period
// is dynamically updated to 4 * RTT + RTTVar + SYN, where RTTVar is the
// variance of RTT samples.
let nak_interval_us = 4 * self.rtt as u64 + self.rtt_variance as u64 + 10_000;
self.nak_interval.reset(time::Instant::from_std(
Instant::now() + Duration::from_micros(nak_interval_us),
));
// Search the receiver's loss list, find out all those sequence numbers
// whose last feedback time is k*RTT before, where k is initialized as 2
// and increased by 1 each time the number is fed back. Compress
// (according to section 6.4) and send these numbers back to the sender
// in an NAK packet.
let now = self.get_timestamp_now();
// increment k and change feedback time, returning sequence numbers
let seq_nums = {
let mut ret = Vec::new();
let rtt = self.rtt;
for pak in self
.loss_list
.iter_mut()
.filter(|lle| lle.feedback_time < now - lle.k * rtt)
{
pak.k += 1;
pak.feedback_time = now;
ret.push(pak.seq_num);
}
ret
};
if seq_nums.is_empty() {
return Ok(());
}
// send the nak
self.send_nak(cx, seq_nums.into_iter())?;
Ok(())
}
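// Editor's sketch of the feedback backoff used above: a loss-list entry is
// re-NAKed only once k * RTT has passed since its last feedback, and each
// feedback increments k, so retries for the same sequence number spread out
// over time. Illustrative struct; timestamps and RTT in microseconds.
#[allow(dead_code)]
struct BackoffEntry {
    feedback_time: i32,
    k: i32,
}
#[allow(dead_code)]
fn due_for_nak(entry: &mut BackoffEntry, now: i32, rtt: i32) -> bool {
    if entry.feedback_time < now - entry.k * rtt {
        entry.k += 1;
        entry.feedback_time = now;
        true
    } else {
        false
    }
}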
// checks the timers
// if a timer was triggered, then an RSFutureTimeout will be returned
// if not, the socket is given back
fn check_timers(&mut self, cx: &mut Context) -> Result<(), Error> {
// see if we need to ACK or NAK
if let Poll::Ready(Some(_)) = self.ack_interval().poll_next(cx) {
self.on_ack_event(cx)?;
}
if let Poll::Ready(_) = self.nak_interval().poll(cx) {
self.on_nak_event(cx)?;
}
// no need to do anything specific
let _ = self.release_delay().poll(cx);
Ok(())
}
// handles a SRT control packet
fn handle_srt_control_packet(&mut self, pack: &SrtControlPacket) -> Result<(), Error> {
use self::SrtControlPacket::*;
match pack {
HandshakeRequest(_) | HandshakeResponse(_) => {
warn!("Received handshake SRT packet, HSv5 expected");
}
_ => unimplemented!(),
}
Ok(())
}
// handles an incoming packet
fn handle_packet(
&mut self,
cx: &mut Context,
packet: &Packet,
from: &SocketAddr,
) -> Result<(), Error> {
// We don't care about packets from elsewhere
if *from != self.settings.remote {
info!("Packet received from unknown address: {:?}", from);
return Ok(());
}
if self.settings.local_sockid != packet.dest_sockid() {
// packet isn't applicable
info!(
"Packet send to socket id ({}) that does not match local ({})",
packet.dest_sockid().0,
self.settings.local_sockid.0
);
return Ok(());
}
trace!("Received packet: {:?}", packet);
match packet {
Packet::Control(ctrl) => {
// handle the control packet
match &ctrl.control_type {
ControlTypes::Ack { .. } => warn!("Receiver received ACK packet, unusual"),
ControlTypes::Ack2(seq_num) => self.handle_ack2(*seq_num)?,
ControlTypes::DropRequest { .. } => unimplemented!(),
ControlTypes::Handshake(_) => {
if let Some(ret) = self.hs_returner.as_ref() {
if let Some(pack) = (*ret)(&packet) {
self.send_to_remote(cx, pack)?;
}
}
}
ControlTypes::KeepAlive => {} // TODO: actually reset EXP etc
ControlTypes::Nak { .. } => warn!("Receiver received NAK packet, unusual"),
ControlTypes::Shutdown => {
info!("Shutdown packet received, flushing receiver...");
self.shutdown_flag = true;
} // end of stream
ControlTypes::Srt(srt_packet) => {
self.handle_srt_control_packet(srt_packet)?;
}
}
}
Packet::Data(data) => self.handle_data_packet(cx, &data)?,
};
Ok(())
}
fn handle_ack2(&mut self, seq_num: i32) -> Result<(), Error> {
// 1) Locate the related ACK in the ACK History Window according to the
// ACK sequence number in this ACK2.
let id_in_wnd = match self
.ack_history_window
.as_slice()
.binary_search_by(|entry| entry.ack_seq_num.cmp(&seq_num))
{
Ok(i) => Some(i),
Err(_) => None,
};
if let Some(id) = id_in_wnd {
let AckHistoryEntry {
timestamp: send_timestamp,
ack_number,
..
} = self.ack_history_window[id];
// 2) Update the largest ACK number ever been acknowledged.
self.lr_ack_acked = (seq_num, ack_number);
// 3) Calculate new rtt according to the ACK2 arrival time and the ACK
// departure time, and update the RTT value as: RTT = (RTT * 7 +
// rtt) / 8
let immediate_rtt = self.get_timestamp_now() - send_timestamp;
self.rtt = (self.rtt * 7 + immediate_rtt) / 8;
// 4) Update RTTVar by: RTTVar = (RTTVar * 3 + abs(RTT - rtt)) / 4.
self.rtt_variance =
(self.rtt_variance * 3 + (self.rtt - immediate_rtt).abs()) / 4;
// 5) Update both ACK and NAK period to 4 * RTT + RTTVar + SYN.
let ack_us = 4 * self.rtt as u64 + self.rtt_variance as u64 + 10_000;
self.ack_interval = interval(Duration::from_micros(ack_us));
} else {
warn!(
"ACK sequence number in ACK2 packet not found in ACK history: {}",
seq_num
);
}
Ok(())
}
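// Editor's sketch of the smoothing in handle_ack2 above (values in
// microseconds, names illustrative): RTT = (RTT * 7 + rtt) / 8 and
// RTTVar = (RTTVar * 3 + |RTT - rtt|) / 4. For example, rtt = 10_000,
// rtt_var = 1_000 and a new sample of 12_000 yield (10_250, 1_250).
#[allow(dead_code)]
fn smooth_rtt(rtt: i32, rtt_var: i32, sample: i32) -> (i32, i32) {
    let next_rtt = (rtt * 7 + sample) / 8;
    let next_var = (rtt_var * 3 + (rtt - sample).abs()) / 4;
    (next_rtt, next_var)
}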
fn handle_data_packet(&mut self, cx: &mut Context, data: &DataPacket) -> Result<(), Error> {
let now = self.get_timestamp_now();
// 1) Reset the ExpCount to 1. If there is no unacknowledged data
// packet, or if this is an ACK or NAK control packet, reset the EXP
// timer.
self.exp_count = 1;
// 2&3 don't apply
// 4) If the sequence number of the current data packet is 16n + 1,
// where n is an integer, record the time interval between this packet and its probe pair in the Packet Pair Window.
if data.seq_number % 16 == 0 {
self.probe_time = Some(now)
} else if data.seq_number % 16 == 1 {
// if there is an entry
if let Some(pt) = self.probe_time {
// calculate and insert
self.packet_pair_window.push((data.seq_number, now - pt));
// reset
self.probe_time = None;
}
}
// 5) Record the packet arrival time in PKT History Window.
self.packet_history_window.push((data.seq_number, now));
// 6)
// a. If the sequence number of the current data packet is greater
// than LRSN, put all the sequence numbers between (but
// excluding) these two values into the receiver's loss list and
// send them to the sender in an NAK packet.
match data.seq_number.cmp(&self.lrsn) {
Ordering::Greater => {
// lrsn is the latest packet received, so nak the one after that
for i in seq_num_range(self.lrsn, data.seq_number) {
self.loss_list.push(LossListEntry {
seq_num: i,
feedback_time: now,
// k is initialized at 2, as stated on page 12 (very end)
k: 2,
})
}
self.send_nak(cx, seq_num_range(self.lrsn, data.seq_number))?;
}
// b. If the sequence number is less than LRSN, remove it from the
// receiver's loss list.
Ordering::Less => {
match self.loss_list[..].binary_search_by(|ll| ll.seq_num.cmp(&data.seq_number)) {
Ok(i) => {
self.loss_list.remove(i);
}
Err(_) => {
debug!(
"Packet received that's not in the loss list: {:?}, loss_list={:?}",
data.seq_number,
self.loss_list
.iter()
.map(|ll| ll.seq_num.as_raw())
.collect::<Vec<_>>()
);
}
};
}
Ordering::Equal => {}
}
// record that we got this packet
self.lrsn = cmp::max(data.seq_number + 1, self.lrsn);
// we've already gotten this packet, drop it
if self.buffer.next_release() > data.seq_number {
debug!("Received packet {:?} twice", data.seq_number);
return Ok(());
}
self.buffer.add(data.clone());
trace!(
"Received data packet seq_num={}, loc={:?}, buffer={:?}",
data.seq_number,
data.message_loc,
self.buffer,
);
Ok(())
}
// send a NAK, and return the future
fn send_nak<I>(&mut self, cx: &mut Context, lost_seq_nums: I) -> Result<(), Error>
where
I: Iterator<Item = SeqNumber>,
| {
let vec: Vec<_> = lost_seq_nums.collect();
debug!("Sending NAK for={:?}", vec);
let pack = self.make_control_packet(ControlTypes::Nak(
compress_loss_list(vec.iter().cloned()).collect(),
));
self.send_to_remote(cx, pack)?;
Ok(())
} | identifier_body |
|
receiver.rs | /// wakes the thread when there is a new packet to be released
release_delay: Delay,
/// A buffer of packets to send to the underlying sink
send_wrapper: SinkSendWrapper<(Packet, SocketAddr)>,
}
impl<T> Receiver<T>
where
T: Stream<Item = Result<(Packet, SocketAddr), Error>>
+ Sink<(Packet, SocketAddr), Error = Error>
+ Unpin,
{
pub fn new(
sock: T,
settings: ConnectionSettings,
hs_returner: Option<HandshakeReturner>,
) -> Receiver<T> {
let init_seq_num = settings.init_seq_num;
info!(
"Receiving started from {:?}, with latency={:?}",
settings.remote, settings.tsbpd_latency
);
Receiver {
settings,
hs_returner,
sock,
rtt: 10_000,
rtt_variance: 1_000,
listen_timeout: Duration::from_secs(1),
loss_list: Vec::new(),
ack_history_window: Vec::new(),
packet_history_window: Vec::new(),
packet_pair_window: Vec::new(),
ack_interval: interval(Duration::from_millis(10)),
nak_interval: delay_for(Duration::from_millis(10)),
lrsn: init_seq_num, // at start, we have received everything until the first packet, exclusive (aka nothing)
next_ack: 1,
exp_count: 1,
probe_time: None,
timeout_timer: delay_for(Duration::from_secs(1)),
lr_ack_acked: (0, init_seq_num),
buffer: RecvBuffer::new(init_seq_num),
shutdown_flag: false,
release_delay: delay_for(Duration::from_secs(0)), // start with an empty delay
send_wrapper: SinkSendWrapper::new(),
}
}
pub fn settings(&self) -> &ConnectionSettings {
&self.settings
}
pub fn remote(&self) -> SocketAddr {
self.settings.remote
}
fn timeout_timer(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.timeout_timer)
}
fn sock(&mut self) -> Pin<&mut T> {
Pin::new(&mut self.sock)
}
fn ack_interval(&mut self) -> Pin<&mut Interval> {
Pin::new(&mut self.ack_interval)
}
fn nak_interval(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.nak_interval)
}
fn release_delay(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.release_delay)
}
fn | (&mut self, cx: &mut Context, packet: Packet) -> Result<(), Error> {
self.send_wrapper
.send(&mut self.sock, (packet, self.settings.remote), cx)
}
fn reset_timeout(&mut self) {
self.timeout_timer.reset(time::Instant::from_std(
Instant::now() + self.listen_timeout,
))
}
fn on_ack_event(&mut self, cx: &mut Context) -> Result<(), Error> {
// get largest inclusive received packet number
let ack_number = match self.loss_list.first() {
// There is an element in the loss list
Some(i) => i.seq_num,
// No elements, use lrsn, as it's already exclusive
None => self.lrsn,
};
// 2) If (a) the ACK number equals to the largest ACK number ever
// acknowledged by ACK2
if ack_number == self.lr_ack_acked.1 {
// stop (do not send this ACK).
return Ok(());
}
// make sure this ACK number is greater or equal to a one sent previously
if let Some(w) = self.ack_history_window.last() {
assert!(w.ack_number <= ack_number);
}
trace!(
"Sending ACK; ack_num={:?}, lr_ack_acked={:?}",
ack_number,
self.lr_ack_acked.1
);
if let Some(&AckHistoryEntry {
ack_number: last_ack_number,
timestamp: last_timestamp,
..
}) = self.ack_history_window.first()
{
// or, (b) it is equal to the ACK number in the
// last ACK
if last_ack_number == ack_number &&
// and the time interval between this two ACK packets is
// less than 2 RTTs,
(self.get_timestamp_now() - last_timestamp) < (self.rtt * 2)
{
// stop (do not send this ACK).
return Ok(());
}
}
// 3) Assign this ACK a unique increasing ACK sequence number.
let ack_seq_num = self.next_ack;
self.next_ack += 1;
// 4) Calculate the packet arrival speed according to the following
// algorithm:
let packet_recv_rate = {
if self.packet_history_window.len() < 16 {
0
} else {
// Calculate the median value of the last 16 packet arrival
// intervals (AI) using the values stored in PKT History Window.
let mut last_16: Vec<_> = self.packet_history_window
[self.packet_history_window.len() - 16..]
.iter()
.map(|&(_, ts)| ts)
.collect();
last_16.sort();
// the median timestamp
let ai = last_16[last_16.len() / 2];
// In these 16 values, remove those either greater than AI*8 or
// less than AI/8.
let filtered: Vec<i32> = last_16
.iter()
.filter(|&&n| n / 8 < ai && n > ai / 8)
.cloned()
.collect();
// If more than 8 values are left, calculate the
// average of the left values AI', and the packet arrival speed is
// 1/AI' (number of packets per second). Otherwise, return 0.
if filtered.len() > 8 {
(filtered.iter().fold(0i64, |sum, &val| sum + i64::from(val))
/ (filtered.len() as i64)) as i32
} else {
0
}
}
};
// 5) Calculate the estimated link capacity according to the following algorithm:
let est_link_cap = {
if self.packet_pair_window.len() < 16 {
0
} else {
// Calculate the median value of the last 16 packet pair
// intervals (PI) using the values in Packet Pair Window, and the
// link capacity is 1/PI (number of packets per second).
let pi = {
let mut last_16: Vec<_> = self.packet_pair_window
[self.packet_pair_window.len() - 16..]
.iter()
.map(|&(_, time)| time)
.collect();
last_16.sort();
last_16[last_16.len() / 2]
};
// Multiply by 1M because pi is in microseconds
// pi is in us/packet
(1.0e6 / (pi as f32)) as i32
}
};
// Pack the ACK packet with RTT, RTT Variance, and flow window size (available
// receiver buffer size).
let ack = self.make_control_packet(ControlTypes::Ack {
ack_seq_num,
ack_number,
rtt: Some(self.rtt),
rtt_variance: Some(self.rtt_variance),
buffer_available: None, // TODO: add this
packet_recv_rate: Some(packet_recv_rate),
est_link_cap: Some(est_link_cap),
});
// add it to the ack history
let now = self.get_timestamp_now();
self.ack_history_window.push(AckHistoryEntry {
ack_number,
ack_seq_num,
timestamp: now,
});
self.send_to_remote(cx, ack)?;
Ok(())
}
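// Editor's companion sketch for the link-capacity estimate above: with
// packet-pair intervals in microseconds, capacity is 1e6 / median interval
// in packets per second (e.g. a 125 us median gives 8_000 packets/s).
// Illustrative name; mirrors the median step without the window bookkeeping.
#[allow(dead_code)]
fn estimated_link_capacity(mut pair_intervals_us: Vec<i32>) -> i32 {
    if pair_intervals_us.len() < 16 {
        return 0;
    }
    let n = pair_intervals_us.len();
    let last16 = &mut pair_intervals_us[n - 16..];
    last16.sort();
    let pi = last16[last16.len() / 2];
    (1.0e6 / (pi as f32)) as i32
}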
fn on_nak_event(&mut self, cx: &mut Context) -> Result<(), Error> {
// reset NAK timer, rtt and variance are in us, so convert to ns
// NAK is used to trigger a negative acknowledgement (NAK). Its period
// is dynamically updated to 4 * RTT + RTTVar + SYN, where RTTVar is the
// variance of RTT samples.
let nak_interval_us = 4 * self.rtt as u64 + self.rtt_variance as u64 + 10_000;
self.nak_interval.reset(time::Instant::from_std(
Instant::now() + Duration::from_micros(nak_interval_us),
));
// Search the receiver's loss list, find out all those sequence numbers
// whose last feedback time is k*RTT before, where k is initialized as 2
// and increased by 1 each time the number is fed back. Compress
// (according to section 6.4) and send these numbers back to the sender
// in an NAK packet.
let now = self.get_timestamp_now();
// increment k and change feedback time, returning sequence numbers
let seq_nums = {
let mut ret = Vec::new();
let rtt = self.rtt;
for pak in self
.loss_list
.iter_mut()
.filter(|lle| lle.feedback_time < now - lle.k * rtt)
{
pak.k += 1;
pak.feedback_time = now;
ret.push(pak.seq_num);
}
ret
};
if seq_nums.is_empty() {
return Ok(());
}
// send the nak
self.send_nak(cx, seq_nums.into_iter())?;
Ok(())
}
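// Worked numbers for the period formula above (4 * RTT + RTTVar + SYN, with
// SYN fixed at 10_000 us in this code): rtt = 10_000 us and
// rtt_variance = 1_000 us give 4 * 10_000 + 1_000 + 10_000 = 51_000 us,
// i.e. the next NAK check fires 51 ms later. Illustrative helper:
#[allow(dead_code)]
fn nak_period_us(rtt_us: u64, rtt_var_us: u64) -> u64 {
    4 * rtt_us + rtt_var_us + 10_000
}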
// checks the timers
// if a timer was triggered, then an RSFutureTimeout will be returned
// if not, the socket is given back
fn check_timers(&mut self, cx: &mut Context) -> Result<(), Error> {
// see if we need to ACK or NAK
if let Poll::Ready(Some(_)) = self.ack_interval().poll_next(cx) {
self.on_ack_event(cx)?;
}
if let Poll::Ready(_) = self.nak_interval().poll(cx) {
self.on_nak_event(cx)?;
}
// no need to do anything specific
let _ = self.release_delay().poll(cx);
Ok(())
}
// handles a SRT control packet
fn handle_srt_control_packet(&mut self, pack: &SrtControlPacket) -> Result<(), Error> {
use self::SrtControlPacket::*;
match pack {
HandshakeRequest(_) | HandshakeResponse(_) => {
warn!("Received handshake SRT packet, HSv5 expected");
}
_ => unimplemented!(),
}
Ok(())
}
// handles an incoming packet
fn handle_packet(
&mut self,
cx: &mut Context,
packet: &Packet,
from: &SocketAddr,
) -> Result<(), Error> {
// We don't care about packets from elsewhere
if *from != self.settings.remote {
info!("Packet received from unknown address: {:?}", from);
return Ok(());
}
if self.settings.local_sockid != packet.dest_sockid() {
// packet isn't applicable
info!(
"Packet send to socket id ({}) that does not match local ({})",
packet.dest_sockid().0,
self.settings.local_sockid.0
);
return Ok(());
}
trace!("Received packet: {:?}", packet);
match packet {
Packet::Control(ctrl) => {
// handle the control packet
match &ctrl.control_type {
ControlTypes::Ack { .. } => warn!("Receiver received ACK packet, unusual"),
ControlTypes::Ack2(seq_num) => self.handle_ack2(*seq_num)?,
ControlTypes::DropRequest { .. } => unimplemented!(),
ControlTypes::Handshake(_) => {
if let Some(ret) = self.hs_returner.as_ref() {
if let Some(pack) = (*ret)(&packet) {
self.send_to_remote(cx, pack)?;
}
}
}
ControlTypes::KeepAlive => {} // TODO: actually reset EXP etc
ControlTypes::Nak { .. } => warn!("Receiver received NAK packet, unusual"),
ControlTypes::Shutdown => {
info!("Shutdown packet received, flushing receiver...");
self.shutdown_flag = true;
} // end of stream
ControlTypes::Srt(srt_packet) => {
self.handle_srt_control_packet(srt_packet)?;
}
}
}
Packet::Data(data) => self.handle_data_packet(cx, &data)?,
};
Ok(())
}
fn handle_ack2(&mut self, seq_num: i32) -> Result<(), Error> {
// 1) Locate the related ACK in the ACK History Window according to the
// ACK sequence number in this ACK2.
let id_in_wnd = match self
.ack_history_window
.as_slice()
.binary_search_by(|entry| entry.ack_seq_num.cmp(&seq_num))
{
Ok(i) => Some(i),
Err(_) => None,
};
if let Some(id) = id_in_wnd {
let AckHistoryEntry {
timestamp: send_timestamp,
ack_number,
..
} = self.ack_history_window[id];
// 2) Update the largest ACK number ever been acknowledged.
self.lr_ack_acked = (seq_num, ack_number);
// 3) Calculate new rtt according to the ACK2 arrival time and the ACK
// departure time, and update the RTT value as: RTT = (RTT * 7 +
// rtt) / 8
let immediate_rtt = self.get_timestamp_now() - send_timestamp;
self.rtt = (self.rtt * 7 + immediate_rtt) / 8;
// 4) Update RTTVar by: RTTVar = (RTTVar * 3 + abs(RTT - rtt)) / 4.
self.rtt_variance =
(self.rtt_variance * 3 + (self.rtt - immediate_rtt).abs()) / 4;
// 5) Update both ACK and NAK period to 4 * RTT + RTTVar + SYN.
let ack_us = 4 * self.rtt as u64 + self.rtt_variance as u64 + 10_000;
self.ack_interval = interval(Duration::from_micros(ack_us));
} else {
warn!(
"ACK sequence number in ACK2 packet not found in ACK history: {}",
seq_num
);
}
Ok(())
}
fn handle_data_packet(&mut self, cx: &mut Context, data: &DataPacket) -> Result<(), Error> {
let now = self.get_timestamp_now();
// 1) Reset the ExpCount to 1. If there is no unacknowledged data
// packet, or if this is an ACK or NAK control packet, reset the EXP
// timer.
self.exp_count = 1;
// 2&3 don't apply
// 4) If the sequence number of the current data packet is 16n + 1,
// where n is an integer, record the time interval between this packet and its probe pair in the Packet Pair Window.
if data.seq_number % 16 == 0 {
self.probe_time = Some(now)
} else if data.seq_number % 16 == 1 {
// if there is an entry
if let Some(pt) = self.probe_time {
// calculate and insert
self.packet_pair_window.push((data.seq_number, now - pt));
// reset
self.probe_time = None;
}
}
// 5) Record the packet arrival time in PKT History Window.
self.packet_history_window.push((data.seq_number, now));
// 6)
// a. If the sequence number of the current data packet is greater
// than LRSN, put all the sequence numbers between (but
// excluding) these two values into the receiver's loss list and
// send them to the sender in an NAK packet.
match data.seq_number.cmp(&self.lrsn) {
Ordering::Greater => {
// lrsn is the latest packet received, so nak the one after that
for i in seq_num_range(self.lrsn, data.seq_number) {
self.loss_list.push(LossListEntry {
seq_num: i,
feedback_time: now,
// k is initialized at 2, as stated on page 12 (very end)
k: 2,
})
}
self.send_nak(cx, seq_num_range(self.lrsn, data.seq_number))?;
}
// b. If the sequence number is less than LRSN, remove it from the
// receiver's loss list.
Ordering::Less => {
match self.loss_list[..].binary_search_by(|ll| ll.seq_num.cmp(&data.seq_number)) {
Ok(i) => {
| send_to_remote | identifier_name |
receiver.rs |
/// wakes the thread when there is a new packet to be released
release_delay: Delay,
/// A buffer of packets to send to the underlying sink
send_wrapper: SinkSendWrapper<(Packet, SocketAddr)>,
}
impl<T> Receiver<T>
where
T: Stream<Item = Result<(Packet, SocketAddr), Error>>
+ Sink<(Packet, SocketAddr), Error = Error>
+ Unpin,
{
pub fn new(
sock: T,
settings: ConnectionSettings,
hs_returner: Option<HandshakeReturner>,
) -> Receiver<T> {
let init_seq_num = settings.init_seq_num;
info!(
"Receiving started from {:?}, with latency={:?}",
settings.remote, settings.tsbpd_latency
);
Receiver {
settings,
hs_returner,
sock,
rtt: 10_000,
rtt_variance: 1_000,
listen_timeout: Duration::from_secs(1),
loss_list: Vec::new(),
ack_history_window: Vec::new(),
packet_history_window: Vec::new(),
packet_pair_window: Vec::new(),
ack_interval: interval(Duration::from_millis(10)),
nak_interval: delay_for(Duration::from_millis(10)),
lrsn: init_seq_num, // at start, we have received everything until the first packet, exclusive (aka nothing)
next_ack: 1,
exp_count: 1,
probe_time: None,
timeout_timer: delay_for(Duration::from_secs(1)),
lr_ack_acked: (0, init_seq_num),
buffer: RecvBuffer::new(init_seq_num),
shutdown_flag: false,
release_delay: delay_for(Duration::from_secs(0)), // start with an empty delay
send_wrapper: SinkSendWrapper::new(),
}
}
pub fn settings(&self) -> &ConnectionSettings {
&self.settings
}
pub fn remote(&self) -> SocketAddr {
self.settings.remote
}
fn timeout_timer(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.timeout_timer)
}
fn sock(&mut self) -> Pin<&mut T> {
Pin::new(&mut self.sock)
}
fn ack_interval(&mut self) -> Pin<&mut Interval> {
Pin::new(&mut self.ack_interval)
}
fn nak_interval(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.nak_interval)
}
fn release_delay(&mut self) -> Pin<&mut Delay> {
Pin::new(&mut self.release_delay)
}
fn send_to_remote(&mut self, cx: &mut Context, packet: Packet) -> Result<(), Error> {
self.send_wrapper
.send(&mut self.sock, (packet, self.settings.remote), cx)
}
fn reset_timeout(&mut self) {
self.timeout_timer.reset(time::Instant::from_std(
Instant::now() + self.listen_timeout,
))
}
fn on_ack_event(&mut self, cx: &mut Context) -> Result<(), Error> {
// get largest inclusive received packet number
let ack_number = match self.loss_list.first() {
// There is an element in the loss list
Some(i) => i.seq_num,
// No elements, use lrsn, as it's already exclusive
None => self.lrsn,
};
// 2) If (a) the ACK number equals to the largest ACK number ever
// acknowledged by ACK2
if ack_number == self.lr_ack_acked.1 {
// stop (do not send this ACK).
return Ok(());
}
// make sure this ACK number is greater or equal to a one sent previously
if let Some(w) = self.ack_history_window.last() {
assert!(w.ack_number <= ack_number);
}
trace!(
"Sending ACK; ack_num={:?}, lr_ack_acked={:?}",
ack_number,
self.lr_ack_acked.1
);
if let Some(&AckHistoryEntry {
ack_number: last_ack_number,
timestamp: last_timestamp,
..
}) = self.ack_history_window.first()
{
// or, (b) it is equal to the ACK number in the
// last ACK
if last_ack_number == ack_number &&
// and the time interval between this two ACK packets is
// less than 2 RTTs,
(self.get_timestamp_now() - last_timestamp) < (self.rtt * 2)
{
// stop (do not send this ACK).
return Ok(());
}
}
// 3) Assign this ACK a unique increasing ACK sequence number.
let ack_seq_num = self.next_ack;
self.next_ack += 1;
// 4) Calculate the packet arrival speed according to the following
// algorithm:
let packet_recv_rate = {
if self.packet_history_window.len() < 16 {
0
} else {
// Calculate the median value of the last 16 packet arrival
// intervals (AI) using the values stored in PKT History Window.
let mut last_16: Vec<_> = self.packet_history_window
[self.packet_history_window.len() - 16..]
.iter()
.map(|&(_, ts)| ts)
.collect();
last_16.sort();
// the median timestamp
let ai = last_16[last_16.len() / 2];
// In these 16 values, remove those either greater than AI*8 or
// less than AI/8.
let filtered: Vec<i32> = last_16
.iter()
.filter(|&&n| n / 8 < ai && n > ai / 8)
.cloned()
.collect();
// If more than 8 values are left, calculate the
// average of the left values AI', and the packet arrival speed is
// 1/AI' (number of packets per second). Otherwise, return 0.
if filtered.len() > 8 {
(filtered.iter().fold(0i64, |sum, &val| sum + i64::from(val))
/ (filtered.len() as i64)) as i32
} else {
0
}
}
};
// 5) Calculate the estimated link capacity according to the following algorithm:
let est_link_cap = {
if self.packet_pair_window.len() < 16 {
0
} else {
// Calculate the median value of the last 16 packet pair
// intervals (PI) using the values in Packet Pair Window, and the
// link capacity is 1/PI (number of packets per second).
let pi = {
let mut last_16: Vec<_> = self.packet_pair_window
[self.packet_pair_window.len() - 16..]
.iter()
.map(|&(_, time)| time)
.collect();
last_16.sort();
last_16[last_16.len() / 2]
};
// Multiply by 1M because pi is in microseconds
// pi is in us/packet
(1.0e6 / (pi as f32)) as i32
}
};
// Pack the ACK packet with RTT, RTT Variance, and flow window size (available
// receiver buffer size).
let ack = self.make_control_packet(ControlTypes::Ack {
ack_seq_num,
ack_number,
rtt: Some(self.rtt),
rtt_variance: Some(self.rtt_variance),
buffer_available: None, // TODO: add this
packet_recv_rate: Some(packet_recv_rate),
est_link_cap: Some(est_link_cap),
});
// add it to the ack history
let now = self.get_timestamp_now();
self.ack_history_window.push(AckHistoryEntry {
ack_number,
ack_seq_num,
timestamp: now,
});
self.send_to_remote(cx, ack)?;
Ok(())
}
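// Illustrative sketch (not part of the original implementation): the
// median-filter arrival-rate estimate from step 4 above, written over an
// explicit slice of inter-arrival intervals in microseconds, as the spec
// describes. The helper name `arrival_rate_sketch` and its free-standing
// signature are assumptions for illustration only; the inline version
// above works on the PKT History Window directly.
#[allow(dead_code)]
fn arrival_rate_sketch(intervals_us: &[i32]) -> i32 {
    if intervals_us.len() < 16 {
        return 0;
    }
    // median of the last 16 intervals
    let mut last_16: Vec<i32> = intervals_us[intervals_us.len() - 16..].to_vec();
    last_16.sort();
    let ai = last_16[last_16.len() / 2];
    // drop outliers outside (AI/8, AI*8)
    let filtered: Vec<i32> = last_16
        .into_iter()
        .filter(|&n| n / 8 < ai && n > ai / 8)
        .collect();
    if filtered.len() > 8 {
        // packets per second = 1e6 us/s divided by the mean interval in us
        let mean_us = filtered.iter().map(|&n| i64::from(n)).sum::<i64>()
            / filtered.len() as i64;
        (1_000_000 / mean_us.max(1)) as i32
    } else {
        0
    }
}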
fn on_nak_event(&mut self, cx: &mut Context) -> Result<(), Error> {
// reset NAK timer; rtt, variance, and the SYN constant are all in us
// NAK is used to trigger a negative acknowledgement (NAK). Its period
// is dynamically updated to 4 * RTT + RTTVar + SYN, where RTTVar is the
// variance of RTT samples.
let nak_interval_us = 4 * self.rtt as u64 + self.rtt_variance as u64 + 10_000;
self.nak_interval.reset(time::Instant::from_std(
Instant::now() + Duration::from_micros(nak_interval_us),
));
// Search the receiver's loss list, find out all those sequence numbers
// whose last feedback time is k*RTT before, where k is initialized as 2
// and increased by 1 each time the number is fed back. Compress
// (according to section 6.4) and send these numbers back to the sender
// in an NAK packet.
let now = self.get_timestamp_now();
// increment k and change feedback time, returning sequence numbers
let seq_nums = {
let mut ret = Vec::new();
let rtt = self.rtt;
for pak in self
.loss_list
.iter_mut()
.filter(|lle| lle.feedback_time < now - lle.k * rtt)
{
pak.k += 1;
pak.feedback_time = now;
ret.push(pak.seq_num);
}
ret
};
if seq_nums.is_empty() {
return Ok(());
}
// send the nak
self.send_nak(cx, seq_nums.into_iter())?;
Ok(())
}
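// Illustrative sketch (not in the original code): the NAK period used
// above, 4 * RTT + RTTVar + SYN, with every term in microseconds and SYN
// fixed at 10_000 us (10 ms). For example, rtt = 10_000 and
// rtt_variance = 1_000 (the initial values set in `new`) give 51_000 us.
#[allow(dead_code)]
fn nak_period_us_sketch(rtt_us: u64, rtt_variance_us: u64) -> u64 {
    4 * rtt_us + rtt_variance_us + 10_000
}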
// checks the timers
// if a timer was triggered, then an RSFutureTimeout will be returned
// if not, the socket is given back
fn check_timers(&mut self, cx: &mut Context) -> Result<(), Error> {
// see if we need to ACK or NAK
if let Poll::Ready(Some(_)) = self.ack_interval().poll_next(cx) {
self.on_ack_event(cx)?;
}
if let Poll::Ready(_) = self.nak_interval().poll(cx) {
self.on_nak_event(cx)?;
}
// no need to do anything specific
let _ = self.release_delay().poll(cx);
Ok(())
}
// handles an SRT control packet
fn handle_srt_control_packet(&mut self, pack: &SrtControlPacket) -> Result<(), Error> {
use self::SrtControlPacket::*;
match pack {
HandshakeRequest(_) | HandshakeResponse(_) => {
warn!("Received handshake SRT packet, HSv5 expected");
}
_ => unimplemented!(),
}
Ok(())
}
// handles an incoming packet
fn handle_packet(
&mut self,
cx: &mut Context,
packet: &Packet,
from: &SocketAddr,
) -> Result<(), Error> {
// We don't care about packets from elsewhere
if *from != self.settings.remote {
info!("Packet received from unknown address: {:?}", from);
return Ok(());
}
if self.settings.local_sockid != packet.dest_sockid() {
// packet isn't applicable | return Ok(());
}
trace!("Received packet: {:?}", packet);
match packet {
Packet::Control(ctrl) => {
// handle the control packet
match &ctrl.control_type {
ControlTypes::Ack { .. } => warn!("Receiver received ACK packet, unusual"),
ControlTypes::Ack2(seq_num) => self.handle_ack2(*seq_num)?,
ControlTypes::DropRequest { .. } => unimplemented!(),
ControlTypes::Handshake(_) => {
if let Some(ret) = self.hs_returner.as_ref() {
if let Some(pack) = (*ret)(&packet) {
self.send_to_remote(cx, pack)?;
}
}
}
ControlTypes::KeepAlive => {} // TODO: actually reset EXP etc
ControlTypes::Nak { .. } => warn!("Receiver received NAK packet, unusual"),
ControlTypes::Shutdown => {
info!("Shutdown packet received, flushing receiver...");
self.shutdown_flag = true;
} // end of stream
ControlTypes::Srt(srt_packet) => {
self.handle_srt_control_packet(srt_packet)?;
}
}
}
Packet::Data(data) => self.handle_data_packet(cx, &data)?,
};
Ok(())
}
fn handle_ack2(&mut self, seq_num: i32) -> Result<(), Error> {
// 1) Locate the related ACK in the ACK History Window according to the
// ACK sequence number in this ACK2.
let id_in_wnd = match self
.ack_history_window
.as_slice()
.binary_search_by(|entry| entry.ack_seq_num.cmp(&seq_num))
{
Ok(i) => Some(i),
Err(_) => None,
};
if let Some(id) = id_in_wnd {
let AckHistoryEntry {
timestamp: send_timestamp,
ack_number,
..
} = self.ack_history_window[id];
// 2) Update the largest ACK number ever been acknowledged.
self.lr_ack_acked = (seq_num, ack_number);
// 3) Calculate new rtt according to the ACK2 arrival time and the ACK
// departure time, and update the RTT value as: RTT = (RTT * 7 +
// rtt) / 8
let immediate_rtt = self.get_timestamp_now() - send_timestamp;
self.rtt = (self.rtt * 7 + immediate_rtt) / 8;
// 4) Update RTTVar by: RTTVar = (RTTVar * 3 + abs(RTT - rtt)) / 4.
self.rtt_variance =
(self.rtt_variance * 3 + (self.rtt - immediate_rtt).abs()) / 4;
// 5) Update both ACK and NAK period to 4 * RTT + RTTVar + SYN.
let ack_us = 4 * self.rtt as u64 + self.rtt_variance as u64 + 10_000;
self.ack_interval = interval(Duration::from_micros(ack_us));
} else {
warn!(
"ACK sequence number in ACK2 packet not found in ACK history: {}",
seq_num
);
}
Ok(())
}
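// Illustrative sketch (not in the original code): the smoothing rules from
// steps 3 and 4 above, isolated as a pure function. With rtt = 10_000,
// rtt_variance = 1_000 and a new sample of 12_000, this yields
// rtt = 10_250 and rtt_variance = 1_250 (integer division). Note the
// method above updates RTT before taking the deviation, so its numbers
// can differ slightly from this pre-update version.
#[allow(dead_code)]
fn smooth_rtt_sketch(rtt: i32, rtt_variance: i32, sample: i32) -> (i32, i32) {
    let new_rtt = (rtt * 7 + sample) / 8;
    let new_var = (rtt_variance * 3 + (rtt - sample).abs()) / 4;
    (new_rtt, new_var)
}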
fn handle_data_packet(&mut self, cx: &mut Context, data: &DataPacket) -> Result<(), Error> {
let now = self.get_timestamp_now();
// 1) Reset the ExpCount to 1. If there is no unacknowledged data
// packet, or if this is an ACK or NAK control packet, reset the EXP
// timer.
self.exp_count = 1;
// 2&3 don't apply
// 4) If the sequence number of the current data packet is 16n + 1,
// where n is an integer, record the time interval between this
// packet and the 16n-th packet in the Packet Pair Window.
if data.seq_number % 16 == 0 {
self.probe_time = Some(now)
} else if data.seq_number % 16 == 1 {
// if there is an entry
if let Some(pt) = self.probe_time {
// calculate and insert
self.packet_pair_window.push((data.seq_number, now - pt));
// reset
self.probe_time = None;
}
}
// 5) Record the packet arrival time in PKT History Window.
self.packet_history_window.push((data.seq_number, now));
// 6)
// a. If the sequence number of the current data packet is greater
// than LRSN, put all the sequence numbers between (but
// excluding) these two values into the receiver's loss list and
// send them to the sender in an NAK packet.
match data.seq_number.cmp(&self.lrsn) {
Ordering::Greater => {
// lrsn is the latest packet received, so nak the one after that
for i in seq_num_range(self.lrsn, data.seq_number) {
self.loss_list.push(LossListEntry {
seq_num: i,
feedback_time: now,
// k is initialized at 2, as stated on page 12 (very end)
k: 2,
})
}
self.send_nak(cx, seq_num_range(self.lrsn, data.seq_number))?;
}
// b. If the sequence number is less than LRSN, remove it from the
// receiver's loss list.
Ordering::Less => {
match self.loss_list[..].binary_search_by(|ll| ll.seq_num.cmp(&data.seq_number)) {
Ok(i) => {
| info!(
"Packet send to socket id ({}) that does not match local ({})",
packet.dest_sockid().0,
self.settings.local_sockid.0
); | random_line_split |
main.rs | #[macro_use]
extern crate clap;
extern crate irb;
extern crate las;
extern crate palette;
extern crate riscan_pro;
extern crate scanifc;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate text_io;
extern crate toml;
use clap::{App, ArgMatches};
use irb::Irb;
use las::Color;
use las::point::Format;
use palette::{Gradient, Rgb};
use riscan_pro::{CameraCalibration, MountCalibration, Point, Project, ScanPosition, Socs};
use riscan_pro::scan_position::Image;
use scanifc::point3d::Stream;
use std::fmt;
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::u16;
fn main() {
let yaml = load_yaml!("cli.yml");
let matches = App::from_yaml(yaml).get_matches();
print!("Configuring...");
std::io::stdout().flush().unwrap();
let config = Config::new(&matches);
println!("done.");
println!("{}", config);
loop {
print!("Continue? (y/n) ");
std::io::stdout().flush().unwrap();
let answer: String = read!();
println!();
match answer.to_lowercase().as_str() {
"y" => break,
"n" => return,
_ => println!("Unknown response: {}", answer),
}
}
for scan_position in config.scan_positions() {
println!("Colorizing {}:", scan_position.name);
let translations = config.translations(scan_position);
if translations.is_empty() {
println!(" - No translations found");
} else {
for translation in translations {
println!(
" - Translation:\n - Infile: {}\n - Outfile: {}",
translation.infile.display(),
translation.outfile.display()
);
config.colorize(scan_position, &translation);
}
}
}
println!("Complete!");
}
struct Config {
image_dir: PathBuf,
keep_without_thermal: bool,
las_dir: PathBuf,
max_reflectance: f32,
min_reflectance: f32,
project: Project,
rotate: bool,
scan_position_names: Option<Vec<String>>,
sync_to_pps: bool,
temperature_gradient: Gradient<Rgb>,
use_scanpos_names: bool,
name_map: NameMap,
}
struct ImageGroup<'a> {
camera_calibration: &'a CameraCalibration,
image: &'a Image,
irb: Irb,
irb_path: PathBuf,
mount_calibration: &'a MountCalibration,
rotate: bool,
}
struct Translation {
infile: PathBuf,
outfile: PathBuf,
}
#[derive(Debug, Default, Deserialize)]
struct NameMap {
maps: Vec<FromTo>,
}
#[derive(Debug, Default, Deserialize)]
struct FromTo {
from: String,
to: String,
}
impl Config {
fn new(matches: &ArgMatches) -> Config {
use std::fs::File;
use std::io::Read;
use toml;
let project = Project::from_path(matches.value_of("PROJECT").unwrap()).unwrap();
let image_dir = PathBuf::from(matches.value_of("IMAGE_DIR").unwrap());
let las_dir = Path::new(matches.value_of("LAS_DIR").unwrap()).to_path_buf();
let min_reflectance = value_t!(matches, "min-reflectance", f32).unwrap();
let max_reflectance = value_t!(matches, "max-reflectance", f32).unwrap();
let min_temperature = value_t!(matches, "min-temperature", f32).unwrap();
let max_temperature = value_t!(matches, "max-temperature", f32).unwrap();
let min_temperature_color = Rgb::new(0.0, 0.0, 1.0);
let max_temperature_color = Rgb::new(1.0, 0.0, 0.0);
let temperature_gradient = Gradient::with_domain(vec![
(min_temperature, min_temperature_color),
(max_temperature, max_temperature_color),
]);
let name_map = if let Some(name_map) = matches.value_of("name-map") {
let mut s = String::new();
File::open(name_map)
.unwrap()
.read_to_string(&mut s)
.unwrap();
toml::from_str(&s).unwrap()
} else {
NameMap::default()
};
Config {
image_dir: image_dir,
keep_without_thermal: matches.is_present("keep-without-thermal"),
las_dir: las_dir,
max_reflectance: max_reflectance,
min_reflectance: min_reflectance,
project: project,
rotate: matches.is_present("rotate"),
scan_position_names: matches.values_of("scan-position").map(|values| {
values.map(|name| name.to_string()).collect()
}),
sync_to_pps: matches.is_present("sync-to-pps"),
temperature_gradient: temperature_gradient,
use_scanpos_names: matches.is_present("use-scanpos-names"),
name_map: name_map,
}
}
fn translations(&self, scan_position: &ScanPosition) -> Vec<Translation> {
let paths = scan_position.singlescan_rxp_paths(&self.project);
if self.use_scanpos_names && paths.len() > 1 {
panic!(
"--use-scanpos-names was provided, but there are {} rxp files for scan position {}",
paths.len(),
scan_position.name
);
}
paths
.into_iter()
.map(|path| {
Translation {
outfile: self.outfile(scan_position, &path),
infile: path,
}
})
.collect()
}
fn colorize(&self, scan_position: &ScanPosition, translation: &Translation) {
use std::f64;
let image_groups = self.image_groups(scan_position);
let stream = Stream::from_path(&translation.infile)
.sync_to_pps(self.sync_to_pps)
.open()
.unwrap();
let mut writer = las::Writer::from_path(&translation.outfile, self.las_header()).unwrap();
for point in stream {
let point = point.expect("could not read rxp point");
let socs = Point::socs(point.x, point.y, point.z);
let temperatures = image_groups
.iter()
.filter_map(|image_group| image_group.temperature(&socs))
.collect::<Vec<_>>();
let temperature = if temperatures.is_empty() {
if self.keep_without_thermal {
f64::NAN
} else {
continue;
}
} else {
temperatures.iter().sum::<f64>() / temperatures.len() as f64
};
let glcs = socs.to_prcs(scan_position.sop).to_glcs(self.project.pop);
let point = las::Point {
x: glcs.x,
y: glcs.y,
z: glcs.z,
intensity: self.to_intensity(point.reflectance),
color: Some(self.to_color(temperature as f32)),
gps_time: Some(temperature),
..Default::default()
};
writer.write(point).expect("could not write las point");
}
}
fn scan_positions(&self) -> Vec<&ScanPosition> {
let mut scan_positions: Vec<_> = if let Some(names) = self.scan_position_names.as_ref() {
names
.iter()
.map(|name| self.project.scan_positions.get(name).unwrap())
.collect()
} else {
self.project.scan_positions.values().collect()
};
scan_positions.sort_by_key(|s| &s.name);
scan_positions
}
fn to_color(&self, n: f32) -> Color {
let color = self.temperature_gradient.get(n);
Color {
red: (u16::MAX as f32 * color.red) as u16,
green: (u16::MAX as f32 * color.green) as u16,
blue: (u16::MAX as f32 * color.blue) as u16,
}
}
fn to_intensity(&self, n: f32) -> u16 {
(u16::MAX as f32 * (n - self.min_reflectance) /
(self.max_reflectance - self.min_reflectance)) as u16
}
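// Illustrative sketch (not in the original code): `to_intensity` above is
// a linear rescale of reflectance onto the full u16 range. For example,
// with min_reflectance = -20.0 and max_reflectance = 5.0 (hypothetical CLI
// values), a reflectance of -7.5 sits at the midpoint and maps to 32767.
#[allow(dead_code)]
fn to_intensity_sketch(n: f32, min: f32, max: f32) -> u16 {
    (u16::MAX as f32 * (n - min) / (max - min)) as u16
}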
fn las_header(&self) -> las::Header {
let mut header = las::Header::default();
header.point_format = Format::new(3).unwrap();
header.transforms = las::Vector {
x: las::Transform {
scale: 0.001,
offset: self.project.pop[(0, 3)],
},
y: las::Transform {
scale: 0.001,
offset: self.project.pop[(1, 3)],
},
z: las::Transform {
scale: 0.001,
offset: self.project.pop[(2, 3)],
},
};
header
}
fn image_groups<'a>(&'a self, scan_position: &'a ScanPosition) -> Vec<ImageGroup<'a>> {
let mut image_dir = self.image_dir.clone();
image_dir.push(&scan_position.name);
match fs::read_dir(image_dir) {
Ok(read_dir) => {
read_dir
.enumerate()
.filter_map(|(i, entry)| {
let entry = entry.unwrap();
if entry.path().extension().map(|e| e == "irb").unwrap_or(
false,
)
{
let image = if let Some(name) = self.name_map(scan_position) {
let image_name = format!("{} - Image{:03}", name, i + 1);
scan_position.images.get(&image_name).expect(&format!(
"Could not find image {}",
image_name
))
} else {
scan_position.image_from_path(entry.path()).unwrap()
};
let irb = Irb::from_path(entry.path().to_string_lossy().as_ref())
.unwrap();
let camera_calibration =
image.camera_calibration(&self.project).unwrap();
let mount_calibration = image.mount_calibration(&self.project).unwrap();
Some(ImageGroup {
camera_calibration: camera_calibration,
image: image,
irb: irb,
irb_path: entry.path(),
mount_calibration: mount_calibration,
rotate: self.rotate,
})
} else {
None
}
})
.collect() | match err.kind() {
ErrorKind::NotFound => Vec::new(),
_ => panic!("io error: {}", err),
}
}
}
}
fn outfile<P: AsRef<Path>>(&self, scan_position: &ScanPosition, infile: P) -> PathBuf {
let mut outfile = self.las_dir.clone();
if self.use_scanpos_names {
outfile.push(Path::new(&scan_position.name).with_extension("las"));
} else {
outfile.push(infile.as_ref().with_extension("las").file_name().unwrap());
}
outfile
}
fn name_map(&self, scan_position: &ScanPosition) -> Option<&str> {
self.name_map
.maps
.iter()
.find(|map| map.from == scan_position.name)
.map(|map| map.to.as_str())
}
}
impl fmt::Display for Config {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Configuration:")?;
writeln!(f, " - project: {}", self.project.path.display())?;
writeln!(f, " - image dir: {}", self.image_dir.display())?;
writeln!(f, " - las dir: {}", self.las_dir.display())?;
writeln!(f, " - scan positions:")?;
for scan_position in self.scan_positions() {
writeln!(f, " - name: {}", scan_position.name)?;
let image_groups = self.image_groups(scan_position);
if image_groups.is_empty() {
writeln!(f, " - no images for this scan position")?;
} else {
writeln!(f, " - images:")?;
for image_group in image_groups {
writeln!(f, " - {}", image_group.irb_path.display())?;
}
}
}
Ok(())
}
}
impl<'a> ImageGroup<'a> {
fn temperature(&self, socs: &Point<Socs>) -> Option<f64> {
let cmcs = socs.to_cmcs(self.image.cop, self.mount_calibration);
self.camera_calibration.cmcs_to_ics(&cmcs).map(|(mut u,
mut v)| {
if self.rotate {
let new_u = self.camera_calibration.height as f64 - v;
v = u;
u = new_u;
}
self.irb
.temperature(u.trunc() as i32, v.trunc() as i32)
.expect("error when retrieving temperature") - 273.15
})
}
} | }
Err(err) => {
use std::io::ErrorKind; | random_line_split |
main.rs | #[macro_use]
extern crate clap;
extern crate irb;
extern crate las;
extern crate palette;
extern crate riscan_pro;
extern crate scanifc;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate text_io;
extern crate toml;
use clap::{App, ArgMatches};
use irb::Irb;
use las::Color;
use las::point::Format;
use palette::{Gradient, Rgb};
use riscan_pro::{CameraCalibration, MountCalibration, Point, Project, ScanPosition, Socs};
use riscan_pro::scan_position::Image;
use scanifc::point3d::Stream;
use std::fmt;
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::u16;
fn main() {
let yaml = load_yaml!("cli.yml");
let matches = App::from_yaml(yaml).get_matches();
print!("Configuring...");
std::io::stdout().flush().unwrap();
let config = Config::new(&matches);
println!("done.");
println!("{}", config);
loop {
print!("Continue? (y/n) ");
std::io::stdout().flush().unwrap();
let answer: String = read!();
println!();
match answer.to_lowercase().as_str() {
"y" => break,
"n" => return,
_ => println!("Unknown response: {}", answer),
}
}
for scan_position in config.scan_positions() {
println!("Colorizing {}:", scan_position.name);
let translations = config.translations(scan_position);
if translations.is_empty() {
println!(" - No translations found");
} else {
for translation in translations {
println!(
" - Translation:\n - Infile: {}\n - Outfile: {}",
translation.infile.display(),
translation.outfile.display()
);
config.colorize(scan_position, &translation);
}
}
}
println!("Complete!");
}
struct Config {
image_dir: PathBuf,
keep_without_thermal: bool,
las_dir: PathBuf,
max_reflectance: f32,
min_reflectance: f32,
project: Project,
rotate: bool,
scan_position_names: Option<Vec<String>>,
sync_to_pps: bool,
temperature_gradient: Gradient<Rgb>,
use_scanpos_names: bool,
name_map: NameMap,
}
struct ImageGroup<'a> {
camera_calibration: &'a CameraCalibration,
image: &'a Image,
irb: Irb,
irb_path: PathBuf,
mount_calibration: &'a MountCalibration,
rotate: bool,
}
struct Translation {
infile: PathBuf,
outfile: PathBuf,
}
#[derive(Debug, Default, Deserialize)]
struct NameMap {
maps: Vec<FromTo>,
}
#[derive(Debug, Default, Deserialize)]
struct FromTo {
from: String,
to: String,
}
impl Config {
fn new(matches: &ArgMatches) -> Config {
use std::fs::File;
use std::io::Read;
use toml;
let project = Project::from_path(matches.value_of("PROJECT").unwrap()).unwrap();
let image_dir = PathBuf::from(matches.value_of("IMAGE_DIR").unwrap());
let las_dir = Path::new(matches.value_of("LAS_DIR").unwrap()).to_path_buf();
let min_reflectance = value_t!(matches, "min-reflectance", f32).unwrap();
let max_reflectance = value_t!(matches, "max-reflectance", f32).unwrap();
let min_temperature = value_t!(matches, "min-temperature", f32).unwrap();
let max_temperature = value_t!(matches, "max-temperature", f32).unwrap();
let min_temperature_color = Rgb::new(0.0, 0.0, 1.0);
let max_temperature_color = Rgb::new(1.0, 0.0, 0.0);
let temperature_gradient = Gradient::with_domain(vec![
(min_temperature, min_temperature_color),
(max_temperature, max_temperature_color),
]);
let name_map = if let Some(name_map) = matches.value_of("name-map") {
let mut s = String::new();
File::open(name_map)
.unwrap()
.read_to_string(&mut s)
.unwrap();
toml::from_str(&s).unwrap()
} else {
NameMap::default()
};
Config {
image_dir: image_dir,
keep_without_thermal: matches.is_present("keep-without-thermal"),
las_dir: las_dir,
max_reflectance: max_reflectance,
min_reflectance: min_reflectance,
project: project,
rotate: matches.is_present("rotate"),
scan_position_names: matches.values_of("scan-position").map(|values| {
values.map(|name| name.to_string()).collect()
}),
sync_to_pps: matches.is_present("sync-to-pps"),
temperature_gradient: temperature_gradient,
use_scanpos_names: matches.is_present("use-scanpos-names"),
name_map: name_map,
}
}
fn translations(&self, scan_position: &ScanPosition) -> Vec<Translation> {
let paths = scan_position.singlescan_rxp_paths(&self.project);
if self.use_scanpos_names && paths.len() > 1 {
panic!(
"--use-scanpos-names was provided, but there are {} rxp files for scan position {}",
paths.len(),
scan_position.name
);
}
paths
.into_iter()
.map(|path| {
Translation {
outfile: self.outfile(scan_position, &path),
infile: path,
}
})
.collect()
}
fn colorize(&self, scan_position: &ScanPosition, translation: &Translation) {
use std::f64;
let image_groups = self.image_groups(scan_position);
let stream = Stream::from_path(&translation.infile)
.sync_to_pps(self.sync_to_pps)
.open()
.unwrap();
let mut writer = las::Writer::from_path(&translation.outfile, self.las_header()).unwrap();
for point in stream {
let point = point.expect("could not read rxp point");
let socs = Point::socs(point.x, point.y, point.z);
let temperatures = image_groups
.iter()
.filter_map(|image_group| image_group.temperature(&socs))
.collect::<Vec<_>>();
let temperature = if temperatures.is_empty() {
if self.keep_without_thermal {
f64::NAN
} else {
continue;
}
} else {
temperatures.iter().sum::<f64>() / temperatures.len() as f64
};
let glcs = socs.to_prcs(scan_position.sop).to_glcs(self.project.pop);
let point = las::Point {
x: glcs.x,
y: glcs.y,
z: glcs.z,
intensity: self.to_intensity(point.reflectance),
color: Some(self.to_color(temperature as f32)),
gps_time: Some(temperature),
..Default::default()
};
writer.write(point).expect("could not write las point");
}
}
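// Illustrative sketch (not in the original code): the per-point temperature
// rule used in `colorize` above. Points seen by no thermal image get NaN
// when keep_without_thermal is set and are otherwise dropped; points seen
// by several images get the mean, e.g. samples [4.0, 6.0] average to 5.0.
#[allow(dead_code)]
fn average_temperature_sketch(samples: &[f64], keep_without_thermal: bool) -> Option<f64> {
    if samples.is_empty() {
        if keep_without_thermal {
            Some(std::f64::NAN)
        } else {
            None // caller skips the point entirely
        }
    } else {
        Some(samples.iter().sum::<f64>() / samples.len() as f64)
    }
}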
fn scan_positions(&self) -> Vec<&ScanPosition> {
let mut scan_positions: Vec<_> = if let Some(names) = self.scan_position_names.as_ref() {
names
.iter()
.map(|name| self.project.scan_positions.get(name).unwrap())
.collect()
} else {
self.project.scan_positions.values().collect()
};
scan_positions.sort_by_key(|s| &s.name);
scan_positions
}
fn to_color(&self, n: f32) -> Color {
let color = self.temperature_gradient.get(n);
Color {
red: (u16::MAX as f32 * color.red) as u16,
green: (u16::MAX as f32 * color.green) as u16,
blue: (u16::MAX as f32 * color.blue) as u16,
}
}
fn to_intensity(&self, n: f32) -> u16 {
(u16::MAX as f32 * (n - self.min_reflectance) /
(self.max_reflectance - self.min_reflectance)) as u16
}
fn las_header(&self) -> las::Header {
let mut header = las::Header::default();
header.point_format = Format::new(3).unwrap();
header.transforms = las::Vector {
x: las::Transform {
scale: 0.001,
offset: self.project.pop[(0, 3)],
},
y: las::Transform {
scale: 0.001,
offset: self.project.pop[(1, 3)],
},
z: las::Transform {
scale: 0.001,
offset: self.project.pop[(2, 3)],
},
};
header
}
fn image_groups<'a>(&'a self, scan_position: &'a ScanPosition) -> Vec<ImageGroup<'a>> {
let mut image_dir = self.image_dir.clone();
image_dir.push(&scan_position.name);
match fs::read_dir(image_dir) {
Ok(read_dir) => {
read_dir
.enumerate()
.filter_map(|(i, entry)| {
let entry = entry.unwrap();
if entry.path().extension().map(|e| e == "irb").unwrap_or(
false,
)
{
let image = if let Some(name) = self.name_map(scan_position) {
let image_name = format!("{} - Image{:03}", name, i + 1);
scan_position.images.get(&image_name).expect(&format!(
"Could not find image {}",
image_name
))
} else {
scan_position.image_from_path(entry.path()).unwrap()
};
let irb = Irb::from_path(entry.path().to_string_lossy().as_ref())
.unwrap();
let camera_calibration =
image.camera_calibration(&self.project).unwrap();
let mount_calibration = image.mount_calibration(&self.project).unwrap();
Some(ImageGroup {
camera_calibration: camera_calibration,
image: image,
irb: irb,
irb_path: entry.path(),
mount_calibration: mount_calibration,
rotate: self.rotate,
})
} else {
None
}
})
.collect()
}
Err(err) => {
use std::io::ErrorKind;
match err.kind() {
ErrorKind::NotFound => Vec::new(),
_ => panic!("io error: {}", err),
}
}
}
}
fn outfile<P: AsRef<Path>>(&self, scan_position: &ScanPosition, infile: P) -> PathBuf {
let mut outfile = self.las_dir.clone();
if self.use_scanpos_names {
outfile.push(Path::new(&scan_position.name).with_extension("las"));
} else {
outfile.push(infile.as_ref().with_extension("las").file_name().unwrap());
}
outfile
}
fn name_map(&self, scan_position: &ScanPosition) -> Option<&str> {
self.name_map
.maps
.iter()
.find(|map| map.from == scan_position.name)
.map(|map| map.to.as_str())
}
}
impl fmt::Display for Config {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Configuration:")?;
writeln!(f, " - project: {}", self.project.path.display())?;
writeln!(f, " - image dir: {}", self.image_dir.display())?;
writeln!(f, " - las dir: {}", self.las_dir.display())?;
writeln!(f, " - scan positions:")?;
for scan_position in self.scan_positions() {
writeln!(f, " - name: {}", scan_position.name)?;
let image_groups = self.image_groups(scan_position);
if image_groups.is_empty() {
writeln!(f, " - no images for this scan position")?;
} else {
writeln!(f, " - images:")?;
for image_group in image_groups {
writeln!(f, " - {}", image_group.irb_path.display())?;
}
}
}
Ok(())
}
}
impl<'a> ImageGroup<'a> {
fn | (&self, socs: &Point<Socs>) -> Option<f64> {
let cmcs = socs.to_cmcs(self.image.cop, self.mount_calibration);
self.camera_calibration.cmcs_to_ics(&cmcs).map(|(mut u,
mut v)| {
if self.rotate {
let new_u = self.camera_calibration.height as f64 - v;
v = u;
u = new_u;
}
self.irb
.temperature(u.trunc() as i32, v.trunc() as i32)
.expect("error when retrieving temperature") - 273.15
})
}
}
| temperature | identifier_name |
main.rs | #[macro_use]
extern crate clap;
extern crate irb;
extern crate las;
extern crate palette;
extern crate riscan_pro;
extern crate scanifc;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate text_io;
extern crate toml;
use clap::{App, ArgMatches};
use irb::Irb;
use las::Color;
use las::point::Format;
use palette::{Gradient, Rgb};
use riscan_pro::{CameraCalibration, MountCalibration, Point, Project, ScanPosition, Socs};
use riscan_pro::scan_position::Image;
use scanifc::point3d::Stream;
use std::fmt;
use std::fs;
use std::io::Write;
use std::path::{Path, PathBuf};
use std::u16;
fn main() {
let yaml = load_yaml!("cli.yml");
let matches = App::from_yaml(yaml).get_matches();
print!("Configuring...");
std::io::stdout().flush().unwrap();
let config = Config::new(&matches);
println!("done.");
println!("{}", config);
loop {
print!("Continue? (y/n) ");
std::io::stdout().flush().unwrap();
let answer: String = read!();
println!();
match answer.to_lowercase().as_str() {
"y" => break,
"n" => return,
_ => println!("Unknown response: {}", answer),
}
}
for scan_position in config.scan_positions() {
println!("Colorizing {}:", scan_position.name);
let translations = config.translations(scan_position);
if translations.is_empty() {
println!(" - No translations found");
} else {
for translation in translations {
println!(
" - Translation:\n - Infile: {}\n - Outfile: {}",
translation.infile.display(),
translation.outfile.display()
);
config.colorize(scan_position, &translation);
}
}
}
println!("Complete!");
}
struct Config {
image_dir: PathBuf,
keep_without_thermal: bool,
las_dir: PathBuf,
max_reflectance: f32,
min_reflectance: f32,
project: Project,
rotate: bool,
scan_position_names: Option<Vec<String>>,
sync_to_pps: bool,
temperature_gradient: Gradient<Rgb>,
use_scanpos_names: bool,
name_map: NameMap,
}
struct ImageGroup<'a> {
camera_calibration: &'a CameraCalibration,
image: &'a Image,
irb: Irb,
irb_path: PathBuf,
mount_calibration: &'a MountCalibration,
rotate: bool,
}
struct Translation {
infile: PathBuf,
outfile: PathBuf,
}
#[derive(Debug, Default, Deserialize)]
struct NameMap {
maps: Vec<FromTo>,
}
#[derive(Debug, Default, Deserialize)]
struct FromTo {
from: String,
to: String,
}
impl Config {
fn new(matches: &ArgMatches) -> Config {
use std::fs::File;
use std::io::Read;
use toml;
let project = Project::from_path(matches.value_of("PROJECT").unwrap()).unwrap();
let image_dir = PathBuf::from(matches.value_of("IMAGE_DIR").unwrap());
let las_dir = Path::new(matches.value_of("LAS_DIR").unwrap()).to_path_buf();
let min_reflectance = value_t!(matches, "min-reflectance", f32).unwrap();
let max_reflectance = value_t!(matches, "max-reflectance", f32).unwrap();
let min_temperature = value_t!(matches, "min-temperature", f32).unwrap();
let max_temperature = value_t!(matches, "max-temperature", f32).unwrap();
let min_temperature_color = Rgb::new(0.0, 0.0, 1.0);
let max_temperature_color = Rgb::new(1.0, 0.0, 0.0);
let temperature_gradient = Gradient::with_domain(vec![
(min_temperature, min_temperature_color),
(max_temperature, max_temperature_color),
]);
let name_map = if let Some(name_map) = matches.value_of("name-map") {
let mut s = String::new();
File::open(name_map)
.unwrap()
.read_to_string(&mut s)
.unwrap();
toml::from_str(&s).unwrap()
} else {
NameMap::default()
};
Config {
image_dir: image_dir,
keep_without_thermal: matches.is_present("keep-without-thermal"),
las_dir: las_dir,
max_reflectance: max_reflectance,
min_reflectance: min_reflectance,
project: project,
rotate: matches.is_present("rotate"),
scan_position_names: matches.values_of("scan-position").map(|values| {
values.map(|name| name.to_string()).collect()
}),
sync_to_pps: matches.is_present("sync-to-pps"),
temperature_gradient: temperature_gradient,
use_scanpos_names: matches.is_present("use-scanpos-names"),
name_map: name_map,
}
}
fn translations(&self, scan_position: &ScanPosition) -> Vec<Translation> {
let paths = scan_position.singlescan_rxp_paths(&self.project);
if self.use_scanpos_names && paths.len() > 1 {
panic!(
"--use-scanpos-names was provided, but there are {} rxp files for scan position {}",
paths.len(),
scan_position.name
);
}
paths
.into_iter()
.map(|path| {
Translation {
outfile: self.outfile(scan_position, &path),
infile: path,
}
})
.collect()
}
fn colorize(&self, scan_position: &ScanPosition, translation: &Translation) {
use std::f64;
let image_groups = self.image_groups(scan_position);
let stream = Stream::from_path(&translation.infile)
.sync_to_pps(self.sync_to_pps)
.open()
.unwrap();
let mut writer = las::Writer::from_path(&translation.outfile, self.las_header()).unwrap();
for point in stream {
let point = point.expect("could not read rxp point");
let socs = Point::socs(point.x, point.y, point.z);
let temperatures = image_groups
.iter()
.filter_map(|image_group| image_group.temperature(&socs))
.collect::<Vec<_>>();
let temperature = if temperatures.is_empty() {
if self.keep_without_thermal {
f64::NAN
} else {
continue;
}
} else {
temperatures.iter().sum::<f64>() / temperatures.len() as f64
};
let glcs = socs.to_prcs(scan_position.sop).to_glcs(self.project.pop);
let point = las::Point {
x: glcs.x,
y: glcs.y,
z: glcs.z,
intensity: self.to_intensity(point.reflectance),
color: Some(self.to_color(temperature as f32)),
gps_time: Some(temperature),
..Default::default()
};
writer.write(point).expect("could not write las point");
}
}
fn scan_positions(&self) -> Vec<&ScanPosition> {
let mut scan_positions: Vec<_> = if let Some(names) = self.scan_position_names.as_ref() {
names
.iter()
.map(|name| self.project.scan_positions.get(name).unwrap())
.collect()
} else {
self.project.scan_positions.values().collect()
};
scan_positions.sort_by_key(|s| &s.name);
scan_positions
}
fn to_color(&self, n: f32) -> Color {
let color = self.temperature_gradient.get(n);
Color {
red: (u16::MAX as f32 * color.red) as u16,
green: (u16::MAX as f32 * color.green) as u16,
blue: (u16::MAX as f32 * color.blue) as u16,
}
}
fn to_intensity(&self, n: f32) -> u16 {
(u16::MAX as f32 * (n - self.min_reflectance) /
(self.max_reflectance - self.min_reflectance)) as u16
}
fn las_header(&self) -> las::Header {
let mut header = las::Header::default();
header.point_format = Format::new(3).unwrap();
header.transforms = las::Vector {
x: las::Transform {
scale: 0.001,
offset: self.project.pop[(0, 3)],
},
y: las::Transform {
scale: 0.001,
offset: self.project.pop[(1, 3)],
},
z: las::Transform {
scale: 0.001,
offset: self.project.pop[(2, 3)],
},
};
header
}
fn image_groups<'a>(&'a self, scan_position: &'a ScanPosition) -> Vec<ImageGroup<'a>> {
let mut image_dir = self.image_dir.clone();
image_dir.push(&scan_position.name);
match fs::read_dir(image_dir) {
Ok(read_dir) => {
read_dir
.enumerate()
.filter_map(|(i, entry)| {
let entry = entry.unwrap();
if entry.path().extension().map(|e| e == "irb").unwrap_or(
false,
)
{
let image = if let Some(name) = self.name_map(scan_position) {
let image_name = format!("{} - Image{:03}", name, i + 1);
scan_position.images.get(&image_name).expect(&format!(
"Could not find image {}",
image_name
))
} else {
scan_position.image_from_path(entry.path()).unwrap()
};
let irb = Irb::from_path(entry.path().to_string_lossy().as_ref())
.unwrap();
let camera_calibration =
image.camera_calibration(&self.project).unwrap();
let mount_calibration = image.mount_calibration(&self.project).unwrap();
Some(ImageGroup {
camera_calibration: camera_calibration,
image: image,
irb: irb,
irb_path: entry.path(),
mount_calibration: mount_calibration,
rotate: self.rotate,
})
} else |
})
.collect()
}
Err(err) => {
use std::io::ErrorKind;
match err.kind() {
ErrorKind::NotFound => Vec::new(),
_ => panic!("io error: {}", err),
}
}
}
}
fn outfile<P: AsRef<Path>>(&self, scan_position: &ScanPosition, infile: P) -> PathBuf {
let mut outfile = self.las_dir.clone();
if self.use_scanpos_names {
outfile.push(Path::new(&scan_position.name).with_extension("las"));
} else {
outfile.push(infile.as_ref().with_extension("las").file_name().unwrap());
}
outfile
}
fn name_map(&self, scan_position: &ScanPosition) -> Option<&str> {
self.name_map
.maps
.iter()
.find(|map| map.from == scan_position.name)
.map(|map| map.to.as_str())
}
}
impl fmt::Display for Config {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Configuration:")?;
writeln!(f, " - project: {}", self.project.path.display())?;
writeln!(f, " - image dir: {}", self.image_dir.display())?;
writeln!(f, " - las dir: {}", self.las_dir.display())?;
writeln!(f, " - scan positions:")?;
for scan_position in self.scan_positions() {
writeln!(f, " - name: {}", scan_position.name)?;
let image_groups = self.image_groups(scan_position);
if image_groups.is_empty() {
writeln!(f, " - no images for this scan position")?;
} else {
writeln!(f, " - images:")?;
for image_group in image_groups {
writeln!(f, " - {}", image_group.irb_path.display())?;
}
}
}
Ok(())
}
}
impl<'a> ImageGroup<'a> {
fn temperature(&self, socs: &Point<Socs>) -> Option<f64> {
let cmcs = socs.to_cmcs(self.image.cop, self.mount_calibration);
self.camera_calibration.cmcs_to_ics(&cmcs).map(|(mut u,
mut v)| {
if self.rotate {
let new_u = self.camera_calibration.height as f64 - v;
v = u;
u = new_u;
}
self.irb
.temperature(u.trunc() as i32, v.trunc() as i32)
.expect("error when retrieving temperature") - 273.15
})
}
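// Illustrative sketch (not in the original code): the 90-degree image
// rotation applied above when `--rotate` is set. A pixel at (u, v) in the
// unrotated frame lands at (height - v, u); e.g. with height = 480
// (hypothetical), (u, v) = (10, 20) maps to (460, 10). The helper name and
// signature are assumptions for illustration only.
#[allow(dead_code)]
fn rotate_sketch(u: f64, v: f64, height: f64) -> (f64, f64) {
    (height - v, u)
}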
}
| {
None
} | conditional_block |
map_output_tracker.rs | use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use crate::serialized_data_capnp::serialized_data;
use crate::{Error, NetworkError, Result};
use capnp::message::{Builder as MsgBuilder, ReaderOptions};
use capnp_futures::serialize as capnp_serialize;
use dashmap::{DashMap, DashSet};
use parking_lot::Mutex;
use thiserror::Error;
use tokio::{
net::{TcpListener, TcpStream},
stream::StreamExt,
};
use tokio_util::compat::{Tokio02AsyncReadCompatExt, Tokio02AsyncWriteCompatExt};
const CAPNP_BUF_READ_OPTS: ReaderOptions = ReaderOptions {
traversal_limit_in_words: std::u64::MAX,
nesting_limit: 64,
};
pub(crate) enum MapOutputTrackerMessage {
// Contains shuffle_id
GetMapOutputLocations(i64),
StopMapOutputTracker,
}
/// The key is the shuffle_id
pub type ServerUris = Arc<DashMap<usize, Vec<Option<String>>>>;
// Starts the server in master node and client in slave nodes. Similar to cache tracker.
#[derive(Clone, Debug)]
pub(crate) struct MapOutputTracker {
is_master: bool,
pub server_uris: ServerUris,
fetching: Arc<DashSet<usize>>,
generation: Arc<Mutex<i64>>,
master_addr: SocketAddr,
}
// Only master_addr doesn't have a default.
impl Default for MapOutputTracker {
fn default() -> Self {
MapOutputTracker {
is_master: Default::default(),
server_uris: Default::default(),
fetching: Default::default(),
generation: Default::default(),
master_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0),
}
}
}
impl MapOutputTracker {
pub fn new(is_master: bool, master_addr: SocketAddr) -> Self {
let output_tracker = MapOutputTracker {
is_master,
server_uris: Arc::new(DashMap::new()),
fetching: Arc::new(DashSet::new()),
generation: Arc::new(Mutex::new(0)),
master_addr,
};
output_tracker.server();
output_tracker
}
async fn client(&self, shuffle_id: usize) -> Result<Vec<String>> {
let mut stream = loop {
match TcpStream::connect(self.master_addr).await {
Ok(stream) => break stream,
Err(_) => continue,
}
};
let (reader, writer) = stream.split();
let reader = reader.compat();
let mut writer = writer.compat_write();
log::debug!(
"connected to master to fetch shuffle task #{} data hosts",
shuffle_id
);
let shuffle_id_bytes = bincode::serialize(&shuffle_id)?;
let mut message = MsgBuilder::new_default();
let mut shuffle_data = message.init_root::<serialized_data::Builder>();
shuffle_data.set_msg(&shuffle_id_bytes);
capnp_serialize::write_message(&mut writer, &message).await?;
let message_reader = capnp_serialize::read_message(reader, CAPNP_BUF_READ_OPTS)
.await?
.ok_or_else(|| NetworkError::NoMessageReceived)?;
let shuffle_data = message_reader.get_root::<serialized_data::Reader>()?;
let locs: Vec<String> = bincode::deserialize(&shuffle_data.get_msg()?)?;
Ok(locs)
}
fn server(&self) {
if !self.is_master {
return;
}
log::debug!("map output tracker server starting");
let master_addr = self.master_addr;
let server_uris = self.server_uris.clone();
tokio::spawn(async move {
let mut listener = TcpListener::bind(master_addr)
.await
.map_err(NetworkError::TcpListener)?;
log::debug!("map output tracker server started");
while let Some(Ok(mut stream)) = listener.incoming().next().await {
let server_uris_clone = server_uris.clone();
tokio::spawn(async move {
let (reader, writer) = stream.split();
let reader = reader.compat();
let writer = writer.compat_write();
// reading
let message_reader = capnp_serialize::read_message(reader, CAPNP_BUF_READ_OPTS)
.await?
.ok_or_else(|| NetworkError::NoMessageReceived)?;
let shuffle_id = {
let data = message_reader.get_root::<serialized_data::Reader>()?;
bincode::deserialize(data.get_msg()?)?
};
while server_uris_clone
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
.filter(|x| !x.is_none())
.count()
== 0
{
// check whether this will hurt the performance or not
tokio::time::delay_for(Duration::from_millis(1)).await;
}
let locs = server_uris_clone
.get(&shuffle_id)
.map(|kv| {
kv.value()
.iter()
.cloned()
.map(|x| x.unwrap())
.collect::<Vec<_>>()
})
.unwrap_or_default();
log::debug!(
"locs inside map output tracker server for shuffle id #{}: {:?}",
shuffle_id,
locs
);
// writing response
let result = bincode::serialize(&locs)?;
let mut message = MsgBuilder::new_default();
let mut locs_data = message.init_root::<serialized_data::Builder>();
locs_data.set_msg(&result);
// TODO: remove blocking call when possible
futures::executor::block_on(async {
capnp_futures::serialize::write_message(writer, message)
.await
.map_err(Error::CapnpDeserialization)?;
Ok::<_, Error>(())
})?;
Ok::<_, Error>(())
});
}
Err::<(), _>(Error::ExecutorShutdown)
});
}
pub fn register_shuffle(&self, shuffle_id: usize, num_maps: usize) {
log::debug!("inside register shuffle");
if self.server_uris.get(&shuffle_id).is_some() {
// TODO: error handling
log::debug!("map tracker register shuffle none");
return;
}
self.server_uris.insert(shuffle_id, vec![None; num_maps]);
log::debug!("server_uris after register_shuffle {:?}", self.server_uris);
}
pub fn register_map_output(&self, shuffle_id: usize, map_id: usize, server_uri: String) {
log::debug!(
"registering map output from shuffle task #{} with map id #{} at server: {}",
shuffle_id,
map_id,
server_uri
);
self.server_uris.get_mut(&shuffle_id).unwrap()[map_id] = Some(server_uri);
}
pub fn register_map_outputs(&self, shuffle_id: usize, locs: Vec<Option<String>>) {
log::debug!(
"registering map outputs inside map output tracker for shuffle id #{}: {:?}",
shuffle_id,
locs
);
self.server_uris.insert(shuffle_id, locs);
}
pub fn unregister_map_output(&self, shuffle_id: usize, map_id: usize, server_uri: String) {
let array = self.server_uris.get(&shuffle_id);
if let Some(arr) = array {
if arr.get(map_id).unwrap() == &Some(server_uri) {
self.server_uris
.get_mut(&shuffle_id)
.unwrap()
.insert(map_id, None)
}
self.increment_generation();
} else {
// TODO: error logging
}
}
pub async fn get_server_uris(&self, shuffle_id: usize) -> Result<Vec<String>> {
log::debug!(
"trying to get uri for shuffle task #{}, current server uris: {:?}",
shuffle_id,
self.server_uris
);
if self
.server_uris
.get(&shuffle_id)
.map(|some| some.iter().filter_map(|x| x.clone()).next())
.flatten()
.is_none()
{
if self.fetching.contains(&shuffle_id) {
while self.fetching.contains(&shuffle_id) {
// TODO: check whether this will hurt the performance or not
tokio::time::delay_for(Duration::from_millis(1)).await;
}
let servers = self
.server_uris
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
.filter(|x| !x.is_none())
.map(|x| x.clone().unwrap())
.collect::<Vec<_>>();
log::debug!("returning after fetching done, return: {:?}", servers);
return Ok(servers);
} else {
log::debug!("adding to fetching queue");
self.fetching.insert(shuffle_id);
}
let fetched = self.client(shuffle_id).await?;
log::debug!("fetched locs from client: {:?}", fetched);
self.server_uris.insert(
shuffle_id,
fetched.iter().map(|x| Some(x.clone())).collect(),
);
log::debug!("added locs to server uris after fetching");
self.fetching.remove(&shuffle_id);
Ok(fetched)
} else {
Ok(self
.server_uris
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
.filter(|x| !x.is_none())
.map(|x| x.clone().unwrap())
.collect())
}
}
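// Illustrative sketch (not in the original code): the single-flight pattern
// used by `get_server_uris` above. The first caller for a given shuffle_id
// claims the `fetching` slot and performs the fetch; later callers wait for
// the entry to disappear and then read the published result. This minimal
// synchronous analogue is an assumption for illustration only, using the
// same `dashmap` types this file already imports.
#[allow(dead_code)]
fn single_flight_sketch(
    fetching: &DashSet<usize>,
    cache: &DashMap<usize, Vec<String>>,
    shuffle_id: usize,
    fetch: impl Fn() -> Vec<String>,
) -> Vec<String> {
    if let Some(hit) = cache.get(&shuffle_id) {
        return hit.value().clone();
    }
    if fetching.insert(shuffle_id) {
        // we won the race: do the fetch and publish the result
        let locs = fetch();
        cache.insert(shuffle_id, locs.clone());
        fetching.remove(&shuffle_id);
        locs
    } else {
        // someone else is fetching: wait for them to finish
        while fetching.contains(&shuffle_id) {
            std::thread::yield_now();
        }
        cache.get(&shuffle_id).map(|kv| kv.value().clone()).unwrap_or_default()
    }
}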
pub fn increment_generation(&self) {
*self.generation.lock() += 1;
}
pub fn get_generation(&self) -> i64 {
*self.generation.lock()
} | pub fn update_generation(&mut self, new_gen: i64) {
if new_gen > *self.generation.lock() {
self.server_uris = Arc::new(DashMap::new());
*self.generation.lock() = new_gen;
}
}
}
#[derive(Debug, Error)]
pub enum MapOutputError {
#[error("Shuffle id output #{0} not found in the map")]
ShuffleIdNotFound(usize),
} | random_line_split |
|
map_output_tracker.rs | use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use crate::serialized_data_capnp::serialized_data;
use crate::{Error, NetworkError, Result};
use capnp::message::{Builder as MsgBuilder, ReaderOptions};
use capnp_futures::serialize as capnp_serialize;
use dashmap::{DashMap, DashSet};
use parking_lot::Mutex;
use thiserror::Error;
use tokio::{
net::{TcpListener, TcpStream},
stream::StreamExt,
};
use tokio_util::compat::{Tokio02AsyncReadCompatExt, Tokio02AsyncWriteCompatExt};
const CAPNP_BUF_READ_OPTS: ReaderOptions = ReaderOptions {
traversal_limit_in_words: std::u64::MAX,
nesting_limit: 64,
};
pub(crate) enum MapOutputTrackerMessage {
// Contains shuffle_id
GetMapOutputLocations(i64),
StopMapOutputTracker,
}
/// The key is the shuffle_id
pub type ServerUris = Arc<DashMap<usize, Vec<Option<String>>>>;
// Starts the server in master node and client in slave nodes. Similar to cache tracker.
#[derive(Clone, Debug)]
pub(crate) struct MapOutputTracker {
is_master: bool,
pub server_uris: ServerUris,
fetching: Arc<DashSet<usize>>,
generation: Arc<Mutex<i64>>,
master_addr: SocketAddr,
}
// Only master_addr doesn't have a default.
impl Default for MapOutputTracker {
fn default() -> Self {
MapOutputTracker {
is_master: Default::default(),
server_uris: Default::default(),
fetching: Default::default(),
generation: Default::default(),
master_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0),
}
}
}
impl MapOutputTracker {
pub fn | (is_master: bool, master_addr: SocketAddr) -> Self {
let output_tracker = MapOutputTracker {
is_master,
server_uris: Arc::new(DashMap::new()),
fetching: Arc::new(DashSet::new()),
generation: Arc::new(Mutex::new(0)),
master_addr,
};
output_tracker.server();
output_tracker
}
async fn client(&self, shuffle_id: usize) -> Result<Vec<String>> {
let mut stream = loop {
match TcpStream::connect(self.master_addr).await {
Ok(stream) => break stream,
Err(_) => continue,
}
};
let (reader, writer) = stream.split();
let reader = reader.compat();
let mut writer = writer.compat_write();
log::debug!(
"connected to master to fetch shuffle task #{} data hosts",
shuffle_id
);
let shuffle_id_bytes = bincode::serialize(&shuffle_id)?;
let mut message = MsgBuilder::new_default();
let mut shuffle_data = message.init_root::<serialized_data::Builder>();
shuffle_data.set_msg(&shuffle_id_bytes);
capnp_serialize::write_message(&mut writer, &message).await?;
let message_reader = capnp_serialize::read_message(reader, CAPNP_BUF_READ_OPTS)
.await?
.ok_or_else(|| NetworkError::NoMessageReceived)?;
let shuffle_data = message_reader.get_root::<serialized_data::Reader>()?;
let locs: Vec<String> = bincode::deserialize(&shuffle_data.get_msg()?)?;
Ok(locs)
}
fn server(&self) {
if !self.is_master {
return;
}
log::debug!("map output tracker server starting");
let master_addr = self.master_addr;
let server_uris = self.server_uris.clone();
tokio::spawn(async move {
let mut listener = TcpListener::bind(master_addr)
.await
.map_err(NetworkError::TcpListener)?;
log::debug!("map output tracker server started");
while let Some(Ok(mut stream)) = listener.incoming().next().await {
let server_uris_clone = server_uris.clone();
tokio::spawn(async move {
let (reader, writer) = stream.split();
let reader = reader.compat();
let writer = writer.compat_write();
// reading
let message_reader = capnp_serialize::read_message(reader, CAPNP_BUF_READ_OPTS)
.await?
.ok_or_else(|| NetworkError::NoMessageReceived)?;
let shuffle_id = {
let data = message_reader.get_root::<serialized_data::Reader>()?;
bincode::deserialize(data.get_msg()?)?
};
while server_uris_clone
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
.filter(|x| !x.is_none())
.count()
== 0
{
// check whether this will hurt the performance or not
tokio::time::delay_for(Duration::from_millis(1)).await;
}
let locs = server_uris_clone
.get(&shuffle_id)
.map(|kv| {
kv.value()
.iter()
.cloned()
.map(|x| x.unwrap())
.collect::<Vec<_>>()
})
.unwrap_or_default();
log::debug!(
"locs inside map output tracker server for shuffle id #{}: {:?}",
shuffle_id,
locs
);
// writing response
let result = bincode::serialize(&locs)?;
let mut message = MsgBuilder::new_default();
let mut locs_data = message.init_root::<serialized_data::Builder>();
locs_data.set_msg(&result);
// TODO: remove blocking call when possible
futures::executor::block_on(async {
capnp_futures::serialize::write_message(writer, message)
.await
.map_err(Error::CapnpDeserialization)?;
Ok::<_, Error>(())
})?;
Ok::<_, Error>(())
});
}
Err::<(), _>(Error::ExecutorShutdown)
});
}
pub fn register_shuffle(&self, shuffle_id: usize, num_maps: usize) {
log::debug!("inside register shuffle");
if self.server_uris.get(&shuffle_id).is_some() {
// TODO: error handling
log::debug!("map tracker register shuffle none");
return;
}
self.server_uris.insert(shuffle_id, vec![None; num_maps]);
log::debug!("server_uris after register_shuffle {:?}", self.server_uris);
}
pub fn register_map_output(&self, shuffle_id: usize, map_id: usize, server_uri: String) {
log::debug!(
"registering map output from shuffle task #{} with map id #{} at server: {}",
shuffle_id,
map_id,
server_uri
);
self.server_uris.get_mut(&shuffle_id).unwrap()[map_id] = Some(server_uri);
}
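// Illustrative sketch (not in the original code): the shape of
// `server_uris` after registering a 3-map shuffle and one completed map
// task. Slots stay `None` until the corresponding map reports in; the URI
// string here is a made-up placeholder.
#[allow(dead_code)]
fn server_uris_layout_sketch() -> ServerUris {
    let uris: ServerUris = Arc::new(DashMap::new());
    uris.insert(0, vec![None; 3]); // register_shuffle(0, 3)
    uris.get_mut(&0).unwrap()[1] = Some("http://worker-1:5000".to_string()); // map 1 done
    // uris now maps 0 -> [None, Some("http://worker-1:5000"), None]
    uris
}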
pub fn register_map_outputs(&self, shuffle_id: usize, locs: Vec<Option<String>>) {
log::debug!(
"registering map outputs inside map output tracker for shuffle id #{}: {:?}",
shuffle_id,
locs
);
self.server_uris.insert(shuffle_id, locs);
}
pub fn unregister_map_output(&self, shuffle_id: usize, map_id: usize, server_uri: String) {
let array = self.server_uris.get(&shuffle_id);
if let Some(arr) = array {
if arr.get(map_id).unwrap() == &Some(server_uri) {
self.server_uris
.get_mut(&shuffle_id)
.unwrap()
.insert(map_id, None)
}
self.increment_generation();
} else {
// TODO: error logging
}
}
pub async fn get_server_uris(&self, shuffle_id: usize) -> Result<Vec<String>> {
log::debug!(
"trying to get uri for shuffle task #{}, current server uris: {:?}",
shuffle_id,
self.server_uris
);
if self
.server_uris
.get(&shuffle_id)
.map(|some| some.iter().filter_map(|x| x.clone()).next())
.flatten()
.is_none()
{
if self.fetching.contains(&shuffle_id) {
while self.fetching.contains(&shuffle_id) {
// TODO: check whether this will hurt the performance or not
tokio::time::delay_for(Duration::from_millis(1)).await;
}
let servers = self
.server_uris
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
.filter(|x| !x.is_none())
.map(|x| x.clone().unwrap())
.collect::<Vec<_>>();
log::debug!("returning after fetching done, return: {:?}", servers);
return Ok(servers);
} else {
log::debug!("adding to fetching queue");
self.fetching.insert(shuffle_id);
}
let fetched = self.client(shuffle_id).await?;
log::debug!("fetched locs from client: {:?}", fetched);
self.server_uris.insert(
shuffle_id,
fetched.iter().map(|x| Some(x.clone())).collect(),
);
log::debug!("added locs to server uris after fetching");
self.fetching.remove(&shuffle_id);
Ok(fetched)
} else {
Ok(self
.server_uris
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
.filter(|x| !x.is_none())
.map(|x| x.clone().unwrap())
.collect())
}
}
pub fn increment_generation(&self) {
*self.generation.lock() += 1;
}
pub fn get_generation(&self) -> i64 {
*self.generation.lock()
}
pub fn update_generation(&mut self, new_gen: i64) {
if new_gen > *self.generation.lock() {
self.server_uris = Arc::new(DashMap::new());
*self.generation.lock() = new_gen;
}
}
}
#[derive(Debug, Error)]
pub enum MapOutputError {
#[error("Shuffle id output #{0} not found in the map")]
ShuffleIdNotFound(usize),
}
| new | identifier_name |
map_output_tracker.rs | use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use crate::serialized_data_capnp::serialized_data;
use crate::{Error, NetworkError, Result};
use capnp::message::{Builder as MsgBuilder, ReaderOptions};
use capnp_futures::serialize as capnp_serialize;
use dashmap::{DashMap, DashSet};
use parking_lot::Mutex;
use thiserror::Error;
use tokio::{
net::{TcpListener, TcpStream},
stream::StreamExt,
};
use tokio_util::compat::{Tokio02AsyncReadCompatExt, Tokio02AsyncWriteCompatExt};
const CAPNP_BUF_READ_OPTS: ReaderOptions = ReaderOptions {
traversal_limit_in_words: std::u64::MAX,
nesting_limit: 64,
};
pub(crate) enum MapOutputTrackerMessage {
// Contains shuffle_id
GetMapOutputLocations(i64),
StopMapOutputTracker,
}
/// The key is the shuffle_id
pub type ServerUris = Arc<DashMap<usize, Vec<Option<String>>>>;
// Starts the server in master node and client in slave nodes. Similar to cache tracker.
#[derive(Clone, Debug)]
pub(crate) struct MapOutputTracker {
is_master: bool,
pub server_uris: ServerUris,
fetching: Arc<DashSet<usize>>,
generation: Arc<Mutex<i64>>,
master_addr: SocketAddr,
}
// Only master_addr doesn't have a default.
impl Default for MapOutputTracker {
fn default() -> Self {
MapOutputTracker {
is_master: Default::default(),
server_uris: Default::default(),
fetching: Default::default(),
generation: Default::default(),
master_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0),
}
}
}
impl MapOutputTracker {
pub fn new(is_master: bool, master_addr: SocketAddr) -> Self {
let output_tracker = MapOutputTracker {
is_master,
server_uris: Arc::new(DashMap::new()),
fetching: Arc::new(DashSet::new()),
generation: Arc::new(Mutex::new(0)),
master_addr,
};
output_tracker.server();
output_tracker
}
async fn client(&self, shuffle_id: usize) -> Result<Vec<String>> {
let mut stream = loop {
match TcpStream::connect(self.master_addr).await {
Ok(stream) => break stream,
Err(_) => continue,
}
};
let (reader, writer) = stream.split();
let reader = reader.compat();
let mut writer = writer.compat_write();
log::debug!(
"connected to master to fetch shuffle task #{} data hosts",
shuffle_id
);
let shuffle_id_bytes = bincode::serialize(&shuffle_id)?;
let mut message = MsgBuilder::new_default();
let mut shuffle_data = message.init_root::<serialized_data::Builder>();
shuffle_data.set_msg(&shuffle_id_bytes);
capnp_serialize::write_message(&mut writer, &message).await?;
let message_reader = capnp_serialize::read_message(reader, CAPNP_BUF_READ_OPTS)
.await?
.ok_or_else(|| NetworkError::NoMessageReceived)?;
let shuffle_data = message_reader.get_root::<serialized_data::Reader>()?;
let locs: Vec<String> = bincode::deserialize(&shuffle_data.get_msg()?)?;
Ok(locs)
}
fn server(&self) {
if !self.is_master {
return;
}
log::debug!("map output tracker server starting");
let master_addr = self.master_addr;
let server_uris = self.server_uris.clone();
tokio::spawn(async move {
let mut listener = TcpListener::bind(master_addr)
.await
.map_err(NetworkError::TcpListener)?;
log::debug!("map output tracker server started");
while let Some(Ok(mut stream)) = listener.incoming().next().await {
let server_uris_clone = server_uris.clone();
tokio::spawn(async move {
let (reader, writer) = stream.split();
let reader = reader.compat();
let writer = writer.compat_write();
// reading
let message_reader = capnp_serialize::read_message(reader, CAPNP_BUF_READ_OPTS)
.await?
.ok_or_else(|| NetworkError::NoMessageReceived)?;
let shuffle_id = {
let data = message_reader.get_root::<serialized_data::Reader>()?;
bincode::deserialize(data.get_msg()?)?
};
while server_uris_clone
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
                    .filter(|x| !x.is_none())
.count()
== 0
{
                    // check whether this will hurt the performance or not
tokio::time::delay_for(Duration::from_millis(1)).await;
}
let locs = server_uris_clone
.get(&shuffle_id)
.map(|kv| {
kv.value()
.iter()
.cloned()
.map(|x| x.unwrap())
.collect::<Vec<_>>()
})
.unwrap_or_default();
log::debug!(
"locs inside map output tracker server for shuffle id #{}: {:?}",
shuffle_id,
locs
);
                // writing response
let result = bincode::serialize(&locs)?;
let mut message = MsgBuilder::new_default();
let mut locs_data = message.init_root::<serialized_data::Builder>();
locs_data.set_msg(&result);
// TODO: remove blocking call when possible
futures::executor::block_on(async {
capnp_futures::serialize::write_message(writer, message)
.await
.map_err(Error::CapnpDeserialization)?;
Ok::<_, Error>(())
})?;
Ok::<_, Error>(())
});
}
Err::<(), _>(Error::ExecutorShutdown)
});
}
pub fn register_shuffle(&self, shuffle_id: usize, num_maps: usize) {
log::debug!("inside register shuffle");
if self.server_uris.get(&shuffle_id).is_some() {
// TODO: error handling
log::debug!("map tracker register shuffle none");
return;
}
self.server_uris.insert(shuffle_id, vec![None; num_maps]);
log::debug!("server_uris after register_shuffle {:?}", self.server_uris);
}
pub fn register_map_output(&self, shuffle_id: usize, map_id: usize, server_uri: String) {
log::debug!(
"registering map output from shuffle task #{} with map id #{} at server: {}",
shuffle_id,
map_id,
server_uri
);
self.server_uris.get_mut(&shuffle_id).unwrap()[map_id] = Some(server_uri);
}
pub fn register_map_outputs(&self, shuffle_id: usize, locs: Vec<Option<String>>) {
log::debug!(
"registering map outputs inside map output tracker for shuffle id #{}: {:?}",
shuffle_id,
locs
);
self.server_uris.insert(shuffle_id, locs);
}
pub fn unregister_map_output(&self, shuffle_id: usize, map_id: usize, server_uri: String) {
let array = self.server_uris.get(&shuffle_id);
if let Some(arr) = array | else {
// TODO: error logging
}
}
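    /// Returns the known output locations for `shuffle_id`, fetching them
    /// from the master when none are cached locally. The `fetching` set
    /// deduplicates concurrent fetches: later callers spin until the
    /// in-flight fetch finishes and then read the cached result.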
pub async fn get_server_uris(&self, shuffle_id: usize) -> Result<Vec<String>> {
log::debug!(
"trying to get uri for shuffle task #{}, current server uris: {:?}",
shuffle_id,
self.server_uris
);
if self
.server_uris
.get(&shuffle_id)
.map(|some| some.iter().filter_map(|x| x.clone()).next())
.flatten()
.is_none()
{
if self.fetching.contains(&shuffle_id) {
while self.fetching.contains(&shuffle_id) {
// TODO: check whether this will hurt the performance or not
tokio::time::delay_for(Duration::from_millis(1)).await;
}
let servers = self
.server_uris
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
                .filter(|x| !x.is_none())
.map(|x| x.clone().unwrap())
.collect::<Vec<_>>();
log::debug!("returning after fetching done, return: {:?}", servers);
return Ok(servers);
} else {
log::debug!("adding to fetching queue");
self.fetching.insert(shuffle_id);
}
let fetched = self.client(shuffle_id).await?;
log::debug!("fetched locs from client: {:?}", fetched);
self.server_uris.insert(
shuffle_id,
fetched.iter().map(|x| Some(x.clone())).collect(),
);
log::debug!("added locs to server uris after fetching");
self.fetching.remove(&shuffle_id);
Ok(fetched)
} else {
Ok(self
.server_uris
.get(&shuffle_id)
.ok_or_else(|| MapOutputError::ShuffleIdNotFound(shuffle_id))?
.iter()
            .filter(|x| !x.is_none())
.map(|x| x.clone().unwrap())
.collect())
}
}
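    /// The generation counter is used to invalidate stale locations:
    /// `update_generation` below drops the whole `server_uris` cache when
    /// it observes a newer generation.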
pub fn increment_generation(&self) {
*self.generation.lock() += 1;
}
pub fn get_generation(&self) -> i64 {
*self.generation.lock()
}
pub fn update_generation(&mut self, new_gen: i64) {
if new_gen > *self.generation.lock() {
self.server_uris = Arc::new(DashMap::new());
*self.generation.lock() = new_gen;
}
}
}
#[derive(Debug, Error)]
pub enum MapOutputError {
#[error("Shuffle id output #{0} not found in the map")]
ShuffleIdNotFound(usize),
}
| {
if arr.get(map_id).unwrap() == &Some(server_uri) {
self.server_uris
.get_mut(&shuffle_id)
.unwrap()
.insert(map_id, None)
}
self.increment_generation();
} | conditional_block |
lib.rs | //! Everything related to meshes.
//!
//! **TODO**: Everything.
#![feature(trivial_bounds)]
#![feature(never_type)]
#![feature(doc_cfg)]
#![feature(proc_macro_hygiene)]
#![feature(try_blocks)]
#![feature(specialization)]
#![feature(associated_type_defaults)]
#![feature(associated_type_bounds)]
#![feature(array_value_iter)]
#![deny(missing_debug_implementations)]
#![deny(intra_doc_link_resolution_failure)]
// TODO: specialization now warns, but min_specialization is not yet ready to be used here
#![allow(incomplete_features)]
pub extern crate cgmath;
// This is done for proc macros from `lox-macros`. These use paths starting
// with `lox`. This makes sense for all crates using `lox` as dependency. But
// we also want to use proc macros in this library. So we alias `crate` with
// `lox`.
extern crate self as lox;
#[cfg(test)]
#[macro_use]
mod test_utils;
pub mod algo;
pub mod cast;
pub mod ds;
#[cfg(feature = "io")]
pub mod fat;
pub mod handle;
#[cfg(feature = "io")]
pub mod io;
pub mod map;
pub mod math;
pub mod mesh;
pub mod prop;
pub mod traits;
pub mod prelude;
pub mod refs;
#[cfg(feature = "io")]
pub mod shape;
pub mod util;
pub use crate::handle::{EdgeHandle, FaceHandle, VertexHandle};
/// The three basic elements in a polygon mesh.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum | {
Edge,
Face,
Vertex,
}
// ===========================================================================
// ===== `Sealed` trait
// ===========================================================================
pub(crate) mod sealed {
/// A trait that cannot be implemented outside of this crate.
///
/// This is helpful for all "real" traits in this library that only
/// abstract over a closed set of types. Thus, users shouldn't be able to
/// implement those traits for their types. Adding `Sealed` as supertrait
/// solves this problem.
pub trait Sealed {}
}
// ===========================================================================
// ===== Macros
// ===========================================================================
/// Derive macro for the [`Empty` trait][traits::Empty].
///
/// ```
/// use lox::Empty; // this imports the custom-derive and not the trait!
///
/// #[derive(Empty)]
/// struct MyStruct {
/// a: Vec<u32>, // => `vec![]`
/// b: Option<String>, // => `None`
/// c: (), // => `()`
/// }
/// ```
///
/// This can only be derived for structs. All struct fields need to implement
/// `Empty` in order for the derive to work. If your struct has generic
/// parameters, they won't be bounded with `Empty` in the generated impl block.
/// This is useful most of the time, because things like `Vec<T>` and
/// `Option<T>` don't require `T: Empty` to implement `Empty`. But this means
/// that you sometimes have to add a global `Empty` bound to your parameter or
/// implement `Empty` manually.
pub use lox_macros::Empty;
/// Derive macro for [the `MemSink` trait][io::MemSink].
///
/// You can easily derive `MemSink` for your own types. To do that, you have to
/// attach `#[derive(MemSink)]` to your struct definition (note: currently, the
/// trait can only be derived for structs with named fields). You also have to
/// annotate your fields with `#[lox(...)]` attributes to tell the derive macro
/// what a field should be used for. Example:
///
/// ```
/// use lox::{
/// MemSink, VertexHandle,
/// cgmath::Point3,
/// ds::HalfEdgeMesh,
/// map::DenseMap,
/// };
///
///
/// #[derive(MemSink)]
/// struct MyMesh {
/// #[lox(core_mesh)]
/// mesh: HalfEdgeMesh,
///
/// #[lox(vertex_position)]
/// positions: DenseMap<VertexHandle, Point3<f32>>,
/// }
/// ```
///
/// There is one required field: the core mesh field. That field's type has to
/// implement several mesh traits, in particular `MeshMut` and `TriMeshMut`.
/// You have to annotate that mesh with `#[lox(core_mesh)]`.
///
/// Additionally, you can have fields for each mesh property, like vertex
/// position or face colors. The type of those fields has to implement
/// `PropStoreMut` with a compatible element type. You have to annotate these
/// property fields with the corresponding attribute. The available properties
/// are:
///
/// - `vertex_position`
/// - `vertex_normal`
/// - `vertex_color`
/// - `face_normal`
/// - `face_color`
///
/// Furthermore, there are some configurations (like the cast mode) that can be
/// configured via `lox(...)` attributes as well. See below for more
/// information.
///
///
/// ## Cast modes
///
/// You can set a *cast mode* for each field. A `MemSink` has to be able to
/// "handle" any primitive type as the source is allowed to call the property
/// methods with any type. The sink can handle types either by casting or by
/// returning an error. The field's cast mode determines which casts are
/// allowed and which are not. Possible cast modes:
///
/// - `cast = "none"`
/// - `cast = "lossless"`
/// - `cast = "rounding"`
/// - `cast = "clamping"`
/// - `cast = "lossy"` (*default*)
///
/// The `none` mode does not allow casting at all. If the type provided by the
/// source does not match the type in your struct, an error is returned. All
/// other modes correspond to the cast modes in the [`cast`
/// module][crate::cast].
///
/// Note that the cast modes are used by `derive(MemSource)` as well.
///
/// You can specify the cast mode either per field or globally on the whole
/// struct. The mode of the struct applies to all fields that don't have a
/// field-specific mode.
///
/// ```
/// # use lox::{
/// # MemSink, VertexHandle,
/// # cgmath::{Point3, Vector3},
/// # ds::HalfEdgeMesh,
/// # map::DenseMap,
/// # };
/// #
/// #[derive(MemSink)]
/// #[lox(cast = "none")]
/// struct MyMesh {
/// #[lox(core_mesh)]
/// mesh: HalfEdgeMesh,
///
/// #[lox(vertex_position)]
/// positions: DenseMap<VertexHandle, Point3<f32>>,
///
/// #[lox(vertex_normal, cast = "lossy")]
/// normals: DenseMap<VertexHandle, Vector3<f32>>,
/// }
/// ```
///
/// In this example, the vertex positions inherit the "struct global" cast mode
/// (`none`), while the vertex normals override that mode to `lossy`.
///
///
/// ### Exact traits required for each field
///
/// Traits required for the `core_mesh` field:
/// - TODO
///
/// Traits required for property fields. For type `T` of the field:
/// - `T` must implement [`PropStoreMut`][crate::map::PropStoreMut] (with
/// fitting handle type). Additionally:
/// - For `vertex_position`: `T::Target` must implement
/// [`Pos3Like`][crate::prop::Pos3Like].
/// - For `*_normal`: `T::Target` must implement
/// [`Vec3Like`][crate::prop::Vec3Like].
/// - For `*_color`: `T::Target` must implement
/// [`ColorLike`][crate::prop::ColorLike] and `T::Target::Channel` must
/// implement [`Primitive`].
#[cfg(feature = "io")]
pub use lox_macros::MemSink;
/// Derive macro for [the `MemSource` trait][io::MemSource].
///
/// You can easily derive `MemSource` for your own types. To do that, you have
/// to attach `#[derive(MemSource)]` to your struct definition (note:
/// currently, the trait can only be derived for structs with named fields).
/// You also have to annotate your fields with `#[lox(...)]` attributes to tell
/// the derive macro what a field should be used for. Example:
///
/// ```
/// use lox::{
/// MemSource, VertexHandle,
/// cgmath::Point3,
/// ds::SharedVertexMesh,
/// map::DenseMap,
/// };
///
///
/// #[derive(MemSource)]
/// struct MyMesh {
/// #[lox(core_mesh)]
/// mesh: SharedVertexMesh,
///
/// #[lox(vertex_position)]
/// positions: DenseMap<VertexHandle, Point3<f32>>,
/// }
/// ```
///
/// Deriving this trait works very similarly to deriving [`MemSink`]. See its
/// documentation for more information on the custom derive.
///
///
/// ### Exact traits required for each field
///
/// Traits required for the `core_mesh` field:
/// - TODO
///
/// Traits required for property fields. For type `T` of the field:
/// - `T` must implement [`PropStore`][crate::map::PropStore] (with fitting
/// handle type). Additionally:
/// - For `vertex_position`: `T::Target` must implement
/// [`Pos3Like`][crate::prop::Pos3Like] and `T::Target::Scalar` must
/// implement [`Primitive`].
/// - For `*_normal`: `T::Target` must implement
/// [`Vec3Like`][crate::prop::Vec3Like] and `T::Target::Scalar` must
/// implement [`Primitive`].
/// - For `*_color`: `T::Target` must implement
/// [`ColorLike`][crate::prop::ColorLike] and `T::Target::Channel` must
/// implement [`Primitive`].
#[cfg(feature = "io")]
pub use lox_macros::MemSource;
/// Convenience macro to quickly create a small mesh.
///
/// (This is just a dummy macro to add documentation to the actual proc-macro
/// reexported from `lox-macros`. See [#58700][i58700] and [#58696][i58696] for
/// more information.)
///
/// **Note about unstable features**: this proc macro needs to be invoked in
/// expression context, which is still unstable. So your crate needs to enable
/// the `proc_macro_hygiene` feature for this to work.
///
/// [i58700]: https://github.com/rust-lang/rust/issues/58700
/// [i58696]: https://github.com/rust-lang/rust/issues/58696
///
/// # Examples
///
/// Here we create two triangles:
///
/// ```
/// #![feature(proc_macro_hygiene)]
/// use lox::{
/// mesh,
/// prelude::*,
/// ds::SharedVertexMesh,
/// };
///
///
/// let (mesh, positions, distances, face_colors) = mesh! {
/// type: SharedVertexMesh,
/// vertices: [
/// v0: ([0.0, 0.0, 0.0], 0.0),
/// v1: ([0.0, 1.0, 0.0], 1.0),
/// v2: ([1.0, 0.0, 0.0], 1.0),
/// v3: ([1.0, 1.0, 0.0], 1.414),
/// ],
/// faces: [
/// [v0, v2, v1]: ("red"),
/// [v3, v1, v2]: ("green"),
/// ],
/// };
///
/// assert_eq!(mesh.num_vertices(), 4);
/// assert_eq!(mesh.num_faces(), 2);
/// ```
///
/// In the code above, we associate a position and a scalar value with each
/// vertex and a color (or rather, a color name) with each face. Properties of
/// vertices and faces are specified after a colon (`:`) in parenthesis (like a
/// tuple).
///
/// For each property you add in those parenthesis, the macro returns an
/// additional property map. The full return value is:
///
/// ```text
/// (mesh, /* vertex property maps */, /* face property maps*/)
/// ```
///
/// ## Without properties
///
/// We don't need to specify any properties. We can either write empty
/// parenthesis (`()`) or just omit the colon and the parenthesis:
///
/// ```
/// #![feature(proc_macro_hygiene)]
/// use lox::{
/// mesh,
/// ds::SharedVertexMesh,
/// };
///
///
/// let mesh = mesh! {
/// type: SharedVertexMesh,
/// vertices: [
/// v0: (), // <-- this is equivalent to:
/// v1, // <-- this
/// v2,
/// v3,
/// ],
/// faces: [
/// [v0, v2, v1],
/// [v3, v1, v2],
/// ],
/// };
/// ```
///
/// Of course, you can also add properties to the vertices, but not the faces,
/// or the other way around. However, you always have to specify the same
/// number of properties for all vertices and the same number of properties for
/// all faces.
///
/// ## An empty mesh
///
/// This is not particularly useful in itself, but it works. You can use this
/// syntax when you haven't yet decided what your mesh should look like.
///
/// ```
/// #![feature(proc_macro_hygiene)]
/// use lox::{
/// mesh,
/// ds::SharedVertexMesh,
/// };
///
///
/// let empty_mesh = mesh! {
/// type: SharedVertexMesh,
/// vertices: [],
/// faces: [],
/// };
/// ```
pub use lox_macros::mesh;
| MeshElement | identifier_name |
lib.rs | //! Everything related to meshes.
//!
//! **TODO**: Everything.
#![feature(trivial_bounds)]
#![feature(never_type)]
#![feature(doc_cfg)]
#![feature(proc_macro_hygiene)]
#![feature(try_blocks)]
#![feature(specialization)]
#![feature(associated_type_defaults)]
#![feature(associated_type_bounds)]
#![feature(array_value_iter)]
#![deny(missing_debug_implementations)]
#![deny(intra_doc_link_resolution_failure)]
// TODO: specialization now warns, but min_specialization is not yet ready to be used here
#![allow(incomplete_features)]
pub extern crate cgmath;
// This is done for proc macros from `lox-macros`. These use paths starting
// with `lox`. This makes sense for all crates using `lox` as dependency. But
// we also want to use proc macros in this library. So we alias `crate` with
// `lox`.
extern crate self as lox;
#[cfg(test)]
#[macro_use]
mod test_utils;
pub mod algo;
pub mod cast;
pub mod ds;
#[cfg(feature = "io")]
pub mod fat;
pub mod handle;
#[cfg(feature = "io")]
pub mod io;
pub mod map;
pub mod math;
pub mod mesh;
pub mod prop;
pub mod traits;
pub mod prelude;
pub mod refs;
#[cfg(feature = "io")]
pub mod shape;
pub mod util;
pub use crate::handle::{EdgeHandle, FaceHandle, VertexHandle};
/// The three basic elements in a polygon mesh.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MeshElement {
Edge,
Face,
Vertex,
}
// ===========================================================================
// ===== `Sealed` trait
// ===========================================================================
pub(crate) mod sealed {
/// A trait that cannot be implemented outside of this crate.
///
/// This is helpful for all "real" traits in this library that only
/// abstract over a closed set of types. Thus, users shouldn't be able to
/// implement those traits for their types. Adding `Sealed` as supertrait
/// solves this problem.
pub trait Sealed {}
}
// ===========================================================================
// ===== Macros
// ===========================================================================
/// Derive macro for the [`Empty` trait][traits::Empty].
///
/// ```
/// use lox::Empty; // this imports the custom-derive and not the trait!
///
/// #[derive(Empty)]
/// struct MyStruct {
/// a: Vec<u32>, // => `vec![]`
/// b: Option<String>, // => `None`
/// c: (), // => `()`
/// }
/// ```
///
/// This can only be derived for structs. All struct fields need to implement
/// `Empty` in order for the derive to work. If your struct has generic
/// parameters, they won't be bounded with `Empty` in the generated impl block.
/// This is useful most of the time, because things like `Vec<T>` and
/// `Option<T>` don't require `T: Empty` to implement `Empty`. But this means
/// that you sometimes have to add a global `Empty` bound to your parameter or
/// implement `Empty` manually.
pub use lox_macros::Empty;
/// Derive macro for [the `MemSink` trait][io::MemSink].
///
/// You can easily derive `MemSink` for your own types. To do that, you have to
/// attach `#[derive(MemSink)]` to your struct definition (note: currently, the
/// trait can only be derived for structs with named fields). You also have to
/// annotate your fields with `#[lox(...)]` attributes to tell the derive macro
/// what a field should be used for. Example:
///
/// ```
/// use lox::{
/// MemSink, VertexHandle,
/// cgmath::Point3,
/// ds::HalfEdgeMesh,
/// map::DenseMap,
/// };
///
///
/// #[derive(MemSink)]
/// struct MyMesh {
/// #[lox(core_mesh)]
/// mesh: HalfEdgeMesh,
///
/// #[lox(vertex_position)]
/// positions: DenseMap<VertexHandle, Point3<f32>>,
/// }
/// ```
///
/// There is one required field: the core mesh field. That field's type has to
/// implement several mesh traits, in particular `MeshMut` and `TriMeshMut`.
/// You have to annotate that mesh with `#[lox(core_mesh)]`.
///
/// Additionally, you can have fields for each mesh property, like vertex
/// position or face colors. The type of those fields has to implement
/// `PropStoreMut` with a compatible element type. You have to annotate these
/// property fields with the corresponding attribute. The available properties
/// are:
///
/// - `vertex_position`
/// - `vertex_normal`
/// - `vertex_color`
/// - `face_normal`
/// - `face_color`
///
/// Furthermore, there are some configurations (like the cast mode) that can be
/// configured via `lox(...)` attributes as well. See below for more
/// information.
///
///
/// ## Cast modes
///
/// You can set a *cast mode* for each field. A `MemSink` has to be able to
/// "handle" any primitive type as the source is allowed to call the property
/// methods with any type. The sink can handle types either by casting or by
/// returning an error. The field's cast mode determines which casts are
/// allowed and which are not. Possible cast modes:
///
/// - `cast = "none"`
/// - `cast = "lossless"`
/// - `cast = "rounding"`
/// - `cast = "clamping"`
/// - `cast = "lossy"` (*default*)
///
/// The `none` mode does not allow casting at all. If the type provided by the
/// source does not match the type in your struct, an error is returned. All
/// other modes correspond to the cast modes in the [`cast`
/// module][crate::cast].
///
/// Note that the cast modes are used by `derive(MemSource)` as well.
///
/// You can specify the cast mode either per field or globally on the whole
/// struct. The mode of the struct applies to all fields that don't have a
/// field-specific mode.
///
/// ```
/// # use lox::{
/// # MemSink, VertexHandle,
/// # cgmath::{Point3, Vector3},
/// # ds::HalfEdgeMesh,
/// # map::DenseMap,
/// # };
/// #
/// #[derive(MemSink)]
/// #[lox(cast = "none")]
/// struct MyMesh {
/// #[lox(core_mesh)]
/// mesh: HalfEdgeMesh,
///
/// #[lox(vertex_position)]
/// positions: DenseMap<VertexHandle, Point3<f32>>,
///
/// #[lox(vertex_normal, cast = "lossy")]
/// normals: DenseMap<VertexHandle, Vector3<f32>>,
/// }
/// ```
///
/// In this example, the vertex positions inherit the "struct global" cast mode
/// (`none`), while the vertex normals override that mode to `lossy`.
///
///
/// ### Exact traits required for each field
///
/// Traits required for the `core_mesh` field:
/// - TODO
///
/// Traits required for property fields. For type `T` of the field:
/// - `T` must implement [`PropStoreMut`][crate::map::PropStoreMut] (with
/// fitting handle type). Additionally:
/// - For `vertex_position`: `T::Target` must implement
/// [`Pos3Like`][crate::prop::Pos3Like].
/// - For `*_normal`: `T::Target` must implement
/// [`Vec3Like`][crate::prop::Vec3Like].
/// - For `*_color`: `T::Target` must implement
/// [`ColorLike`][crate::prop::ColorLike] and `T::Target::Channel` must
/// implement [`Primitive`].
#[cfg(feature = "io")]
pub use lox_macros::MemSink;
/// Derive macro for [the `MemSource` trait][io::MemSource].
///
/// You can easily derive `MemSource` for your own types. To do that, you have
/// to attach `#[derive(MemSource)]` to your struct definition (note:
/// currently, the trait can only be derived for structs with named fields).
/// You also have to annotate your fields with `#[lox(...)]` attributes to tell
/// the derive macro what a field should be used for. Example:
///
/// ```
/// use lox::{ | /// };
///
///
/// #[derive(MemSource)]
/// struct MyMesh {
/// #[lox(core_mesh)]
/// mesh: SharedVertexMesh,
///
/// #[lox(vertex_position)]
/// positions: DenseMap<VertexHandle, Point3<f32>>,
/// }
/// ```
///
/// Deriving this trait works very similarly to deriving [`MemSink`]. See its
/// documentation for more information on the custom derive.
///
///
/// ### Exact traits required for each field
///
/// Traits required for the `core_mesh` field:
/// - TODO
///
/// Traits required for property fields. For type `T` of the field:
/// - `T` must implement [`PropStore`][crate::map::PropStore] (with fitting
/// handle type). Additionally:
/// - For `vertex_position`: `T::Target` must implement
/// [`Pos3Like`][crate::prop::Pos3Like] and `T::Target::Scalar` must
/// implement [`Primitive`].
/// - For `*_normal`: `T::Target` must implement
/// [`Vec3Like`][crate::prop::Vec3Like] and `T::Target::Scalar` must
/// implement [`Primitive`].
/// - For `*_color`: `T::Target` must implement
/// [`ColorLike`][crate::prop::ColorLike] and `T::Target::Channel` must
/// implement [`Primitive`].
#[cfg(feature = "io")]
pub use lox_macros::MemSource;
/// Convenience macro to quickly create a small mesh.
///
/// (This is just a dummy macro to add documentation to the actual proc-macro
/// reexported from `lox-macros`. See [#58700][i58700] and [#58696][i58696] for
/// more information.)
///
/// **Note about unstable features**: this proc macro needs to be invoked in
/// expression context, which is still unstable. So your crate needs to enable
/// the `proc_macro_hygiene` feature for this to work.
///
/// [i58700]: https://github.com/rust-lang/rust/issues/58700
/// [i58696]: https://github.com/rust-lang/rust/issues/58696
///
/// # Examples
///
/// Here we create two triangles:
///
/// ```
/// #![feature(proc_macro_hygiene)]
/// use lox::{
/// mesh,
/// prelude::*,
/// ds::SharedVertexMesh,
/// };
///
///
/// let (mesh, positions, distances, face_colors) = mesh! {
/// type: SharedVertexMesh,
/// vertices: [
/// v0: ([0.0, 0.0, 0.0], 0.0),
/// v1: ([0.0, 1.0, 0.0], 1.0),
/// v2: ([1.0, 0.0, 0.0], 1.0),
/// v3: ([1.0, 1.0, 0.0], 1.414),
/// ],
/// faces: [
/// [v0, v2, v1]: ("red"),
/// [v3, v1, v2]: ("green"),
/// ],
/// };
///
/// assert_eq!(mesh.num_vertices(), 4);
/// assert_eq!(mesh.num_faces(), 2);
/// ```
///
/// In the code above, we associate a position and a scalar value with each
/// vertex and a color (or rather, a color name) with each face. Properties of
/// vertices and faces are specified after a colon (`:`) in parenthesis (like a
/// tuple).
///
/// For each property you add in those parenthesis, the macro returns an
/// additional property map. The full return value is:
///
/// ```text
/// (mesh, /* vertex property maps */, /* face property maps*/)
/// ```
///
/// ## Without properties
///
/// We don't need to specify any properties. We can either write empty
/// parenthesis (`()`) or just omit the colon and the parenthesis:
///
/// ```
/// #![feature(proc_macro_hygiene)]
/// use lox::{
/// mesh,
/// ds::SharedVertexMesh,
/// };
///
///
/// let mesh = mesh! {
/// type: SharedVertexMesh,
/// vertices: [
/// v0: (), // <-- this is equivalent to:
/// v1, // <-- this
/// v2,
/// v3,
/// ],
/// faces: [
/// [v0, v2, v1],
/// [v3, v1, v2],
/// ],
/// };
/// ```
///
/// Of course, you can also add properties to the vertices, but not the faces,
/// or the other way around. However, you always have to specify the same
/// number of properties for all vertices and the same number of properties for
/// all faces.
///
/// ## An empty mesh
///
/// This is not particularly useful in itself, but it works. You can use this
/// syntax when you haven't yet decided what your mesh should look like.
///
/// ```
/// #![feature(proc_macro_hygiene)]
/// use lox::{
/// mesh,
/// ds::SharedVertexMesh,
/// };
///
///
/// let empty_mesh = mesh! {
/// type: SharedVertexMesh,
/// vertices: [],
/// faces: [],
/// };
/// ```
pub use lox_macros::mesh; | /// MemSource, VertexHandle,
/// cgmath::Point3,
/// ds::SharedVertexMesh,
/// map::DenseMap, | random_line_split |
main.rs | #[macro_use]
extern crate log;
extern crate simplelog;
use futures::future;
use futures::future::{BoxFuture, FutureExt};
use reqwest as request;
use base64::encode;
use dirs::home_dir;
use futures::io::SeekFrom;
use regex::Regex;
use request::header::{HeaderMap, AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use simplelog::*;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{self, BufRead, LineWriter, Seek, Write};
use std::str;
const SURE_USER_AGENT: &str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15";
const TWILIO_BASE_URL: &str = "https://api.twilio.com/2010-04-01";
#[tokio::main]
async fn main() -> SureResult<()> {
init_logging()?;
let client = request::Client::new();
let sess_id = get_session_id(&client).await?;
let mut listings = get_listings(&client, &sess_id, 0).await?;
remove_duplicates(&mut listings);
if listings.markers.len() > 0 {
let listings_map = scrape_listings(&client, &listings).await?;
let desired_listings = get_desired_listings(&listings_map);
if desired_listings.len() > 0 {
let listing_message = build_listing_message(&desired_listings);
send_messages(&client, &listing_message).await?;
}
}
Ok(())
}
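/// Sets up a `simplelog` file logger that appends to `~/.sure/sure.log`,
/// timestamping entries in local time.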
fn init_logging() -> SureResult<()> {
let log_file = OpenOptions::new()
.append(true)
.create(true)
.open(&get_sure_filepath("sure.log"))?;
let config = ConfigBuilder::new()
.set_time_format_str("%c")
.set_time_to_local(true)
.build();
CombinedLogger::init(vec![WriteLogger::new(LevelFilter::Info, config, log_file)]).unwrap();
Ok(())
}
async fn get_session_id(client: &request::Client) -> SureResult<String> {
let re = Regex::new(r#"(PHPSESSID=[\w\S]+);"#).unwrap();
let res = client
.get("https://www.utahrealestate.com/index/public.index")
.header(USER_AGENT, SURE_USER_AGENT)
.send()
.await?;
let sessid = res.headers().get("set-cookie").unwrap().to_str().unwrap();
let mut id = String::from("");
for cap in re.captures_iter(sessid) {
id = String::from(&cap[1]);
}
if id == "" {
panic!("unable to find session id");
}
Ok(id)
}
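/// Queries URE for the current listing markers. The boxed future lets the
/// async function recurse to retry failed JSON parses; after more than
/// three retries the site is assumed to be down and the process exits.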
fn get_listings<'a>(
client: &'a request::Client,
session_id: &'a str,
retry_count: usize,
) -> BoxFuture<'a, SureResult<UreData>> {
if retry_count > 3 {
error!("exceeded retry count - URE must be down");
std::process::exit(0);
}
async move {
let params = get_ure_search_params();
let mut headers = HeaderMap::new();
headers.insert(USER_AGENT, SURE_USER_AGENT.parse().unwrap());
headers.insert(
CONTENT_TYPE,
"application/x-www-form-urlencoded".parse().unwrap(),
);
headers.insert("PHPSESSID", session_id.parse().unwrap());
let res = client
.post("https://www.utahrealestate.com/search/chained.update/param_reset/county_code,o_county_code,city,o_city,zip,o_zip,geometry,o_geometry/count/false/criteria/false/pg/1/limit/50/dh/1190")
.headers(headers)
.body(params)
.send()
.await?;
let res_text = res.text().await?;
match serde_json::from_str(&res_text) {
Ok(v) => Ok(v),
Err(_) => {
error!("failed to parse text, retrying");
Ok(get_listings(client, session_id, retry_count + 1).await?)
}
}
}.boxed()
}
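/// Downloads every marker's detail page concurrently, driving the futures
/// with `select_all` while drawing a progress bar on stdout. Returns the
/// parsed documents keyed by MLS id.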
async fn scrape_listings(
client: &request::Client,
data: &UreData,
) -> SureResult<HashMap<String, Html>> {
let mut raw_futures = vec![];
for (index, marker) in data.markers.iter().enumerate() {
raw_futures.push(get_listing(&client, &marker.id, index));
}
let unpin_futures: Vec<_> = raw_futures.into_iter().map(Box::pin).collect();
let mut mut_futures = unpin_futures;
let mut documents: HashMap<String, Html> = HashMap::new();
let mut size: usize = 0;
let mut current: f32 = 0.0;
let total: usize = mut_futures.len();
    while !mut_futures.is_empty() {
match future::select_all(mut_futures).await {
(Ok((id, _idx, document, content_length)), _index, remaining) => {
current += 1.0;
let percentage = (((current / total as f32) * 100.0) / 2.0) as usize;
io::stdout()
.write(
format!(
"\rdownloading listings {}/{}: [{}>{}]",
current,
total,
"=".repeat(percentage),
" ".repeat(50 - percentage),
)
.as_bytes(),
)
.unwrap();
io::stdout().flush().unwrap();
size += content_length;
documents.insert(id, document);
mut_futures = remaining;
}
(Err(_e), _index, remaining) => {
error!("document failed");
mut_futures = remaining;
}
}
}
println!("\n");
info!(
"downloaded {:.2?}MB from {} listings\n\t\t\t\tβββ{:?}{}",
size as f32 / 1000000.0,
total,
documents.iter().map(|v| v.0).collect::<Vec<&String>>(),
" ".repeat(50)
);
Ok(documents)
}
fn get_desired_listings(listing_map: &HashMap<String, Html>) -> Vec<DesiredListing> {
let selector = Selector::parse(".facts___list___items.facts___item").unwrap();
let mut desired_listings: Vec<DesiredListing> = vec![];
for (key, value) in listing_map {
let mut dl = DesiredListing::new();
let div = value.select(&selector).collect::<Vec<_>>();
for node in div {
let mut node_vec = node
.text()
.collect::<Vec<&str>>()
.iter()
.map(|&v| v.trim())
.collect::<Vec<&str>>();
            node_vec.retain(|&v| v != "");
if node_vec[0] == "Days on URE"
&& (node_vec[1] == "Just Listed"
|| node_vec[1].to_string().parse::<usize>().unwrap() >= 20)
{
dl.interested = true;
}
if node_vec[0] == "Status" && node_vec[1] == "Active" {
dl.active = true;
}
}
if dl.is_desired() {
dl.mls = String::from(key);
desired_listings.push(dl);
}
}
desired_listings
}
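/// Drops markers whose MLS ids are already recorded in `listings.txt`,
/// then records the remaining ids so they are skipped on the next run.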
fn remove_duplicates(listings: &mut UreData) {
let mut dup_idx: Vec<usize> = vec![];
let mut existing = get_checked_listings();
for (idx, listing) in listings.markers.iter().enumerate() {
if existing.contains(&listing.id) {
dup_idx.push(idx);
}
}
if dup_idx.len() > 0 {
for i in dup_idx.into_iter().rev() {
listings.markers.remove(i);
}
}
if listings.markers.len() > 0 {
for listing in listings.markers.iter() {
existing.push(listing.id.clone());
}
write_checked_listings(&existing).unwrap();
} else {
info!("no new listings");
}
}
fn build_listing_message(listings: &Vec<DesiredListing>) -> String {
let mut message_str = String::from("");
for listing in listings {
message_str.push_str(&format!(
"https://www.utahrealestate.com/{}\n\n",
listing.mls
));
}
message_str
}
async fn send_messages(client: &request::Client, message: &str) -> SureResult<()> {
let credentials = get_twilio_credentials();
let mut raw_futures = vec![];
for number in credentials.alert_numbers.iter() {
raw_futures.push(send_message(&client, &message, number))
}
let unpin_futures: Vec<_> = raw_futures.into_iter().map(Box::pin).collect();
let mut mut_futures = unpin_futures;
    while !mut_futures.is_empty() {
match future::select_all(mut_futures).await {
(Ok(_res), _index, remaining) => mut_futures = remaining,
(Err(_e), _index, remaining) => mut_futures = remaining,
}
}
Ok(())
}
async fn get_listing(
client: &request::Client,
id: &str,
index: usize,
) -> SureResult<(String, usize, Html, usize)> {
let url = format!("https://www.utahrealestate.com/{}", id);
let res = client
.get(&url)
.header(USER_AGENT, SURE_USER_AGENT)
.send()
.await?;
let body = res.text().await?;
let document = Html::parse_document(&body);
Ok((String::from(id), index, document, body.len()))
}
async fn send_message(client: &request::Client, message: &str, to: &str) -> SureResult<()> {
let credentials = get_twilio_credentials();
let message_url = format!(
"{}/Accounts/{}/Messages.json",
TWILIO_BASE_URL, credentials.sid
);
let mut headers = HeaderMap::new();
headers.insert(
AUTHORIZATION,
format!("Basic {}", credentials.basic_auth())
.parse()
.unwrap(),
);
let params = [
("From", &credentials.number),
("Body", &message.to_string()),
("To", &to.to_string()),
];
let res = client
.post(&message_url)
.headers(headers)
.form(¶ms)
.send()
.await?;
if res.status() == 201 {
info!("message sent");
} else {
error!(
"error sending message: {:?}\n\tβββ{}\n\tβββ{:?}",
res.status(),
res.text().await?,
params
)
}
Ok(())
}
///
/// Utility Functions
///
fn get_checked_listings() -> Vec<String> {
let mut checked_mls: Vec<String> = vec![];
if let Ok(lines) = read_lines(&get_sure_filepath("listings.txt")) {
for line in lines {
if let Ok(l) = line {
checked_mls.push(String::from(l.trim()))
}
}
}
checked_mls
}
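/// Rewrites `listings.txt` from scratch: truncates the file, sorts the
/// ids numerically, and writes one id per line.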
fn write_checked_listings(checked: &Vec<String>) -> SureResult<()> {
let mut contents = String::from("");
let mut file = OpenOptions::new()
.write(true)
.create(true)
.open(&get_sure_filepath("listings.txt"))?;
file.set_len(0)?;
file.seek(SeekFrom::Start(0))?;
let mut file = LineWriter::new(file);
let mut sorted = checked
.iter()
.map(|v| v.parse::<usize>().unwrap())
.collect::<Vec<usize>>();
sorted.sort();
for c in sorted {
contents.push_str(&format!("{}\n", c));
}
file.write_all(contents.as_bytes())?;
Ok(())
}
fn get_ure_search_params() -> String {
let mut param_encoded = String::from("");
if let Ok(lines) = read_lines(&get_sure_filepath("queries.env")) {
for line in lines {
if let Ok(l) = line {
param_encoded.push_str(&format!("{}&", l));
}
}
}
String::from(param_encoded)
}
fn get_twilio_credentials() -> TwilioAuth {
let mut auth = TwilioAuth::new();
if let Ok(lines) = read_lines(&get_sure_filepath("twilio.env")) {
for line in lines {
if let Ok(i) = line {
let config_item: Vec<&str> = i.split('=').collect();
if config_item[0] == "AccountSID" {
auth.sid = String::from(config_item[1]);
}
if config_item[0] == "AuthToken" {
auth.auth_token = String::from(config_item[1]);
}
if config_item[0] == "TwilioNumber" {
auth.number = String::from(config_item[1]);
}
if config_item[0] == "AlertNumbers" {
let numbers: Vec<String> = config_item[1]
.split(",")
.into_iter()
.map(String::from)
.collect();
auth.alert_numbers = numbers;
}
}
}
}
auth
}
fn read_lines(filename: &str) -> io::Result<io::Lines<io::BufReader<File>>> {
let file = File::open(filename)?;
Ok(io::BufReader::new(file).lines())
}
fn get_sure_filepath( | > String {
let mut home_path = home_dir().unwrap();
home_path.push(".sure");
home_path.push(filename);
String::from(home_path.to_str().unwrap())
}
///
///
/// Definitions and Implementations
///
///
///
/// DesiredListing
///
#[derive(Debug)]
struct DesiredListing {
active: bool,
interested: bool,
mls: String,
}
impl DesiredListing {
fn new() -> DesiredListing {
Default::default()
}
fn is_desired(&self) -> bool {
self.active && self.interested
}
}
impl Default for DesiredListing {
fn default() -> Self {
DesiredListing {
active: false,
interested: false,
mls: String::from(""),
}
}
}
///
/// Twilio
///
pub struct TwilioAuth {
sid: String,
auth_token: String,
number: String,
alert_numbers: Vec<String>,
}
impl TwilioAuth {
fn new() -> TwilioAuth {
Default::default()
}
fn basic_auth(&self) -> String {
encode(format!("{}:{}", &self.sid, &self.auth_token).as_bytes())
}
}
impl Default for TwilioAuth {
fn default() -> Self {
TwilioAuth {
sid: String::from(""),
auth_token: String::from(""),
number: String::from(""),
alert_numbers: vec![],
}
}
}
#[derive(Debug, Serialize, Deserialize)]
struct TwilioResponse {
error_code: String,
status: String,
}
///
/// SureResult and SureError
///
type SureResult<T> = Result<T, SureError>;
#[derive(Debug)]
enum SureError {
IoError(std::io::Error),
ReqwestError(request::Error),
StdError(Box<dyn std::error::Error>),
JsonError(serde_json::Error),
}
impl From<std::io::Error> for SureError {
fn from(error: std::io::Error) -> Self {
SureError::IoError(error)
}
}
impl From<reqwest::Error> for SureError {
fn from(error: reqwest::Error) -> Self {
SureError::ReqwestError(error)
}
}
impl From<Box<dyn std::error::Error>> for SureError {
fn from(error: Box<dyn std::error::Error>) -> Self {
SureError::StdError(error)
}
}
impl From<serde_json::Error> for SureError {
fn from(error: serde_json::Error) -> Self {
SureError::JsonError(error)
}
}
///
/// UreData
/// └── Vec<Marker>
///
#[derive(Debug, Serialize, Deserialize)]
struct UreData {
markers: Vec<Marker>,
}
#[derive(Debug, Serialize, Deserialize)]
struct Marker {
price: String,
id: String,
}
| filename: &str) - | identifier_name |
main.rs | #[macro_use]
extern crate log;
extern crate simplelog;
use futures::future;
use futures::future::{BoxFuture, FutureExt};
use reqwest as request;
use base64::encode;
use dirs::home_dir;
use futures::io::SeekFrom;
use regex::Regex;
use request::header::{HeaderMap, AUTHORIZATION, CONTENT_TYPE, USER_AGENT};
use scraper::{Html, Selector};
use serde::{Deserialize, Serialize};
use simplelog::*;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{self, BufRead, LineWriter, Seek, Write};
use std::str;
const SURE_USER_AGENT: &str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15";
const TWILIO_BASE_URL: &str = "https://api.twilio.com/2010-04-01";
#[tokio::main]
async fn main() -> SureResult<()> {
init_logging()?;
let client = request::Client::new();
let sess_id = get_session_id(&client).await?;
let mut listings = get_listings(&client, &sess_id, 0).await?;
remove_duplicates(&mut listings);
if listings.markers.len() > 0 {
let listings_map = scrape_listings(&client, &listings).await?;
let desired_listings = get_desired_listings(&listings_map);
if desired_listings.len() > 0 {
let listing_message = build_listing_message(&desired_listings);
send_messages(&client, &listing_message).await?;
}
}
Ok(())
}
fn init_logging() -> SureResult<()> {
let log_file = OpenOptions::new()
.append(true)
.create(true)
.open(&get_sure_filepath("sure.log"))?;
let config = ConfigBuilder::new()
.set_time_format_str("%c")
.set_time_to_local(true)
.build();
CombinedLogger::init(vec![WriteLogger::new(LevelFilter::Info, config, log_file)]).unwrap();
Ok(())
}
async fn get_session_id(client: &request::Client) -> SureResult<String> {
let re = Regex::new(r#"(PHPSESSID=[\w\S]+);"#).unwrap();
let res = client
.get("https://www.utahrealestate.com/index/public.index")
.header(USER_AGENT, SURE_USER_AGENT)
.send()
.await?;
let sessid = res.headers().get("set-cookie").unwrap().to_str().unwrap();
let mut id = String::from("");
for cap in re.captures_iter(sessid) {
id = String::from(&cap[1]);
}
if id == "" {
panic!("unable to find session id");
}
Ok(id)
}
fn get_listings<'a>(
client: &'a request::Client,
session_id: &'a str,
retry_count: usize,
) -> BoxFuture<'a, SureResult<UreData>> {
if retry_count > 3 {
error!("exceeded retry count - URE must be down");
std::process::exit(0);
}
async move {
let params = get_ure_search_params();
let mut headers = HeaderMap::new();
headers.insert(USER_AGENT, SURE_USER_AGENT.parse().unwrap());
headers.insert(
CONTENT_TYPE,
"application/x-www-form-urlencoded".parse().unwrap(),
);
headers.insert("PHPSESSID", session_id.parse().unwrap());
let res = client
.post("https://www.utahrealestate.com/search/chained.update/param_reset/county_code,o_county_code,city,o_city,zip,o_zip,geometry,o_geometry/count/false/criteria/false/pg/1/limit/50/dh/1190")
.headers(headers)
.body(params)
.send()
.await?;
let res_text = res.text().await?;
match serde_json::from_str(&res_text) {
Ok(v) => Ok(v),
Err(_) => {
error!("failed to parse text, retrying");
Ok(get_listings(client, session_id, retry_count + 1).await?)
}
}
}.boxed()
}
async fn scrape_listings(
client: &request::Client,
data: &UreData,
) -> SureResult<HashMap<String, Html>> {
let mut raw_futures = vec![];
for (index, marker) in data.markers.iter().enumerate() {
raw_futures.push(get_listing(&client, &marker.id, index));
}
let unpin_futures: Vec<_> = raw_futures.into_iter().map(Box::pin).collect();
let mut mut_futures = unpin_futures;
let mut documents: HashMap<String, Html> = HashMap::new();
let mut size: usize = 0;
let mut current: f32 = 0.0;
let total: usize = mut_futures.len();
    while !mut_futures.is_empty() {
match future::select_all(mut_futures).await { | io::stdout()
.write(
format!(
"\rdownloading listings {}/{}: [{}>{}]",
current,
total,
"=".repeat(percentage),
" ".repeat(50 - percentage),
)
.as_bytes(),
)
.unwrap();
io::stdout().flush().unwrap();
size += content_length;
documents.insert(id, document);
mut_futures = remaining;
}
(Err(_e), _index, remaining) => {
error!("document failed");
mut_futures = remaining;
}
}
}
println!("\n");
info!(
"downloaded {:.2?}MB from {} listings\n\t\t\t\tβββ{:?}{}",
size as f32 / 1000000.0,
total,
documents.iter().map(|v| v.0).collect::<Vec<&String>>(),
" ".repeat(50)
);
Ok(documents)
}
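/// Scans each listing's facts list for the "Days on URE" and "Status"
/// entries; a listing is desired when it is Active and either just listed
/// or on the market for at least 20 days.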
fn get_desired_listings(listing_map: &HashMap<String, Html>) -> Vec<DesiredListing> {
let selector = Selector::parse(".facts___list___items.facts___item").unwrap();
let mut desired_listings: Vec<DesiredListing> = vec![];
for (key, value) in listing_map {
let mut dl = DesiredListing::new();
let div = value.select(&selector).collect::<Vec<_>>();
for node in div {
let mut node_vec = node
.text()
.collect::<Vec<&str>>()
.iter()
.map(|&v| v.trim())
.collect::<Vec<&str>>();
            node_vec.retain(|&v| v != "");
if node_vec[0] == "Days on URE"
&& (node_vec[1] == "Just Listed"
|| node_vec[1].to_string().parse::<usize>().unwrap() >= 20)
{
dl.interested = true;
}
if node_vec[0] == "Status" && node_vec[1] == "Active" {
dl.active = true;
}
}
if dl.is_desired() {
dl.mls = String::from(key);
desired_listings.push(dl);
}
}
desired_listings
}
fn remove_duplicates(listings: &mut UreData) {
let mut dup_idx: Vec<usize> = vec![];
let mut existing = get_checked_listings();
for (idx, listing) in listings.markers.iter().enumerate() {
if existing.contains(&listing.id) {
dup_idx.push(idx);
}
}
if dup_idx.len() > 0 {
for i in dup_idx.into_iter().rev() {
listings.markers.remove(i);
}
}
if listings.markers.len() > 0 {
for listing in listings.markers.iter() {
existing.push(listing.id.clone());
}
write_checked_listings(&existing).unwrap();
} else {
info!("no new listings");
}
}
fn build_listing_message(listings: &Vec<DesiredListing>) -> String {
let mut message_str = String::from("");
for listing in listings {
message_str.push_str(&format!(
"https://www.utahrealestate.com/{}\n\n",
listing.mls
));
}
message_str
}
async fn send_messages(client: &request::Client, message: &str) -> SureResult<()> {
let credentials = get_twilio_credentials();
let mut raw_futures = vec![];
for number in credentials.alert_numbers.iter() {
raw_futures.push(send_message(&client, &message, number))
}
let unpin_futures: Vec<_> = raw_futures.into_iter().map(Box::pin).collect();
let mut mut_futures = unpin_futures;
    while !mut_futures.is_empty() {
match future::select_all(mut_futures).await {
(Ok(_res), _index, remaining) => mut_futures = remaining,
(Err(_e), _index, remaining) => mut_futures = remaining,
}
}
Ok(())
}
async fn get_listing(
client: &request::Client,
id: &str,
index: usize,
) -> SureResult<(String, usize, Html, usize)> {
let url = format!("https://www.utahrealestate.com/{}", id);
let res = client
.get(&url)
.header(USER_AGENT, SURE_USER_AGENT)
.send()
.await?;
let body = res.text().await?;
let document = Html::parse_document(&body);
Ok((String::from(id), index, document, body.len()))
}
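/// Sends a single SMS through Twilio's Messages endpoint, authenticating
/// with HTTP Basic auth derived from the account SID and auth token.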
async fn send_message(client: &request::Client, message: &str, to: &str) -> SureResult<()> {
let credentials = get_twilio_credentials();
let message_url = format!(
"{}/Accounts/{}/Messages.json",
TWILIO_BASE_URL, credentials.sid
);
let mut headers = HeaderMap::new();
headers.insert(
AUTHORIZATION,
format!("Basic {}", credentials.basic_auth())
.parse()
.unwrap(),
);
let params = [
("From", &credentials.number),
("Body", &message.to_string()),
("To", &to.to_string()),
];
let res = client
.post(&message_url)
.headers(headers)
.form(¶ms)
.send()
.await?;
if res.status() == 201 {
info!("message sent");
} else {
error!(
"error sending message: {:?}\n\tβββ{}\n\tβββ{:?}",
res.status(),
res.text().await?,
params
)
}
Ok(())
}
///
/// Utility Functions
///
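/// Reads the MLS ids that were already alerted on from
/// `~/.sure/listings.txt`, one id per line.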
fn get_checked_listings() -> Vec<String> {
let mut checked_mls: Vec<String> = vec![];
if let Ok(lines) = read_lines(&get_sure_filepath("listings.txt")) {
for line in lines {
if let Ok(l) = line {
checked_mls.push(String::from(l.trim()))
}
}
}
checked_mls
}
fn write_checked_listings(checked: &Vec<String>) -> SureResult<()> {
let mut contents = String::from("");
let mut file = OpenOptions::new()
.write(true)
.create(true)
.open(&get_sure_filepath("listings.txt"))?;
file.set_len(0)?;
file.seek(SeekFrom::Start(0))?;
let mut file = LineWriter::new(file);
let mut sorted = checked
.iter()
.map(|v| v.parse::<usize>().unwrap())
.collect::<Vec<usize>>();
sorted.sort();
for c in sorted {
contents.push_str(&format!("{}\n", c));
}
file.write_all(contents.as_bytes())?;
Ok(())
}
fn get_ure_search_params() -> String {
let mut param_encoded = String::from("");
if let Ok(lines) = read_lines(&get_sure_filepath("queries.env")) {
for line in lines {
if let Ok(l) = line {
param_encoded.push_str(&format!("{}&", l));
}
}
}
String::from(param_encoded)
}
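/// Parses `~/.sure/twilio.env` (plain `Key=Value` lines) into a
/// `TwilioAuth`; `AlertNumbers` is a comma-separated list of recipients.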
fn get_twilio_credentials() -> TwilioAuth {
let mut auth = TwilioAuth::new();
if let Ok(lines) = read_lines(&get_sure_filepath("twilio.env")) {
for line in lines {
if let Ok(i) = line {
let config_item: Vec<&str> = i.split('=').collect();
if config_item[0] == "AccountSID" {
auth.sid = String::from(config_item[1]);
}
if config_item[0] == "AuthToken" {
auth.auth_token = String::from(config_item[1]);
}
if config_item[0] == "TwilioNumber" {
auth.number = String::from(config_item[1]);
}
if config_item[0] == "AlertNumbers" {
let numbers: Vec<String> = config_item[1]
.split(",")
.into_iter()
.map(String::from)
.collect();
auth.alert_numbers = numbers;
}
}
}
}
auth
}
fn read_lines(filename: &str) -> io::Result<io::Lines<io::BufReader<File>>> {
let file = File::open(filename)?;
Ok(io::BufReader::new(file).lines())
}
fn get_sure_filepath(filename: &str) -> String {
let mut home_path = home_dir().unwrap();
home_path.push(".sure");
home_path.push(filename);
String::from(home_path.to_str().unwrap())
}
///
///
/// Definitions and Implementations
///
///
///
/// DesiredListing
///
#[derive(Debug)]
struct DesiredListing {
active: bool,
interested: bool,
mls: String,
}
impl DesiredListing {
fn new() -> DesiredListing {
Default::default()
}
fn is_desired(&self) -> bool {
self.active && self.interested
}
}
impl Default for DesiredListing {
fn default() -> Self {
DesiredListing {
active: false,
interested: false,
mls: String::from(""),
}
}
}
///
/// Twilio
///
pub struct TwilioAuth {
sid: String,
auth_token: String,
number: String,
alert_numbers: Vec<String>,
}
impl TwilioAuth {
fn new() -> TwilioAuth {
Default::default()
}
fn basic_auth(&self) -> String {
encode(format!("{}:{}", &self.sid, &self.auth_token).as_bytes())
}
}
impl Default for TwilioAuth {
fn default() -> Self {
TwilioAuth {
sid: String::from(""),
auth_token: String::from(""),
number: String::from(""),
alert_numbers: vec![],
}
}
}
#[derive(Debug, Serialize, Deserialize)]
struct TwilioResponse {
error_code: String,
status: String,
}
///
/// SureResult and SureError
///
type SureResult<T> = Result<T, SureError>;
#[derive(Debug)]
enum SureError {
IoError(std::io::Error),
ReqwestError(request::Error),
StdError(Box<dyn std::error::Error>),
JsonError(serde_json::Error),
}
impl From<std::io::Error> for SureError {
fn from(error: std::io::Error) -> Self {
SureError::IoError(error)
}
}
impl From<reqwest::Error> for SureError {
fn from(error: reqwest::Error) -> Self {
SureError::ReqwestError(error)
}
}
impl From<Box<dyn std::error::Error>> for SureError {
fn from(error: Box<dyn std::error::Error>) -> Self {
SureError::StdError(error)
}
}
impl From<serde_json::Error> for SureError {
fn from(error: serde_json::Error) -> Self {
SureError::JsonError(error)
}
}
///
/// UreData
/// └── Vec<Marker>
///
#[derive(Debug, Serialize, Deserialize)]
struct UreData {
markers: Vec<Marker>,
}
#[derive(Debug, Serialize, Deserialize)]
struct Marker {
price: String,
id: String,
} | (Ok((id, _idx, document, content_length)), _index, remaining) => {
current += 1.0;
let percentage = (((current / total as f32) * 100.0) / 2.0) as usize; | random_line_split |