column      type          stats
file_name   large_string  lengths 4 to 69
prefix      large_string  lengths 0 to 26.7k
suffix      large_string  lengths 0 to 24.8k
middle      large_string  lengths 0 to 2.12k
fim_type    large_string  4 values
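These columns describe rows of a fill-in-the-middle (FIM) code dataset: each source file is split into a prefix, a masked middle, and a suffix, and fim_type labels the kind of span that was masked (the rows below show conditional_block, random_line_split, and identifier_name). As a rough sketch of how such a row is typically consumed — using hypothetical `<fim_*>` sentinel strings, since every model family defines its own FIM tokens — a PSM-style (prefix-suffix-middle) training sample can be assembled like this:

```rust
/// One row of the dataset, as described by the columns above.
struct FimRow<'a> {
    file_name: &'a str,
    prefix: &'a str,
    middle: &'a str,
    suffix: &'a str,
    /// one of the 4 values: e.g. "conditional_block", "identifier_name",
    /// "random_line_split"
    fim_type: &'a str,
}

/// Assemble a PSM-ordered training string; the model must generate `middle`.
/// The `<fim_*>` sentinels are placeholders, not tokens of any real tokenizer.
fn to_training_sample(row: &FimRow) -> String {
    format!(
        "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
        row.prefix, row.suffix, row.middle
    )
}

fn main() {
    // Data taken from the first row below.
    let row = FimRow {
        file_name: "list_view.rs",
        prefix: "if self.area.height > 2 ",
        middle: "{ self.area.height - 2 }",
        suffix: " else { self.area.height }",
        fim_type: "conditional_block",
    };
    println!("{}", to_training_sample(&row));
}
```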
list_view.rs
use std::{ cmp::Ordering, io::{stdout, Write}, }; use crossterm::{ cursor::MoveTo, queue, style::{Color, SetBackgroundColor}, terminal::{Clear, ClearType}, }; use crate::{ compute_scrollbar, errors::Result, gray, Alignment, Area, CompoundStyle, MadSkin, Spacing, }; pub struct ListViewCell<'t> { con: String, style: &'t CompoundStyle, width: usize, // length of content in chars } pub struct Title { columns: Vec<usize>, // the column(s) below this title } pub struct ListViewColumn<'t, T> { title: String, min_width: usize, max_width: usize, spacing: Spacing, extract: Box<dyn Fn(&T) -> ListViewCell<'t>>, // a function building cells from the rows } struct Row<T> { data: T, displayed: bool, } /// A filterable list whose columns can be automatically resized. /// /// /// Notes: /// * another version will allow more than one style per cell /// (i.e. make the cells composites rather than compounds). Shout /// out if you need that now. /// * this version doesn't allow cell wrapping #[allow(clippy::type_complexity)] pub struct ListView<'t, T> { titles: Vec<Title>, columns: Vec<ListViewColumn<'t, T>>, rows: Vec<Row<T>>, pub area: Area, scroll: usize, pub skin: &'t MadSkin, filter: Option<Box<dyn Fn(&T) -> bool>>, // a function determining if the row must be displayed displayed_rows_count: usize, row_order: Option<Box<dyn Fn(&T, &T) -> Ordering>>, selection: Option<usize>, // index of the selected line selection_background: Color, } impl<'t> ListViewCell<'t> { pub fn new(con: String, style: &'t CompoundStyle) -> Self { let width = con.chars().count(); Self { con, style, width } } } impl<'t, T> ListViewColumn<'t, T> { pub fn new( title: &str, min_width: usize, max_width: usize, extract: Box<dyn Fn(&T) -> ListViewCell<'t>>, ) -> Self { Self { title: title.to_owned(), min_width, max_width, spacing: Spacing { width: min_width, align: Alignment::Center, }, extract, } } pub const fn with_align(mut self, align: Alignment) -> Self { self.spacing.align = align; self } } impl<'t, T> ListView<'t, T> { /// Create a new list view with the passed columns. /// /// The columns can't be changed afterwards but the area can be modified. /// When two columns have the same title, those titles are merged (but /// the columns below stay separated). pub fn new(area: Area, columns: Vec<ListViewColumn<'t, T>>, skin: &'t MadSkin) -> Self { let mut titles: Vec<Title> = Vec::new(); for (column_idx, column) in columns.iter().enumerate() { if let Some(last_title) = titles.last_mut() { if columns[last_title.columns[0]].title == column.title { // we merge those columns titles last_title.columns.push(column_idx); continue; } } // this is a new title titles.push(Title { columns: vec![column_idx], }); } Self { titles, columns, rows: Vec::new(), area, scroll: 0, skin, filter: None, displayed_rows_count: 0, row_order: None, selection: None, selection_background: gray(5), } } /// set a comparator for row sorting #[allow(clippy::type_complexity)] pub fn sort(&mut self, sort: Box<dyn Fn(&T, &T) -> Ordering>) { self.row_order = Some(sort); } /// return the height which is available for rows #[inline(always)] pub const fn tbody_height(&self) -> u16 { if self.area.height > 2
else { self.area.height } } /// return an option which when filled contains /// a tuple with the top and bottom of the vertical /// scrollbar. Return None when the content fits /// the available space. #[inline(always)] pub fn scrollbar(&self) -> Option<(u16, u16)> { compute_scrollbar( self.scroll as u16, self.displayed_rows_count as u16, self.tbody_height(), self.area.top, ) } pub fn add_row(&mut self, data: T) { let stick_to_bottom = self.row_order.is_none() && self.do_scroll_show_bottom(); let displayed = match &self.filter { Some(fun) => fun(&data), None => true, }; if displayed { self.displayed_rows_count += 1; } if stick_to_bottom { self.scroll_to_bottom(); } self.rows.push(Row { data, displayed }); if let Some(row_order) = &self.row_order { self.rows.sort_by(|a, b| row_order(&a.data, &b.data)); } } /// remove all rows (and selection). /// /// Keep the columns and the sort function, if any. pub fn clear_rows(&mut self) { self.rows.clear(); self.scroll = 0; self.displayed_rows_count = 0; self.selection = None; } /// return both the number of displayed rows and the total number pub fn row_counts(&self) -> (usize, usize) { (self.displayed_rows_count, self.rows.len()) } /// recompute the widths of all columns. /// This should be called when the area size is modified pub fn update_dimensions(&mut self) { let available_width: i32 = i32::from(self.area.width) - (self.columns.len() as i32 - 1) // we remove the separator - 1; // we remove 1 to let space for the scrollbar let sum_min_widths: i32 = self.columns.iter().map(|c| c.min_width as i32).sum(); if sum_min_widths >= available_width { for i in 0..self.columns.len() { self.columns[i].spacing.width = self.columns[i].min_width; } } else { let mut excess = available_width - sum_min_widths; for i in 0..self.columns.len() { let d = ((self.columns[i].max_width - self.columns[i].min_width) as i32).min(excess); excess -= d; self.columns[i].spacing.width = self.columns[i].min_width + d as usize; } // there might be some excess, but it's better to have some space at right rather // than a too wide table } } pub fn set_filter(&mut self, filter: Box<dyn Fn(&T) -> bool>) { let mut count = 0; for row in self.rows.iter_mut() { row.displayed = filter(&row.data); if row.displayed { count += 1; } } self.scroll = 0; // something better should be done... 
later self.displayed_rows_count = count; self.filter = Some(filter); } pub fn remove_filter(&mut self) { for row in self.rows.iter_mut() { row.displayed = true; } self.displayed_rows_count = self.rows.len(); self.filter = None; } /// write the list view on the given writer pub fn write_on<W>(&self, w: &mut W) -> Result<()> where W: std::io::Write, { let sx = self.area.left + self.area.width; let vbar = self.skin.table.compound_style.style_char('│'); let tee = self.skin.table.compound_style.style_char('┬'); let cross = self.skin.table.compound_style.style_char('┼'); let hbar = self.skin.table.compound_style.style_char('─'); // title line queue!(w, MoveTo(self.area.left, self.area.top))?; for (title_idx, title) in self.titles.iter().enumerate() { if title_idx != 0 { vbar.queue(w)?; } let width = title .columns .iter() .map(|ci| self.columns[*ci].spacing.width) .sum::<usize>() + title.columns.len() - 1; let spacing = Spacing { width, align: Alignment::Center, }; spacing.write_str( w, &self.columns[title.columns[0]].title, &self.skin.headers[0].compound_style, )?; } // separator line queue!(w, MoveTo(self.area.left, self.area.top + 1))?; for (title_idx, title) in self.titles.iter().enumerate() { if title_idx != 0 { cross.queue(w)?; } for (col_idx_idx, col_idx) in title.columns.iter().enumerate() { if col_idx_idx > 0 { tee.queue(w)?; } for _ in 0..self.columns[*col_idx].spacing.width { hbar.queue(w)?; } } } // rows, maybe scrolled let mut row_idx = self.scroll; let scrollbar = self.scrollbar(); for y in 2..self.area.height { queue!(w, MoveTo(self.area.left, self.area.top + y))?; loop { if row_idx == self.rows.len() { queue!(w, Clear(ClearType::UntilNewLine))?; break; } if self.rows[row_idx].displayed { let selected = Some(row_idx) == self.selection; for (col_idx, col) in self.columns.iter().enumerate() { if col_idx != 0 { if selected { queue!(w, SetBackgroundColor(self.selection_background))?; } vbar.queue(w)?; } let cell = (col.extract)(&self.rows[row_idx].data); if selected { let mut style = cell.style.clone(); style.set_bg(self.selection_background); col.spacing .write_counted_str(w, &cell.con, cell.width, &style)?; } else { col.spacing .write_counted_str(w, &cell.con, cell.width, cell.style)?; } } row_idx += 1; break; } row_idx += 1; } if let Some((sctop, scbottom)) = scrollbar { queue!(w, MoveTo(sx, self.area.top + y))?; let y = y - 2; if sctop <= y && y <= scbottom { self.skin.scrollbar.thumb.queue(w)?; } else { self.skin.scrollbar.track.queue(w)?; } } } Ok(()) } /// display the whole list in its area pub fn write(&self) -> Result<()> { let mut stdout = stdout(); self.write_on(&mut stdout)?; stdout.flush()?; Ok(()) } /// return true if the last line of the list is visible pub const fn do_scroll_show_bottom(&self) -> bool { self.scroll + self.tbody_height() as usize >= self.displayed_rows_count } /// ensure the last line is visible pub fn scroll_to_bottom(&mut self) { let body_height = self.tbody_height() as usize; self.scroll = if self.displayed_rows_count > body_height { self.displayed_rows_count - body_height } else { 0 } } /// set the scroll amount. /// lines_count can be negative pub fn try_scroll_lines(&mut self, lines_count: i32) { if lines_count < 0 { let lines_count = -lines_count as usize; self.scroll = if lines_count >= self.scroll { 0 } else { self.scroll - lines_count }; } else { self.scroll = (self.scroll + lines_count as usize) .min(self.displayed_rows_count - self.tbody_height() as usize + 1); } self.make_selection_visible(); } /// set the scroll amount. 
/// pages_count can be negative pub fn try_scroll_pages(&mut self, pages_count: i32) { self.try_scroll_lines(pages_count * self.tbody_height() as i32) } /// try to select the next visible line pub fn try_select_next(&mut self, up: bool) { if self.displayed_rows_count == 0 { return; } if self.displayed_rows_count == 1 || self.selection.is_none() { for i in 0..self.rows.len() { let i = (i + self.scroll) % self.rows.len(); if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } } for i in 0..self.rows.len() { let delta_idx = if up { self.rows.len() - 1 - i } else { i + 1 }; let row_idx = (delta_idx + self.selection.unwrap()) % self.rows.len(); if self.rows[row_idx].displayed { self.selection = Some(row_idx); self.make_selection_visible(); return; } } } /// select the first visible line (unless there's nothing). pub fn select_first_line(&mut self) { for i in 0..self.rows.len() { if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } self.selection = None; } /// select the last visible line (unless there's nothing). pub fn select_last_line(&mut self) { for i in (0..self.rows.len()).rev() { if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } self.selection = None; } /// scroll to ensure the selected line (if any) is visible. /// /// This is automatically called by try_scroll /// and try select functions pub fn make_selection_visible(&mut self) { let tbody_height = self.tbody_height() as usize; if self.displayed_rows_count <= tbody_height { return; // there's no scroll } if let Some(sel) = self.selection { if sel <= self.scroll { self.scroll = if sel > 2 { sel - 2 } else { 0 }; } else if sel + 1 >= self.scroll + tbody_height { self.scroll = sel - tbody_height + 2; } } } pub fn get_selection(&self) -> Option<&T> { self.selection.map(|sel| &self.rows[sel].data) } pub const fn has_selection(&self) -> bool { self.selection.is_some() } pub fn unselect(&mut self) { self.selection = None; } }
{ self.area.height - 2 }
conditional_block
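For readability, splicing this row's middle back between its prefix and suffix (and restoring the line breaks the dump flattened) recovers the function the mask was cut from:

```rust
/// return the height which is available for rows
#[inline(always)]
pub const fn tbody_height(&self) -> u16 {
    if self.area.height > 2 {
        self.area.height - 2 // the title and separator lines take two rows
    } else {
        self.area.height
    }
}
```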
list_view.rs
use std::{ cmp::Ordering, io::{stdout, Write}, }; use crossterm::{ cursor::MoveTo, queue, style::{Color, SetBackgroundColor}, terminal::{Clear, ClearType}, }; use crate::{ compute_scrollbar, errors::Result, gray, Alignment, Area, CompoundStyle, MadSkin, Spacing, }; pub struct ListViewCell<'t> { con: String, style: &'t CompoundStyle, width: usize, // length of content in chars } pub struct Title { columns: Vec<usize>, // the column(s) below this title } pub struct ListViewColumn<'t, T> { title: String, min_width: usize, max_width: usize, spacing: Spacing, extract: Box<dyn Fn(&T) -> ListViewCell<'t>>, // a function building cells from the rows } struct Row<T> { data: T, displayed: bool, } /// A filterable list whose columns can be automatically resized. /// /// /// Notes: /// * another version will allow more than one style per cell /// (i.e. make the cells composites rather than compounds). Shout /// out if you need that now. /// * this version doesn't allow cell wrapping #[allow(clippy::type_complexity)] pub struct ListView<'t, T> { titles: Vec<Title>, columns: Vec<ListViewColumn<'t, T>>, rows: Vec<Row<T>>, pub area: Area, scroll: usize, pub skin: &'t MadSkin, filter: Option<Box<dyn Fn(&T) -> bool>>, // a function determining if the row must be displayed displayed_rows_count: usize, row_order: Option<Box<dyn Fn(&T, &T) -> Ordering>>, selection: Option<usize>, // index of the selected line selection_background: Color, } impl<'t> ListViewCell<'t> { pub fn new(con: String, style: &'t CompoundStyle) -> Self { let width = con.chars().count(); Self { con, style, width } } } impl<'t, T> ListViewColumn<'t, T> { pub fn new( title: &str, min_width: usize, max_width: usize, extract: Box<dyn Fn(&T) -> ListViewCell<'t>>, ) -> Self { Self { title: title.to_owned(), min_width, max_width, spacing: Spacing { width: min_width, align: Alignment::Center, }, extract, } } pub const fn with_align(mut self, align: Alignment) -> Self { self.spacing.align = align; self } } impl<'t, T> ListView<'t, T> { /// Create a new list view with the passed columns. /// /// The columns can't be changed afterwards but the area can be modified. /// When two columns have the same title, those titles are merged (but /// the columns below stay separated). pub fn new(area: Area, columns: Vec<ListViewColumn<'t, T>>, skin: &'t MadSkin) -> Self { let mut titles: Vec<Title> = Vec::new(); for (column_idx, column) in columns.iter().enumerate() { if let Some(last_title) = titles.last_mut() { if columns[last_title.columns[0]].title == column.title { // we merge those columns titles last_title.columns.push(column_idx); continue; } } // this is a new title titles.push(Title { columns: vec![column_idx], }); } Self { titles, columns, rows: Vec::new(), area, scroll: 0, skin, filter: None, displayed_rows_count: 0, row_order: None, selection: None, selection_background: gray(5), } }
self.row_order = Some(sort); } /// return the height which is available for rows #[inline(always)] pub const fn tbody_height(&self) -> u16 { if self.area.height > 2 { self.area.height - 2 } else { self.area.height } } /// return an option which when filled contains /// a tuple with the top and bottom of the vertical /// scrollbar. Return None when the content fits /// the available space. #[inline(always)] pub fn scrollbar(&self) -> Option<(u16, u16)> { compute_scrollbar( self.scroll as u16, self.displayed_rows_count as u16, self.tbody_height(), self.area.top, ) } pub fn add_row(&mut self, data: T) { let stick_to_bottom = self.row_order.is_none() && self.do_scroll_show_bottom(); let displayed = match &self.filter { Some(fun) => fun(&data), None => true, }; if displayed { self.displayed_rows_count += 1; } if stick_to_bottom { self.scroll_to_bottom(); } self.rows.push(Row { data, displayed }); if let Some(row_order) = &self.row_order { self.rows.sort_by(|a, b| row_order(&a.data, &b.data)); } } /// remove all rows (and selection). /// /// Keep the columns and the sort function, if any. pub fn clear_rows(&mut self) { self.rows.clear(); self.scroll = 0; self.displayed_rows_count = 0; self.selection = None; } /// return both the number of displayed rows and the total number pub fn row_counts(&self) -> (usize, usize) { (self.displayed_rows_count, self.rows.len()) } /// recompute the widths of all columns. /// This should be called when the area size is modified pub fn update_dimensions(&mut self) { let available_width: i32 = i32::from(self.area.width) - (self.columns.len() as i32 - 1) // we remove the separator - 1; // we remove 1 to let space for the scrollbar let sum_min_widths: i32 = self.columns.iter().map(|c| c.min_width as i32).sum(); if sum_min_widths >= available_width { for i in 0..self.columns.len() { self.columns[i].spacing.width = self.columns[i].min_width; } } else { let mut excess = available_width - sum_min_widths; for i in 0..self.columns.len() { let d = ((self.columns[i].max_width - self.columns[i].min_width) as i32).min(excess); excess -= d; self.columns[i].spacing.width = self.columns[i].min_width + d as usize; } // there might be some excess, but it's better to have some space at right rather // than a too wide table } } pub fn set_filter(&mut self, filter: Box<dyn Fn(&T) -> bool>) { let mut count = 0; for row in self.rows.iter_mut() { row.displayed = filter(&row.data); if row.displayed { count += 1; } } self.scroll = 0; // something better should be done... 
later self.displayed_rows_count = count; self.filter = Some(filter); } pub fn remove_filter(&mut self) { for row in self.rows.iter_mut() { row.displayed = true; } self.displayed_rows_count = self.rows.len(); self.filter = None; } /// write the list view on the given writer pub fn write_on<W>(&self, w: &mut W) -> Result<()> where W: std::io::Write, { let sx = self.area.left + self.area.width; let vbar = self.skin.table.compound_style.style_char('│'); let tee = self.skin.table.compound_style.style_char('┬'); let cross = self.skin.table.compound_style.style_char('┼'); let hbar = self.skin.table.compound_style.style_char('─'); // title line queue!(w, MoveTo(self.area.left, self.area.top))?; for (title_idx, title) in self.titles.iter().enumerate() { if title_idx != 0 { vbar.queue(w)?; } let width = title .columns .iter() .map(|ci| self.columns[*ci].spacing.width) .sum::<usize>() + title.columns.len() - 1; let spacing = Spacing { width, align: Alignment::Center, }; spacing.write_str( w, &self.columns[title.columns[0]].title, &self.skin.headers[0].compound_style, )?; } // separator line queue!(w, MoveTo(self.area.left, self.area.top + 1))?; for (title_idx, title) in self.titles.iter().enumerate() { if title_idx != 0 { cross.queue(w)?; } for (col_idx_idx, col_idx) in title.columns.iter().enumerate() { if col_idx_idx > 0 { tee.queue(w)?; } for _ in 0..self.columns[*col_idx].spacing.width { hbar.queue(w)?; } } } // rows, maybe scrolled let mut row_idx = self.scroll; let scrollbar = self.scrollbar(); for y in 2..self.area.height { queue!(w, MoveTo(self.area.left, self.area.top + y))?; loop { if row_idx == self.rows.len() { queue!(w, Clear(ClearType::UntilNewLine))?; break; } if self.rows[row_idx].displayed { let selected = Some(row_idx) == self.selection; for (col_idx, col) in self.columns.iter().enumerate() { if col_idx != 0 { if selected { queue!(w, SetBackgroundColor(self.selection_background))?; } vbar.queue(w)?; } let cell = (col.extract)(&self.rows[row_idx].data); if selected { let mut style = cell.style.clone(); style.set_bg(self.selection_background); col.spacing .write_counted_str(w, &cell.con, cell.width, &style)?; } else { col.spacing .write_counted_str(w, &cell.con, cell.width, cell.style)?; } } row_idx += 1; break; } row_idx += 1; } if let Some((sctop, scbottom)) = scrollbar { queue!(w, MoveTo(sx, self.area.top + y))?; let y = y - 2; if sctop <= y && y <= scbottom { self.skin.scrollbar.thumb.queue(w)?; } else { self.skin.scrollbar.track.queue(w)?; } } } Ok(()) } /// display the whole list in its area pub fn write(&self) -> Result<()> { let mut stdout = stdout(); self.write_on(&mut stdout)?; stdout.flush()?; Ok(()) } /// return true if the last line of the list is visible pub const fn do_scroll_show_bottom(&self) -> bool { self.scroll + self.tbody_height() as usize >= self.displayed_rows_count } /// ensure the last line is visible pub fn scroll_to_bottom(&mut self) { let body_height = self.tbody_height() as usize; self.scroll = if self.displayed_rows_count > body_height { self.displayed_rows_count - body_height } else { 0 } } /// set the scroll amount. /// lines_count can be negative pub fn try_scroll_lines(&mut self, lines_count: i32) { if lines_count < 0 { let lines_count = -lines_count as usize; self.scroll = if lines_count >= self.scroll { 0 } else { self.scroll - lines_count }; } else { self.scroll = (self.scroll + lines_count as usize) .min(self.displayed_rows_count - self.tbody_height() as usize + 1); } self.make_selection_visible(); } /// set the scroll amount. 
/// pages_count can be negative pub fn try_scroll_pages(&mut self, pages_count: i32) { self.try_scroll_lines(pages_count * self.tbody_height() as i32) } /// try to select the next visible line pub fn try_select_next(&mut self, up: bool) { if self.displayed_rows_count == 0 { return; } if self.displayed_rows_count == 1 || self.selection.is_none() { for i in 0..self.rows.len() { let i = (i + self.scroll) % self.rows.len(); if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } } for i in 0..self.rows.len() { let delta_idx = if up { self.rows.len() - 1 - i } else { i + 1 }; let row_idx = (delta_idx + self.selection.unwrap()) % self.rows.len(); if self.rows[row_idx].displayed { self.selection = Some(row_idx); self.make_selection_visible(); return; } } } /// select the first visible line (unless there's nothing). pub fn select_first_line(&mut self) { for i in 0..self.rows.len() { if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } self.selection = None; } /// select the last visible line (unless there's nothing). pub fn select_last_line(&mut self) { for i in (0..self.rows.len()).rev() { if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } self.selection = None; } /// scroll to ensure the selected line (if any) is visible. /// /// This is automatically called by try_scroll /// and try select functions pub fn make_selection_visible(&mut self) { let tbody_height = self.tbody_height() as usize; if self.displayed_rows_count <= tbody_height { return; // there's no scroll } if let Some(sel) = self.selection { if sel <= self.scroll { self.scroll = if sel > 2 { sel - 2 } else { 0 }; } else if sel + 1 >= self.scroll + tbody_height { self.scroll = sel - tbody_height + 2; } } } pub fn get_selection(&self) -> Option<&T> { self.selection.map(|sel| &self.rows[sel].data) } pub const fn has_selection(&self) -> bool { self.selection.is_some() } pub fn unselect(&mut self) { self.selection = None; } }
/// set a comparator for row sorting #[allow(clippy::type_complexity)] pub fn sort(&mut self, sort: Box<dyn Fn(&T, &T) -> Ordering>) {
random_line_split
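This random_line_split row cuts mid-item: the middle holds the doc comment, lint attribute, and signature of sort, while the suffix opens with its one-line body. Reassembled:

```rust
/// set a comparator for row sorting
#[allow(clippy::type_complexity)]
pub fn sort(&mut self, sort: Box<dyn Fn(&T, &T) -> Ordering>) {
    self.row_order = Some(sort);
}
```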
list_view.rs
use std::{ cmp::Ordering, io::{stdout, Write}, }; use crossterm::{ cursor::MoveTo, queue, style::{Color, SetBackgroundColor}, terminal::{Clear, ClearType}, }; use crate::{ compute_scrollbar, errors::Result, gray, Alignment, Area, CompoundStyle, MadSkin, Spacing, }; pub struct ListViewCell<'t> { con: String, style: &'t CompoundStyle, width: usize, // length of content in chars } pub struct Title { columns: Vec<usize>, // the column(s) below this title } pub struct ListViewColumn<'t, T> { title: String, min_width: usize, max_width: usize, spacing: Spacing, extract: Box<dyn Fn(&T) -> ListViewCell<'t>>, // a function building cells from the rows } struct Row<T> { data: T, displayed: bool, } /// A filterable list whose columns can be automatically resized. /// /// /// Notes: /// * another version will allow more than one style per cell /// (i.e. make the cells composites rather than compounds). Shout /// out if you need that now. /// * this version doesn't allow cell wrapping #[allow(clippy::type_complexity)] pub struct ListView<'t, T> { titles: Vec<Title>, columns: Vec<ListViewColumn<'t, T>>, rows: Vec<Row<T>>, pub area: Area, scroll: usize, pub skin: &'t MadSkin, filter: Option<Box<dyn Fn(&T) -> bool>>, // a function determining if the row must be displayed displayed_rows_count: usize, row_order: Option<Box<dyn Fn(&T, &T) -> Ordering>>, selection: Option<usize>, // index of the selected line selection_background: Color, } impl<'t> ListViewCell<'t> { pub fn new(con: String, style: &'t CompoundStyle) -> Self { let width = con.chars().count(); Self { con, style, width } } } impl<'t, T> ListViewColumn<'t, T> { pub fn new( title: &str, min_width: usize, max_width: usize, extract: Box<dyn Fn(&T) -> ListViewCell<'t>>, ) -> Self { Self { title: title.to_owned(), min_width, max_width, spacing: Spacing { width: min_width, align: Alignment::Center, }, extract, } } pub const fn with_align(mut self, align: Alignment) -> Self { self.spacing.align = align; self } } impl<'t, T> ListView<'t, T> { /// Create a new list view with the passed columns. /// /// The columns can't be changed afterwards but the area can be modified. /// When two columns have the same title, those titles are merged (but /// the columns below stay separated). pub fn new(area: Area, columns: Vec<ListViewColumn<'t, T>>, skin: &'t MadSkin) -> Self { let mut titles: Vec<Title> = Vec::new(); for (column_idx, column) in columns.iter().enumerate() { if let Some(last_title) = titles.last_mut() { if columns[last_title.columns[0]].title == column.title { // we merge those columns titles last_title.columns.push(column_idx); continue; } } // this is a new title titles.push(Title { columns: vec![column_idx], }); } Self { titles, columns, rows: Vec::new(), area, scroll: 0, skin, filter: None, displayed_rows_count: 0, row_order: None, selection: None, selection_background: gray(5), } } /// set a comparator for row sorting #[allow(clippy::type_complexity)] pub fn sort(&mut self, sort: Box<dyn Fn(&T, &T) -> Ordering>) { self.row_order = Some(sort); } /// return the height which is available for rows #[inline(always)] pub const fn tbody_height(&self) -> u16 { if self.area.height > 2 { self.area.height - 2 } else { self.area.height } } /// return an option which when filled contains /// a tuple with the top and bottom of the vertical /// scrollbar. Return None when the content fits /// the available space. 
#[inline(always)] pub fn scrollbar(&self) -> Option<(u16, u16)> { compute_scrollbar( self.scroll as u16, self.displayed_rows_count as u16, self.tbody_height(), self.area.top, ) } pub fn
(&mut self, data: T) { let stick_to_bottom = self.row_order.is_none() && self.do_scroll_show_bottom(); let displayed = match &self.filter { Some(fun) => fun(&data), None => true, }; if displayed { self.displayed_rows_count += 1; } if stick_to_bottom { self.scroll_to_bottom(); } self.rows.push(Row { data, displayed }); if let Some(row_order) = &self.row_order { self.rows.sort_by(|a, b| row_order(&a.data, &b.data)); } } /// remove all rows (and selection). /// /// Keep the columns and the sort function, if any. pub fn clear_rows(&mut self) { self.rows.clear(); self.scroll = 0; self.displayed_rows_count = 0; self.selection = None; } /// return both the number of displayed rows and the total number pub fn row_counts(&self) -> (usize, usize) { (self.displayed_rows_count, self.rows.len()) } /// recompute the widths of all columns. /// This should be called when the area size is modified pub fn update_dimensions(&mut self) { let available_width: i32 = i32::from(self.area.width) - (self.columns.len() as i32 - 1) // we remove the separator - 1; // we remove 1 to let space for the scrollbar let sum_min_widths: i32 = self.columns.iter().map(|c| c.min_width as i32).sum(); if sum_min_widths >= available_width { for i in 0..self.columns.len() { self.columns[i].spacing.width = self.columns[i].min_width; } } else { let mut excess = available_width - sum_min_widths; for i in 0..self.columns.len() { let d = ((self.columns[i].max_width - self.columns[i].min_width) as i32).min(excess); excess -= d; self.columns[i].spacing.width = self.columns[i].min_width + d as usize; } // there might be some excess, but it's better to have some space at right rather // than a too wide table } } pub fn set_filter(&mut self, filter: Box<dyn Fn(&T) -> bool>) { let mut count = 0; for row in self.rows.iter_mut() { row.displayed = filter(&row.data); if row.displayed { count += 1; } } self.scroll = 0; // something better should be done... 
later self.displayed_rows_count = count; self.filter = Some(filter); } pub fn remove_filter(&mut self) { for row in self.rows.iter_mut() { row.displayed = true; } self.displayed_rows_count = self.rows.len(); self.filter = None; } /// write the list view on the given writer pub fn write_on<W>(&self, w: &mut W) -> Result<()> where W: std::io::Write, { let sx = self.area.left + self.area.width; let vbar = self.skin.table.compound_style.style_char('│'); let tee = self.skin.table.compound_style.style_char('┬'); let cross = self.skin.table.compound_style.style_char('┼'); let hbar = self.skin.table.compound_style.style_char('─'); // title line queue!(w, MoveTo(self.area.left, self.area.top))?; for (title_idx, title) in self.titles.iter().enumerate() { if title_idx != 0 { vbar.queue(w)?; } let width = title .columns .iter() .map(|ci| self.columns[*ci].spacing.width) .sum::<usize>() + title.columns.len() - 1; let spacing = Spacing { width, align: Alignment::Center, }; spacing.write_str( w, &self.columns[title.columns[0]].title, &self.skin.headers[0].compound_style, )?; } // separator line queue!(w, MoveTo(self.area.left, self.area.top + 1))?; for (title_idx, title) in self.titles.iter().enumerate() { if title_idx != 0 { cross.queue(w)?; } for (col_idx_idx, col_idx) in title.columns.iter().enumerate() { if col_idx_idx > 0 { tee.queue(w)?; } for _ in 0..self.columns[*col_idx].spacing.width { hbar.queue(w)?; } } } // rows, maybe scrolled let mut row_idx = self.scroll; let scrollbar = self.scrollbar(); for y in 2..self.area.height { queue!(w, MoveTo(self.area.left, self.area.top + y))?; loop { if row_idx == self.rows.len() { queue!(w, Clear(ClearType::UntilNewLine))?; break; } if self.rows[row_idx].displayed { let selected = Some(row_idx) == self.selection; for (col_idx, col) in self.columns.iter().enumerate() { if col_idx != 0 { if selected { queue!(w, SetBackgroundColor(self.selection_background))?; } vbar.queue(w)?; } let cell = (col.extract)(&self.rows[row_idx].data); if selected { let mut style = cell.style.clone(); style.set_bg(self.selection_background); col.spacing .write_counted_str(w, &cell.con, cell.width, &style)?; } else { col.spacing .write_counted_str(w, &cell.con, cell.width, cell.style)?; } } row_idx += 1; break; } row_idx += 1; } if let Some((sctop, scbottom)) = scrollbar { queue!(w, MoveTo(sx, self.area.top + y))?; let y = y - 2; if sctop <= y && y <= scbottom { self.skin.scrollbar.thumb.queue(w)?; } else { self.skin.scrollbar.track.queue(w)?; } } } Ok(()) } /// display the whole list in its area pub fn write(&self) -> Result<()> { let mut stdout = stdout(); self.write_on(&mut stdout)?; stdout.flush()?; Ok(()) } /// return true if the last line of the list is visible pub const fn do_scroll_show_bottom(&self) -> bool { self.scroll + self.tbody_height() as usize >= self.displayed_rows_count } /// ensure the last line is visible pub fn scroll_to_bottom(&mut self) { let body_height = self.tbody_height() as usize; self.scroll = if self.displayed_rows_count > body_height { self.displayed_rows_count - body_height } else { 0 } } /// set the scroll amount. /// lines_count can be negative pub fn try_scroll_lines(&mut self, lines_count: i32) { if lines_count < 0 { let lines_count = -lines_count as usize; self.scroll = if lines_count >= self.scroll { 0 } else { self.scroll - lines_count }; } else { self.scroll = (self.scroll + lines_count as usize) .min(self.displayed_rows_count - self.tbody_height() as usize + 1); } self.make_selection_visible(); } /// set the scroll amount. 
/// pages_count can be negative pub fn try_scroll_pages(&mut self, pages_count: i32) { self.try_scroll_lines(pages_count * self.tbody_height() as i32) } /// try to select the next visible line pub fn try_select_next(&mut self, up: bool) { if self.displayed_rows_count == 0 { return; } if self.displayed_rows_count == 1 || self.selection.is_none() { for i in 0..self.rows.len() { let i = (i + self.scroll) % self.rows.len(); if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } } for i in 0..self.rows.len() { let delta_idx = if up { self.rows.len() - 1 - i } else { i + 1 }; let row_idx = (delta_idx + self.selection.unwrap()) % self.rows.len(); if self.rows[row_idx].displayed { self.selection = Some(row_idx); self.make_selection_visible(); return; } } } /// select the first visible line (unless there's nothing). pub fn select_first_line(&mut self) { for i in 0..self.rows.len() { if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } self.selection = None; } /// select the last visible line (unless there's nothing). pub fn select_last_line(&mut self) { for i in (0..self.rows.len()).rev() { if self.rows[i].displayed { self.selection = Some(i); self.make_selection_visible(); return; } } self.selection = None; } /// scroll to ensure the selected line (if any) is visible. /// /// This is automatically called by try_scroll /// and try select functions pub fn make_selection_visible(&mut self) { let tbody_height = self.tbody_height() as usize; if self.displayed_rows_count <= tbody_height { return; // there's no scroll } if let Some(sel) = self.selection { if sel <= self.scroll { self.scroll = if sel > 2 { sel - 2 } else { 0 }; } else if sel + 1 >= self.scroll + tbody_height { self.scroll = sel - tbody_height + 2; } } } pub fn get_selection(&self) -> Option<&T> { self.selection.map(|sel| &self.rows[sel].data) } pub const fn has_selection(&self) -> bool { self.selection.is_some() } pub fn unselect(&mut self) { self.selection = None; } }
add_row
identifier_name
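For identifier_name rows only the masked name is stored in middle; here the prefix ends at `pub fn` and the suffix resumes at the parameter list. Reassembled and re-indented, the masked method reads:

```rust
pub fn add_row(&mut self, data: T) {
    // stick to the bottom only while no explicit sort order is set
    let stick_to_bottom = self.row_order.is_none() && self.do_scroll_show_bottom();
    let displayed = match &self.filter {
        Some(fun) => fun(&data),
        None => true,
    };
    if displayed {
        self.displayed_rows_count += 1;
    }
    if stick_to_bottom {
        self.scroll_to_bottom();
    }
    self.rows.push(Row { data, displayed });
    if let Some(row_order) = &self.row_order {
        self.rows.sort_by(|a, b| row_order(&a.data, &b.data));
    }
}
```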
lib.rs
// `error_chain!` can recurse deeply #![recursion_limit = "1024"] // Import the macro. Don't forget to add `error-chain` in your // `Cargo.toml`! #[macro_use] extern crate error_chain; #[macro_use] extern crate log; extern crate clap; extern crate byteorder; extern crate ansi_term; extern crate pbr; pub mod network; use std::path::PathBuf; use std::io::{Read, Write}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use ansi_term::Colour::*; use std::cmp::Ordering; use pbr::{ProgressBar, Units}; pub mod errors { use std::io; use std::net; use std::path; error_chain! { // The type defined for this error. These are the conventional // and recommended names, but they can be arbitrarily chosen. // // It is also possible to leave this section out entirely, or // leave it empty, and these names will be used automatically. types { Error, ErrorKind, ResultExt, Result; } // Without the `Result` wrapper: // // types { // Error, ErrorKind, ResultExt; // } // Automatic conversions between this error chain and other // error chains. In this case, it will e.g. generate an // `ErrorKind` variant called `Another` which in turn contains // the `other_error::ErrorKind`, with conversions from // `other_error::Error`. // // Optionally, some attributes can be added to a variant. // // This section can be empty. links { } // Automatic conversions between this error chain and other // error types not defined by the `error_chain!`. These will be // wrapped in a new error with, in the first case, the // `ErrorKind::Fmt` variant. The description and cause will // forward to the description and cause of the original error. // // Optionally, some attributes can be added to a variant. // // This section can be empty. foreign_links { Io(io::Error) #[cfg(unix)]; } // Define additional `ErrorKind` variants. The syntax here is // the same as `quick_error!`, but the `from()` and `cause()` // syntax is not supported. 
errors { PathConversion { description("Failed converting the path to a string") display("Failed converting path to string") } Serialization { description("Serialization failed") display("Failed serializing") } //I think cloning the pathbuf is ok for the slow path in case of error SendFile(remote_addr: net::SocketAddr){ description("Error while sending file") display("While sending to {}", remote_addr) } UnknownFile(index: u32) { description("The client requested an unknown file") display("The client requested an unknown file with id {}", index) } ServerConnection { description("While processing connection") display("A low level error occurred while processing connection") } ClientConnection(ip: net::Ipv4Addr, port: u16) { description("Client failed to connect to server") display("While connecting to {}:{}", ip, port) } Enumeration { description("While enumerating interface") display("While enumerating interfaces") } Bind(ip: net::Ipv4Addr, port: u16) { description("While binding connection") display("While binding to {}:{}", ip, port) } IncompleteRead(actual: usize, expected: usize) { description("An error occurred which caused a read to end before getting the expected data") display("A read didn't get the expected amount of data [Expected {}, Actual {}]", expected, actual) } Fetch { description("While reading message") display("While reading message") } InvalidTransport(t: String) { description("Transport not valid") display("Invalid transport: {}", t) } FileExists(p: ::std::path::PathBuf) { description("File already exists") display("Tried to write to existing file: {}", p.to_string_lossy()) } WriteContent { description("An error occurred while writing content to disk") display("While writing content to disk") } ReadContent { description("An error occurred while reading content from network") display("While reading content from network") } } } } use errors::*; trait Readn { fn readn(&mut self, buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize>; } impl<T: Read> Readn for T { fn readn(&mut self, mut buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize> { let mut sub = self.take(n as u64); return sub.read_to_end(&mut buff); } } trait Streamable<'a>{ fn read<T: Read + 'a>(stream: T) -> Result<Self> where Self: std::marker::Sized; fn write<T: Write + 'a>(&mut self, stream: &mut T) -> Result<usize>; } struct FileMessage<'a> { name_size: u32, name: String, size: u32, file: Box<Read + 'a>, } impl<'a> FileMessage<'a> { fn new<T: Read + 'a>(name: String, size: u32, stream: T) -> Self { return FileMessage { name_size: name.len() as u32, //@Expansion: 32 bits is a lot, but maybe in the far flung future. name: name, size: size, file: Box::new(stream) }; } } impl<'a> Streamable<'a> for FileMessage<'a> { fn read<T: Read + 'a>(mut stream: T) -> Result<Self> { //Get the length of the name let name_len = try!(stream.read_u32::<BigEndian>()); //Get the name from the stream let mut name_buff = Vec::with_capacity(name_len as usize); //@Expansion: Here we have the 32-bit again. let name_read = try!(stream.readn(&mut name_buff, name_len as usize)); if name_len != name_read as u32 { bail!(ErrorKind::IncompleteRead(name_read, name_len as usize)); } let name = String::from_utf8(name_buff).unwrap(); //@Error: Make error //Get the length of the file contents let file_len = try!(stream.read_u32::<BigEndian>()); //@Expansion: u32. That's a direct limit on the size of files. //Currently we aren't aiming at //supporting large files, which makes //it ok. 
//We aren't getting the file contents because we don't want to store it all in memory return Ok(FileMessage { name_size: name_len, name: name, size: file_len, file: Box::new(stream), }); } fn write<T: Write + 'a>(&mut self, mut stream: &mut T) -> Result<usize>{ try!(stream.write_u32::<BigEndian>(self.name_size)); //@Error: Should this be handled differently? try!(stream.write_all(self.name.as_bytes())); try!(stream.write_u32::<BigEndian>(self.size)); try!(std::io::copy(&mut self.file, &mut stream)); return Ok(0); } } pub type Dict<'a> = Box<[&'a str]>; pub struct TransportPresenter<'a> { dictionary: Dict<'a>, dict_entries: u32, } impl<'a> TransportPresenter<'a> { pub fn new(dictionary: Dict<'a>, dict_entries: u32) -> Self { return TransportPresenter { dictionary: dictionary, dict_entries: dict_entries, }; } pub fn present(&self, t: &Transport) -> Result<String> { let parts = (t.max_state() as f64).log(self.dict_entries as f64).ceil() as u32; let mut part_representation: Vec<&str> = Vec::with_capacity(parts as usize); let mut remainder = t.state(); for _ in 0..parts { let part = remainder % self.dict_entries; remainder = remainder / self.dict_entries; part_representation.push(self.dictionary[part as usize]); } return Ok(part_representation.join(" ")); } pub fn present_inv(&self, s: String) -> Result<ClientTransport> { let mut res: u32 = 0; let mut part_count = 0; for word in s.split(" ") { if let Ok(val) = self.dictionary.binary_search_by(|p| { //Flip the search to allow for cmp between String and &str match word.cmp(p) { Ordering::Greater => Ordering::Less, Ordering::Less => Ordering::Greater, Ordering::Equal => Ordering::Equal, } }) { res += (val as u32) * (self.dict_entries.pow(part_count)); part_count += 1; } else { bail!(ErrorKind::InvalidTransport(word.to_owned())); } } return Ok(ClientTransport::new(res)); } } pub struct ServerTransport { state: u32, max_state: u32, } pub struct ClientTransport { state: u32, } pub trait Transport { fn state(&self) -> u32; fn max_state(&self) -> u32; } impl ServerTransport { fn new(state: u32, max_state: u32) -> Self { return ServerTransport { state: state, max_state: max_state, }; } } impl Transport for ServerTransport { fn state(&self) -> u32 { return self.state; } fn max_state(&self) -> u32 { return self.max_state; } } pub trait PartialTransport { fn state(&self) -> u32; } impl ClientTransport { fn new(state: u32) -> Self { return ClientTransport { state: state, }; } } impl PartialTransport for ClientTransport { fn state(&self) -> u32 { return self.state; } } impl <T: Transport> PartialTransport for T { fn state(&self) -> u32 { return Transport::state(self); } } pub trait Transportable { fn make_transport(&self) -> Result<ServerTransport>; fn from_transport<T: PartialTransport>(t: T) -> Result<Self> where Self: std::marker::Sized; } impl Transportable for std::net::Ipv4Addr { fn make_transport(&self) -> Result<ServerTransport> { return Ok(ServerTransport::new(u32::from(self.clone()), std::u32::MAX)); } fn from_transport<T: PartialTransport>(t: T) -> Result<Self> { return Ok(std::net::Ipv4Addr::from(t.state())); } } #[derive(Clone)] pub struct FileInfo{ path: PathBuf, len: u64, } impl FileInfo { fn new(path: PathBuf, len: u64) -> FileInfo { return FileInfo { path: path, len: len, } } pub fn from_path(path: PathBuf) -> Result<FileInfo> { let metadata = std::fs::metadata(&path)?; return Ok(FileInfo::new(path, metadata.len())) } pub fn open(&self) -> std::result::Result<std::fs::File, std::io::Error> { return std::fs::File::open(&self.path); } } 
//@Refactor: This is just private but should be refactored fn send_file<S: Write>(mut stream: &mut S, file: &FileInfo) -> Result<()> { let filename = match file.path.file_name() .and_then(|x| x.to_str()) .map(|x| x.to_owned()) { Some(x) => x, None => return Err(ErrorKind::PathConversion.into()), }; let mut message = FileMessage::new(filename, file.len as u32, try!(file.open())); message.write(&mut stream) .chain_err(|| ErrorKind::Serialization)?; return Ok(()); } pub struct FileRepository { files: std::collections::HashMap<u32, FileInfo>, pub interface: network::Interface, next_id: u32, } impl FileRepository { pub fn new(interface: network::Interface) -> Self { return FileRepository { files: std::collections::HashMap::new(), interface: interface, next_id: 0, }; } pub fn add_file(&mut self, file: FileInfo) -> Result<ServerTransport> { self.files.insert(self.next_id, file); return self.interface.addr.make_transport(); } fn
(&self, index: u32) -> Result<&FileInfo> { return self.files.get(&index) .ok_or_else(|| ErrorKind::UnknownFile(index).into()); } pub fn run(&self) -> Result<()> { //@Expansion: Maybe don't use fixed ports let listener = std::net::TcpListener::bind((self.interface.addr, 2222)) .chain_err(|| ErrorKind::Bind(self.interface.addr, 2222))?; for conn in listener.incoming() { let mut stream = conn .chain_err(|| ErrorKind::ServerConnection)?; //TODO: I should read some sort of info about which file to get here let file = self.get_file(0) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; send_file(&mut stream, file) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; } return Ok(()); } } pub struct FileClient { } impl FileClient{ pub fn new() -> Self { return FileClient { } } pub fn get_file<T: PartialTransport>(&self, transport: T, out_path: Option<std::path::PathBuf>) -> Result<()> { let ip = std::net::Ipv4Addr::from_transport(transport)?; println!("{} from ip {}", Green.paint("Downloading"), Yellow.paint(ip.to_string())); //@Expansion: We can't time out right now. Use the net2::TcpBuilder? //@Expansion: Maybe don't use fixed ports let stream = std::net::TcpStream::connect((ip, 2222)) .chain_err(|| ErrorKind::ClientConnection(ip, 2222))?; let mut message = FileMessage::read(stream) .chain_err(|| ErrorKind::Fetch)?; let mut pb = ProgressBar::new(message.size as u64); pb.set_units(Units::Bytes); let new_path = out_path .unwrap_or(std::path::PathBuf::from(&message.name)); if new_path.exists() { bail!(ErrorKind::FileExists(new_path)); } //TODO: Make some error wrapper let mut file = std::fs::File::create(new_path)?; let mut buffer = [0u8; 8192]; loop{ let read = message.file.read(&mut buffer) .chain_err(|| ErrorKind::ReadContent)?; if read == 0 { break; } pb.add(read as u64); file.write(&mut buffer[0..read]) .chain_err(|| ErrorKind::WriteContent)?; } return Ok(()); } }
get_file
identifier_name
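The masked identifier in this row is get_file; splicing it between the prefix's trailing `fn` and the suffix gives the accessor in full:

```rust
fn get_file(&self, index: u32) -> Result<&FileInfo> {
    return self.files.get(&index)
        .ok_or_else(|| ErrorKind::UnknownFile(index).into());
}
```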
lib.rs
// `error_chain!` can recurse deeply #![recursion_limit = "1024"] // Import the macro. Don't forget to add `error-chain` in your // `Cargo.toml`! #[macro_use] extern crate error_chain; #[macro_use] extern crate log; extern crate clap; extern crate byteorder; extern crate ansi_term; extern crate pbr; pub mod network; use std::path::PathBuf; use std::io::{Read, Write}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use ansi_term::Colour::*; use std::cmp::Ordering; use pbr::{ProgressBar, Units}; pub mod errors { use std::io; use std::net; use std::path; error_chain! { // The type defined for this error. These are the conventional // and recommended names, but they can be arbitrarily chosen. // // It is also possible to leave this section out entirely, or // leave it empty, and these names will be used automatically. types { Error, ErrorKind, ResultExt, Result; } // Without the `Result` wrapper: // // types { // Error, ErrorKind, ResultExt; // } // Automatic conversions between this error chain and other // error chains. In this case, it will e.g. generate an // `ErrorKind` variant called `Another` which in turn contains // the `other_error::ErrorKind`, with conversions from // `other_error::Error`. // // Optionally, some attributes can be added to a variant. // // This section can be empty. links { } // Automatic conversions between this error chain and other // error types not defined by the `error_chain!`. These will be // wrapped in a new error with, in the first case, the // `ErrorKind::Fmt` variant. The description and cause will // forward to the description and cause of the original error. // // Optionally, some attributes can be added to a variant. // // This section can be empty. foreign_links { Io(io::Error) #[cfg(unix)]; } // Define additional `ErrorKind` variants. The syntax here is // the same as `quick_error!`, but the `from()` and `cause()` // syntax is not supported. 
errors { PathConversion { description("Failed converting the path to a string") display("Failed converting path to string") } Serialization { description("Serialization failed") display("Failed serializing") } //I think cloning the pathbuf is ok for the slow path in case of error SendFile(remote_addr: net::SocketAddr){ description("Error while sending file") display("While sending to {}", remote_addr) } UnknownFile(index: u32) { description("The client requested an unknown file") display("The client requested an unknown file with id {}", index) } ServerConnection { description("While processing connection") display("A low level error occurred while processing connection") } ClientConnection(ip: net::Ipv4Addr, port: u16) { description("Client failed to connect to server") display("While connecting to {}:{}", ip, port) } Enumeration { description("While enumerating interface") display("While enumerating interfaces") } Bind(ip: net::Ipv4Addr, port: u16) { description("While binding connection") display("While binding to {}:{}", ip, port) } IncompleteRead(actual: usize, expected: usize) { description("An error occurred which caused a read to end before getting the expected data") display("A read didn't get the expected amount of data [Expected {}, Actual {}]", expected, actual) } Fetch { description("While reading message") display("While reading message") } InvalidTransport(t: String) { description("Transport not valid") display("Invalid transport: {}", t) } FileExists(p: ::std::path::PathBuf) { description("File already exists") display("Tried to write to existing file: {}", p.to_string_lossy()) } WriteContent { description("An error occurred while writing content to disk") display("While writing content to disk") } ReadContent { description("An error occurred while reading content from network") display("While reading content from network") } } } } use errors::*; trait Readn { fn readn(&mut self, buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize>; } impl<T: Read> Readn for T { fn readn(&mut self, mut buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize> { let mut sub = self.take(n as u64); return sub.read_to_end(&mut buff); } } trait Streamable<'a>{ fn read<T: Read + 'a>(stream: T) -> Result<Self> where Self: std::marker::Sized; fn write<T: Write + 'a>(&mut self, stream: &mut T) -> Result<usize>; } struct FileMessage<'a> { name_size: u32, name: String, size: u32, file: Box<Read + 'a>, } impl<'a> FileMessage<'a> { fn new<T: Read + 'a>(name: String, size: u32, stream: T) -> Self { return FileMessage { name_size: name.len() as u32, //@Expansion: 32 bits is a lot, but maybe in the far flung future. name: name, size: size, file: Box::new(stream) }; } } impl<'a> Streamable<'a> for FileMessage<'a> { fn read<T: Read + 'a>(mut stream: T) -> Result<Self> { //Get the length of the name let name_len = try!(stream.read_u32::<BigEndian>()); //Get the name from the stream let mut name_buff = Vec::with_capacity(name_len as usize); //@Expansion: Here we have the 32-bit again. let name_read = try!(stream.readn(&mut name_buff, name_len as usize)); if name_len != name_read as u32 { bail!(ErrorKind::IncompleteRead(name_read, name_len as usize)); } let name = String::from_utf8(name_buff).unwrap(); //@Error: Make error //Get the length of the file contents let file_len = try!(stream.read_u32::<BigEndian>()); //@Expansion: u32. That's a direct limit on the size of files. //Currently we aren't aiming at //supporting large files, which makes //it ok. 
//We aren't getting the file contents because we don't want to store it all in memory return Ok(FileMessage { name_size: name_len, name: name, size: file_len, file: Box::new(stream), }); } fn write<T: Write + 'a>(&mut self, mut stream: &mut T) -> Result<usize>{ try!(stream.write_u32::<BigEndian>(self.name_size)); //@Error: Should this be handled differently? try!(stream.write_all(self.name.as_bytes())); try!(stream.write_u32::<BigEndian>(self.size)); try!(std::io::copy(&mut self.file, &mut stream)); return Ok(0); } } pub type Dict<'a> = Box<[&'a str]>; pub struct TransportPresenter<'a> { dictionary: Dict<'a>, dict_entries: u32, } impl<'a> TransportPresenter<'a> { pub fn new(dictionary: Dict<'a>, dict_entries: u32) -> Self { return TransportPresenter { dictionary: dictionary, dict_entries: dict_entries, }; } pub fn present(&self, t: &Transport) -> Result<String> { let parts = (t.max_state() as f64).log(self.dict_entries as f64).ceil() as u32; let mut part_representation: Vec<&str> = Vec::with_capacity(parts as usize); let mut remainder = t.state(); for _ in 0..parts { let part = remainder % self.dict_entries; remainder = remainder / self.dict_entries; part_representation.push(self.dictionary[part as usize]); } return Ok(part_representation.join(" ")); } pub fn present_inv(&self, s: String) -> Result<ClientTransport> { let mut res: u32 = 0; let mut part_count = 0; for word in s.split(" ") { if let Ok(val) = self.dictionary.binary_search_by(|p| { //Flip the search to allow for cmp between String and &str match word.cmp(p) { Ordering::Greater => Ordering::Less, Ordering::Less => Ordering::Greater, Ordering::Equal => Ordering::Equal, } }) { res += (val as u32) * (self.dict_entries.pow(part_count)); part_count += 1; } else { bail!(ErrorKind::InvalidTransport(word.to_owned())); } } return Ok(ClientTransport::new(res)); } } pub struct ServerTransport { state: u32, max_state: u32, } pub struct ClientTransport { state: u32, } pub trait Transport { fn state(&self) -> u32; fn max_state(&self) -> u32; } impl ServerTransport { fn new(state: u32, max_state: u32) -> Self { return ServerTransport { state: state, max_state: max_state, }; } } impl Transport for ServerTransport { fn state(&self) -> u32 { return self.state; } fn max_state(&self) -> u32 { return self.max_state; } } pub trait PartialTransport { fn state(&self) -> u32; } impl ClientTransport { fn new(state: u32) -> Self { return ClientTransport { state: state, }; } } impl PartialTransport for ClientTransport { fn state(&self) -> u32 { return self.state; } } impl <T: Transport> PartialTransport for T { fn state(&self) -> u32 { return Transport::state(self); } } pub trait Transportable { fn make_transport(&self) -> Result<ServerTransport>; fn from_transport<T: PartialTransport>(t: T) -> Result<Self> where Self: std::marker::Sized; } impl Transportable for std::net::Ipv4Addr { fn make_transport(&self) -> Result<ServerTransport> { return Ok(ServerTransport::new(u32::from(self.clone()), std::u32::MAX)); } fn from_transport<T: PartialTransport>(t: T) -> Result<Self> { return Ok(std::net::Ipv4Addr::from(t.state())); } } #[derive(Clone)] pub struct FileInfo{ path: PathBuf, len: u64, } impl FileInfo { fn new(path: PathBuf, len: u64) -> FileInfo { return FileInfo { path: path, len: len, } } pub fn from_path(path: PathBuf) -> Result<FileInfo> { let metadata = std::fs::metadata(&path)?; return Ok(FileInfo::new(path, metadata.len())) } pub fn open(&self) -> std::result::Result<std::fs::File, std::io::Error> { return std::fs::File::open(&self.path); } } 
//@Refactor: This is just private but should be refactored fn send_file<S: Write>(mut stream: &mut S, file: &FileInfo) -> Result<()> { let filename = match file.path.file_name() .and_then(|x| x.to_str()) .map(|x| x.to_owned()) { Some(x) => x, None => return Err(ErrorKind::PathConversion.into()), }; let mut message = FileMessage::new(filename, file.len as u32, try!(file.open())); message.write(&mut stream) .chain_err(|| ErrorKind::Serialization)?; return Ok(()); } pub struct FileRepository { files: std::collections::HashMap<u32, FileInfo>, pub interface: network::Interface, next_id: u32, } impl FileRepository { pub fn new(interface: network::Interface) -> Self { return FileRepository { files: std::collections::HashMap::new(), interface: interface, next_id: 0, }; } pub fn add_file(&mut self, file: FileInfo) -> Result<ServerTransport> { self.files.insert(self.next_id, file); return self.interface.addr.make_transport(); } fn get_file(&self, index: u32) -> Result<&FileInfo> { return self.files.get(&index) .ok_or_else(|| ErrorKind::UnknownFile(index).into()); } pub fn run(&self) -> Result<()> { //@Expansion: Maybe don't use fixed ports let listener = std::net::TcpListener::bind((self.interface.addr, 2222)) .chain_err(|| ErrorKind::Bind(self.interface.addr, 2222))?; for conn in listener.incoming() { let mut stream = conn .chain_err(|| ErrorKind::ServerConnection)?; //TODO: I should read some sort of info about which file to get here let file = self.get_file(0) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; send_file(&mut stream, file) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; } return Ok(()); } } pub struct FileClient { } impl FileClient{ pub fn new() -> Self { return FileClient { } } pub fn get_file<T: PartialTransport>(&self, transport: T, out_path: Option<std::path::PathBuf>) -> Result<()> { let ip = std::net::Ipv4Addr::from_transport(transport)?; println!("{} from ip {}", Green.paint("Downloading"), Yellow.paint(ip.to_string())); //@Expansion: We can't time out right now. Use the net2::TcpBuilder? //@Expansion: Maybe don't use fixed ports let stream = std::net::TcpStream::connect((ip, 2222)) .chain_err(|| ErrorKind::ClientConnection(ip, 2222))?; let mut message = FileMessage::read(stream) .chain_err(|| ErrorKind::Fetch)?; let mut pb = ProgressBar::new(message.size as u64); pb.set_units(Units::Bytes); let new_path = out_path .unwrap_or(std::path::PathBuf::from(&message.name)); if new_path.exists()
//TODO: Make some error wrapper let mut file = std::fs::File::create(new_path)?; let mut buffer = [0u8; 8192]; loop{ let read = message.file.read(&mut buffer) .chain_err(|| ErrorKind::ReadContent)?; if read == 0 { break; } pb.add(read as u64); file.write(&mut buffer[0..read]) .chain_err(|| ErrorKind::WriteContent)?; } return Ok(()); } }
{ bail!(ErrorKind::FileExists(new_path)); }
conditional_block
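// A minimal round-trip sketch of the FileMessage wire format defined above:
// [name_len: u32 BE][name bytes][payload_len: u32 BE][payload bytes]. This is a
// hypothetical in-crate unit test (module and test names are illustrative, not
// part of the original source); it relies on private-field access from a child module.
#[cfg(test)]
mod wire_format_sketch {
    use super::*;
    use std::io::{Cursor, Read};

    #[test]
    fn file_message_round_trip() {
        let payload = b"hello world";
        let mut msg = FileMessage::new(
            "greeting.txt".to_owned(),
            payload.len() as u32,
            Cursor::new(&payload[..]),
        );

        // Serialize into an in-memory buffer:
        // 4 (name_len) + 12 (name) + 4 (size) + 11 (payload) = 31 bytes.
        let mut wire = Vec::new();
        msg.write(&mut wire).unwrap();
        assert_eq!(wire.len(), 31);

        // Parse it back; the body stays a lazy reader, so drain it manually.
        let mut parsed = FileMessage::read(Cursor::new(wire)).unwrap();
        assert_eq!(parsed.name, "greeting.txt");
        assert_eq!(parsed.size, 11);
        let mut body = Vec::new();
        parsed.file.read_to_end(&mut body).unwrap();
        assert_eq!(&body[..], &payload[..]);
    }
}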
lib.rs
// `error_chain!` can recurse deeply #![recursion_limit = "1024"] // Import the macro. Don't forget to add `error-chain` in your // `Cargo.toml`! #[macro_use] extern crate error_chain; #[macro_use] extern crate log; extern crate clap; extern crate byteorder; extern crate ansi_term; extern crate pbr; pub mod network; use std::path::PathBuf; use std::io::{Read, Write}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use ansi_term::Colour::*; use std::cmp::Ordering; use pbr::{ProgressBar, Units}; pub mod errors { use std::io; use std::net; use std::path; error_chain! { // The type defined for this error. These are the conventional // and recommended names, but they can be arbitrarily chosen. // // It is also possible to leave this section out entirely, or // leave it empty, and these names will be used automatically. types { Error, ErrorKind, ResultExt, Result; } // Without the `Result` wrapper: // // types { // Error, ErrorKind, ResultExt; // } // Automatic conversions between this error chain and other // error chains. In this case, it will e.g. generate an // `ErrorKind` variant called `Another` which in turn contains // the `other_error::ErrorKind`, with conversions from // `other_error::Error`. // // Optionally, some attributes can be added to a variant. // // This section can be empty. links { } // Automatic conversions between this error chain and other // error types not defined by the `error_chain!`. These will be // wrapped in a new error with, in the first case, the // `ErrorKind::Fmt` variant. The description and cause will // forward to the description and cause of the original error. // // Optionally, some attributes can be added to a variant. // // This section can be empty. foreign_links { Io(io::Error) #[cfg(unix)]; } // Define additional `ErrorKind` variants. The syntax here is // the same as `quick_error!`, but the `from()` and `cause()` // syntax is not supported. 
errors { PathConversion { description("Failed converting the path to a string") display("Failed converting path to string") } Serialization { description("Serialization failed") display("Failed serializing") } //I think cloning the pathbuf is ok for the slow path in case of error SendFile(remote_addr: net::SocketAddr) { description("Error while sending file") display("While sending to {}", remote_addr) } UnknownFile(index: u32) { description("The client requested an unknown file") display("The client requested an unknown file with id {}", index) } ServerConnection { description("While processing connection") display("A low level error occurred while processing connection") } ClientConnection(ip: net::Ipv4Addr, port: u16) { description("Client failed to connect to server") display("While connecting to {}:{}", ip, port) } Enumeration { description("While enumerating interface") display("While enumerating interfaces") } Bind(ip: net::Ipv4Addr, port: u16) { description("While binding connection") display("While binding to {}:{}", ip, port) } IncompleteRead(actual: usize, expected: usize) { description("An error occurred which caused a read to end before getting the expected data") display("A read didn't get the expected amount of data [Expected {}, Actual {}]", expected, actual) } Fetch { description("While reading message") display("While reading message") } InvalidTransport(t: String) { description("Transport not valid") display("Invalid transport: {}", t) } FileExists(p: ::std::path::PathBuf) { description("File already exists") display("Tried to write to existing file: {}", p.to_string_lossy()) } WriteContent { description("An error occurred while writing content to disk") display("While writing content to disk") } ReadContent { description("An error occurred while reading content from network") display("While reading content from network") } } } } use errors::*; trait Readn { fn readn(&mut self, buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize>; } impl<T: Read> Readn for T { fn readn(&mut self, buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize> { let mut sub = self.take(n as u64); return sub.read_to_end(buff); } } trait Streamable<'a> { fn read<T: Read + 'a>(stream: T) -> Result<Self> where Self: std::marker::Sized; fn write<T: Write + 'a>(&mut self, stream: &mut T) -> Result<usize>; } struct FileMessage<'a> { name_size: u32, name: String, size: u32, file: Box<Read + 'a>, } impl<'a> FileMessage<'a> { fn new<T: Read + 'a>(name: String, size: u32, stream: T) -> Self { return FileMessage { name_size: name.len() as u32, //@Expansion: 32 bits is a lot, but maybe in the far flung future. name: name, size: size, file: Box::new(stream) }; } } impl<'a> Streamable<'a> for FileMessage<'a> { fn read<T: Read + 'a>(mut stream: T) -> Result<Self> { //Get the length of the name let name_len = try!(stream.read_u32::<BigEndian>()); //Get the name from the stream let mut name_buff = Vec::with_capacity(name_len as usize); //@Expansion: Here we have the 32-bit again. let name_read = try!(stream.readn(&mut name_buff, name_len as usize)); if name_len != name_read as u32 { bail!(ErrorKind::IncompleteRead(name_read, name_len as usize)); } let name = String::from_utf8(name_buff).unwrap(); //@Error: Make error //Get the length of the file contents let file_len = try!(stream.read_u32::<BigEndian>()); //@Expansion: u32. That's a direct limit on the size of files. //Currently we aren't aiming at //supporting large files, which makes //it ok.
//We aren't getting the file contents because we don't want to store it all in memory return Ok(FileMessage { name_size: name_len, name: name, size: file_len, file: Box::new(stream), }); } fn write<T: Write + 'a>(&mut self, mut stream: &mut T) -> Result<usize>{ try!(stream.write_u32::<BigEndian>(self.name_size)); //@Error: Should this be handled differently? try!(stream.write_all(self.name.as_bytes())); try!(stream.write_u32::<BigEndian>(self.size)); try!(std::io::copy(&mut self.file, &mut stream)); return Ok(0); } } pub type Dict<'a> = Box<[&'a str]>; pub struct TransportPresenter<'a> { dictionary: Dict<'a>, dict_entries: u32, } impl<'a> TransportPresenter<'a> { pub fn new(dictionary: Dict<'a>, dict_entries: u32) -> Self { return TransportPresenter {
}; } pub fn present(&self, t: &Transport) -> Result<String> { let parts = (t.max_state() as f64).log(self.dict_entries as f64).ceil() as u32; let mut part_representation: Vec<&str> = Vec::with_capacity(parts as usize); let mut remainder = t.state(); for _ in 0..parts { let part = remainder % self.dict_entries; remainder = remainder / self.dict_entries; part_representation.push(self.dictionary[part as usize]); } return Ok(part_representation.join(" ")); } pub fn present_inv(&self, s: String) -> Result<ClientTransport> { let mut res: u32 = 0; let mut part_count = 0; for word in s.split(" ") { if let Ok(val) = self.dictionary.binary_search_by(|p| { //Flip the search to allow for cmp between String and &str match word.cmp(p) { Ordering::Greater => Ordering::Less, Ordering::Less => Ordering::Greater, Ordering::Equal => Ordering::Equal, } }) { res += (val as u32) * (self.dict_entries.pow(part_count)); part_count += 1; } else { bail!(ErrorKind::InvalidTransport(word.to_owned())); } } return Ok(ClientTransport::new(res)); } } pub struct ServerTransport { state: u32, max_state: u32, } pub struct ClientTransport { state: u32, } pub trait Transport { fn state(&self) -> u32; fn max_state(&self) -> u32; } impl ServerTransport { fn new(state: u32, max_state: u32) -> Self { return ServerTransport { state: state, max_state: max_state, }; } } impl Transport for ServerTransport { fn state(&self) -> u32 { return self.state; } fn max_state(&self) -> u32 { return self.max_state; } } pub trait PartialTransport { fn state(&self) -> u32; } impl ClientTransport { fn new(state: u32) -> Self { return ClientTransport { state: state, }; } } impl PartialTransport for ClientTransport { fn state(&self) -> u32 { return self.state; } } impl <T: Transport> PartialTransport for T { fn state(&self) -> u32 { return Transport::state(self); } } pub trait Transportable { fn make_transport(&self) -> Result<ServerTransport>; fn from_transport<T: PartialTransport>(t: T) -> Result<Self> where Self: std::marker::Sized; } impl Transportable for std::net::Ipv4Addr { fn make_transport(&self) -> Result<ServerTransport> { return Ok(ServerTransport::new(u32::from(self.clone()), std::u32::MAX)); } fn from_transport<T: PartialTransport>(t: T) -> Result<Self> { return Ok(std::net::Ipv4Addr::from(t.state())); } } #[derive(Clone)] pub struct FileInfo{ path: PathBuf, len: u64, } impl FileInfo { fn new(path: PathBuf, len: u64) -> FileInfo { return FileInfo { path: path, len: len, } } pub fn from_path(path: PathBuf) -> Result<FileInfo> { let metadata = std::fs::metadata(&path)?; return Ok(FileInfo::new(path, metadata.len())) } pub fn open(&self) -> std::result::Result<std::fs::File, std::io::Error> { return std::fs::File::open(&self.path); } } //@Refactor: This is just private but should be refactored fn send_file<S: Write>(mut stream: &mut S, file: &FileInfo) -> Result<()> { let filename = match file.path.file_name() .and_then(|x| x.to_str()) .map(|x| x.to_owned()) { Some(x) => x, None => return Err(ErrorKind::PathConversion.into()), }; let mut message = FileMessage::new(filename, file.len as u32, try!(file.open())); message.write(&mut stream) .chain_err(|| ErrorKind::Serialization)?; return Ok(()); } pub struct FileRepository { files: std::collections::HashMap<u32, FileInfo>, pub interface: network::Interface, next_id: u32, } impl FileRepository { pub fn new(interface: network::Interface) -> Self { return FileRepository { files: std::collections::HashMap::new(), interface: interface, next_id: 0, }; } pub fn add_file(&mut self, file: FileInfo) -> 
Result<ServerTransport> { self.files.insert(self.next_id, file); return self.interface.addr.make_transport(); } fn get_file(&self, index: u32) -> Result<&FileInfo> { return self.files.get(&index) .ok_or_else(|| ErrorKind::UnknownFile(index).into()); } pub fn run(&self) -> Result<()> { //@Expansion: Maybe don't use fixed ports let listener = std::net::TcpListener::bind((self.interface.addr, 2222)) .chain_err(|| ErrorKind::Bind(self.interface.addr, 2222))?; for conn in listener.incoming() { let mut stream = conn .chain_err(|| ErrorKind::ServerConnection)?; //TODO: I should read some sort of info about which file to get here let file = self.get_file(0) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; send_file(&mut stream, file) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; } return Ok(()); } } pub struct FileClient { } impl FileClient { pub fn new() -> Self { return FileClient { } } pub fn get_file<T: PartialTransport>(&self, transport: T, out_path: Option<std::path::PathBuf>) -> Result<()> { let ip = std::net::Ipv4Addr::from_transport(transport)?; println!("{} from ip {}", Green.paint("Downloading"), Yellow.paint(ip.to_string())); //@Expansion: We can't time out right now. Use the net2::TcpBuilder? //@Expansion: Maybe don't use fixed ports let stream = std::net::TcpStream::connect((ip, 2222)) .chain_err(|| ErrorKind::ClientConnection(ip, 2222))?; let mut message = FileMessage::read(stream) .chain_err(|| ErrorKind::Fetch)?; let mut pb = ProgressBar::new(message.size as u64); pb.set_units(Units::Bytes); let new_path = out_path .unwrap_or(std::path::PathBuf::from(&message.name)); if new_path.exists() { bail!(ErrorKind::FileExists(new_path)); } //TODO: Make some error wrapper let mut file = std::fs::File::create(new_path)?; let mut buffer = [0u8; 8192]; loop { let read = message.file.read(&mut buffer) .chain_err(|| ErrorKind::ReadContent)?; if read == 0 { break; } pb.add(read as u64); file.write_all(&buffer[0..read]) .chain_err(|| ErrorKind::WriteContent)?; } return Ok(()); } }
dictionary: dictionary, dict_entries: dict_entries,
random_line_split
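// A worked sketch of the word encoding above (hypothetical in-crate test; names are
// illustrative): `present` expands the u32 state in base `dict_entries`, least
// significant word first, padded to ceil(log_dict_entries(max_state)) words, and
// `present_inv` undoes it. Note that `present_inv` uses `binary_search_by`, so the
// dictionary must be sorted.
#[cfg(test)]
mod transport_words_sketch {
    use super::*;

    #[test]
    fn word_encoding_round_trip() {
        // Four sorted entries, so each word is one base-4 digit.
        let dict = vec!["alpha", "bravo", "charlie", "delta"].into_boxed_slice();
        let presenter = TransportPresenter::new(dict, 4);

        // 27 = 3 + 2*4 + 1*16, and ceil(log_4(255)) = 4 words, so the
        // encoding is "delta charlie bravo alpha" (zero-padded with "alpha").
        let t = ServerTransport::new(27, 255);
        let words = presenter.present(&t).unwrap();
        assert_eq!(words, "delta charlie bravo alpha");

        let back = presenter.present_inv(words).unwrap();
        assert_eq!(back.state(), 27);
    }
}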
lib.rs
// `error_chain!` can recurse deeply #![recursion_limit = "1024"] // Import the macro. Don't forget to add `error-chain` in your // `Cargo.toml`! #[macro_use] extern crate error_chain; #[macro_use] extern crate log; extern crate clap; extern crate byteorder; extern crate ansi_term; extern crate pbr; pub mod network; use std::path::PathBuf; use std::io::{Read, Write}; use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; use ansi_term::Colour::*; use std::cmp::Ordering; use pbr::{ProgressBar, Units}; pub mod errors { use std::io; use std::net; use std::path; error_chain! { // The type defined for this error. These are the conventional // and recommended names, but they can be arbitrarily chosen. // // It is also possible to leave this section out entirely, or // leave it empty, and these names will be used automatically. types { Error, ErrorKind, ResultExt, Result; } // Without the `Result` wrapper: // // types { // Error, ErrorKind, ResultExt; // } // Automatic conversions between this error chain and other // error chains. In this case, it will e.g. generate an // `ErrorKind` variant called `Another` which in turn contains // the `other_error::ErrorKind`, with conversions from // `other_error::Error`. // // Optionally, some attributes can be added to a variant. // // This section can be empty. links { } // Automatic conversions between this error chain and other // error types not defined by the `error_chain!`. These will be // wrapped in a new error with, in the first case, the // `ErrorKind::Fmt` variant. The description and cause will // forward to the description and cause of the original error. // // Optionally, some attributes can be added to a variant. // // This section can be empty. foreign_links { Io(io::Error) #[cfg(unix)]; } // Define additional `ErrorKind` variants. The syntax here is // the same as `quick_error!`, but the `from()` and `cause()` // syntax is not supported. 
errors { PathConversion { description("Failed converting the path to a string") display("Failed converting path to string") } Serialization { description("Serialization failed") display("Failed serializing") } //I think cloning the pathbuf is ok for the slow path in case of error SendFile(remote_addr: net::SocketAddr) { description("Error while sending file") display("While sending to {}", remote_addr) } UnknownFile(index: u32) { description("The client requested an unknown file") display("The client requested an unknown file with id {}", index) } ServerConnection { description("While processing connection") display("A low level error occurred while processing connection") } ClientConnection(ip: net::Ipv4Addr, port: u16) { description("Client failed to connect to server") display("While connecting to {}:{}", ip, port) } Enumeration { description("While enumerating interface") display("While enumerating interfaces") } Bind(ip: net::Ipv4Addr, port: u16) { description("While binding connection") display("While binding to {}:{}", ip, port) } IncompleteRead(actual: usize, expected: usize) { description("An error occurred which caused a read to end before getting the expected data") display("A read didn't get the expected amount of data [Expected {}, Actual {}]", expected, actual) } Fetch { description("While reading message") display("While reading message") } InvalidTransport(t: String) { description("Transport not valid") display("Invalid transport: {}", t) } FileExists(p: ::std::path::PathBuf) { description("File already exists") display("Tried to write to existing file: {}", p.to_string_lossy()) } WriteContent { description("An error occurred while writing content to disk") display("While writing content to disk") } ReadContent { description("An error occurred while reading content from network") display("While reading content from network") } } } } use errors::*; trait Readn { fn readn(&mut self, buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize>; } impl<T: Read> Readn for T { fn readn(&mut self, buff: &mut Vec<u8>, n: usize) -> std::io::Result<usize> { let mut sub = self.take(n as u64); return sub.read_to_end(buff); } } trait Streamable<'a> { fn read<T: Read + 'a>(stream: T) -> Result<Self> where Self: std::marker::Sized; fn write<T: Write + 'a>(&mut self, stream: &mut T) -> Result<usize>; } struct FileMessage<'a> { name_size: u32, name: String, size: u32, file: Box<Read + 'a>, } impl<'a> FileMessage<'a> { fn new<T: Read + 'a>(name: String, size: u32, stream: T) -> Self { return FileMessage { name_size: name.len() as u32, //@Expansion: 32 bits is a lot, but maybe in the far flung future. name: name, size: size, file: Box::new(stream) }; } } impl<'a> Streamable<'a> for FileMessage<'a> { fn read<T: Read + 'a>(mut stream: T) -> Result<Self> { //Get the length of the name let name_len = try!(stream.read_u32::<BigEndian>()); //Get the name from the stream let mut name_buff = Vec::with_capacity(name_len as usize); //@Expansion: Here we have the 32-bit again. let name_read = try!(stream.readn(&mut name_buff, name_len as usize)); if name_len != name_read as u32 { bail!(ErrorKind::IncompleteRead(name_read, name_len as usize)); } let name = String::from_utf8(name_buff).unwrap(); //@Error: Make error //Get the length of the file contents let file_len = try!(stream.read_u32::<BigEndian>()); //@Expansion: u32. That's a direct limit on the size of files. //Currently we aren't aiming at //supporting large files, which makes //it ok.
//We aren't getting the file contents because we don't want to store it all in memory return Ok(FileMessage { name_size: name_len, name: name, size: file_len, file: Box::new(stream), }); } fn write<T: Write + 'a>(&mut self, mut stream: &mut T) -> Result<usize>{ try!(stream.write_u32::<BigEndian>(self.name_size)); //@Error: Should this be handled differently? try!(stream.write_all(self.name.as_bytes())); try!(stream.write_u32::<BigEndian>(self.size)); try!(std::io::copy(&mut self.file, &mut stream)); return Ok(0); } } pub type Dict<'a> = Box<[&'a str]>; pub struct TransportPresenter<'a> { dictionary: Dict<'a>, dict_entries: u32, } impl<'a> TransportPresenter<'a> { pub fn new(dictionary: Dict<'a>, dict_entries: u32) -> Self { return TransportPresenter { dictionary: dictionary, dict_entries: dict_entries, }; } pub fn present(&self, t: &Transport) -> Result<String> { let parts = (t.max_state() as f64).log(self.dict_entries as f64).ceil() as u32; let mut part_representation: Vec<&str> = Vec::with_capacity(parts as usize); let mut remainder = t.state(); for _ in 0..parts { let part = remainder % self.dict_entries; remainder = remainder / self.dict_entries; part_representation.push(self.dictionary[part as usize]); } return Ok(part_representation.join(" ")); } pub fn present_inv(&self, s: String) -> Result<ClientTransport> { let mut res: u32 = 0; let mut part_count = 0; for word in s.split(" ") { if let Ok(val) = self.dictionary.binary_search_by(|p| { //Flip the search to allow for cmp between String and &str match word.cmp(p) { Ordering::Greater => Ordering::Less, Ordering::Less => Ordering::Greater, Ordering::Equal => Ordering::Equal, } }) { res += (val as u32) * (self.dict_entries.pow(part_count)); part_count += 1; } else { bail!(ErrorKind::InvalidTransport(word.to_owned())); } } return Ok(ClientTransport::new(res)); } } pub struct ServerTransport { state: u32, max_state: u32, } pub struct ClientTransport { state: u32, } pub trait Transport { fn state(&self) -> u32; fn max_state(&self) -> u32; } impl ServerTransport { fn new(state: u32, max_state: u32) -> Self { return ServerTransport { state: state, max_state: max_state, }; } } impl Transport for ServerTransport { fn state(&self) -> u32 { return self.state; } fn max_state(&self) -> u32 { return self.max_state; } } pub trait PartialTransport { fn state(&self) -> u32; } impl ClientTransport { fn new(state: u32) -> Self { return ClientTransport { state: state, }; } } impl PartialTransport for ClientTransport { fn state(&self) -> u32 { return self.state; } } impl <T: Transport> PartialTransport for T { fn state(&self) -> u32 { return Transport::state(self); } } pub trait Transportable { fn make_transport(&self) -> Result<ServerTransport>; fn from_transport<T: PartialTransport>(t: T) -> Result<Self> where Self: std::marker::Sized; } impl Transportable for std::net::Ipv4Addr { fn make_transport(&self) -> Result<ServerTransport>
fn from_transport<T: PartialTransport>(t: T) -> Result<Self> { return Ok(std::net::Ipv4Addr::from(t.state())); } } #[derive(Clone)] pub struct FileInfo { path: PathBuf, len: u64, } impl FileInfo { fn new(path: PathBuf, len: u64) -> FileInfo { return FileInfo { path: path, len: len, } } pub fn from_path(path: PathBuf) -> Result<FileInfo> { let metadata = std::fs::metadata(&path)?; return Ok(FileInfo::new(path, metadata.len())) } pub fn open(&self) -> std::result::Result<std::fs::File, std::io::Error> { return std::fs::File::open(&self.path); } } //@Refactor: This is just private but should be refactored fn send_file<S: Write>(mut stream: &mut S, file: &FileInfo) -> Result<()> { let filename = match file.path.file_name() .and_then(|x| x.to_str()) .map(|x| x.to_owned()) { Some(x) => x, None => return Err(ErrorKind::PathConversion.into()), }; let mut message = FileMessage::new(filename, file.len as u32, try!(file.open())); message.write(&mut stream) .chain_err(|| ErrorKind::Serialization)?; return Ok(()); } pub struct FileRepository { files: std::collections::HashMap<u32, FileInfo>, pub interface: network::Interface, next_id: u32, } impl FileRepository { pub fn new(interface: network::Interface) -> Self { return FileRepository { files: std::collections::HashMap::new(), interface: interface, next_id: 0, }; } pub fn add_file(&mut self, file: FileInfo) -> Result<ServerTransport> { self.files.insert(self.next_id, file); return self.interface.addr.make_transport(); } fn get_file(&self, index: u32) -> Result<&FileInfo> { return self.files.get(&index) .ok_or_else(|| ErrorKind::UnknownFile(index).into()); } pub fn run(&self) -> Result<()> { //@Expansion: Maybe don't use fixed ports let listener = std::net::TcpListener::bind((self.interface.addr, 2222)) .chain_err(|| ErrorKind::Bind(self.interface.addr, 2222))?; for conn in listener.incoming() { let mut stream = conn .chain_err(|| ErrorKind::ServerConnection)?; //TODO: I should read some sort of info about which file to get here let file = self.get_file(0) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; send_file(&mut stream, file) .chain_err(|| ErrorKind::SendFile(stream.peer_addr().unwrap()))?; } return Ok(()); } } pub struct FileClient { } impl FileClient { pub fn new() -> Self { return FileClient { } } pub fn get_file<T: PartialTransport>(&self, transport: T, out_path: Option<std::path::PathBuf>) -> Result<()> { let ip = std::net::Ipv4Addr::from_transport(transport)?; println!("{} from ip {}", Green.paint("Downloading"), Yellow.paint(ip.to_string())); //@Expansion: We can't time out right now. Use the net2::TcpBuilder? //@Expansion: Maybe don't use fixed ports let stream = std::net::TcpStream::connect((ip, 2222)) .chain_err(|| ErrorKind::ClientConnection(ip, 2222))?; let mut message = FileMessage::read(stream) .chain_err(|| ErrorKind::Fetch)?; let mut pb = ProgressBar::new(message.size as u64); pb.set_units(Units::Bytes); let new_path = out_path .unwrap_or(std::path::PathBuf::from(&message.name)); if new_path.exists() { bail!(ErrorKind::FileExists(new_path)); } //TODO: Make some error wrapper let mut file = std::fs::File::create(new_path)?; let mut buffer = [0u8; 8192]; loop { let read = message.file.read(&mut buffer) .chain_err(|| ErrorKind::ReadContent)?; if read == 0 { break; } pb.add(read as u64); file.write_all(&buffer[0..read]) .chain_err(|| ErrorKind::WriteContent)?; } return Ok(()); } }
{ return Ok(ServerTransport::new(u32::from(self.clone()), std::u32::MAX)); }
identifier_body
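// A round-trip sketch for the Transportable impl above (hypothetical in-crate test):
// `u32::from(Ipv4Addr)` and `Ipv4Addr::from(u32)` are exact inverses, and `max_state`
// spans the full u32 range, so every IPv4 address survives the conversion.
#[cfg(test)]
mod ip_transport_sketch {
    use super::*;

    #[test]
    fn ipv4_round_trip() {
        let ip: std::net::Ipv4Addr = "192.168.1.7".parse().unwrap();
        let transport = ip.make_transport().unwrap();
        assert_eq!(transport.max_state(), std::u32::MAX);
        let back = std::net::Ipv4Addr::from_transport(transport).unwrap();
        assert_eq!(ip, back);
    }
}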
bulk.rs
use bitcoincash::blockdata::block::Block; use bitcoincash::consensus::encode::{deserialize, Decodable}; use bitcoincash::hash_types::BlockHash; use std::collections::HashSet; use std::fs; use std::io::Cursor; use std::path::{Path, PathBuf}; use std::sync::{ mpsc::{Receiver, SyncSender}, Arc, Mutex, }; use std::thread; use crate::cashaccount::CashAccountParser; use crate::daemon::Daemon; use crate::errors::*; use crate::index::{index_block, last_indexed_block, read_indexed_blockhashes}; use crate::metrics::Metrics; use crate::signal::Waiter; use crate::store::{DbStore, Row, WriteStore}; use crate::util::{spawn_thread, HeaderList, SyncChannel}; struct Parser { magic: u32, current_headers: HeaderList, indexed_blockhashes: Mutex<HashSet<BlockHash>>, cashaccount_activation_height: u32, // metrics duration: prometheus::HistogramVec, block_count: prometheus::IntCounterVec, bytes_read: prometheus::Histogram, } impl Parser { fn new( daemon: &Daemon, metrics: &Metrics, indexed_blockhashes: HashSet<BlockHash>, cashaccount_activation_height: u32, ) -> Result<Arc<Parser>> { Ok(Arc::new(Parser { magic: daemon.disk_magic(), current_headers: load_headers(daemon)?, indexed_blockhashes: Mutex::new(indexed_blockhashes), cashaccount_activation_height, duration: metrics.histogram_vec( prometheus::HistogramOpts::new( "electrscash_parse_duration", "blk*.dat parsing duration (in seconds)", ), &["step"], ), block_count: metrics.counter_int_vec( prometheus::Opts::new( "electrscash_parse_blocks", "# of block parsed (from blk*.dat)", ), &["type"], ), bytes_read: metrics.histogram(prometheus::HistogramOpts::new( "electrscash_parse_bytes_read", "# of bytes read (from blk*.dat)", )), })) } fn last_indexed_row(&self) -> Row { // TODO: use JSONRPC for missing blocks, and don't use 'L' row at all. let indexed_blockhashes = self.indexed_blockhashes.lock().unwrap(); let last_header = self .current_headers .iter() .take_while(|h| indexed_blockhashes.contains(h.hash())) .last() .expect("no indexed header found"); debug!("last indexed block: {:?}", last_header); last_indexed_block(last_header.hash()) } fn read_blkfile(&self, path: &Path) -> Result<Vec<u8>> { let timer = self.duration.with_label_values(&["read"]).start_timer(); let blob = fs::read(&path).chain_err(|| format!("failed to read {:?}", path))?; timer.observe_duration(); self.bytes_read.observe(blob.len() as f64); Ok(blob) } fn index_blkfile(&self, blob: Vec<u8>) -> Result<Vec<Row>> { let timer = self.duration.with_label_values(&["parse"]).start_timer(); let blocks = parse_blocks(blob, self.magic)?; timer.observe_duration(); let mut rows = Vec::<Row>::new(); let timer = self.duration.with_label_values(&["index"]).start_timer(); let cashaccount = CashAccountParser::new(Some(self.cashaccount_activation_height)); for block in blocks { let blockhash = block.block_hash(); if let Some(header) = self.current_headers.header_by_blockhash(&blockhash) { if self .indexed_blockhashes .lock() .expect("indexed_blockhashes") .insert(blockhash) { rows.extend(index_block(&block, header.height(), &cashaccount)); self.block_count.with_label_values(&["indexed"]).inc(); } else { self.block_count.with_label_values(&["duplicate"]).inc(); } } else { // will be indexed later (after bulk load is over) if not an orphan block self.block_count.with_label_values(&["skipped"]).inc(); } } timer.observe_duration(); let timer = self.duration.with_label_values(&["sort"]).start_timer(); rows.sort_unstable_by(|a, b| a.key.cmp(&b.key)); timer.observe_duration(); Ok(rows) } } fn
(blob: Vec<u8>, magic: u32) -> Result<Vec<Block>> { let mut cursor = Cursor::new(&blob); let mut blocks = vec![]; let max_pos = blob.len() as u64; while cursor.position() < max_pos { let offset = cursor.position(); match u32::consensus_decode(&mut cursor) { Ok(value) => { if magic != value { cursor.set_position(offset + 1); continue; } } Err(_) => break, // EOF }; let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?; let start = cursor.position(); let end = start + block_size as u64; // If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written // and the block body won't be written to the blk*.dat file. // Since the first 4 bytes should contain the block's version, we can skip such blocks // by peeking the cursor (and skipping previous `magic` and `block_size`). match u32::consensus_decode(&mut cursor) { Ok(value) => { if magic == value { cursor.set_position(start); continue; } } Err(_) => break, // EOF } let block: Block = deserialize(&blob[start as usize..end as usize]) .chain_err(|| format!("failed to parse block at {}..{}", start, end))?; blocks.push(block); cursor.set_position(end as u64); } Ok(blocks) } fn load_headers(daemon: &Daemon) -> Result<HeaderList> { let tip = daemon.getbestblockhash()?; let mut headers = HeaderList::empty(); let new_headers = headers.order(daemon.get_new_headers(&headers, &tip)?); headers.apply(&new_headers, tip); Ok(headers) } fn set_open_files_limit(limit: libc::rlim_t) { let resource = libc::RLIMIT_NOFILE; let mut rlim = libc::rlimit { rlim_cur: 0, rlim_max: 0, }; let result = unsafe { libc::getrlimit(resource, &mut rlim) }; if result < 0 { panic!("getrlimit() failed: {}", result); } rlim.rlim_cur = limit; // set soft limit only. let result = unsafe { libc::setrlimit(resource, &rlim) }; if result < 0 { panic!("setrlimit() failed: {}", result); } } type JoinHandle = thread::JoinHandle<Result<()>>; type BlobReceiver = Arc<Mutex<Receiver<(Vec<u8>, PathBuf)>>>; fn start_reader(blk_files: Vec<PathBuf>, parser: Arc<Parser>) -> (BlobReceiver, JoinHandle) { let chan = SyncChannel::new(0); let blobs = chan.sender(); let handle = spawn_thread("bulk_read", move || -> Result<()> { for path in blk_files { blobs .send((parser.read_blkfile(&path)?, path)) .expect("failed to send blk*.dat contents"); } Ok(()) }); (Arc::new(Mutex::new(chan.into_receiver())), handle) } fn start_indexer( blobs: BlobReceiver, parser: Arc<Parser>, writer: SyncSender<(Vec<Row>, PathBuf)>, ) -> JoinHandle { spawn_thread("bulk_index", move || -> Result<()> { loop { let msg = blobs.lock().unwrap().recv(); if let Ok((blob, path)) = msg { let rows = parser .index_blkfile(blob) .chain_err(|| format!("failed to index {:?}", path))?; writer .send((rows, path)) .expect("failed to send indexed rows") } else { debug!("no more blocks to index"); break; } } Ok(()) }) } pub fn index_blk_files( daemon: &Daemon, index_threads: usize, metrics: &Metrics, signal: &Waiter, store: DbStore, cashaccount_activation_height: u32, ) -> Result<DbStore> { set_open_files_limit(2048); // twice the default `ulimit -n` value let blk_files = daemon.list_blk_files()?; info!("indexing {} blk*.dat files", blk_files.len()); let indexed_blockhashes = read_indexed_blockhashes(&store); debug!("found {} indexed blocks", indexed_blockhashes.len()); let parser = Parser::new( daemon, metrics, indexed_blockhashes, cashaccount_activation_height, )?; let (blobs, reader) = start_reader(blk_files, parser.clone()); let rows_chan = SyncChannel::new(0); #[allow(clippy::needless_collect)]
let indexers: Vec<JoinHandle> = (0..index_threads) .map(|_| start_indexer(blobs.clone(), parser.clone(), rows_chan.sender())) .collect(); for (rows, path) in rows_chan.into_receiver() { trace!("indexed {:?}: {} rows", path, rows.len()); store.write(rows, false); signal .poll() .chain_err(|| "stopping bulk indexing due to signal")?; } reader .join() .expect("reader panicked") .expect("reader failed"); indexers.into_iter().for_each(|i| { i.join() .expect("indexer panicked") .expect("indexing failed") }); store.write(vec![parser.last_indexed_row()], true); Ok(store) } #[cfg(test)] mod tests { use super::*; use bitcoincash::hashes::Hash; use hex::decode as hex_decode; #[test] fn test_incomplete_block_parsing() { let magic = 0x0709110b; let raw_blocks = hex_decode(fixture("incomplete_block.hex")).unwrap(); let blocks = parse_blocks(raw_blocks, magic).unwrap(); assert_eq!(blocks.len(), 2); assert_eq!( blocks[1].block_hash().into_inner().to_vec(), hex_decode("d55acd552414cc44a761e8d6b64a4d555975e208397281d115336fc500000000").unwrap() ); } pub fn fixture(filename: &str) -> String { let path = Path::new("src") .join("tests") .join("fixtures") .join(filename); fs::read_to_string(path).unwrap() } }
parse_blocks
identifier_name
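// A sketch of extra unit tests for `parse_blocks` (module and test names are
// illustrative): they exercise the two recovery paths described in the comments
// above, namely byte-by-byte resynchronization on bad magic, and skipping a record
// header whose block body was never written.
#[cfg(test)]
mod parse_blocks_sketch {
    use super::*;

    #[test]
    fn tolerates_garbage_and_truncation() {
        let magic = 0x0709110bu32;

        // Pure garbage: the scanner slides forward one byte at a time and
        // reaches EOF without finding a record.
        assert!(parse_blocks(vec![0xab; 16], magic).unwrap().is_empty());

        // Magic and size were written but the body was not (the ftell-failure
        // case): peeking for the version field hits EOF, so the loop stops cleanly.
        let mut blob = magic.to_le_bytes().to_vec();
        blob.extend_from_slice(&1000u32.to_le_bytes());
        assert!(parse_blocks(blob, magic).unwrap().is_empty());
    }
}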
bulk.rs
use bitcoincash::blockdata::block::Block; use bitcoincash::consensus::encode::{deserialize, Decodable}; use bitcoincash::hash_types::BlockHash; use std::collections::HashSet; use std::fs; use std::io::Cursor; use std::path::{Path, PathBuf}; use std::sync::{ mpsc::{Receiver, SyncSender}, Arc, Mutex, }; use std::thread; use crate::cashaccount::CashAccountParser; use crate::daemon::Daemon; use crate::errors::*; use crate::index::{index_block, last_indexed_block, read_indexed_blockhashes}; use crate::metrics::Metrics; use crate::signal::Waiter; use crate::store::{DbStore, Row, WriteStore}; use crate::util::{spawn_thread, HeaderList, SyncChannel}; struct Parser { magic: u32, current_headers: HeaderList, indexed_blockhashes: Mutex<HashSet<BlockHash>>, cashaccount_activation_height: u32, // metrics duration: prometheus::HistogramVec, block_count: prometheus::IntCounterVec, bytes_read: prometheus::Histogram, } impl Parser { fn new( daemon: &Daemon, metrics: &Metrics, indexed_blockhashes: HashSet<BlockHash>, cashaccount_activation_height: u32, ) -> Result<Arc<Parser>> { Ok(Arc::new(Parser { magic: daemon.disk_magic(), current_headers: load_headers(daemon)?, indexed_blockhashes: Mutex::new(indexed_blockhashes), cashaccount_activation_height, duration: metrics.histogram_vec( prometheus::HistogramOpts::new( "electrscash_parse_duration", "blk*.dat parsing duration (in seconds)", ), &["step"], ), block_count: metrics.counter_int_vec( prometheus::Opts::new( "electrscash_parse_blocks", "# of block parsed (from blk*.dat)", ), &["type"], ), bytes_read: metrics.histogram(prometheus::HistogramOpts::new( "electrscash_parse_bytes_read", "# of bytes read (from blk*.dat)", )), })) } fn last_indexed_row(&self) -> Row { // TODO: use JSONRPC for missing blocks, and don't use 'L' row at all. 
let indexed_blockhashes = self.indexed_blockhashes.lock().unwrap(); let last_header = self .current_headers .iter() .take_while(|h| indexed_blockhashes.contains(h.hash())) .last() .expect("no indexed header found"); debug!("last indexed block: {:?}", last_header); last_indexed_block(last_header.hash()) } fn read_blkfile(&self, path: &Path) -> Result<Vec<u8>> { let timer = self.duration.with_label_values(&["read"]).start_timer(); let blob = fs::read(&path).chain_err(|| format!("failed to read {:?}", path))?; timer.observe_duration(); self.bytes_read.observe(blob.len() as f64); Ok(blob) } fn index_blkfile(&self, blob: Vec<u8>) -> Result<Vec<Row>> { let timer = self.duration.with_label_values(&["parse"]).start_timer(); let blocks = parse_blocks(blob, self.magic)?; timer.observe_duration(); let mut rows = Vec::<Row>::new(); let timer = self.duration.with_label_values(&["index"]).start_timer(); let cashaccount = CashAccountParser::new(Some(self.cashaccount_activation_height)); for block in blocks { let blockhash = block.block_hash(); if let Some(header) = self.current_headers.header_by_blockhash(&blockhash) { if self .indexed_blockhashes .lock() .expect("indexed_blockhashes") .insert(blockhash) { rows.extend(index_block(&block, header.height(), &cashaccount)); self.block_count.with_label_values(&["indexed"]).inc(); } else { self.block_count.with_label_values(&["duplicate"]).inc(); } } else { // will be indexed later (after bulk load is over) if not an orphan block self.block_count.with_label_values(&["skipped"]).inc(); } } timer.observe_duration(); let timer = self.duration.with_label_values(&["sort"]).start_timer(); rows.sort_unstable_by(|a, b| a.key.cmp(&b.key)); timer.observe_duration(); Ok(rows) } } fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<Block>> { let mut cursor = Cursor::new(&blob); let mut blocks = vec![]; let max_pos = blob.len() as u64; while cursor.position() < max_pos { let offset = cursor.position(); match u32::consensus_decode(&mut cursor) { Ok(value) => { if magic != value { cursor.set_position(offset + 1); continue; } } Err(_) => break, // EOF }; let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?; let start = cursor.position(); let end = start + block_size as u64; // If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written // and the block body won't be written to the blk*.dat file. // Since the first 4 bytes should contain the block's version, we can skip such blocks // by peeking the cursor (and skipping previous `magic` and `block_size`). match u32::consensus_decode(&mut cursor) { Ok(value) => { if magic == value { cursor.set_position(start); continue; } } Err(_) => break, // EOF } let block: Block = deserialize(&blob[start as usize..end as usize]) .chain_err(|| format!("failed to parse block at {}..{}", start, end))?; blocks.push(block); cursor.set_position(end as u64); } Ok(blocks) } fn load_headers(daemon: &Daemon) -> Result<HeaderList> { let tip = daemon.getbestblockhash()?; let mut headers = HeaderList::empty(); let new_headers = headers.order(daemon.get_new_headers(&headers, &tip)?); headers.apply(&new_headers, tip); Ok(headers) } fn set_open_files_limit(limit: libc::rlim_t) { let resource = libc::RLIMIT_NOFILE; let mut rlim = libc::rlimit { rlim_cur: 0, rlim_max: 0, }; let result = unsafe { libc::getrlimit(resource, &mut rlim) }; if result < 0 { panic!("getrlimit() failed: {}", result); } rlim.rlim_cur = limit; // set soft limit only.
let result = unsafe { libc::setrlimit(resource, &rlim) }; if result < 0 { panic!("setrlimit() failed: {}", result); } }
let chan = SyncChannel::new(0); let blobs = chan.sender(); let handle = spawn_thread("bulk_read", move || -> Result<()> { for path in blk_files { blobs .send((parser.read_blkfile(&path)?, path)) .expect("failed to send blk*.dat contents"); } Ok(()) }); (Arc::new(Mutex::new(chan.into_receiver())), handle) } fn start_indexer( blobs: BlobReceiver, parser: Arc<Parser>, writer: SyncSender<(Vec<Row>, PathBuf)>, ) -> JoinHandle { spawn_thread("bulk_index", move || -> Result<()> { loop { let msg = blobs.lock().unwrap().recv(); if let Ok((blob, path)) = msg { let rows = parser .index_blkfile(blob) .chain_err(|| format!("failed to index {:?}", path))?; writer .send((rows, path)) .expect("failed to send indexed rows") } else { debug!("no more blocks to index"); break; } } Ok(()) }) } pub fn index_blk_files( daemon: &Daemon, index_threads: usize, metrics: &Metrics, signal: &Waiter, store: DbStore, cashaccount_activation_height: u32, ) -> Result<DbStore> { set_open_files_limit(2048); // twice the default `ulimit -n` value let blk_files = daemon.list_blk_files()?; info!("indexing {} blk*.dat files", blk_files.len()); let indexed_blockhashes = read_indexed_blockhashes(&store); debug!("found {} indexed blocks", indexed_blockhashes.len()); let parser = Parser::new( daemon, metrics, indexed_blockhashes, cashaccount_activation_height, )?; let (blobs, reader) = start_reader(blk_files, parser.clone()); let rows_chan = SyncChannel::new(0); #[allow(clippy::needless_collect)] let indexers: Vec<JoinHandle> = (0..index_threads) .map(|_| start_indexer(blobs.clone(), parser.clone(), rows_chan.sender())) .collect(); for (rows, path) in rows_chan.into_receiver() { trace!("indexed {:?}: {} rows", path, rows.len()); store.write(rows, false); signal .poll() .chain_err(|| "stopping bulk indexing due to signal")?; } reader .join() .expect("reader panicked") .expect("reader failed"); indexers.into_iter().for_each(|i| { i.join() .expect("indexer panicked") .expect("indexing failed") }); store.write(vec![parser.last_indexed_row()], true); Ok(store) } #[cfg(test)] mod tests { use super::*; use bitcoincash::hashes::Hash; use hex::decode as hex_decode; #[test] fn test_incomplete_block_parsing() { let magic = 0x0709110b; let raw_blocks = hex_decode(fixture("incomplete_block.hex")).unwrap(); let blocks = parse_blocks(raw_blocks, magic).unwrap(); assert_eq!(blocks.len(), 2); assert_eq!( blocks[1].block_hash().into_inner().to_vec(), hex_decode("d55acd552414cc44a761e8d6b64a4d555975e208397281d115336fc500000000").unwrap() ); } pub fn fixture(filename: &str) -> String { let path = Path::new("src") .join("tests") .join("fixtures") .join(filename); fs::read_to_string(path).unwrap() } }
type JoinHandle = thread::JoinHandle<Result<()>>; type BlobReceiver = Arc<Mutex<Receiver<(Vec<u8>, PathBuf)>>>; fn start_reader(blk_files: Vec<PathBuf>, parser: Arc<Parser>) -> (BlobReceiver, JoinHandle) {
random_line_split
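// The bulk loader above is a classic fan-out/fan-in pipeline: one reader thread,
// `index_threads` workers sharing a Mutex-wrapped receiver, and rendezvous
// (capacity-0) channels so at most one blob is in flight per stage. Below is a
// self-contained sketch of the same shape using only std, independent of the
// crate's SyncChannel/spawn_thread helpers; all names here are illustrative.
use std::sync::{mpsc, Arc, Mutex};
use std::thread;

fn pipeline_sketch(inputs: Vec<String>, workers: usize) {
    let (blob_tx, blob_rx) = mpsc::sync_channel::<String>(0);
    let blob_rx = Arc::new(Mutex::new(blob_rx));
    let (row_tx, row_rx) = mpsc::sync_channel::<usize>(0);

    // Reader: pushes work items; dropping blob_tx at the end stops the workers.
    let reader = thread::spawn(move || {
        for item in inputs {
            blob_tx.send(item).expect("worker hung up");
        }
    });

    // Workers: serialize on the shared receiver, "index" the blob, forward results.
    let handles: Vec<_> = (0..workers)
        .map(|_| {
            let rx = Arc::clone(&blob_rx);
            let tx = row_tx.clone();
            thread::spawn(move || loop {
                let msg = rx.lock().unwrap().recv();
                match msg {
                    Ok(blob) => tx.send(blob.len()).expect("writer hung up"),
                    Err(_) => break, // channel closed: no more work
                }
            })
        })
        .collect();
    drop(row_tx); // keep only the workers' clones alive

    // Writer: the single consumer; the loop ends once every worker is done.
    for rows in row_rx {
        println!("indexed {} rows", rows);
    }
    reader.join().unwrap();
    handles.into_iter().for_each(|h| h.join().unwrap());
}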
bulk.rs
use bitcoincash::blockdata::block::Block; use bitcoincash::consensus::encode::{deserialize, Decodable}; use bitcoincash::hash_types::BlockHash; use std::collections::HashSet; use std::fs; use std::io::Cursor; use std::path::{Path, PathBuf}; use std::sync::{ mpsc::{Receiver, SyncSender}, Arc, Mutex, }; use std::thread; use crate::cashaccount::CashAccountParser; use crate::daemon::Daemon; use crate::errors::*; use crate::index::{index_block, last_indexed_block, read_indexed_blockhashes}; use crate::metrics::Metrics; use crate::signal::Waiter; use crate::store::{DbStore, Row, WriteStore}; use crate::util::{spawn_thread, HeaderList, SyncChannel}; struct Parser { magic: u32, current_headers: HeaderList, indexed_blockhashes: Mutex<HashSet<BlockHash>>, cashaccount_activation_height: u32, // metrics duration: prometheus::HistogramVec, block_count: prometheus::IntCounterVec, bytes_read: prometheus::Histogram, } impl Parser { fn new( daemon: &Daemon, metrics: &Metrics, indexed_blockhashes: HashSet<BlockHash>, cashaccount_activation_height: u32, ) -> Result<Arc<Parser>>
bytes_read: metrics.histogram(prometheus::HistogramOpts::new( "electrscash_parse_bytes_read", "# of bytes read (from blk*.dat)", )), })) } fn last_indexed_row(&self) -> Row { // TODO: use JSONRPC for missing blocks, and don't use 'L' row at all. let indexed_blockhashes = self.indexed_blockhashes.lock().unwrap(); let last_header = self .current_headers .iter() .take_while(|h| indexed_blockhashes.contains(h.hash())) .last() .expect("no indexed header found"); debug!("last indexed block: {:?}", last_header); last_indexed_block(last_header.hash()) } fn read_blkfile(&self, path: &Path) -> Result<Vec<u8>> { let timer = self.duration.with_label_values(&["read"]).start_timer(); let blob = fs::read(&path).chain_err(|| format!("failed to read {:?}", path))?; timer.observe_duration(); self.bytes_read.observe(blob.len() as f64); Ok(blob) } fn index_blkfile(&self, blob: Vec<u8>) -> Result<Vec<Row>> { let timer = self.duration.with_label_values(&["parse"]).start_timer(); let blocks = parse_blocks(blob, self.magic)?; timer.observe_duration(); let mut rows = Vec::<Row>::new(); let timer = self.duration.with_label_values(&["index"]).start_timer(); let cashaccount = CashAccountParser::new(Some(self.cashaccount_activation_height)); for block in blocks { let blockhash = block.block_hash(); if let Some(header) = self.current_headers.header_by_blockhash(&blockhash) { if self .indexed_blockhashes .lock() .expect("indexed_blockhashes") .insert(blockhash) { rows.extend(index_block(&block, header.height(), &cashaccount)); self.block_count.with_label_values(&["indexed"]).inc(); } else { self.block_count.with_label_values(&["duplicate"]).inc(); } } else { // will be indexed later (after bulk load is over) if not an orphan block self.block_count.with_label_values(&["skipped"]).inc(); } } timer.observe_duration(); let timer = self.duration.with_label_values(&["sort"]).start_timer(); rows.sort_unstable_by(|a, b| a.key.cmp(&b.key)); timer.observe_duration(); Ok(rows) } } fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<Block>> { let mut cursor = Cursor::new(&blob); let mut blocks = vec![]; let max_pos = blob.len() as u64; while cursor.position() < max_pos { let offset = cursor.position(); match u32::consensus_decode(&mut cursor) { Ok(value) => { if magic != value { cursor.set_position(offset + 1); continue; } } Err(_) => break, // EOF }; let block_size = u32::consensus_decode(&mut cursor).chain_err(|| "no block size")?; let start = cursor.position(); let end = start + block_size as u64; // If Core's WriteBlockToDisk ftell fails, only the magic bytes and size will be written // and the block body won't be written to the blk*.dat file. // Since the first 4 bytes should contain the block's version, we can skip such blocks // by peeking the cursor (and skipping previous `magic` and `block_size`).
match u32::consensus_decode(&mut cursor) { Ok(value) => { if magic == value { cursor.set_position(start); continue; } } Err(_) => break, // EOF } let block: Block = deserialize(&blob[start as usize..end as usize]) .chain_err(|| format!("failed to parse block at {}..{}", start, end))?; blocks.push(block); cursor.set_position(end as u64); } Ok(blocks) } fn load_headers(daemon: &Daemon) -> Result<HeaderList> { let tip = daemon.getbestblockhash()?; let mut headers = HeaderList::empty(); let new_headers = headers.order(daemon.get_new_headers(&headers, &tip)?); headers.apply(&new_headers, tip); Ok(headers) } fn set_open_files_limit(limit: libc::rlim_t) { let resource = libc::RLIMIT_NOFILE; let mut rlim = libc::rlimit { rlim_cur: 0, rlim_max: 0, }; let result = unsafe { libc::getrlimit(resource, &mut rlim) }; if result < 0 { panic!("getrlimit() failed: {}", result); } rlim.rlim_cur = limit; // set soft limit only. let result = unsafe { libc::setrlimit(resource, &rlim) }; if result < 0 { panic!("setrlimit() failed: {}", result); } } type JoinHandle = thread::JoinHandle<Result<()>>; type BlobReceiver = Arc<Mutex<Receiver<(Vec<u8>, PathBuf)>>>; fn start_reader(blk_files: Vec<PathBuf>, parser: Arc<Parser>) -> (BlobReceiver, JoinHandle) { let chan = SyncChannel::new(0); let blobs = chan.sender(); let handle = spawn_thread("bulk_read", move || -> Result<()> { for path in blk_files { blobs .send((parser.read_blkfile(&path)?, path)) .expect("failed to send blk*.dat contents"); } Ok(()) }); (Arc::new(Mutex::new(chan.into_receiver())), handle) } fn start_indexer( blobs: BlobReceiver, parser: Arc<Parser>, writer: SyncSender<(Vec<Row>, PathBuf)>, ) -> JoinHandle { spawn_thread("bulk_index", move || -> Result<()> { loop { let msg = blobs.lock().unwrap().recv(); if let Ok((blob, path)) = msg { let rows = parser .index_blkfile(blob) .chain_err(|| format!("failed to index {:?}", path))?; writer .send((rows, path)) .expect("failed to send indexed rows") } else { debug!("no more blocks to index"); break; } } Ok(()) }) } pub fn index_blk_files( daemon: &Daemon, index_threads: usize, metrics: &Metrics, signal: &Waiter, store: DbStore, cashaccount_activation_height: u32, ) -> Result<DbStore> { set_open_files_limit(2048); // twice the default `ulimit -n` value let blk_files = daemon.list_blk_files()?; info!("indexing {} blk*.dat files", blk_files.len()); let indexed_blockhashes = read_indexed_blockhashes(&store); debug!("found {} indexed blocks", indexed_blockhashes.len()); let parser = Parser::new( daemon, metrics, indexed_blockhashes, cashaccount_activation_height, )?; let (blobs, reader) = start_reader(blk_files, parser.clone()); let rows_chan = SyncChannel::new(0); #[allow(clippy::needless_collect)] let indexers: Vec<JoinHandle> = (0..index_threads) .map(|_| start_indexer(blobs.clone(), parser.clone(), rows_chan.sender())) .collect(); for (rows, path) in rows_chan.into_receiver() { trace!("indexed {:?}: {} rows", path, rows.len()); store.write(rows, false); signal .poll() .chain_err(|| "stopping bulk indexing due to signal")?; } reader .join() .expect("reader panicked") .expect("reader failed"); indexers.into_iter().for_each(|i| { i.join() .expect("indexer panicked") .expect("indexing failed") }); store.write(vec![parser.last_indexed_row()], true); Ok(store) } #[cfg(test)] mod tests { use super::*; use bitcoincash::hashes::Hash; use hex::decode as hex_decode; #[test] fn test_incomplete_block_parsing() { let magic = 0x0709110b; let raw_blocks = hex_decode(fixture("incomplete_block.hex")).unwrap(); let blocks =
parse_blocks(raw_blocks, magic).unwrap(); assert_eq!(blocks.len(), 2); assert_eq!( blocks[1].block_hash().into_inner().to_vec(), hex_decode("d55acd552414cc44a761e8d6b64a4d555975e208397281d115336fc500000000").unwrap() ); } pub fn fixture(filename: &str) -> String { let path = Path::new("src") .join("tests") .join("fixtures") .join(filename); fs::read_to_string(path).unwrap() } }
{ Ok(Arc::new(Parser { magic: daemon.disk_magic(), current_headers: load_headers(daemon)?, indexed_blockhashes: Mutex::new(indexed_blockhashes), cashaccount_activation_height, duration: metrics.histogram_vec( prometheus::HistogramOpts::new( "electrscash_parse_duration", "blk*.dat parsing duration (in seconds)", ), &["step"], ), block_count: metrics.counter_int_vec( prometheus::Opts::new( "electrscash_parse_blocks", "# of block parsed (from blk*.dat)", ), &["type"], ),
identifier_body
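// A sketch of the idea behind `last_indexed_row` above (simplified, illustrative
// types): walk the canonical chain in order and keep the last hash whose entire
// ancestry is indexed; a gap anywhere stops the scan, even if later blocks are
// already in the set.
fn last_contiguously_indexed<'a>(
    chain: &'a [&'a str],
    indexed: &std::collections::HashSet<&str>,
) -> Option<&'a str> {
    chain.iter().copied().take_while(|h| indexed.contains(*h)).last()
}
// For example, with chain ["g", "a", "b", "c"] and indexed {"g", "a", "c"} this
// returns Some("a"): "c" is indexed but unreachable without "b".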
lib.rs
// SPDX-FileCopyrightText: 2020 Robin Krahl <[email protected]> // SPDX-License-Identifier: Apache-2.0 or MIT //! `cursive-markup` provides the [`MarkupView`][] for [`cursive`][] that can render HTML or other //! markup. //! //! # Quickstart //! //! To render an HTML document, create a [`MarkupView`][] with the [`html`][] method, configure the //! maximum line width using the [`set_maximum_width`][] method and set callbacks for the links //! using the [`on_link_select`][] and [`on_link_focus`][] methods. //! //! Typically, you’ll want to wrap the view in a [`ScrollView`][] and add it to a //! [`Cursive`][`cursive::Cursive`] instance. //! //! ``` //! // Create the markup view //! let html = "<a href='https://rust-lang.org'>Rust</a>"; //! let mut view = cursive_markup::MarkupView::html(&html); //! view.set_maximum_width(120); //! //! // Set callbacks that are called if the link focus is changed and if a link is selected with //! // the Enter key //! view.on_link_focus(|s, url| {}); //! view.on_link_select(|s, url| {}); //! //! // Add the view to a Cursive instance //! use cursive::view::{Resizable, Scrollable}; //! let mut s = cursive::dummy(); //! s.add_global_callback('q', |s| s.quit()); //! s.add_fullscreen_layer(view.scrollable().full_screen()); //! s.run(); //! ``` //! //! You can use the arrow keys to navigate between the links and press Enter to trigger the //! [`on_link_select`][] callback. //! //! For a complete example, see [`examples/browser.rs`][], a very simple browser implementation. //! //! # Components //! //! The main component of the crate is [`MarkupView`][]. It is a [`cursive`][] view that displays //! hypertext: a combination of formatted text and links. You can use the arrow keys to navigate //! between the links, and the Enter key to select a link. //! //! The displayed content is provided and rendered by a [`Renderer`][] instance. If the `html` //! feature is enabled (default), the [`html::Renderer`][] can be used to parse and render an HTML //! document with [`html2text`][]. But you can also implement your own [`Renderer`][]. //! [`MarkupView`][] caches the rendered document ([`RenderedDocument`][]) and only invokes the //! renderer if the width of the view has been changed. //! //! ## HTML rendering //! //! To customize the HTML rendering, you can change the [`TextDecorator`][] that is used by //! [`html2text`][] to transform the HTML DOM into annotated strings. Of course the renderer must //! know how to interpret the annotations, so if you provide a custom decorator, you also have to //! provide a [`Converter`][] that extracts formatting and links from the annotations. //! //! [`cursive`]: https://docs.rs/cursive/latest/cursive/ //! [`cursive::Cursive`]: https://docs.rs/cursive/latest/cursive/struct.Cursive.html //! [`ScrollView`]: https://docs.rs/cursive/latest/cursive/views/struct.ScrollView.html //! [`html2text`]: https://docs.rs/html2text/latest/html2text/ //! [`TextDecorator`]: https://docs.rs/html2text/latest/html2text/render/text_renderer/trait.TextDecorator.html //! [`Converter`]: html/trait.Converter.html //! [`MarkupView`]: struct.MarkupView.html //! [`RenderedDocument`]: struct.RenderedDocument.html //! [`Renderer`]: trait.Renderer.html //! [`html`]: struct.MarkupView.html#method.html //! [`set_maximum_width`]: struct.MarkupView.html#method.set_maximum_width //! [`on_link_select`]: struct.MarkupView.html#method.on_link_select //! [`on_link_focus`]: struct.MarkupView.html#method.on_link_focus //! [`html::Renderer`]: html/struct.Renderer.html //! 
[`examples/browser.rs`]: https://git.sr.ht/~ireas/cursive-markup-rs/tree/master/examples/browser.rs #![warn(missing_docs, rust_2018_idioms)] #[cfg(feature = "html")] pub mod html; use std::rc; use cursive_core::theme; use unicode_width::UnicodeWidthStr as _; /// A view for hypertext that has been rendered by a [`Renderer`][]. /// /// This view displays hypertext (a combination of formatted text and links) that typically has /// been parsed from a markup language. You can use the arrow keys to navigate between the links, /// and the Enter key to select a link. If the focused link is changed, the [`on_link_focus`][] /// callback is triggered. If the focused link is selected using the Enter key, the /// [`on_link_select`][] callback is triggered. /// /// The displayed hypertext is created by a [`Renderer`][] implementation. The `MarkupView` calls /// the [`render`][] method with the size constraint provided by `cursive` and receives a /// [`RenderedDocument`][] that contains the text and the links. This document is cached until the /// available width changes.
/// [`set_maximum_width`][] method. /// /// [`RenderedDocument`]: struct.RenderedDocument.html /// [`Renderer`]: trait.Renderer.html /// [`render`]: trait.Renderer.html#method.render /// [`on_link_select`]: #method.on_link_select /// [`on_link_focus`]: #method.on_link_focus /// [`set_maximum_width`]: #method.set_maximum_width pub struct MarkupView<R: Renderer + 'static> { renderer: R, doc: Option<RenderedDocument>, on_link_focus: Option<rc::Rc<LinkCallback>>, on_link_select: Option<rc::Rc<LinkCallback>>, maximum_width: Option<usize>, } /// A callback that is triggered for a link. /// /// The first argument is a mutable reference to the current [`Cursive`][] instance. The second /// argument is the target of the link, typically a URL. /// /// [`Cursive`]: https://docs.rs/cursive/latest/cursive/struct.Cursive.html pub type LinkCallback = dyn Fn(&mut cursive_core::Cursive, &str); /// A renderer that produces a hypertext document. pub trait Renderer { /// Renders this document within the given size constraint and returns the result. /// /// This method is called by [`MarkupView`][] every time the provided width changes. /// /// [`MarkupView`]: struct.MarkupView.html fn render(&self, constraint: cursive_core::XY<usize>) -> RenderedDocument; } /// A rendered hypertext document that consists of lines of formatted text and links. #[derive(Clone, Debug)] pub struct RenderedDocument { lines: Vec<Vec<RenderedElement>>, link_handler: LinkHandler, size: cursive_core::XY<usize>, constraint: cursive_core::XY<usize>, } /// A hypertext element: a formatted string with an optional link target. #[derive(Clone, Debug, Default)] pub struct Element { text: String, style: theme::Style, link_target: Option<String>, } #[derive(Clone, Debug, Default)] struct RenderedElement { text: String, style: theme::Style, link_idx: Option<usize>, } #[derive(Clone, Debug, Default)] struct LinkHandler { links: Vec<Link>, focus: usize, } #[derive(Clone, Debug)] struct Link { position: cursive_core::XY<usize>, width: usize, target: String, } #[cfg(feature = "html")] impl MarkupView<html::RichRenderer> { /// Creates a new `MarkupView` that uses a rich text HTML renderer. /// /// *Requires the `html` feature (enabled per default).* pub fn html(html: &str) -> MarkupView<html::RichRenderer> { MarkupView::with_renderer(html::Renderer::new(html)) } } impl<R: Renderer + 'static> MarkupView<R> { /// Creates a new `MarkupView` with the given renderer. pub fn with_renderer(renderer: R) -> MarkupView<R> { MarkupView { renderer, doc: None, on_link_focus: None, on_link_select: None, maximum_width: None, } } /// Sets the callback that is triggered if the link focus is changed. /// /// Note that this callback is only triggered if the link focus is changed with the arrow keys. /// It is not triggered if the view takes focus. The callback will receive the target of the /// link as an argument. pub fn on_link_focus<F: Fn(&mut cursive_core::Cursive, &str) + 'static>(&mut self, f: F) { self.on_link_focus = Some(rc::Rc::new(f)); } /// Sets the callback that is triggered if a link is selected. /// /// This callback is triggered if a link is focused and the user presses the Enter key. The /// callback will receive the target of the link as an argument. pub fn on_link_select<F: Fn(&mut cursive_core::Cursive, &str) + 'static>(&mut self, f: F) { self.on_link_select = Some(rc::Rc::new(f)); } /// Sets the maximum width of the view. /// /// This means that the width that is available for the renderer is limited to the given value.
pub fn set_maximum_width(&mut self, width: usize) { self.maximum_width = Some(width); } fn render(&mut self, mut constraint: cursive_core::XY<usize>) -> cursive_core::XY<usize> { let mut last_focus = 0; if let Some(width) = self.maximum_width { constraint.x = std::cmp::min(width, constraint.x); } if let Some(doc) = &self.doc { if constraint.x == doc.constraint.x { return doc.size; } last_focus = doc.link_handler.focus; } let mut doc = self.renderer.render(constraint); // TODO: Rendering the document with a different width may lead to links being split up (or // previously split up links being no longer split up). Ideally, we would adjust the focus // for these changes. if last_focus < doc.link_handler.links.len() { doc.link_handler.focus = last_focus; } let size = doc.size; self.doc = Some(doc); size } } impl<R: Renderer +'static> cursive_core::View for MarkupView<R> { fn draw(&self, printer: &cursive_core::Printer<'_, '_>) { let doc = &self.doc.as_ref().expect("layout not called before draw"); for (y, line) in doc.lines.iter().enumerate() { let mut x = 0; for element in line { let mut style = element.style; if let Some(link_idx) = element.link_idx { if printer.focused && doc.link_handler.focus == link_idx { style = style.combine(theme::PaletteColor::Highlight); } } printer.with_style(style, |printer| printer.print((x, y), &element.text)); x += element.text.width(); } } } fn layout(&mut self, constraint: cursive_core::XY<usize>) { self.render(constraint); } fn required_size(&mut self, constraint: cursive_core::XY<usize>) -> cursive_core::XY<usize> { self.render(constraint) } fn take_focus(&mut self, direction: cursive_core::direction::Direction) -> bool { self.doc .as_mut() .map(|doc| doc.link_handler.take_focus(direction)) .unwrap_or_default() } fn on_event(&mut self, event: cursive_core::event::Event) -> cursive_core::event::EventResult { use cursive_core::direction::Absolute; use cursive_core::event::{Callback, Event, EventResult, Key}; let link_handler = if let Some(doc) = self.doc.as_mut() { if doc.link_handler.links.is_empty() { return EventResult::Ignored; } else { &mut doc.link_handler } } else { return EventResult::Ignored; }; // TODO: implement mouse support let focus_changed = match event { Event::Key(Key::Left) => link_handler.move_focus(Absolute::Left), Event::Key(Key::Right) => link_handler.move_focus(Absolute::Right), Event::Key(Key::Up) => link_handler.move_focus(Absolute::Up), Event::Key(Key::Down) => link_handler.move_focus(Absolute::Down), _ => false, }; if focus_changed { let target = link_handler.links[link_handler.focus].target.clone(); EventResult::Consumed( self.on_link_focus .clone() .map(|f| Callback::from_fn(move |s| f(s, &target))), ) } else if event == Event::Key(Key::Enter) { let target = link_handler.links[link_handler.focus].target.clone(); EventResult::Consumed( self.on_link_select .clone() .map(|f| Callback::from_fn(move |s| f(s, &target))), ) } else { EventResult::Ignored } } fn important_area(&self, _: cursive_core::XY<usize>) -> cursive_core::Rect { if let Some(doc) = &self.doc { doc.link_handler.important_area() } else { cursive_core::Rect::from((0, 0)) } } } impl RenderedDocument { /// Creates a new rendered document with the given size constraint. /// /// The size constraint is used to check whether a cached document can be reused or whether it /// has to be rendered for the new constraint. It is *not* enforced by this struct! 
pub fn new(constraint: cursive_core::XY<usize>) -> RenderedDocument { RenderedDocument { lines: Vec::new(), link_handler: Default::default(), size: (0, 0).into(), constraint, } } /// Appends a rendered line to the document. pub fn push_line<I: IntoIterator<Item = Element>>(&mut self, line: I) { let mut rendered_line = Vec::new(); let y = self.lines.len(); let mut x = 0; for element in line { let width = element.text.width(); let link_idx = element.link_target.map(|target| { self.link_handler.push(Link { position: (x, y).into(), width, target, }) }); x += width; rendered_line.push(RenderedElement { text: element.text, style: element.style, link_idx, }); } self.lines.push(rendered_line); self.size = self.size.stack_vertical(&(x, 1).into()); } } impl Element { /// Creates a new element with the given text, style and optional link target. pub fn new(text: String, style: theme::Style, link_target: Option<String>) -> Element { Element { text, style, link_target, } } /// Creates an element with the given text, with the default style and without a link target. pub fn plain(text: String) -> Element { Element { text, ..Default::default() } } /// Creates an element with the given text and style and without a link target. pub fn styled(text: String, style: theme::Style) -> Element { Element::new(text, style, None) } /// Creates an element with the given text, style and link target. pub fn link(text: String, style: theme::Style, target: String) -> Element { Element::new(text, style, Some(target)) } } impl From<String> for Element { fn from(s: String) -> Element { Element::plain(s) } } impl From<Element> for RenderedElement { fn from(element: Element) -> RenderedElement { RenderedElement { text: element.text, style: element.style, link_idx: None, } } } impl LinkHandler { pub fn push(&mut self, link: Link) -> usize { self.links.push(link); self.links.len() - 1 } pub fn take_focus(&mut self, direction: cursive_core::direction::Direction) -> bool { if self.links.is_empty() { false } else { use cursive_core::direction::{Absolute, Direction, Relative}; let rel = match direction { Direction::Abs(abs) => match abs { Absolute::Up | Absolute::Left | Absolute::None => Relative::Front, Absolute::Down | Absolute::Right => Relative::Back, }, Direction::Rel(rel) => rel, }; self.focus = match rel { Relative::Front => 0, Relative::Back => self.links.len() - 1, }; true } } pub fn move_focus(&mut self, direction: cursive_core::direction::Absolute) -> bool { use cursive_core::direction::{Absolute, Relative}; match direction { Absolute::Left => self.move_focus_horizontal(Relative::Front), Absolute::Right => self.move_focus_horizontal(Relative::Back), Absolute::Up => self.move_focus_vertical(Relative::Front), Absolute::Down => self.move_focus_vertical(Relative::Back), Absolute::None => false, } } fn move_focus_horizontal(&mut self, direction: cursive_core::direction::Relative) -> bool { use cursive_core::direction::Relative; if self.links.is_empty() { return false; } let new_focus = match direction { Relative::Front => self.focus.checked_sub(1), Relative::Back => { if self.focus < self.links.len() - 1 { Some(self.focus + 1) } else { None } } }; if let Some(new_focus) = new_focus { if self.links[self.focus].position.y == self.links[new_focus].position.y { self.focus = new_focus; true } else { false } } else { false } } fn move_focus_vertical(&mut self, direction: cursive_core::direction::Relative) -> bool { use cursive_core::direction::Relative; if self.links.is_empty() { return false; } // TODO: Currently, we select the 
first link on a different line. We could instead select // the closest link on a different line (if there are multiple links on one line). let y = self.links[self.focus].position.y; let iter = self.links.iter().enumerate(); let next = match direction { Relative::Front => iter .rev() .skip(self.links.len() - self.focus) .find(|(_, link)| link.position.y < y), Relative::Back => iter .skip(self.focus + 1) .find(|(_, link)| link.position.y > y), }; if let Some((idx, _)) = next { self.focus = idx; true } else { false } } pub fn important_area(&self) -> cursive_core::Rect { if self.links.is_empty() { cursive_core::Rect::from((0, 0)) } else { let link = &self.links[self.focus]; cursive_core::Rect::from_size(link.position, (link.width, 1)) } } }
/// /// You can also limit the available width by setting a maximum line width with the
random_line_split
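The `Renderer` trait in the record above is the crate's extension point: anything that can turn content into a `RenderedDocument` for a given width constraint can drive a `MarkupView`. As a hedged sketch (not part of the crate; `PlainRenderer` and its naive byte-based word wrapping are illustrative assumptions), a minimal renderer for plain strings could look like this:

struct PlainRenderer {
    text: String,
}

impl Renderer for PlainRenderer {
    fn render(&self, constraint: cursive_core::XY<usize>) -> RenderedDocument {
        let mut doc = RenderedDocument::new(constraint);
        let mut line = String::new();
        for word in self.text.split_whitespace() {
            // Naive wrapping: flush the current line when adding the next word
            // would exceed the width (measured in bytes for brevity).
            if !line.is_empty() && line.len() + 1 + word.len() > constraint.x {
                doc.push_line(vec![Element::plain(std::mem::take(&mut line))]);
            }
            if !line.is_empty() {
                line.push(' ');
            }
            line.push_str(word);
        }
        if !line.is_empty() {
            doc.push_line(vec![Element::plain(line)]);
        }
        doc
    }
}

A `MarkupView::with_renderer(PlainRenderer { ... })` built on top of this would be re-rendered whenever the available width changes, just like the HTML renderer.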
lib.rs
// SPDX-FileCopyrightText: 2020 Robin Krahl <[email protected]> // SPDX-License-Identifier: Apache-2.0 or MIT //! `cursive-markup` provides the [`MarkupView`][] for [`cursive`][] that can render HTML or other //! markup. //! //! # Quickstart //! //! To render an HTML document, create a [`MarkupView`][] with the [`html`][] method, configure the //! maximum line width using the [`set_maximum_width`][] method and set callbacks for the links //! using the [`on_link_select`][] and [`on_link_focus`][] methods. //! //! Typically, you’ll want to wrap the view in a [`ScrollView`][] and add it to a //! [`Cursive`][`cursive::Cursive`] instance. //! //! ``` //! // Create the markup view //! let html = "<a href='https://rust-lang.org'>Rust</a>"; //! let mut view = cursive_markup::MarkupView::html(&html); //! view.set_maximum_width(120); //! //! // Set callbacks that are called if the link focus is changed and if a link is selected with //! // the Enter key //! view.on_link_focus(|s, url| {}); //! view.on_link_select(|s, url| {}); //! //! // Add the view to a Cursive instance //! use cursive::view::{Resizable, Scrollable}; //! let mut s = cursive::dummy(); //! s.add_global_callback('q', |s| s.quit()); //! s.add_fullscreen_layer(view.scrollable().full_screen()); //! s.run(); //! ``` //! //! You can use the arrow keys to navigate between the links and press Enter to trigger the //! [`on_link_select`][] callback. //! //! For a complete example, see [`examples/browser.rs`][], a very simple browser implementation. //! //! # Components //! //! The main component of the crate is [`MarkupView`][]. It is a [`cursive`][] view that displays //! hypertext: a combination of formatted text and links. You can use the arrow keys to navigate //! between the links, and the Enter key to select a link. //! //! The displayed content is provided and rendered by a [`Renderer`][] instance. If the `html` //! feature is enabled (default), the [`html::Renderer`][] can be used to parse and render an HTML //! document with [`html2text`][]. But you can also implement your own [`Renderer`][]. //! [`MarkupView`][] caches the rendered document ([`RenderedDocument`][]) and only invokes the //! renderer if the width of the view has been changed. //! //! ## HTML rendering //! //! To customize the HTML rendering, you can change the [`TextDecorator`][] that is used by //! [`html2text`][] to transform the HTML DOM into annotated strings. Of course the renderer must //! know how to interpret the annotations, so if you provide a custom decorator, you also have to //! provide a [`Converter`][] that extracts formatting and links from the annotations. //! //! [`cursive`]: https://docs.rs/cursive/latest/cursive/ //! [`cursive::Cursive`]: https://docs.rs/cursive/latest/cursive/struct.Cursive.html //! [`ScrollView`]: https://docs.rs/cursive/latest/cursive/views/struct.ScrollView.html //! [`html2text`]: https://docs.rs/html2text/latest/html2text/ //! [`TextDecorator`]: https://docs.rs/html2text/latest/html2text/render/text_renderer/trait.TextDecorator.html //! [`Converter`]: html/trait.Converter.html //! [`MarkupView`]: struct.MarkupView.html //! [`RenderedDocument`]: struct.RenderedDocument.html //! [`Renderer`]: trait.Renderer.html //! [`html`]: struct.MarkupView.html#method.html //! [`set_maximum_width`]: struct.MarkupView.html#method.set_maximum_width //! [`on_link_select`]: struct.MarkupView.html#method.on_link_select //! [`on_link_focus`]: struct.MarkupView.html#method.on_link_focus //! [`html::Renderer`]: html/struct.Renderer.html //! 
[`examples/browser.rs`]: https://git.sr.ht/~ireas/cursive-markup-rs/tree/master/examples/browser.rs #![warn(missing_docs, rust_2018_idioms)] #[cfg(feature = "html")] pub mod html; use std::rc; use cursive_core::theme; use unicode_width::UnicodeWidthStr as _; /// A view for hypertext that has been rendered by a [`Renderer`][]. /// /// This view displays hypertext (a combination of formatted text and links) that typically has /// been parsed from a markup language. You can use the arrow keys to navigate between the links, /// and the Enter key to select a link. If the focused link is changed, the [`on_link_focus`][] /// callback is triggered. If the focused link is selected using the Enter key, the /// [`on_link_select`][] callback is triggered. /// /// The displayed hypertext is created by a [`Renderer`][] implementation. The `MarkupView` calls /// the [`render`][] method with the size constraint provided by `cursive` and receives a /// [`RenderedDocument`][] that contains the text and the links. This document is cached until the /// available width changes. /// /// You can also limit the available width by setting a maximum line width with the /// [`set_maximum_width`][] method. /// /// [`RenderedDocument`]: struct.RenderedDocument.html /// [`Renderer`]: trait.Renderer.html /// [`render`]: trait.Renderer.html#method.render /// [`on_link_select`]: #method.on_link_select /// [`on_link_focus`]: #method.on_link_focus /// [`set_maximum_width`]: #method.set_maximum_width pub struct MarkupView<R: Renderer +'static> { renderer: R, doc: Option<RenderedDocument>, on_link_focus: Option<rc::Rc<LinkCallback>>, on_link_select: Option<rc::Rc<LinkCallback>>, maximum_width: Option<usize>, } /// A callback that is triggered for a link. /// /// The first argument is a mutable reference to the current [`Cursive`][] instance. The second /// argument is the target of the link, typically a URL. /// /// [`Cursive`]: https://docs.rs/cursive/latest/cursive/struct.Cursive.html pub type LinkCallback = dyn Fn(&mut cursive_core::Cursive, &str); /// A renderer that produces a hypertext document. pub trait Renderer { /// Renders this document within the given size constraint and returns the result. /// /// This method is called by [`MarkupView`][] every time the provided width changes. /// /// [`MarkupView`]: struct.MarkupView.html fn render(&self, constraint: cursive_core::XY<usize>) -> RenderedDocument; } /// A rendered hypertext document that consists of lines of formatted text and links. #[derive(Clone, Debug)] pub struct RenderedDocument { lines: Vec<Vec<RenderedElement>>, link_handler: LinkHandler, size: cursive_core::XY<usize>, constraint: cursive_core::XY<usize>, } /// A hypertext element: a formatted string with an optional link target. #[derive(Clone, Debug, Default)] pub struct Element { text: String, style: theme::Style, link_target: Option<String>, } #[derive(Clone, Debug, Default)] struct RenderedElement { text: String, style: theme::Style, link_idx: Option<usize>, } #[derive(Clone, Debug, Default)] struct LinkHandler { links: Vec<Link>, focus: usize, } #[derive(Clone, Debug)] struct Link { position: cursive_core::XY<usize>, width: usize, target: String, } #[cfg(feature = "html")] impl MarkupView<html::RichRenderer> { /// Creates a new `MarkupView` that uses a rich text HTML renderer. 
/// /// *Requires the `html` feature (enabled by default).* pub fn html(html: &str) -> MarkupView<html::RichRenderer> { MarkupView::with_renderer(html::Renderer::new(html)) } } impl<R: Renderer +'static> MarkupView<R> { /// Creates a new `MarkupView` with the given renderer. pub fn with_renderer(renderer: R) -> MarkupView<R> { MarkupView { renderer, doc: None, on_link_focus: None, on_link_select: None, maximum_width: None, } } /// Sets the callback that is triggered if the link focus is changed. /// /// Note that this callback is only triggered if the link focus is changed with the arrow keys. /// It is not triggered if the view takes focus. The callback will receive the target of the /// link as an argument. pub fn on_link_focus<F: Fn(&mut cursive_core::Cursive, &str) +'static>(&mut self, f: F) { self.on_link_focus = Some(rc::Rc::new(f)); } /// Sets the callback that is triggered if a link is selected. /// /// This callback is triggered if a link is focused and the user presses the Enter key. The /// callback will receive the target of the link as an argument. pub fn on_link_select<F: Fn(&mut cursive_core::Cursive, &str) +'static>(&mut self, f: F) { self.on_link_select = Some(rc::Rc::new(f)); } /// Sets the maximum width of the view. /// /// This means that the width that is available for the renderer is limited to the given value. pub fn set_maximum_width(&mut self, width: usize) { self.maximum_width = Some(width); } fn render(&mut self, mut constraint: cursive_core::XY<usize>) -> cursive_core::XY<usize> { let mut last_focus = 0; if let Some(width) = self.maximum_width { constraint.x = std::cmp::min(width, constraint.x); } if let Some(doc) = &self.doc { if constraint.x == doc.constraint.x { return doc.size; } last_focus = doc.link_handler.focus; } let mut doc = self.renderer.render(constraint); // TODO: Rendering the document with a different width may lead to links being split up (or // previously split up links being no longer split up). Ideally, we would adjust the focus // for these changes. if last_focus < doc.link_handler.links.len() { doc.link_handler.focus = last_focus; } let size = doc.size; self.doc = Some(doc); size } } impl<R: Renderer +'static> cursive_core::View for MarkupView<R> { fn draw(&self, printer: &cursive_core::Printer<'_, '_>) {
fn layout(&mut self, constraint: cursive_core::XY<usize>) { self.render(constraint); } fn required_size(&mut self, constraint: cursive_core::XY<usize>) -> cursive_core::XY<usize> { self.render(constraint) } fn take_focus(&mut self, direction: cursive_core::direction::Direction) -> bool { self.doc .as_mut() .map(|doc| doc.link_handler.take_focus(direction)) .unwrap_or_default() } fn on_event(&mut self, event: cursive_core::event::Event) -> cursive_core::event::EventResult { use cursive_core::direction::Absolute; use cursive_core::event::{Callback, Event, EventResult, Key}; let link_handler = if let Some(doc) = self.doc.as_mut() { if doc.link_handler.links.is_empty() { return EventResult::Ignored; } else { &mut doc.link_handler } } else { return EventResult::Ignored; }; // TODO: implement mouse support let focus_changed = match event { Event::Key(Key::Left) => link_handler.move_focus(Absolute::Left), Event::Key(Key::Right) => link_handler.move_focus(Absolute::Right), Event::Key(Key::Up) => link_handler.move_focus(Absolute::Up), Event::Key(Key::Down) => link_handler.move_focus(Absolute::Down), _ => false, }; if focus_changed { let target = link_handler.links[link_handler.focus].target.clone(); EventResult::Consumed( self.on_link_focus .clone() .map(|f| Callback::from_fn(move |s| f(s, &target))), ) } else if event == Event::Key(Key::Enter) { let target = link_handler.links[link_handler.focus].target.clone(); EventResult::Consumed( self.on_link_select .clone() .map(|f| Callback::from_fn(move |s| f(s, &target))), ) } else { EventResult::Ignored } } fn important_area(&self, _: cursive_core::XY<usize>) -> cursive_core::Rect { if let Some(doc) = &self.doc { doc.link_handler.important_area() } else { cursive_core::Rect::from((0, 0)) } } } impl RenderedDocument { /// Creates a new rendered document with the given size constraint. /// /// The size constraint is used to check whether a cached document can be reused or whether it /// has to be rendered for the new constraint. It is *not* enforced by this struct! pub fn new(constraint: cursive_core::XY<usize>) -> RenderedDocument { RenderedDocument { lines: Vec::new(), link_handler: Default::default(), size: (0, 0).into(), constraint, } } /// Appends a rendered line to the document. pub fn push_line<I: IntoIterator<Item = Element>>(&mut self, line: I) { let mut rendered_line = Vec::new(); let y = self.lines.len(); let mut x = 0; for element in line { let width = element.text.width(); let link_idx = element.link_target.map(|target| { self.link_handler.push(Link { position: (x, y).into(), width, target, }) }); x += width; rendered_line.push(RenderedElement { text: element.text, style: element.style, link_idx, }); } self.lines.push(rendered_line); self.size = self.size.stack_vertical(&(x, 1).into()); } } impl Element { /// Creates a new element with the given text, style and optional link target. pub fn new(text: String, style: theme::Style, link_target: Option<String>) -> Element { Element { text, style, link_target, } } /// Creates an element with the given text, with the default style and without a link target. pub fn plain(text: String) -> Element { Element { text, ..Default::default() } } /// Creates an element with the given text and style and without a link target. pub fn styled(text: String, style: theme::Style) -> Element { Element::new(text, style, None) } /// Creates an element with the given text, style and link target. 
pub fn link(text: String, style: theme::Style, target: String) -> Element { Element::new(text, style, Some(target)) } } impl From<String> for Element { fn from(s: String) -> Element { Element::plain(s) } } impl From<Element> for RenderedElement { fn from(element: Element) -> RenderedElement { RenderedElement { text: element.text, style: element.style, link_idx: None, } } } impl LinkHandler { pub fn push(&mut self, link: Link) -> usize { self.links.push(link); self.links.len() - 1 } pub fn take_focus(&mut self, direction: cursive_core::direction::Direction) -> bool { if self.links.is_empty() { false } else { use cursive_core::direction::{Absolute, Direction, Relative}; let rel = match direction { Direction::Abs(abs) => match abs { Absolute::Up | Absolute::Left | Absolute::None => Relative::Front, Absolute::Down | Absolute::Right => Relative::Back, }, Direction::Rel(rel) => rel, }; self.focus = match rel { Relative::Front => 0, Relative::Back => self.links.len() - 1, }; true } } pub fn move_focus(&mut self, direction: cursive_core::direction::Absolute) -> bool { use cursive_core::direction::{Absolute, Relative}; match direction { Absolute::Left => self.move_focus_horizontal(Relative::Front), Absolute::Right => self.move_focus_horizontal(Relative::Back), Absolute::Up => self.move_focus_vertical(Relative::Front), Absolute::Down => self.move_focus_vertical(Relative::Back), Absolute::None => false, } } fn move_focus_horizontal(&mut self, direction: cursive_core::direction::Relative) -> bool { use cursive_core::direction::Relative; if self.links.is_empty() { return false; } let new_focus = match direction { Relative::Front => self.focus.checked_sub(1), Relative::Back => { if self.focus < self.links.len() - 1 { Some(self.focus + 1) } else { None } } }; if let Some(new_focus) = new_focus { if self.links[self.focus].position.y == self.links[new_focus].position.y { self.focus = new_focus; true } else { false } } else { false } } fn move_focus_vertical(&mut self, direction: cursive_core::direction::Relative) -> bool { use cursive_core::direction::Relative; if self.links.is_empty() { return false; } // TODO: Currently, we select the first link on a different line. We could instead select // the closest link on a different line (if there are multiple links on one line). let y = self.links[self.focus].position.y; let iter = self.links.iter().enumerate(); let next = match direction { Relative::Front => iter .rev() .skip(self.links.len() - self.focus) .find(|(_, link)| link.position.y < y), Relative::Back => iter .skip(self.focus + 1) .find(|(_, link)| link.position.y > y), }; if let Some((idx, _)) = next { self.focus = idx; true } else { false } } pub fn important_area(&self) -> cursive_core::Rect { if self.links.is_empty() { cursive_core::Rect::from((0, 0)) } else { let link = &self.links[self.focus]; cursive_core::Rect::from_size(link.position, (link.width, 1)) } } }
let doc = &self.doc.as_ref().expect("layout not called before draw"); for (y, line) in doc.lines.iter().enumerate() { let mut x = 0; for element in line { let mut style = element.style; if let Some(link_idx) = element.link_idx { if printer.focused && doc.link_handler.focus == link_idx { style = style.combine(theme::PaletteColor::Highlight); } } printer.with_style(style, |printer| printer.print((x, y), &element.text)); x += element.text.width(); } } }
identifier_body
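`RenderedDocument::push_line` in the record above does the bookkeeping for links: each link's position and width are recorded in the internal `LinkHandler`, while text and styles go into the line buffer. A hedged sketch of building a small document by hand (the URL and the bold style are placeholder choices, not values from the crate):

fn example_document() -> RenderedDocument {
    // The constraint is only recorded for cache checks; it is not enforced.
    let mut doc = RenderedDocument::new((40, usize::MAX).into());
    // A line mixing plain text and a link; the first pushed link gets index 0.
    doc.push_line(vec![
        Element::plain("See ".to_string()),
        Element::link(
            "the docs".to_string(),
            theme::Style::default(),
            "https://example.org/docs".to_string(),
        ),
    ]);
    doc.push_line(vec![Element::styled(
        "Second line".to_string(),
        theme::Style::from(theme::Effect::Bold),
    )]);
    doc
}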
lib.rs
// SPDX-FileCopyrightText: 2020 Robin Krahl <[email protected]> // SPDX-License-Identifier: Apache-2.0 or MIT //! `cursive-markup` provides the [`MarkupView`][] for [`cursive`][] that can render HTML or other //! markup. //! //! # Quickstart //! //! To render an HTML document, create a [`MarkupView`][] with the [`html`][] method, configure the //! maximum line width using the [`set_maximum_width`][] method and set callbacks for the links //! using the [`on_link_select`][] and [`on_link_focus`][] methods. //! //! Typically, you’ll want to wrap the view in a [`ScrollView`][] and add it to a //! [`Cursive`][`cursive::Cursive`] instance. //! //! ``` //! // Create the markup view //! let html = "<a href='https://rust-lang.org'>Rust</a>"; //! let mut view = cursive_markup::MarkupView::html(&html); //! view.set_maximum_width(120); //! //! // Set callbacks that are called if the link focus is changed and if a link is selected with //! // the Enter key //! view.on_link_focus(|s, url| {}); //! view.on_link_select(|s, url| {}); //! //! // Add the view to a Cursive instance //! use cursive::view::{Resizable, Scrollable}; //! let mut s = cursive::dummy(); //! s.add_global_callback('q', |s| s.quit()); //! s.add_fullscreen_layer(view.scrollable().full_screen()); //! s.run(); //! ``` //! //! You can use the arrow keys to navigate between the links and press Enter to trigger the //! [`on_link_select`][] callback. //! //! For a complete example, see [`examples/browser.rs`][], a very simple browser implementation. //! //! # Components //! //! The main component of the crate is [`MarkupView`][]. It is a [`cursive`][] view that displays //! hypertext: a combination of formatted text and links. You can use the arrow keys to navigate //! between the links, and the Enter key to select a link. //! //! The displayed content is provided and rendered by a [`Renderer`][] instance. If the `html` //! feature is enabled (default), the [`html::Renderer`][] can be used to parse and render an HTML //! document with [`html2text`][]. But you can also implement your own [`Renderer`][]. //! [`MarkupView`][] caches the rendered document ([`RenderedDocument`][]) and only invokes the //! renderer if the width of the view has been changed. //! //! ## HTML rendering //! //! To customize the HTML rendering, you can change the [`TextDecorator`][] that is used by //! [`html2text`][] to transform the HTML DOM into annotated strings. Of course the renderer must //! know how to interpret the annotations, so if you provide a custom decorator, you also have to //! provide a [`Converter`][] that extracts formatting and links from the annotations. //! //! [`cursive`]: https://docs.rs/cursive/latest/cursive/ //! [`cursive::Cursive`]: https://docs.rs/cursive/latest/cursive/struct.Cursive.html //! [`ScrollView`]: https://docs.rs/cursive/latest/cursive/views/struct.ScrollView.html //! [`html2text`]: https://docs.rs/html2text/latest/html2text/ //! [`TextDecorator`]: https://docs.rs/html2text/latest/html2text/render/text_renderer/trait.TextDecorator.html //! [`Converter`]: html/trait.Converter.html //! [`MarkupView`]: struct.MarkupView.html //! [`RenderedDocument`]: struct.RenderedDocument.html //! [`Renderer`]: trait.Renderer.html //! [`html`]: struct.MarkupView.html#method.html //! [`set_maximum_width`]: struct.MarkupView.html#method.set_maximum_width //! [`on_link_select`]: struct.MarkupView.html#method.on_link_select //! [`on_link_focus`]: struct.MarkupView.html#method.on_link_focus //! [`html::Renderer`]: html/struct.Renderer.html //! 
[`examples/browser.rs`]: https://git.sr.ht/~ireas/cursive-markup-rs/tree/master/examples/browser.rs #![warn(missing_docs, rust_2018_idioms)] #[cfg(feature = "html")] pub mod html; use std::rc; use cursive_core::theme; use unicode_width::UnicodeWidthStr as _; /// A view for hypertext that has been rendered by a [`Renderer`][]. /// /// This view displays hypertext (a combination of formatted text and links) that typically has /// been parsed from a markup language. You can use the arrow keys to navigate between the links, /// and the Enter key to select a link. If the focused link is changed, the [`on_link_focus`][] /// callback is triggered. If the focused link is selected using the Enter key, the /// [`on_link_select`][] callback is triggered. /// /// The displayed hypertext is created by a [`Renderer`][] implementation. The `MarkupView` calls /// the [`render`][] method with the size constraint provided by `cursive` and receives a /// [`RenderedDocument`][] that contains the text and the links. This document is cached until the /// available width changes. /// /// You can also limit the available width by setting a maximum line width with the /// [`set_maximum_width`][] method. /// /// [`RenderedDocument`]: struct.RenderedDocument.html /// [`Renderer`]: trait.Renderer.html /// [`render`]: trait.Renderer.html#method.render /// [`on_link_select`]: #method.on_link_select /// [`on_link_focus`]: #method.on_link_focus /// [`set_maximum_width`]: #method.set_maximum_width pub struct MarkupView<R: Renderer +'static> { renderer: R, doc: Option<RenderedDocument>, on_link_focus: Option<rc::Rc<LinkCallback>>, on_link_select: Option<rc::Rc<LinkCallback>>, maximum_width: Option<usize>, } /// A callback that is triggered for a link. /// /// The first argument is a mutable reference to the current [`Cursive`][] instance. The second /// argument is the target of the link, typically a URL. /// /// [`Cursive`]: https://docs.rs/cursive/latest/cursive/struct.Cursive.html pub type LinkCallback = dyn Fn(&mut cursive_core::Cursive, &str); /// A renderer that produces a hypertext document. pub trait Renderer { /// Renders this document within the given size constraint and returns the result. /// /// This method is called by [`MarkupView`][] every time the provided width changes. /// /// [`MarkupView`]: struct.MarkupView.html fn render(&self, constraint: cursive_core::XY<usize>) -> RenderedDocument; } /// A rendered hypertext document that consists of lines of formatted text and links. #[derive(Clone, Debug)] pub struct RenderedDocument { lines: Vec<Vec<RenderedElement>>, link_handler: LinkHandler, size: cursive_core::XY<usize>, constraint: cursive_core::XY<usize>, } /// A hypertext element: a formatted string with an optional link target. #[derive(Clone, Debug, Default)] pub struct El
text: String, style: theme::Style, link_target: Option<String>, } #[derive(Clone, Debug, Default)] struct RenderedElement { text: String, style: theme::Style, link_idx: Option<usize>, } #[derive(Clone, Debug, Default)] struct LinkHandler { links: Vec<Link>, focus: usize, } #[derive(Clone, Debug)] struct Link { position: cursive_core::XY<usize>, width: usize, target: String, } #[cfg(feature = "html")] impl MarkupView<html::RichRenderer> { /// Creates a new `MarkupView` that uses a rich text HTML renderer. /// /// *Requires the `html` feature (enabled by default).* pub fn html(html: &str) -> MarkupView<html::RichRenderer> { MarkupView::with_renderer(html::Renderer::new(html)) } } impl<R: Renderer +'static> MarkupView<R> { /// Creates a new `MarkupView` with the given renderer. pub fn with_renderer(renderer: R) -> MarkupView<R> { MarkupView { renderer, doc: None, on_link_focus: None, on_link_select: None, maximum_width: None, } } /// Sets the callback that is triggered if the link focus is changed. /// /// Note that this callback is only triggered if the link focus is changed with the arrow keys. /// It is not triggered if the view takes focus. The callback will receive the target of the /// link as an argument. pub fn on_link_focus<F: Fn(&mut cursive_core::Cursive, &str) +'static>(&mut self, f: F) { self.on_link_focus = Some(rc::Rc::new(f)); } /// Sets the callback that is triggered if a link is selected. /// /// This callback is triggered if a link is focused and the user presses the Enter key. The /// callback will receive the target of the link as an argument. pub fn on_link_select<F: Fn(&mut cursive_core::Cursive, &str) +'static>(&mut self, f: F) { self.on_link_select = Some(rc::Rc::new(f)); } /// Sets the maximum width of the view. /// /// This means that the width that is available for the renderer is limited to the given value. pub fn set_maximum_width(&mut self, width: usize) { self.maximum_width = Some(width); } fn render(&mut self, mut constraint: cursive_core::XY<usize>) -> cursive_core::XY<usize> { let mut last_focus = 0; if let Some(width) = self.maximum_width { constraint.x = std::cmp::min(width, constraint.x); } if let Some(doc) = &self.doc { if constraint.x == doc.constraint.x { return doc.size; } last_focus = doc.link_handler.focus; } let mut doc = self.renderer.render(constraint); // TODO: Rendering the document with a different width may lead to links being split up (or // previously split up links being no longer split up). Ideally, we would adjust the focus // for these changes. 
if last_focus < doc.link_handler.links.len() { doc.link_handler.focus = last_focus; } let size = doc.size; self.doc = Some(doc); size } } impl<R: Renderer +'static> cursive_core::View for MarkupView<R> { fn draw(&self, printer: &cursive_core::Printer<'_, '_>) { let doc = &self.doc.as_ref().expect("layout not called before draw"); for (y, line) in doc.lines.iter().enumerate() { let mut x = 0; for element in line { let mut style = element.style; if let Some(link_idx) = element.link_idx { if printer.focused && doc.link_handler.focus == link_idx { style = style.combine(theme::PaletteColor::Highlight); } } printer.with_style(style, |printer| printer.print((x, y), &element.text)); x += element.text.width(); } } } fn layout(&mut self, constraint: cursive_core::XY<usize>) { self.render(constraint); } fn required_size(&mut self, constraint: cursive_core::XY<usize>) -> cursive_core::XY<usize> { self.render(constraint) } fn take_focus(&mut self, direction: cursive_core::direction::Direction) -> bool { self.doc .as_mut() .map(|doc| doc.link_handler.take_focus(direction)) .unwrap_or_default() } fn on_event(&mut self, event: cursive_core::event::Event) -> cursive_core::event::EventResult { use cursive_core::direction::Absolute; use cursive_core::event::{Callback, Event, EventResult, Key}; let link_handler = if let Some(doc) = self.doc.as_mut() { if doc.link_handler.links.is_empty() { return EventResult::Ignored; } else { &mut doc.link_handler } } else { return EventResult::Ignored; }; // TODO: implement mouse support let focus_changed = match event { Event::Key(Key::Left) => link_handler.move_focus(Absolute::Left), Event::Key(Key::Right) => link_handler.move_focus(Absolute::Right), Event::Key(Key::Up) => link_handler.move_focus(Absolute::Up), Event::Key(Key::Down) => link_handler.move_focus(Absolute::Down), _ => false, }; if focus_changed { let target = link_handler.links[link_handler.focus].target.clone(); EventResult::Consumed( self.on_link_focus .clone() .map(|f| Callback::from_fn(move |s| f(s, &target))), ) } else if event == Event::Key(Key::Enter) { let target = link_handler.links[link_handler.focus].target.clone(); EventResult::Consumed( self.on_link_select .clone() .map(|f| Callback::from_fn(move |s| f(s, &target))), ) } else { EventResult::Ignored } } fn important_area(&self, _: cursive_core::XY<usize>) -> cursive_core::Rect { if let Some(doc) = &self.doc { doc.link_handler.important_area() } else { cursive_core::Rect::from((0, 0)) } } } impl RenderedDocument { /// Creates a new rendered document with the given size constraint. /// /// The size constraint is used to check whether a cached document can be reused or whether it /// has to be rendered for the new constraint. It is *not* enforced by this struct! pub fn new(constraint: cursive_core::XY<usize>) -> RenderedDocument { RenderedDocument { lines: Vec::new(), link_handler: Default::default(), size: (0, 0).into(), constraint, } } /// Appends a rendered line to the document. 
pub fn push_line<I: IntoIterator<Item = Element>>(&mut self, line: I) { let mut rendered_line = Vec::new(); let y = self.lines.len(); let mut x = 0; for element in line { let width = element.text.width(); let link_idx = element.link_target.map(|target| { self.link_handler.push(Link { position: (x, y).into(), width, target, }) }); x += width; rendered_line.push(RenderedElement { text: element.text, style: element.style, link_idx, }); } self.lines.push(rendered_line); self.size = self.size.stack_vertical(&(x, 1).into()); } } impl Element { /// Creates a new element with the given text, style and optional link target. pub fn new(text: String, style: theme::Style, link_target: Option<String>) -> Element { Element { text, style, link_target, } } /// Creates an element with the given text, with the default style and without a link target. pub fn plain(text: String) -> Element { Element { text, ..Default::default() } } /// Creates an element with the given text and style and without a link target. pub fn styled(text: String, style: theme::Style) -> Element { Element::new(text, style, None) } /// Creates an element with the given text, style and link target. pub fn link(text: String, style: theme::Style, target: String) -> Element { Element::new(text, style, Some(target)) } } impl From<String> for Element { fn from(s: String) -> Element { Element::plain(s) } } impl From<Element> for RenderedElement { fn from(element: Element) -> RenderedElement { RenderedElement { text: element.text, style: element.style, link_idx: None, } } } impl LinkHandler { pub fn push(&mut self, link: Link) -> usize { self.links.push(link); self.links.len() - 1 } pub fn take_focus(&mut self, direction: cursive_core::direction::Direction) -> bool { if self.links.is_empty() { false } else { use cursive_core::direction::{Absolute, Direction, Relative}; let rel = match direction { Direction::Abs(abs) => match abs { Absolute::Up | Absolute::Left | Absolute::None => Relative::Front, Absolute::Down | Absolute::Right => Relative::Back, }, Direction::Rel(rel) => rel, }; self.focus = match rel { Relative::Front => 0, Relative::Back => self.links.len() - 1, }; true } } pub fn move_focus(&mut self, direction: cursive_core::direction::Absolute) -> bool { use cursive_core::direction::{Absolute, Relative}; match direction { Absolute::Left => self.move_focus_horizontal(Relative::Front), Absolute::Right => self.move_focus_horizontal(Relative::Back), Absolute::Up => self.move_focus_vertical(Relative::Front), Absolute::Down => self.move_focus_vertical(Relative::Back), Absolute::None => false, } } fn move_focus_horizontal(&mut self, direction: cursive_core::direction::Relative) -> bool { use cursive_core::direction::Relative; if self.links.is_empty() { return false; } let new_focus = match direction { Relative::Front => self.focus.checked_sub(1), Relative::Back => { if self.focus < self.links.len() - 1 { Some(self.focus + 1) } else { None } } }; if let Some(new_focus) = new_focus { if self.links[self.focus].position.y == self.links[new_focus].position.y { self.focus = new_focus; true } else { false } } else { false } } fn move_focus_vertical(&mut self, direction: cursive_core::direction::Relative) -> bool { use cursive_core::direction::Relative; if self.links.is_empty() { return false; } // TODO: Currently, we select the first link on a different line. We could instead select // the closest link on a different line (if there are multiple links on one line). 
let y = self.links[self.focus].position.y; let iter = self.links.iter().enumerate(); let next = match direction { Relative::Front => iter .rev() .skip(self.links.len() - self.focus) .find(|(_, link)| link.position.y < y), Relative::Back => iter .skip(self.focus + 1) .find(|(_, link)| link.position.y > y), }; if let Some((idx, _)) = next { self.focus = idx; true } else { false } } pub fn important_area(&self) -> cursive_core::Rect { if self.links.is_empty() { cursive_core::Rect::from((0, 0)) } else { let link = &self.links[self.focus]; cursive_core::Rect::from_size(link.position, (link.width, 1)) } } }
ement {
identifier_name
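The horizontal focus rules in `move_focus_horizontal` are worth pinning down: the focus only advances to the neighbouring link if it sits on the same line (matching `position.y`), and the return value reports whether anything changed. A sketch of a unit test, assuming it lives inside this crate where the private `LinkHandler` and `Link` types are visible; the link coordinates are made up:

#[cfg(test)]
mod focus_tests {
    use super::*;
    use cursive_core::direction::Relative;

    fn link(x: usize, y: usize) -> Link {
        Link {
            position: (x, y).into(),
            width: 1,
            target: String::new(),
        }
    }

    #[test]
    fn horizontal_focus_stays_on_line() {
        let mut handler = LinkHandler {
            links: vec![link(0, 0), link(5, 0), link(0, 1)],
            focus: 0,
        };
        // The first two links share line 0, so the focus advances.
        assert!(handler.move_focus_horizontal(Relative::Back));
        assert_eq!(handler.focus, 1);
        // The next link is on line 1, so the horizontal move is refused.
        assert!(!handler.move_focus_horizontal(Relative::Back));
        assert_eq!(handler.focus, 1);
    }
}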
spec.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Parameters for a block chain. use std::{ collections::BTreeMap, fmt, io::Read, path::Path, sync::Arc, }; use common_types::{ BlockNumber, header::Header, encoded, engines::{OptimizeFor, params::CommonParams}, errors::EthcoreError as Error, transaction::{Action, Transaction}, }; use account_state::{Backend, State, backend::Basic as BasicBackend}; use authority_round::AuthorityRound; use basic_authority::BasicAuthority; use bytes::Bytes; use builtin::Builtin; use clique::Clique; use engine::Engine; use ethash_engine::Ethash; use ethereum_types::{H256, Bloom, U256, Address}; use ethjson; use instant_seal::{InstantSeal, InstantSealParams}; use keccak_hash::{KECCAK_NULL_RLP, keccak}; use log::{trace, warn}; use machine::{executive::Executive, Machine, substate::Substate}; use null_engine::NullEngine; use pod::PodState; use rlp::{Rlp, RlpStream}; use trace::{NoopTracer, NoopVMTracer}; use trie_vm_factories::Factories; use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType}; use crate::{ Genesis, seal::Generic as GenericSeal, }; /// Runtime parameters for the spec that are related to how the software should run the chain, /// rather than integral properties of the chain itself. pub struct SpecParams<'a> { /// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems pub cache_dir: &'a Path, /// Whether to run slower at the expense of better memory usage, or run faster while using /// more /// memory. This may get more fine-grained in the future but for now is simply a binary /// option. pub optimization_setting: Option<OptimizeFor>, } impl<'a> SpecParams<'a> { /// Create from a cache path, with null values for the other fields pub fn from_path(path: &'a Path) -> Self { SpecParams { cache_dir: path, optimization_setting: None, } } /// Create from a cache path and an optimization setting pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self { SpecParams { cache_dir: path, optimization_setting: Some(optimization), } } } impl<'a, T: AsRef<Path>> From<&'a T> for SpecParams<'a> { fn from(path: &'a T) -> Self { Self::from_path(path.as_ref()) } } /// given a pre-constructor state, run all the given constructors and produce a new state and /// state root. fn run_constructors<T: Backend>( genesis_state: &PodState, constructors: &[(Address, Bytes)], engine: &dyn Engine, author: Address, timestamp: u64, difficulty: U256, factories: &Factories, mut db: T ) -> Result<(H256, T), Error> { let mut root = KECCAK_NULL_RLP; // basic accounts in spec. 
{ let mut t = factories.trie.create(db.as_hash_db_mut(), &mut root); for (address, account) in genesis_state.get().iter() { t.insert(address.as_bytes(), &account.rlp())?; } } for (address, account) in genesis_state.get().iter() { db.note_non_null_account(address); account.insert_additional( &mut *factories.accountdb.create( db.as_hash_db_mut(), keccak(address), ), &factories.trie, ); } let start_nonce = engine.account_start_nonce(0); let mut state = State::from_existing(db, root, start_nonce, factories.clone())?; // Execute contract constructors. let env_info = EnvInfo { number: 0, author, timestamp, difficulty, last_hashes: Default::default(), gas_used: U256::zero(), gas_limit: U256::max_value(), }; let from = Address::zero(); for &(ref address, ref constructor) in constructors.iter() { trace!(target: "spec", "run_constructors: Creating a contract at {}.", address); trace!(target: "spec", " .. root before = {}", state.root()); let params = ActionParams { code_address: address.clone(), code_hash: Some(keccak(constructor)), code_version: U256::zero(), address: address.clone(), sender: from.clone(), origin: from.clone(), gas: U256::max_value(), gas_price: Default::default(), value: ActionValue::Transfer(Default::default()), code: Some(Arc::new(constructor.clone())), data: None, call_type: CallType::None, params_type: ParamsType::Embedded, }; let mut substate = Substate::new(); { let machine = engine.machine(); let schedule = machine.schedule(env_info.number); let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); // failing create is not a bug if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); } } let _ = state.commit()?; } Ok(state.drop()) } /// Parameters for a block chain; includes both those intrinsic to the design of the /// chain and those to be interpreted by the active chain engine. pub struct Spec { /// User friendly spec name. pub name: String, /// Engine specified by json file. pub engine: Arc<dyn Engine>, /// Name of the subdir inside the main data dir to use for chain data and settings. pub data_dir: String, /// Known nodes on the network in enode format. pub nodes: Vec<String>, /// The genesis block's parent hash field. pub parent_hash: H256, /// The genesis block's author field. pub author: Address, /// The genesis block's difficulty field. pub difficulty: U256, /// The genesis block's gas limit field. pub gas_limit: U256, /// The genesis block's gas used field. pub gas_used: U256, /// The genesis block's timestamp field. pub timestamp: u64, /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. pub transactions_root: H256, /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. pub receipts_root: H256, /// The genesis block's extra data field. pub extra_data: Bytes, /// Each seal field, expressed as RLP, concatenated. pub seal_rlp: Bytes, /// Hardcoded synchronization. Allows the light client to immediately jump to a specific block. pub hardcoded_sync: Option<SpecHardcodedSync>, /// Contract constructors to be executed on genesis. pub constructors: Vec<(Address, Bytes)>, /// May be prepopulated if we know this in advance. pub state_root: H256, /// Genesis state as plain old data. pub genesis_state: PodState, } /// Part of `Spec`. Describes the hardcoded synchronization parameters. pub struct SpecHardcodedSync { /// Header of the block to jump to for hardcoded sync, and total difficulty. 
pub header: encoded::Header, /// Total difficulty of the block to jump to. pub total_difficulty: U256, /// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the /// header of `hardcoded_sync`. pub chts: Vec<H256>, } impl From<ethjson::spec::HardcodedSync> for SpecHardcodedSync { fn from(sync: ethjson::spec::HardcodedSync) -> Self { SpecHardcodedSync { header: encoded::Header::new(sync.header.into()), total_difficulty: sync.total_difficulty.into(), chts: sync.chts.into_iter().map(Into::into).collect(), } } } impl fmt::Display for SpecHardcodedSync { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "{{")?; writeln!(f, r#"header": "{:?},"#, self.header)?; writeln!(f, r#"total_difficulty": "{:?},"#, self.total_difficulty)?; writeln!(f, r#"chts": {:#?}"#, self.chts.iter().map(|x| format!(r#"{}"#, x)).collect::<Vec<_>>())?; writeln!(f, "}}") } } /// Load from JSON object. fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Error> { let builtins = s.accounts .builtins() .into_iter() .map(|p| (p.0.into(), From::from(p.1))) .collect(); let g = Genesis::from(s.genesis); let GenericSeal(seal_rlp) = g.seal.into(); let params = CommonParams::from(s.params); let hardcoded_sync = s.hardcoded_sync.map(Into::into); let engine = Spec::engine(spec_params, s.engine, params, builtins); let author = g.author; let timestamp = g.timestamp; let difficulty = g.difficulty; let constructors: Vec<_> = s.accounts .constructors() .into_iter() .map(|(a, c)| (a.into(), c.into())) .collect(); let genesis_state: PodState = s.accounts.into(); let (state_root, _) = run_constructors( &genesis_state, &constructors, &*engine, author, timestamp, difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; let s = Spec { engine, name: s.name.clone().into(), data_dir: s.data_dir.unwrap_or(s.name).into(), nodes: s.nodes.unwrap_or_else(Vec::new), parent_hash: g.parent_hash, transactions_root: g.transactions_root, receipts_root: g.receipts_root, author, difficulty, gas_limit: g.gas_limit, gas_used: g.gas_used, timestamp, extra_data: g.extra_data, seal_rlp, hardcoded_sync, constructors, genesis_state, state_root, }; Ok(s) } impl Spec { // create an instance of an Ethereum state machine, minus consensus logic. fn machine( engine_spec: &ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Machine { if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec
else { Machine::regular(params, builtins) } } /// Convert engine spec into an arc'd Engine of the right underlying type. /// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. fn engine( spec_params: SpecParams, engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Arc<dyn Engine> { let machine = Self::machine(&engine_spec, params, builtins); match engine_spec { ethjson::spec::Engine::Null(null) => Arc::new(NullEngine::new(null.params.into(), machine)), ethjson::spec::Engine::Ethash(ethash) => Arc::new(Ethash::new(spec_params.cache_dir, ethash.params.into(), machine, spec_params.optimization_setting)), ethjson::spec::Engine::InstantSeal(Some(instant_seal)) => Arc::new(InstantSeal::new(instant_seal.params.into(), machine)), ethjson::spec::Engine::InstantSeal(None) => Arc::new(InstantSeal::new(InstantSealParams::default(), machine)), ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)), ethjson::spec::Engine::Clique(clique) => Clique::new(clique.params.into(), machine) .expect("Failed to start Clique consensus engine."), ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(authority_round.params.into(), machine) .expect("Failed to start AuthorityRound consensus engine."), } } /// Get common blockchain parameters. pub fn params(&self) -> &CommonParams { &self.engine.params() } /// Get the configured Network ID. pub fn network_id(&self) -> u64 { self.params().network_id } /// Get the chain ID used for signing. pub fn chain_id(&self) -> u64 { self.params().chain_id } /// Get the configured subprotocol name. pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() } /// Get the configured network fork block. pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params().fork_block } /// Get the header of the genesis block. pub fn genesis_header(&self) -> Header { let mut header: Header = Default::default(); header.set_parent_hash(self.parent_hash.clone()); header.set_timestamp(self.timestamp); header.set_number(0); header.set_author(self.author.clone()); header.set_transactions_root(self.transactions_root.clone()); header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); header.set_extra_data(self.extra_data.clone()); header.set_state_root(self.state_root); header.set_receipts_root(self.receipts_root.clone()); header.set_log_bloom(Bloom::default()); header.set_gas_used(self.gas_used.clone()); header.set_gas_limit(self.gas_limit.clone()); header.set_difficulty(self.difficulty.clone()); header.set_seal({ let r = Rlp::new(&self.seal_rlp); r.iter().map(|f| f.as_raw().to_vec()).collect() }); trace!(target: "spec", "Header hash is {}", header.hash()); header } /// Compose the genesis block for this chain. pub fn genesis_block(&self) -> Bytes { let empty_list = RlpStream::new_list(0).out(); let header = self.genesis_header(); let mut ret = RlpStream::new_list(3); ret.append(&header); ret.append_raw(&empty_list, 1); ret.append_raw(&empty_list, 1); ret.out() } /// Overwrite the genesis components. 
pub fn overwrite_genesis_params(&mut self, g: Genesis) { let GenericSeal(seal_rlp) = g.seal.into(); self.parent_hash = g.parent_hash; self.transactions_root = g.transactions_root; self.receipts_root = g.receipts_root; self.author = g.author; self.difficulty = g.difficulty; self.gas_limit = g.gas_limit; self.gas_used = g.gas_used; self.timestamp = g.timestamp; self.extra_data = g.extra_data; self.seal_rlp = seal_rlp; } /// Alter the value of the genesis state. pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { self.genesis_state = s; let (root, _) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; self.state_root = root; Ok(()) } /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> { if db.as_hash_db().contains(&self.state_root, hash_db::EMPTY_PREFIX) { return Ok(db); } // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever // called anyway. let (root, db) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, factories, db )?; assert_eq!(root, self.state_root, "Spec's state root has not been precomputed correctly."); Ok(db) } /// Loads just the state machine from a json file. pub fn load_machine<R: Read>(reader: R) -> Result<Machine, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .map(|s| { let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect(); let params = CommonParams::from(s.params); Spec::machine(&s.engine, params, builtins) }) } /// Loads spec from json file. Provide factories for executing contracts and ensuring /// storage goes to the right place. pub fn load<'a, T: Into<SpecParams<'a>>, R: Read>(params: T, reader: R) -> Result<Self, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .and_then(|x| load_from(params.into(), x)) } /// initialize genesis epoch data, using in-memory database for /// constructor. 
pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> { let genesis = self.genesis_header(); let factories = Default::default(); let mut db = journaldb::new( Arc::new(kvdb_memorydb::create(0)), journaldb::Algorithm::Archive, None, ); self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories) .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; let call = |a, d| { let mut db = db.boxed_clone(); let env_info = evm::EnvInfo { number: 0, author: *genesis.author(), timestamp: genesis.timestamp(), difficulty: *genesis.difficulty(), gas_limit: U256::max_value(), last_hashes: Arc::new(Vec::new()), gas_used: 0.into(), }; let from = Address::zero(); let tx = Transaction { nonce: self.engine.account_start_nonce(0), action: Action::Call(a), gas: U256::max_value(), gas_price: U256::default(), value: U256::default(), data: d, }.fake_sign(from); let res = executive_state::prove_transaction_virtual( db.as_hash_db_mut(), *genesis.state_root(), &tx, self.engine.machine(), &env_info, factories.clone(), ); res.map(|(out, proof)| { (out, proof.into_iter().map(|x| x.into_vec()).collect()) }).ok_or_else(|| "Failed to prove call: insufficient state".into()) }; self.engine.genesis_epoch_data(&genesis, &call) } } #[cfg(test)] mod tests { use std::str::FromStr; use account_state::State; use common_types::{view, views::BlockView}; use ethereum_types::{Address, H256}; use ethcore::test_helpers::get_temp_state_db; use tempdir::TempDir; use super::Spec; #[test] fn test_load_empty() { let tempdir = TempDir::new("").unwrap(); assert!(Spec::load(&tempdir.path(), &[] as &[u8]).is_err()); } #[test] fn test_chain() { let test_spec = crate::new_test(); assert_eq!( test_spec.state_root, H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap() ); let genesis = test_spec.genesis_block(); assert_eq!( view!(BlockView, &genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap() ); } #[test] fn genesis_constructor() { let _ = ::env_logger::try_init(); let spec = crate::new_test_constructor(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()) .unwrap(); let state = State::from_existing( db.boxed_clone(), spec.state_root, spec.engine.account_start_nonce(0), Default::default(), ).unwrap(); let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let address = Address::from_str("0000000000000000000000000000000000001337").unwrap(); assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); assert_eq!(state.balance(&address).unwrap(), 1.into()); } }
{ Machine::with_ethash_extensions(params, builtins, ethash.params.clone().into()) }
conditional_block
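The `conditional_block` sample above fills in the Ethash arm of `Spec::machine`. As a minimal usage sketch (not from the source; both paths are hypothetical, and error handling is reduced to strings), this shows how that branch is reached: `Spec::load` parses the JSON and, through `Spec::machine`, picks `Machine::with_ethash_extensions` only when the engine section is Ethash. The blanket `From<&T: AsRef<Path>>` impl lets a borrowed path stand in for `SpecParams`.

use std::{fs::File, path::PathBuf};

fn inspect_spec(json: &PathBuf, cache_dir: &PathBuf) -> Result<(), String> {
    let reader = File::open(json).map_err(|e| e.to_string())?;
    // `cache_dir` converts into `SpecParams` via the `From<&T>` impl shown earlier.
    let spec = Spec::load(cache_dir, reader).map_err(|e| e.to_string())?;
    println!("network id: {}", spec.network_id());
    println!("genesis state root: {:?}", spec.genesis_header().state_root());
    Ok(())
}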
spec.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Parameters for a block chain. use std::{ collections::BTreeMap, fmt, io::Read, path::Path, sync::Arc, }; use common_types::{ BlockNumber, header::Header, encoded, engines::{OptimizeFor, params::CommonParams}, errors::EthcoreError as Error, transaction::{Action, Transaction}, }; use account_state::{Backend, State, backend::Basic as BasicBackend}; use authority_round::AuthorityRound; use basic_authority::BasicAuthority; use bytes::Bytes; use builtin::Builtin; use clique::Clique; use engine::Engine; use ethash_engine::Ethash; use ethereum_types::{H256, Bloom, U256, Address}; use ethjson; use instant_seal::{InstantSeal, InstantSealParams}; use keccak_hash::{KECCAK_NULL_RLP, keccak}; use log::{trace, warn}; use machine::{executive::Executive, Machine, substate::Substate}; use null_engine::NullEngine; use pod::PodState; use rlp::{Rlp, RlpStream}; use trace::{NoopTracer, NoopVMTracer}; use trie_vm_factories::Factories; use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType}; use crate::{ Genesis, seal::Generic as GenericSeal, }; /// Runtime parameters for the spec that are related to how the software should run the chain, /// rather than integral properties of the chain itself. pub struct SpecParams<'a> { /// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems pub cache_dir: &'a Path, /// Whether to run slower at the expense of better memory usage, or run faster while using /// more /// memory. This may get more fine-grained in the future but for now is simply a binary /// option. pub optimization_setting: Option<OptimizeFor>, } impl<'a> SpecParams<'a> { /// Create from a cache path, with null values for the other fields pub fn from_path(path: &'a Path) -> Self { SpecParams { cache_dir: path, optimization_setting: None, } } /// Create from a cache path and an optimization setting pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self { SpecParams { cache_dir: path, optimization_setting: Some(optimization), } } } impl<'a, T: AsRef<Path>> From<&'a T> for SpecParams<'a> { fn from(path: &'a T) -> Self { Self::from_path(path.as_ref()) } } /// given a pre-constructor state, run all the given constructors and produce a new state and /// state root. fn run_constructors<T: Backend>( genesis_state: &PodState, constructors: &[(Address, Bytes)], engine: &dyn Engine, author: Address, timestamp: u64, difficulty: U256, factories: &Factories, mut db: T ) -> Result<(H256, T), Error> { let mut root = KECCAK_NULL_RLP; // basic accounts in spec. 
{ let mut t = factories.trie.create(db.as_hash_db_mut(), &mut root); for (address, account) in genesis_state.get().iter() { t.insert(address.as_bytes(), &account.rlp())?; } } for (address, account) in genesis_state.get().iter() { db.note_non_null_account(address); account.insert_additional( &mut *factories.accountdb.create( db.as_hash_db_mut(), keccak(address), ), &factories.trie, ); } let start_nonce = engine.account_start_nonce(0); let mut state = State::from_existing(db, root, start_nonce, factories.clone())?; // Execute contract constructors. let env_info = EnvInfo { number: 0, author, timestamp, difficulty, last_hashes: Default::default(), gas_used: U256::zero(), gas_limit: U256::max_value(), }; let from = Address::zero(); for &(ref address, ref constructor) in constructors.iter() { trace!(target: "spec", "run_constructors: Creating a contract at {}.", address); trace!(target: "spec", " .. root before = {}", state.root()); let params = ActionParams { code_address: address.clone(), code_hash: Some(keccak(constructor)), code_version: U256::zero(), address: address.clone(), sender: from.clone(), origin: from.clone(), gas: U256::max_value(), gas_price: Default::default(), value: ActionValue::Transfer(Default::default()), code: Some(Arc::new(constructor.clone())), data: None, call_type: CallType::None, params_type: ParamsType::Embedded, }; let mut substate = Substate::new(); { let machine = engine.machine(); let schedule = machine.schedule(env_info.number); let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); // failing create is not a bug if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); } } let _ = state.commit()?; } Ok(state.drop()) } /// Parameters for a block chain; includes both those intrinsic to the design of the /// chain and those to be interpreted by the active chain engine. pub struct Spec { /// User friendly spec name. pub name: String, /// Engine specified by json file. pub engine: Arc<dyn Engine>, /// Name of the subdir inside the main data dir to use for chain data and settings. pub data_dir: String, /// Known nodes on the network in enode format. pub nodes: Vec<String>, /// The genesis block's parent hash field. pub parent_hash: H256, /// The genesis block's author field. pub author: Address, /// The genesis block's difficulty field. pub difficulty: U256, /// The genesis block's gas limit field. pub gas_limit: U256, /// The genesis block's gas used field. pub gas_used: U256, /// The genesis block's timestamp field. pub timestamp: u64, /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. pub transactions_root: H256, /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. pub receipts_root: H256, /// The genesis block's extra data field. pub extra_data: Bytes, /// Each seal field, expressed as RLP, concatenated. pub seal_rlp: Bytes, /// Hardcoded synchronization. Allows the light client to immediately jump to a specific block. pub hardcoded_sync: Option<SpecHardcodedSync>, /// Contract constructors to be executed on genesis. pub constructors: Vec<(Address, Bytes)>, /// May be prepopulated if we know this in advance. pub state_root: H256, /// Genesis state as plain old data. pub genesis_state: PodState, } /// Part of `Spec`. Describes the hardcoded synchronization parameters. pub struct SpecHardcodedSync { /// Header of the block to jump to for hardcoded sync, and total difficulty. 
pub header: encoded::Header, /// Total difficulty of the block to jump to. pub total_difficulty: U256, /// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the /// header of `hardcoded_sync`. pub chts: Vec<H256>, } impl From<ethjson::spec::HardcodedSync> for SpecHardcodedSync { fn from(sync: ethjson::spec::HardcodedSync) -> Self { SpecHardcodedSync { header: encoded::Header::new(sync.header.into()), total_difficulty: sync.total_difficulty.into(), chts: sync.chts.into_iter().map(Into::into).collect(), } } } impl fmt::Display for SpecHardcodedSync { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "{{")?; writeln!(f, r#"header": "{:?},"#, self.header)?; writeln!(f, r#"total_difficulty": "{:?},"#, self.total_difficulty)?; writeln!(f, r#"chts": {:#?}"#, self.chts.iter().map(|x| format!(r#"{}"#, x)).collect::<Vec<_>>())?; writeln!(f, "}}") } } /// Load from JSON object. fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Error> { let builtins = s.accounts .builtins() .into_iter() .map(|p| (p.0.into(), From::from(p.1))) .collect(); let g = Genesis::from(s.genesis); let GenericSeal(seal_rlp) = g.seal.into(); let params = CommonParams::from(s.params); let hardcoded_sync = s.hardcoded_sync.map(Into::into); let engine = Spec::engine(spec_params, s.engine, params, builtins); let author = g.author; let timestamp = g.timestamp; let difficulty = g.difficulty; let constructors: Vec<_> = s.accounts .constructors() .into_iter() .map(|(a, c)| (a.into(), c.into())) .collect(); let genesis_state: PodState = s.accounts.into(); let (state_root, _) = run_constructors( &genesis_state, &constructors, &*engine, author, timestamp, difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; let s = Spec { engine, name: s.name.clone().into(), data_dir: s.data_dir.unwrap_or(s.name).into(), nodes: s.nodes.unwrap_or_else(Vec::new), parent_hash: g.parent_hash, transactions_root: g.transactions_root, receipts_root: g.receipts_root, author, difficulty, gas_limit: g.gas_limit, gas_used: g.gas_used, timestamp, extra_data: g.extra_data, seal_rlp, hardcoded_sync, constructors, genesis_state, state_root, }; Ok(s) } impl Spec { // create an instance of an Ethereum state machine, minus consensus logic. fn machine( engine_spec: &ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Machine { if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec { Machine::with_ethash_extensions(params, builtins, ethash.params.clone().into()) } else { Machine::regular(params, builtins) } } /// Convert engine spec into a arc'd Engine of the right underlying type. /// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. 
fn engine( spec_params: SpecParams, engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Arc<dyn Engine> { let machine = Self::machine(&engine_spec, params, builtins); match engine_spec { ethjson::spec::Engine::Null(null) => Arc::new(NullEngine::new(null.params.into(), machine)), ethjson::spec::Engine::Ethash(ethash) => Arc::new(Ethash::new(spec_params.cache_dir, ethash.params.into(), machine, spec_params.optimization_setting)), ethjson::spec::Engine::InstantSeal(Some(instant_seal)) => Arc::new(InstantSeal::new(instant_seal.params.into(), machine)), ethjson::spec::Engine::InstantSeal(None) => Arc::new(InstantSeal::new(InstantSealParams::default(), machine)), ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)), ethjson::spec::Engine::Clique(clique) => Clique::new(clique.params.into(), machine) .expect("Failed to start Clique consensus engine."), ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(authority_round.params.into(), machine) .expect("Failed to start AuthorityRound consensus engine."), } } /// Get common blockchain parameters. pub fn params(&self) -> &CommonParams { &self.engine.params() } /// Get the configured Network ID. pub fn network_id(&self) -> u64 { self.params().network_id } /// Get the chain ID used for signing. pub fn chain_id(&self) -> u64 { self.params().chain_id } /// Get the configured subprotocol name. pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() } /// Get the configured network fork block. pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params().fork_block } /// Get the header of the genesis block. pub fn genesis_header(&self) -> Header { let mut header: Header = Default::default(); header.set_parent_hash(self.parent_hash.clone()); header.set_timestamp(self.timestamp); header.set_number(0); header.set_author(self.author.clone()); header.set_transactions_root(self.transactions_root.clone()); header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); header.set_extra_data(self.extra_data.clone()); header.set_state_root(self.state_root); header.set_receipts_root(self.receipts_root.clone()); header.set_log_bloom(Bloom::default()); header.set_gas_used(self.gas_used.clone()); header.set_gas_limit(self.gas_limit.clone()); header.set_difficulty(self.difficulty.clone()); header.set_seal({ let r = Rlp::new(&self.seal_rlp); r.iter().map(|f| f.as_raw().to_vec()).collect() }); trace!(target: "spec", "Header hash is {}", header.hash()); header } /// Compose the genesis block for this chain. pub fn genesis_block(&self) -> Bytes { let empty_list = RlpStream::new_list(0).out(); let header = self.genesis_header(); let mut ret = RlpStream::new_list(3); ret.append(&header); ret.append_raw(&empty_list, 1); ret.append_raw(&empty_list, 1); ret.out() } /// Overwrite the genesis components. pub fn overwrite_genesis_params(&mut self, g: Genesis) { let GenericSeal(seal_rlp) = g.seal.into(); self.parent_hash = g.parent_hash; self.transactions_root = g.transactions_root; self.receipts_root = g.receipts_root; self.author = g.author; self.difficulty = g.difficulty; self.gas_limit = g.gas_limit; self.gas_used = g.gas_used; self.timestamp = g.timestamp; self.extra_data = g.extra_data; self.seal_rlp = seal_rlp; } /// Alter the value of the genesis state. 
pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { self.genesis_state = s; let (root, _) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; self.state_root = root; Ok(()) } /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> { if db.as_hash_db().contains(&self.state_root, hash_db::EMPTY_PREFIX) { return Ok(db); } // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever // called anyway. let (root, db) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, factories, db )?; assert_eq!(root, self.state_root, "Spec's state root has not been precomputed correctly."); Ok(db) } /// Loads just the state machine from a json file. pub fn load_machine<R: Read>(reader: R) -> Result<Machine, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .map(|s| { let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
}) } /// Loads spec from json file. Provide factories for executing contracts and ensuring /// storage goes to the right place. pub fn load<'a, T: Into<SpecParams<'a>>, R: Read>(params: T, reader: R) -> Result<Self, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .and_then(|x| load_from(params.into(), x)) } /// initialize genesis epoch data, using in-memory database for /// constructor. pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> { let genesis = self.genesis_header(); let factories = Default::default(); let mut db = journaldb::new( Arc::new(kvdb_memorydb::create(0)), journaldb::Algorithm::Archive, None, ); self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories) .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; let call = |a, d| { let mut db = db.boxed_clone(); let env_info = evm::EnvInfo { number: 0, author: *genesis.author(), timestamp: genesis.timestamp(), difficulty: *genesis.difficulty(), gas_limit: U256::max_value(), last_hashes: Arc::new(Vec::new()), gas_used: 0.into(), }; let from = Address::zero(); let tx = Transaction { nonce: self.engine.account_start_nonce(0), action: Action::Call(a), gas: U256::max_value(), gas_price: U256::default(), value: U256::default(), data: d, }.fake_sign(from); let res = executive_state::prove_transaction_virtual( db.as_hash_db_mut(), *genesis.state_root(), &tx, self.engine.machine(), &env_info, factories.clone(), ); res.map(|(out, proof)| { (out, proof.into_iter().map(|x| x.into_vec()).collect()) }).ok_or_else(|| "Failed to prove call: insufficient state".into()) }; self.engine.genesis_epoch_data(&genesis, &call) } } #[cfg(test)] mod tests { use std::str::FromStr; use account_state::State; use common_types::{view, views::BlockView}; use ethereum_types::{Address, H256}; use ethcore::test_helpers::get_temp_state_db; use tempdir::TempDir; use super::Spec; #[test] fn test_load_empty() { let tempdir = TempDir::new("").unwrap(); assert!(Spec::load(&tempdir.path(), &[] as &[u8]).is_err()); } #[test] fn test_chain() { let test_spec = crate::new_test(); assert_eq!( test_spec.state_root, H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap() ); let genesis = test_spec.genesis_block(); assert_eq!( view!(BlockView, &genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap() ); } #[test] fn genesis_constructor() { let _ = ::env_logger::try_init(); let spec = crate::new_test_constructor(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()) .unwrap(); let state = State::from_existing( db.boxed_clone(), spec.state_root, spec.engine.account_start_nonce(0), Default::default(), ).unwrap(); let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let address = Address::from_str("0000000000000000000000000000000000001337").unwrap(); assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); assert_eq!(state.balance(&address).unwrap(), 1.into()); } }
let params = CommonParams::from(s.params); Spec::machine(&s.engine, params, builtins)
random_line_split
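This `random_line_split` lands inside `Spec::load_machine`. Since that function accepts any `R: Read`, an embedded byte slice works as well as a file; a hedged sketch follows (the resource path is hypothetical):

// `include_bytes!` yields `&[u8]`, which implements `Read`.
const SPEC_JSON: &[u8] = include_bytes!("../res/chainspec.json"); // hypothetical resource path

fn embedded_machine() -> Result<Machine, String> {
    // Parses only the state machine, skipping consensus-engine construction.
    Spec::load_machine(SPEC_JSON).map_err(|e| e.to_string())
}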
spec.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Parameters for a block chain. use std::{ collections::BTreeMap, fmt, io::Read, path::Path, sync::Arc, }; use common_types::{ BlockNumber, header::Header, encoded, engines::{OptimizeFor, params::CommonParams}, errors::EthcoreError as Error, transaction::{Action, Transaction}, }; use account_state::{Backend, State, backend::Basic as BasicBackend}; use authority_round::AuthorityRound; use basic_authority::BasicAuthority; use bytes::Bytes; use builtin::Builtin; use clique::Clique; use engine::Engine; use ethash_engine::Ethash; use ethereum_types::{H256, Bloom, U256, Address}; use ethjson; use instant_seal::{InstantSeal, InstantSealParams}; use keccak_hash::{KECCAK_NULL_RLP, keccak}; use log::{trace, warn}; use machine::{executive::Executive, Machine, substate::Substate}; use null_engine::NullEngine; use pod::PodState; use rlp::{Rlp, RlpStream}; use trace::{NoopTracer, NoopVMTracer}; use trie_vm_factories::Factories; use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType}; use crate::{ Genesis, seal::Generic as GenericSeal, }; /// Runtime parameters for the spec that are related to how the software should run the chain, /// rather than integral properties of the chain itself. pub struct SpecParams<'a> { /// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems pub cache_dir: &'a Path, /// Whether to run slower at the expense of better memory usage, or run faster while using /// more /// memory. This may get more fine-grained in the future but for now is simply a binary /// option. pub optimization_setting: Option<OptimizeFor>, } impl<'a> SpecParams<'a> { /// Create from a cache path, with null values for the other fields pub fn from_path(path: &'a Path) -> Self { SpecParams { cache_dir: path, optimization_setting: None, } } /// Create from a cache path and an optimization setting pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self { SpecParams { cache_dir: path, optimization_setting: Some(optimization), } } } impl<'a, T: AsRef<Path>> From<&'a T> for SpecParams<'a> { fn from(path: &'a T) -> Self { Self::from_path(path.as_ref()) } } /// given a pre-constructor state, run all the given constructors and produce a new state and /// state root. fn run_constructors<T: Backend>( genesis_state: &PodState, constructors: &[(Address, Bytes)], engine: &dyn Engine, author: Address, timestamp: u64, difficulty: U256, factories: &Factories, mut db: T ) -> Result<(H256, T), Error> { let mut root = KECCAK_NULL_RLP; // basic accounts in spec. 
{ let mut t = factories.trie.create(db.as_hash_db_mut(), &mut root); for (address, account) in genesis_state.get().iter() { t.insert(address.as_bytes(), &account.rlp())?; } } for (address, account) in genesis_state.get().iter() { db.note_non_null_account(address); account.insert_additional( &mut *factories.accountdb.create( db.as_hash_db_mut(), keccak(address), ), &factories.trie, ); } let start_nonce = engine.account_start_nonce(0); let mut state = State::from_existing(db, root, start_nonce, factories.clone())?; // Execute contract constructors. let env_info = EnvInfo { number: 0, author, timestamp, difficulty, last_hashes: Default::default(), gas_used: U256::zero(), gas_limit: U256::max_value(), }; let from = Address::zero(); for &(ref address, ref constructor) in constructors.iter() { trace!(target: "spec", "run_constructors: Creating a contract at {}.", address); trace!(target: "spec", " .. root before = {}", state.root()); let params = ActionParams { code_address: address.clone(), code_hash: Some(keccak(constructor)), code_version: U256::zero(), address: address.clone(), sender: from.clone(), origin: from.clone(), gas: U256::max_value(), gas_price: Default::default(), value: ActionValue::Transfer(Default::default()), code: Some(Arc::new(constructor.clone())), data: None, call_type: CallType::None, params_type: ParamsType::Embedded, }; let mut substate = Substate::new(); { let machine = engine.machine(); let schedule = machine.schedule(env_info.number); let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); // failing create is not a bug if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); } } let _ = state.commit()?; } Ok(state.drop()) } /// Parameters for a block chain; includes both those intrinsic to the design of the /// chain and those to be interpreted by the active chain engine. pub struct Spec { /// User friendly spec name. pub name: String, /// Engine specified by json file. pub engine: Arc<dyn Engine>, /// Name of the subdir inside the main data dir to use for chain data and settings. pub data_dir: String, /// Known nodes on the network in enode format. pub nodes: Vec<String>, /// The genesis block's parent hash field. pub parent_hash: H256, /// The genesis block's author field. pub author: Address, /// The genesis block's difficulty field. pub difficulty: U256, /// The genesis block's gas limit field. pub gas_limit: U256, /// The genesis block's gas used field. pub gas_used: U256, /// The genesis block's timestamp field. pub timestamp: u64, /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. pub transactions_root: H256, /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. pub receipts_root: H256, /// The genesis block's extra data field. pub extra_data: Bytes, /// Each seal field, expressed as RLP, concatenated. pub seal_rlp: Bytes, /// Hardcoded synchronization. Allows the light client to immediately jump to a specific block. pub hardcoded_sync: Option<SpecHardcodedSync>, /// Contract constructors to be executed on genesis. pub constructors: Vec<(Address, Bytes)>, /// May be prepopulated if we know this in advance. pub state_root: H256, /// Genesis state as plain old data. pub genesis_state: PodState, } /// Part of `Spec`. Describes the hardcoded synchronization parameters. pub struct SpecHardcodedSync { /// Header of the block to jump to for hardcoded sync, and total difficulty. 
pub header: encoded::Header, /// Total difficulty of the block to jump to. pub total_difficulty: U256, /// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the /// header of `hardcoded_sync`. pub chts: Vec<H256>, } impl From<ethjson::spec::HardcodedSync> for SpecHardcodedSync { fn from(sync: ethjson::spec::HardcodedSync) -> Self { SpecHardcodedSync { header: encoded::Header::new(sync.header.into()), total_difficulty: sync.total_difficulty.into(), chts: sync.chts.into_iter().map(Into::into).collect(), } } } impl fmt::Display for SpecHardcodedSync { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "{{")?; writeln!(f, r#"header": "{:?},"#, self.header)?; writeln!(f, r#"total_difficulty": "{:?},"#, self.total_difficulty)?; writeln!(f, r#"chts": {:#?}"#, self.chts.iter().map(|x| format!(r#"{}"#, x)).collect::<Vec<_>>())?; writeln!(f, "}}") } } /// Load from JSON object. fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Error> { let builtins = s.accounts .builtins() .into_iter() .map(|p| (p.0.into(), From::from(p.1))) .collect(); let g = Genesis::from(s.genesis); let GenericSeal(seal_rlp) = g.seal.into(); let params = CommonParams::from(s.params); let hardcoded_sync = s.hardcoded_sync.map(Into::into); let engine = Spec::engine(spec_params, s.engine, params, builtins); let author = g.author; let timestamp = g.timestamp; let difficulty = g.difficulty; let constructors: Vec<_> = s.accounts .constructors() .into_iter() .map(|(a, c)| (a.into(), c.into())) .collect(); let genesis_state: PodState = s.accounts.into(); let (state_root, _) = run_constructors( &genesis_state, &constructors, &*engine, author, timestamp, difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; let s = Spec { engine, name: s.name.clone().into(), data_dir: s.data_dir.unwrap_or(s.name).into(), nodes: s.nodes.unwrap_or_else(Vec::new), parent_hash: g.parent_hash, transactions_root: g.transactions_root, receipts_root: g.receipts_root, author, difficulty, gas_limit: g.gas_limit, gas_used: g.gas_used, timestamp, extra_data: g.extra_data, seal_rlp, hardcoded_sync, constructors, genesis_state, state_root, }; Ok(s) } impl Spec { // create an instance of an Ethereum state machine, minus consensus logic. fn machine( engine_spec: &ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Machine { if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec { Machine::with_ethash_extensions(params, builtins, ethash.params.clone().into()) } else { Machine::regular(params, builtins) } } /// Convert engine spec into a arc'd Engine of the right underlying type. /// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. fn
( spec_params: SpecParams, engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Arc<dyn Engine> { let machine = Self::machine(&engine_spec, params, builtins); match engine_spec { ethjson::spec::Engine::Null(null) => Arc::new(NullEngine::new(null.params.into(), machine)), ethjson::spec::Engine::Ethash(ethash) => Arc::new(Ethash::new(spec_params.cache_dir, ethash.params.into(), machine, spec_params.optimization_setting)), ethjson::spec::Engine::InstantSeal(Some(instant_seal)) => Arc::new(InstantSeal::new(instant_seal.params.into(), machine)), ethjson::spec::Engine::InstantSeal(None) => Arc::new(InstantSeal::new(InstantSealParams::default(), machine)), ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)), ethjson::spec::Engine::Clique(clique) => Clique::new(clique.params.into(), machine) .expect("Failed to start Clique consensus engine."), ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(authority_round.params.into(), machine) .expect("Failed to start AuthorityRound consensus engine."), } } /// Get common blockchain parameters. pub fn params(&self) -> &CommonParams { &self.engine.params() } /// Get the configured Network ID. pub fn network_id(&self) -> u64 { self.params().network_id } /// Get the chain ID used for signing. pub fn chain_id(&self) -> u64 { self.params().chain_id } /// Get the configured subprotocol name. pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() } /// Get the configured network fork block. pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params().fork_block } /// Get the header of the genesis block. pub fn genesis_header(&self) -> Header { let mut header: Header = Default::default(); header.set_parent_hash(self.parent_hash.clone()); header.set_timestamp(self.timestamp); header.set_number(0); header.set_author(self.author.clone()); header.set_transactions_root(self.transactions_root.clone()); header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); header.set_extra_data(self.extra_data.clone()); header.set_state_root(self.state_root); header.set_receipts_root(self.receipts_root.clone()); header.set_log_bloom(Bloom::default()); header.set_gas_used(self.gas_used.clone()); header.set_gas_limit(self.gas_limit.clone()); header.set_difficulty(self.difficulty.clone()); header.set_seal({ let r = Rlp::new(&self.seal_rlp); r.iter().map(|f| f.as_raw().to_vec()).collect() }); trace!(target: "spec", "Header hash is {}", header.hash()); header } /// Compose the genesis block for this chain. pub fn genesis_block(&self) -> Bytes { let empty_list = RlpStream::new_list(0).out(); let header = self.genesis_header(); let mut ret = RlpStream::new_list(3); ret.append(&header); ret.append_raw(&empty_list, 1); ret.append_raw(&empty_list, 1); ret.out() } /// Overwrite the genesis components. pub fn overwrite_genesis_params(&mut self, g: Genesis) { let GenericSeal(seal_rlp) = g.seal.into(); self.parent_hash = g.parent_hash; self.transactions_root = g.transactions_root; self.receipts_root = g.receipts_root; self.author = g.author; self.difficulty = g.difficulty; self.gas_limit = g.gas_limit; self.gas_used = g.gas_used; self.timestamp = g.timestamp; self.extra_data = g.extra_data; self.seal_rlp = seal_rlp; } /// Alter the value of the genesis state. 
pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { self.genesis_state = s; let (root, _) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; self.state_root = root; Ok(()) } /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> { if db.as_hash_db().contains(&self.state_root, hash_db::EMPTY_PREFIX) { return Ok(db); } // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever // called anyway. let (root, db) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, factories, db )?; assert_eq!(root, self.state_root, "Spec's state root has not been precomputed correctly."); Ok(db) } /// Loads just the state machine from a json file. pub fn load_machine<R: Read>(reader: R) -> Result<Machine, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .map(|s| { let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect(); let params = CommonParams::from(s.params); Spec::machine(&s.engine, params, builtins) }) } /// Loads spec from json file. Provide factories for executing contracts and ensuring /// storage goes to the right place. pub fn load<'a, T: Into<SpecParams<'a>>, R: Read>(params: T, reader: R) -> Result<Self, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .and_then(|x| load_from(params.into(), x)) } /// initialize genesis epoch data, using in-memory database for /// constructor. pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> { let genesis = self.genesis_header(); let factories = Default::default(); let mut db = journaldb::new( Arc::new(kvdb_memorydb::create(0)), journaldb::Algorithm::Archive, None, ); self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories) .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; let call = |a, d| { let mut db = db.boxed_clone(); let env_info = evm::EnvInfo { number: 0, author: *genesis.author(), timestamp: genesis.timestamp(), difficulty: *genesis.difficulty(), gas_limit: U256::max_value(), last_hashes: Arc::new(Vec::new()), gas_used: 0.into(), }; let from = Address::zero(); let tx = Transaction { nonce: self.engine.account_start_nonce(0), action: Action::Call(a), gas: U256::max_value(), gas_price: U256::default(), value: U256::default(), data: d, }.fake_sign(from); let res = executive_state::prove_transaction_virtual( db.as_hash_db_mut(), *genesis.state_root(), &tx, self.engine.machine(), &env_info, factories.clone(), ); res.map(|(out, proof)| { (out, proof.into_iter().map(|x| x.into_vec()).collect()) }).ok_or_else(|| "Failed to prove call: insufficient state".into()) }; self.engine.genesis_epoch_data(&genesis, &call) } } #[cfg(test)] mod tests { use std::str::FromStr; use account_state::State; use common_types::{view, views::BlockView}; use ethereum_types::{Address, H256}; use ethcore::test_helpers::get_temp_state_db; use tempdir::TempDir; use super::Spec; #[test] fn test_load_empty() { let tempdir = TempDir::new("").unwrap(); assert!(Spec::load(&tempdir.path(), &[] as &[u8]).is_err()); } #[test] fn test_chain() { let test_spec = crate::new_test(); assert_eq!( test_spec.state_root, 
H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap() ); let genesis = test_spec.genesis_block(); assert_eq!( view!(BlockView, &genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap() ); } #[test] fn genesis_constructor() { let _ = ::env_logger::try_init(); let spec = crate::new_test_constructor(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()) .unwrap(); let state = State::from_existing( db.boxed_clone(), spec.state_root, spec.engine.account_start_nonce(0), Default::default(), ).unwrap(); let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let address = Address::from_str("0000000000000000000000000000000000001337").unwrap(); assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); assert_eq!(state.balance(&address).unwrap(), 1.into()); } }
engine
identifier_name
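The masked identifier here is `engine`, the dispatcher that turns an `ethjson::spec::Engine` into the matching `Arc<dyn Engine>`. Of its arms, only Ethash consumes `SpecParams`; a small sketch of building params with an explicit optimization setting, assuming `OptimizeFor` exposes a `Memory` variant (per the `engines` import in this file):

use std::path::Path;

fn low_memory_params(cache: &Path) -> SpecParams<'_> {
    // Assumed variant name; the setting is read only by the Ethash arm of `engine`.
    SpecParams::new(cache, OptimizeFor::Memory)
}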
spec.rs
// Copyright 2015-2019 Parity Technologies (UK) Ltd. // This file is part of Parity Ethereum. // Parity Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>. //! Parameters for a block chain. use std::{ collections::BTreeMap, fmt, io::Read, path::Path, sync::Arc, }; use common_types::{ BlockNumber, header::Header, encoded, engines::{OptimizeFor, params::CommonParams}, errors::EthcoreError as Error, transaction::{Action, Transaction}, }; use account_state::{Backend, State, backend::Basic as BasicBackend}; use authority_round::AuthorityRound; use basic_authority::BasicAuthority; use bytes::Bytes; use builtin::Builtin; use clique::Clique; use engine::Engine; use ethash_engine::Ethash; use ethereum_types::{H256, Bloom, U256, Address}; use ethjson; use instant_seal::{InstantSeal, InstantSealParams}; use keccak_hash::{KECCAK_NULL_RLP, keccak}; use log::{trace, warn}; use machine::{executive::Executive, Machine, substate::Substate}; use null_engine::NullEngine; use pod::PodState; use rlp::{Rlp, RlpStream}; use trace::{NoopTracer, NoopVMTracer}; use trie_vm_factories::Factories; use vm::{EnvInfo, CallType, ActionValue, ActionParams, ParamsType}; use crate::{ Genesis, seal::Generic as GenericSeal, }; /// Runtime parameters for the spec that are related to how the software should run the chain, /// rather than integral properties of the chain itself. pub struct SpecParams<'a> { /// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems pub cache_dir: &'a Path, /// Whether to run slower at the expense of better memory usage, or run faster while using /// more /// memory. This may get more fine-grained in the future but for now is simply a binary /// option. pub optimization_setting: Option<OptimizeFor>, } impl<'a> SpecParams<'a> { /// Create from a cache path, with null values for the other fields pub fn from_path(path: &'a Path) -> Self { SpecParams { cache_dir: path, optimization_setting: None, } } /// Create from a cache path and an optimization setting pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self
} impl<'a, T: AsRef<Path>> From<&'a T> for SpecParams<'a> { fn from(path: &'a T) -> Self { Self::from_path(path.as_ref()) } } /// given a pre-constructor state, run all the given constructors and produce a new state and /// state root. fn run_constructors<T: Backend>( genesis_state: &PodState, constructors: &[(Address, Bytes)], engine: &dyn Engine, author: Address, timestamp: u64, difficulty: U256, factories: &Factories, mut db: T ) -> Result<(H256, T), Error> { let mut root = KECCAK_NULL_RLP; // basic accounts in spec. { let mut t = factories.trie.create(db.as_hash_db_mut(), &mut root); for (address, account) in genesis_state.get().iter() { t.insert(address.as_bytes(), &account.rlp())?; } } for (address, account) in genesis_state.get().iter() { db.note_non_null_account(address); account.insert_additional( &mut *factories.accountdb.create( db.as_hash_db_mut(), keccak(address), ), &factories.trie, ); } let start_nonce = engine.account_start_nonce(0); let mut state = State::from_existing(db, root, start_nonce, factories.clone())?; // Execute contract constructors. let env_info = EnvInfo { number: 0, author, timestamp, difficulty, last_hashes: Default::default(), gas_used: U256::zero(), gas_limit: U256::max_value(), }; let from = Address::zero(); for &(ref address, ref constructor) in constructors.iter() { trace!(target: "spec", "run_constructors: Creating a contract at {}.", address); trace!(target: "spec", " .. root before = {}", state.root()); let params = ActionParams { code_address: address.clone(), code_hash: Some(keccak(constructor)), code_version: U256::zero(), address: address.clone(), sender: from.clone(), origin: from.clone(), gas: U256::max_value(), gas_price: Default::default(), value: ActionValue::Transfer(Default::default()), code: Some(Arc::new(constructor.clone())), data: None, call_type: CallType::None, params_type: ParamsType::Embedded, }; let mut substate = Substate::new(); { let machine = engine.machine(); let schedule = machine.schedule(env_info.number); let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); // failing create is not a bug if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); } } let _ = state.commit()?; } Ok(state.drop()) } /// Parameters for a block chain; includes both those intrinsic to the design of the /// chain and those to be interpreted by the active chain engine. pub struct Spec { /// User friendly spec name. pub name: String, /// Engine specified by json file. pub engine: Arc<dyn Engine>, /// Name of the subdir inside the main data dir to use for chain data and settings. pub data_dir: String, /// Known nodes on the network in enode format. pub nodes: Vec<String>, /// The genesis block's parent hash field. pub parent_hash: H256, /// The genesis block's author field. pub author: Address, /// The genesis block's difficulty field. pub difficulty: U256, /// The genesis block's gas limit field. pub gas_limit: U256, /// The genesis block's gas used field. pub gas_used: U256, /// The genesis block's timestamp field. pub timestamp: u64, /// Transactions root of the genesis block. Should be KECCAK_NULL_RLP. pub transactions_root: H256, /// Receipts root of the genesis block. Should be KECCAK_NULL_RLP. pub receipts_root: H256, /// The genesis block's extra data field. pub extra_data: Bytes, /// Each seal field, expressed as RLP, concatenated. pub seal_rlp: Bytes, /// Hardcoded synchronization. 
Allows the light client to immediately jump to a specific block. pub hardcoded_sync: Option<SpecHardcodedSync>, /// Contract constructors to be executed on genesis. pub constructors: Vec<(Address, Bytes)>, /// May be prepopulated if we know this in advance. pub state_root: H256, /// Genesis state as plain old data. pub genesis_state: PodState, } /// Part of `Spec`. Describes the hardcoded synchronization parameters. pub struct SpecHardcodedSync { /// Header of the block to jump to for hardcoded sync, and total difficulty. pub header: encoded::Header, /// Total difficulty of the block to jump to. pub total_difficulty: U256, /// List of hardcoded CHTs, in order. If `hardcoded_sync` is set, the CHTs should include the /// header of `hardcoded_sync`. pub chts: Vec<H256>, } impl From<ethjson::spec::HardcodedSync> for SpecHardcodedSync { fn from(sync: ethjson::spec::HardcodedSync) -> Self { SpecHardcodedSync { header: encoded::Header::new(sync.header.into()), total_difficulty: sync.total_difficulty.into(), chts: sync.chts.into_iter().map(Into::into).collect(), } } } impl fmt::Display for SpecHardcodedSync { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "{{")?; writeln!(f, r#"header": "{:?},"#, self.header)?; writeln!(f, r#"total_difficulty": "{:?},"#, self.total_difficulty)?; writeln!(f, r#"chts": {:#?}"#, self.chts.iter().map(|x| format!(r#"{}"#, x)).collect::<Vec<_>>())?; writeln!(f, "}}") } } /// Load from JSON object. fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Error> { let builtins = s.accounts .builtins() .into_iter() .map(|p| (p.0.into(), From::from(p.1))) .collect(); let g = Genesis::from(s.genesis); let GenericSeal(seal_rlp) = g.seal.into(); let params = CommonParams::from(s.params); let hardcoded_sync = s.hardcoded_sync.map(Into::into); let engine = Spec::engine(spec_params, s.engine, params, builtins); let author = g.author; let timestamp = g.timestamp; let difficulty = g.difficulty; let constructors: Vec<_> = s.accounts .constructors() .into_iter() .map(|(a, c)| (a.into(), c.into())) .collect(); let genesis_state: PodState = s.accounts.into(); let (state_root, _) = run_constructors( &genesis_state, &constructors, &*engine, author, timestamp, difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; let s = Spec { engine, name: s.name.clone().into(), data_dir: s.data_dir.unwrap_or(s.name).into(), nodes: s.nodes.unwrap_or_else(Vec::new), parent_hash: g.parent_hash, transactions_root: g.transactions_root, receipts_root: g.receipts_root, author, difficulty, gas_limit: g.gas_limit, gas_used: g.gas_used, timestamp, extra_data: g.extra_data, seal_rlp, hardcoded_sync, constructors, genesis_state, state_root, }; Ok(s) } impl Spec { // create an instance of an Ethereum state machine, minus consensus logic. fn machine( engine_spec: &ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Machine { if let ethjson::spec::Engine::Ethash(ref ethash) = *engine_spec { Machine::with_ethash_extensions(params, builtins, ethash.params.clone().into()) } else { Machine::regular(params, builtins) } } /// Convert engine spec into a arc'd Engine of the right underlying type. /// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. 
fn engine( spec_params: SpecParams, engine_spec: ethjson::spec::Engine, params: CommonParams, builtins: BTreeMap<Address, Builtin>, ) -> Arc<dyn Engine> { let machine = Self::machine(&engine_spec, params, builtins); match engine_spec { ethjson::spec::Engine::Null(null) => Arc::new(NullEngine::new(null.params.into(), machine)), ethjson::spec::Engine::Ethash(ethash) => Arc::new(Ethash::new(spec_params.cache_dir, ethash.params.into(), machine, spec_params.optimization_setting)), ethjson::spec::Engine::InstantSeal(Some(instant_seal)) => Arc::new(InstantSeal::new(instant_seal.params.into(), machine)), ethjson::spec::Engine::InstantSeal(None) => Arc::new(InstantSeal::new(InstantSealParams::default(), machine)), ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(basic_authority.params.into(), machine)), ethjson::spec::Engine::Clique(clique) => Clique::new(clique.params.into(), machine) .expect("Failed to start Clique consensus engine."), ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(authority_round.params.into(), machine) .expect("Failed to start AuthorityRound consensus engine."), } } /// Get common blockchain parameters. pub fn params(&self) -> &CommonParams { &self.engine.params() } /// Get the configured Network ID. pub fn network_id(&self) -> u64 { self.params().network_id } /// Get the chain ID used for signing. pub fn chain_id(&self) -> u64 { self.params().chain_id } /// Get the configured subprotocol name. pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() } /// Get the configured network fork block. pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params().fork_block } /// Get the header of the genesis block. pub fn genesis_header(&self) -> Header { let mut header: Header = Default::default(); header.set_parent_hash(self.parent_hash.clone()); header.set_timestamp(self.timestamp); header.set_number(0); header.set_author(self.author.clone()); header.set_transactions_root(self.transactions_root.clone()); header.set_uncles_hash(keccak(RlpStream::new_list(0).out())); header.set_extra_data(self.extra_data.clone()); header.set_state_root(self.state_root); header.set_receipts_root(self.receipts_root.clone()); header.set_log_bloom(Bloom::default()); header.set_gas_used(self.gas_used.clone()); header.set_gas_limit(self.gas_limit.clone()); header.set_difficulty(self.difficulty.clone()); header.set_seal({ let r = Rlp::new(&self.seal_rlp); r.iter().map(|f| f.as_raw().to_vec()).collect() }); trace!(target: "spec", "Header hash is {}", header.hash()); header } /// Compose the genesis block for this chain. pub fn genesis_block(&self) -> Bytes { let empty_list = RlpStream::new_list(0).out(); let header = self.genesis_header(); let mut ret = RlpStream::new_list(3); ret.append(&header); ret.append_raw(&empty_list, 1); ret.append_raw(&empty_list, 1); ret.out() } /// Overwrite the genesis components. pub fn overwrite_genesis_params(&mut self, g: Genesis) { let GenericSeal(seal_rlp) = g.seal.into(); self.parent_hash = g.parent_hash; self.transactions_root = g.transactions_root; self.receipts_root = g.receipts_root; self.author = g.author; self.difficulty = g.difficulty; self.gas_limit = g.gas_limit; self.gas_used = g.gas_used; self.timestamp = g.timestamp; self.extra_data = g.extra_data; self.seal_rlp = seal_rlp; } /// Alter the value of the genesis state. 
pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> { self.genesis_state = s; let (root, _) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, &Default::default(), BasicBackend(journaldb::new_memory_db()), )?; self.state_root = root; Ok(()) } /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> { if db.as_hash_db().contains(&self.state_root, hash_db::EMPTY_PREFIX) { return Ok(db); } // TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever // called anyway. let (root, db) = run_constructors( &self.genesis_state, &self.constructors, &*self.engine, self.author, self.timestamp, self.difficulty, factories, db )?; assert_eq!(root, self.state_root, "Spec's state root has not been precomputed correctly."); Ok(db) } /// Loads just the state machine from a json file. pub fn load_machine<R: Read>(reader: R) -> Result<Machine, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .map(|s| { let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect(); let params = CommonParams::from(s.params); Spec::machine(&s.engine, params, builtins) }) } /// Loads spec from json file. Provide factories for executing contracts and ensuring /// storage goes to the right place. pub fn load<'a, T: Into<SpecParams<'a>>, R: Read>(params: T, reader: R) -> Result<Self, Error> { ethjson::spec::Spec::load(reader) .map_err(|e| Error::Msg(e.to_string())) .and_then(|x| load_from(params.into(), x)) } /// initialize genesis epoch data, using in-memory database for /// constructor. pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> { let genesis = self.genesis_header(); let factories = Default::default(); let mut db = journaldb::new( Arc::new(kvdb_memorydb::create(0)), journaldb::Algorithm::Archive, None, ); self.ensure_db_good(BasicBackend(db.as_hash_db_mut()), &factories) .map_err(|e| format!("Unable to initialize genesis state: {}", e))?; let call = |a, d| { let mut db = db.boxed_clone(); let env_info = evm::EnvInfo { number: 0, author: *genesis.author(), timestamp: genesis.timestamp(), difficulty: *genesis.difficulty(), gas_limit: U256::max_value(), last_hashes: Arc::new(Vec::new()), gas_used: 0.into(), }; let from = Address::zero(); let tx = Transaction { nonce: self.engine.account_start_nonce(0), action: Action::Call(a), gas: U256::max_value(), gas_price: U256::default(), value: U256::default(), data: d, }.fake_sign(from); let res = executive_state::prove_transaction_virtual( db.as_hash_db_mut(), *genesis.state_root(), &tx, self.engine.machine(), &env_info, factories.clone(), ); res.map(|(out, proof)| { (out, proof.into_iter().map(|x| x.into_vec()).collect()) }).ok_or_else(|| "Failed to prove call: insufficient state".into()) }; self.engine.genesis_epoch_data(&genesis, &call) } } #[cfg(test)] mod tests { use std::str::FromStr; use account_state::State; use common_types::{view, views::BlockView}; use ethereum_types::{Address, H256}; use ethcore::test_helpers::get_temp_state_db; use tempdir::TempDir; use super::Spec; #[test] fn test_load_empty() { let tempdir = TempDir::new("").unwrap(); assert!(Spec::load(&tempdir.path(), &[] as &[u8]).is_err()); } #[test] fn test_chain() { let test_spec = crate::new_test(); assert_eq!( test_spec.state_root, 
H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap() ); let genesis = test_spec.genesis_block(); assert_eq!( view!(BlockView, &genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap() ); } #[test] fn genesis_constructor() { let _ = ::env_logger::try_init(); let spec = crate::new_test_constructor(); let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()) .unwrap(); let state = State::from_existing( db.boxed_clone(), spec.state_root, spec.engine.account_start_nonce(0), Default::default(), ).unwrap(); let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap(); let address = Address::from_str("0000000000000000000000000000000000001337").unwrap(); assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected); assert_eq!(state.balance(&address).unwrap(), 1.into()); } }
{ SpecParams { cache_dir: path, optimization_setting: Some(optimization), } }
identifier_body
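The completed `identifier_body` simply packs both fields of `SpecParams`. Together with `from_path` and the blanket `From<&T: AsRef<Path>>` impl, that gives three equivalent construction routes; a sketch (the `OptimizeFor::Cpu` variant name is an assumption, not confirmed by the excerpt):

use std::path::PathBuf;

fn three_ways(dir: &PathBuf) -> (SpecParams<'_>, SpecParams<'_>, SpecParams<'_>) {
    (
        SpecParams::from_path(dir),             // no optimization setting
        SpecParams::new(dir, OptimizeFor::Cpu), // explicit setting; variant name assumed
        SpecParams::from(dir),                  // blanket `From<&T: AsRef<Path>>` impl
    )
}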
dynamic_scene.rs
use std::any::TypeId; use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use anyhow::Result; use bevy_ecs::{ entity::Entity, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities}, world::World, }; use bevy_reflect::{Reflect, TypePath, TypeRegistryArc, TypeUuid}; use bevy_utils::HashMap; #[cfg(feature = "serialize")] use crate::serde::SceneSerializer; use bevy_ecs::reflect::ReflectResource; #[cfg(feature = "serialize")] use serde::Serialize; /// A collection of serializable resources and dynamic entities. /// /// Each dynamic entity in the collection contains its own run-time defined set of components. /// To spawn a dynamic scene, you can use either: /// * [`SceneSpawner::spawn_dynamic`](crate::SceneSpawner::spawn_dynamic) /// * adding the [`DynamicSceneBundle`](crate::DynamicSceneBundle) to an entity /// * adding the [`Handle<DynamicScene>`](bevy_asset::Handle) to an entity (the scene will only be /// visible if the entity already has [`Transform`](bevy_transform::components::Transform) and /// [`GlobalTransform`](bevy_transform::components::GlobalTransform) components) #[derive(Default, TypeUuid, TypePath)] #[uuid = "749479b1-fb8c-4ff8-a775-623aa76014f5"] pub struct DynamicScene { pub resources: Vec<Box<dyn Reflect>>, pub entities: Vec<DynamicEntity>, } /// A reflection-powered serializable representation of an entity and its components. pub struct DynamicEntity { /// The identifier of the entity, unique within a scene (and the world it may have been generated from). /// /// Components that reference this entity must consistently use this identifier. pub entity: Entity, /// A vector of boxed components that belong to the given entity and /// implement the [`Reflect`] trait. pub components: Vec<Box<dyn Reflect>>, } impl DynamicScene { /// Create a new dynamic scene from a given scene. pub fn from_scene(scene: &Scene) -> Self { Self::from_world(&scene.world) } /// Create a new dynamic scene from a given world. pub fn from_world(world: &World) -> Self { let mut builder = DynamicSceneBuilder::from_world(world); builder.extract_entities(world.iter_entities().map(|entity| entity.id())); builder.extract_resources(); builder.build() } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the provided [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) or [`Resource`](bevy_ecs::prelude::Resource) trait. pub fn write_to_world_with( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, type_registry: &AppTypeRegistry, ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); for resource in &self.resources { let registration = type_registry .get_with_name(resource.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: resource.type_name().to_string(), })?; let reflect_resource = registration.data::<ReflectResource>().ok_or_else(|| { SceneSpawnError::UnregisteredResource { type_name: resource.type_name().to_string(), } })?; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource reflect_resource.apply_or_insert(world, &**resource); } // For each component types that reference other entities, we keep track // of which entities in the scene use that component. 
// This is so we can update the scene-internal references to references // of the actual entities in the world. let mut scene_mappings: HashMap<TypeId, Vec<Entity>> = HashMap::default(); for scene_entity in &self.entities { // Fetch the entity with the given entity id from the `entity_map` // or spawn a new entity with a transiently unique id if there is // no corresponding entry. let entity = *entity_map .entry(scene_entity.entity) .or_insert_with(|| world.spawn_empty().id()); let entity_mut = &mut world.entity_mut(entity); // Apply/ add each component to the given entity. for component in &scene_entity.components { let registration = type_registry .get_with_name(component.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: component.type_name().to_string(), })?; let reflect_component = registration.data::<ReflectComponent>().ok_or_else(|| { SceneSpawnError::UnregisteredComponent { type_name: component.type_name().to_string(), } })?; // If this component references entities in the scene, track it // so we can update it to the entity in the world. if registration.data::<ReflectMapEntities>().is_some() { scene_mappings .entry(registration.type_id()) .or_insert(Vec::new()) .push(entity); } // If the entity already has the given component attached, // just apply the (possibly) new value, otherwise add the // component to the entity. reflect_component.apply_or_insert(entity_mut, &**component); } } // Updates references to entities in the scene to entities in the world for (type_id, entities) in scene_mappings.into_iter() { let registration = type_registry.get(type_id).expect( "we should be getting TypeId from this TypeRegistration in the first place", ); if let Some(map_entities_reflect) = registration.data::<ReflectMapEntities>() { map_entities_reflect.map_entities(world, entity_map, &entities); } } Ok(()) } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the world's [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) trait. pub fn write_to_world( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, ) -> Result<(), SceneSpawnError> { let registry = world.resource::<AppTypeRegistry>().clone(); self.write_to_world_with(world, entity_map, &registry) } // TODO: move to AssetSaver when it is implemented /// Serialize this dynamic scene into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron(&self, registry: &TypeRegistryArc) -> Result<String, ron::Error> { serialize_ron(SceneSerializer::new(self, registry)) } } /// Serialize a given Rust data structure into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron<S>(serialize: S) -> Result<String, ron::Error> where S: Serialize, { let pretty_config = ron::ser::PrettyConfig::default() .indentor(" ".to_string()) .new_line("\n".to_string()); ron::ser::to_string_pretty(&serialize, pretty_config) } #[cfg(test)] mod tests { use bevy_ecs::{reflect::AppTypeRegistry, system::Command, world::World};
use crate::dynamic_scene_builder::DynamicSceneBuilder; #[test] fn components_not_defined_in_scene_should_not_be_affected_by_scene_entity_map() { // Testing that scene reloading applies EntityMap correctly to MapEntities components. // First, we create a simple world with a parent and a child relationship let mut world = World::new(); world.init_resource::<AppTypeRegistry>(); world .resource_mut::<AppTypeRegistry>() .write() .register::<Parent>(); let original_parent_entity = world.spawn_empty().id(); let original_child_entity = world.spawn_empty().id(); AddChild { parent: original_parent_entity, child: original_child_entity, } .apply(&mut world); // We then write this relationship to a new scene, and then write that scene back to the // world to create another parent and child relationship let mut scene_builder = DynamicSceneBuilder::from_world(&world); scene_builder.extract_entity(original_parent_entity); scene_builder.extract_entity(original_child_entity); let scene = scene_builder.build(); let mut entity_map = HashMap::default(); scene.write_to_world(&mut world, &mut entity_map).unwrap(); let &from_scene_parent_entity = entity_map.get(&original_parent_entity).unwrap(); let &from_scene_child_entity = entity_map.get(&original_child_entity).unwrap(); // We then add the parent from the scene as a child of the original child // Hierarchy should look like: // Original Parent <- Original Child <- Scene Parent <- Scene Child AddChild { parent: original_child_entity, child: from_scene_parent_entity, } .apply(&mut world); // We then reload the scene to make sure that from_scene_parent_entity's parent component // isn't updated with the entity map, since this component isn't defined in the scene. // With bevy_hierarchy, this can cause serious errors and malformed hierarchies. scene.write_to_world(&mut world, &mut entity_map).unwrap(); assert_eq!( original_parent_entity, world .get_entity(original_child_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( original_child_entity, world .get_entity(from_scene_parent_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( from_scene_parent_entity, world .get_entity(from_scene_child_entity) .unwrap() .get::<Parent>() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") .get(), "something is wrong with this test or the code reloading scenes since the relationship between scene entities is broken" ); } }
use bevy_hierarchy::{AddChild, Parent}; use bevy_utils::HashMap;
random_line_split
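The `random_line_split` label above marks rows whose masked middle is cut at an arbitrary line boundary: concatenating the row's prefix, middle, and suffix fields reproduces the original `dynamic_scene.rs` verbatim. A minimal reassembly sketch (the helper name is illustrative, not part of the dataset):

```rust
/// Rebuild the original source file from one FIM row: the three
/// arguments are the row's `prefix`, `middle`, and `suffix` fields.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

fn main() {
    // Toy check on a miniature "file".
    let file = reassemble("fn main() { ", "println!(\"hi\");", " }");
    assert_eq!(file, "fn main() { println!(\"hi\"); }");
}
```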
dynamic_scene.rs
use std::any::TypeId; use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use anyhow::Result; use bevy_ecs::{ entity::Entity, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities}, world::World, }; use bevy_reflect::{Reflect, TypePath, TypeRegistryArc, TypeUuid}; use bevy_utils::HashMap; #[cfg(feature = "serialize")] use crate::serde::SceneSerializer; use bevy_ecs::reflect::ReflectResource; #[cfg(feature = "serialize")] use serde::Serialize; /// A collection of serializable resources and dynamic entities. /// /// Each dynamic entity in the collection contains its own run-time defined set of components. /// To spawn a dynamic scene, you can use either: /// * [`SceneSpawner::spawn_dynamic`](crate::SceneSpawner::spawn_dynamic) /// * adding the [`DynamicSceneBundle`](crate::DynamicSceneBundle) to an entity /// * adding the [`Handle<DynamicScene>`](bevy_asset::Handle) to an entity (the scene will only be /// visible if the entity already has [`Transform`](bevy_transform::components::Transform) and /// [`GlobalTransform`](bevy_transform::components::GlobalTransform) components) #[derive(Default, TypeUuid, TypePath)] #[uuid = "749479b1-fb8c-4ff8-a775-623aa76014f5"] pub struct DynamicScene { pub resources: Vec<Box<dyn Reflect>>, pub entities: Vec<DynamicEntity>, } /// A reflection-powered serializable representation of an entity and its components. pub struct DynamicEntity { /// The identifier of the entity, unique within a scene (and the world it may have been generated from). /// /// Components that reference this entity must consistently use this identifier. pub entity: Entity, /// A vector of boxed components that belong to the given entity and /// implement the [`Reflect`] trait. pub components: Vec<Box<dyn Reflect>>, } impl DynamicScene { /// Create a new dynamic scene from a given scene. pub fn from_scene(scene: &Scene) -> Self { Self::from_world(&scene.world) } /// Create a new dynamic scene from a given world. pub fn from_world(world: &World) -> Self { let mut builder = DynamicSceneBuilder::from_world(world); builder.extract_entities(world.iter_entities().map(|entity| entity.id())); builder.extract_resources(); builder.build() } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the provided [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) or [`Resource`](bevy_ecs::prelude::Resource) trait. pub fn write_to_world_with( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, type_registry: &AppTypeRegistry, ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); for resource in &self.resources { let registration = type_registry .get_with_name(resource.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: resource.type_name().to_string(), })?; let reflect_resource = registration.data::<ReflectResource>().ok_or_else(|| { SceneSpawnError::UnregisteredResource { type_name: resource.type_name().to_string(), } })?; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource reflect_resource.apply_or_insert(world, &**resource); } // For each component types that reference other entities, we keep track // of which entities in the scene use that component. 
// This is so we can update the scene-internal references to references // of the actual entities in the world. let mut scene_mappings: HashMap<TypeId, Vec<Entity>> = HashMap::default(); for scene_entity in &self.entities { // Fetch the entity with the given entity id from the `entity_map` // or spawn a new entity with a transiently unique id if there is // no corresponding entry. let entity = *entity_map .entry(scene_entity.entity) .or_insert_with(|| world.spawn_empty().id()); let entity_mut = &mut world.entity_mut(entity); // Apply/ add each component to the given entity. for component in &scene_entity.components { let registration = type_registry .get_with_name(component.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: component.type_name().to_string(), })?; let reflect_component = registration.data::<ReflectComponent>().ok_or_else(|| { SceneSpawnError::UnregisteredComponent { type_name: component.type_name().to_string(), } })?; // If this component references entities in the scene, track it // so we can update it to the entity in the world. if registration.data::<ReflectMapEntities>().is_some() { scene_mappings .entry(registration.type_id()) .or_insert(Vec::new()) .push(entity); } // If the entity already has the given component attached, // just apply the (possibly) new value, otherwise add the // component to the entity. reflect_component.apply_or_insert(entity_mut, &**component); } } // Updates references to entities in the scene to entities in the world for (type_id, entities) in scene_mappings.into_iter() { let registration = type_registry.get(type_id).expect( "we should be getting TypeId from this TypeRegistration in the first place", ); if let Some(map_entities_reflect) = registration.data::<ReflectMapEntities>()
} Ok(()) } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the world's [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) trait. pub fn write_to_world( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, ) -> Result<(), SceneSpawnError> { let registry = world.resource::<AppTypeRegistry>().clone(); self.write_to_world_with(world, entity_map, &registry) } // TODO: move to AssetSaver when it is implemented /// Serialize this dynamic scene into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron(&self, registry: &TypeRegistryArc) -> Result<String, ron::Error> { serialize_ron(SceneSerializer::new(self, registry)) } } /// Serialize a given Rust data structure into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron<S>(serialize: S) -> Result<String, ron::Error> where S: Serialize, { let pretty_config = ron::ser::PrettyConfig::default() .indentor(" ".to_string()) .new_line("\n".to_string()); ron::ser::to_string_pretty(&serialize, pretty_config) } #[cfg(test)] mod tests { use bevy_ecs::{reflect::AppTypeRegistry, system::Command, world::World}; use bevy_hierarchy::{AddChild, Parent}; use bevy_utils::HashMap; use crate::dynamic_scene_builder::DynamicSceneBuilder; #[test] fn components_not_defined_in_scene_should_not_be_affected_by_scene_entity_map() { // Testing that scene reloading applies EntityMap correctly to MapEntities components. // First, we create a simple world with a parent and a child relationship let mut world = World::new(); world.init_resource::<AppTypeRegistry>(); world .resource_mut::<AppTypeRegistry>() .write() .register::<Parent>(); let original_parent_entity = world.spawn_empty().id(); let original_child_entity = world.spawn_empty().id(); AddChild { parent: original_parent_entity, child: original_child_entity, } .apply(&mut world); // We then write this relationship to a new scene, and then write that scene back to the // world to create another parent and child relationship let mut scene_builder = DynamicSceneBuilder::from_world(&world); scene_builder.extract_entity(original_parent_entity); scene_builder.extract_entity(original_child_entity); let scene = scene_builder.build(); let mut entity_map = HashMap::default(); scene.write_to_world(&mut world, &mut entity_map).unwrap(); let &from_scene_parent_entity = entity_map.get(&original_parent_entity).unwrap(); let &from_scene_child_entity = entity_map.get(&original_child_entity).unwrap(); // We then add the parent from the scene as a child of the original child // Hierarchy should look like: // Original Parent <- Original Child <- Scene Parent <- Scene Child AddChild { parent: original_child_entity, child: from_scene_parent_entity, } .apply(&mut world); // We then reload the scene to make sure that from_scene_parent_entity's parent component // isn't updated with the entity map, since this component isn't defined in the scene. // With bevy_hierarchy, this can cause serious errors and malformed hierarchies. 
scene.write_to_world(&mut world, &mut entity_map).unwrap(); assert_eq!( original_parent_entity, world .get_entity(original_child_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( original_child_entity, world .get_entity(from_scene_parent_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( from_scene_parent_entity, world .get_entity(from_scene_child_entity) .unwrap() .get::<Parent>() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") .get(), "something is wrong with this test or the code reloading scenes since the relationship between scene entities is broken" ); } }
{ map_entities_reflect.map_entities(world, entity_map, &entities); }
conditional_block
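In a `conditional_block` row the masked middle is the body of a branch — here, the `if let` arm that remaps scene-internal entity ids via `ReflectMapEntities`. For context, a hedged usage sketch of the API that surrounds it, mirroring the calls the test module makes (the `apply_scene` helper and the external `bevy_scene` crate path are assumptions, not part of the file):

```rust
use bevy_ecs::{entity::Entity, world::World};
use bevy_scene::DynamicScene;
use bevy_utils::HashMap;

/// Write a scene into a world and return the scene-id -> world-id map,
/// as the test module below does with `write_to_world`.
fn apply_scene(scene: &DynamicScene, world: &mut World) -> HashMap<Entity, Entity> {
    let mut entity_map = HashMap::default();
    scene
        .write_to_world(world, &mut entity_map)
        .expect("every type in the scene must be registered in AppTypeRegistry");
    entity_map
}
```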
dynamic_scene.rs
use std::any::TypeId; use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use anyhow::Result; use bevy_ecs::{ entity::Entity, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities}, world::World, }; use bevy_reflect::{Reflect, TypePath, TypeRegistryArc, TypeUuid}; use bevy_utils::HashMap; #[cfg(feature = "serialize")] use crate::serde::SceneSerializer; use bevy_ecs::reflect::ReflectResource; #[cfg(feature = "serialize")] use serde::Serialize; /// A collection of serializable resources and dynamic entities. /// /// Each dynamic entity in the collection contains its own run-time defined set of components. /// To spawn a dynamic scene, you can use either: /// * [`SceneSpawner::spawn_dynamic`](crate::SceneSpawner::spawn_dynamic) /// * adding the [`DynamicSceneBundle`](crate::DynamicSceneBundle) to an entity /// * adding the [`Handle<DynamicScene>`](bevy_asset::Handle) to an entity (the scene will only be /// visible if the entity already has [`Transform`](bevy_transform::components::Transform) and /// [`GlobalTransform`](bevy_transform::components::GlobalTransform) components) #[derive(Default, TypeUuid, TypePath)] #[uuid = "749479b1-fb8c-4ff8-a775-623aa76014f5"] pub struct DynamicScene { pub resources: Vec<Box<dyn Reflect>>, pub entities: Vec<DynamicEntity>, } /// A reflection-powered serializable representation of an entity and its components. pub struct DynamicEntity { /// The identifier of the entity, unique within a scene (and the world it may have been generated from). /// /// Components that reference this entity must consistently use this identifier. pub entity: Entity, /// A vector of boxed components that belong to the given entity and /// implement the [`Reflect`] trait. pub components: Vec<Box<dyn Reflect>>, } impl DynamicScene { /// Create a new dynamic scene from a given scene. pub fn from_scene(scene: &Scene) -> Self { Self::from_world(&scene.world) } /// Create a new dynamic scene from a given world. pub fn from_world(world: &World) -> Self { let mut builder = DynamicSceneBuilder::from_world(world); builder.extract_entities(world.iter_entities().map(|entity| entity.id())); builder.extract_resources(); builder.build() } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the provided [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) or [`Resource`](bevy_ecs::prelude::Resource) trait. pub fn write_to_world_with( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, type_registry: &AppTypeRegistry, ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); for resource in &self.resources { let registration = type_registry .get_with_name(resource.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: resource.type_name().to_string(), })?; let reflect_resource = registration.data::<ReflectResource>().ok_or_else(|| { SceneSpawnError::UnregisteredResource { type_name: resource.type_name().to_string(), } })?; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource reflect_resource.apply_or_insert(world, &**resource); } // For each component types that reference other entities, we keep track // of which entities in the scene use that component. 
// This is so we can update the scene-internal references to references // of the actual entities in the world. let mut scene_mappings: HashMap<TypeId, Vec<Entity>> = HashMap::default(); for scene_entity in &self.entities { // Fetch the entity with the given entity id from the `entity_map` // or spawn a new entity with a transiently unique id if there is // no corresponding entry. let entity = *entity_map .entry(scene_entity.entity) .or_insert_with(|| world.spawn_empty().id()); let entity_mut = &mut world.entity_mut(entity); // Apply/ add each component to the given entity. for component in &scene_entity.components { let registration = type_registry .get_with_name(component.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: component.type_name().to_string(), })?; let reflect_component = registration.data::<ReflectComponent>().ok_or_else(|| { SceneSpawnError::UnregisteredComponent { type_name: component.type_name().to_string(), } })?; // If this component references entities in the scene, track it // so we can update it to the entity in the world. if registration.data::<ReflectMapEntities>().is_some() { scene_mappings .entry(registration.type_id()) .or_insert(Vec::new()) .push(entity); } // If the entity already has the given component attached, // just apply the (possibly) new value, otherwise add the // component to the entity. reflect_component.apply_or_insert(entity_mut, &**component); } } // Updates references to entities in the scene to entities in the world for (type_id, entities) in scene_mappings.into_iter() { let registration = type_registry.get(type_id).expect( "we should be getting TypeId from this TypeRegistration in the first place", ); if let Some(map_entities_reflect) = registration.data::<ReflectMapEntities>() { map_entities_reflect.map_entities(world, entity_map, &entities); } } Ok(()) } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the world's [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) trait. pub fn write_to_world( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, ) -> Result<(), SceneSpawnError> { let registry = world.resource::<AppTypeRegistry>().clone(); self.write_to_world_with(world, entity_map, &registry) } // TODO: move to AssetSaver when it is implemented /// Serialize this dynamic scene into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron(&self, registry: &TypeRegistryArc) -> Result<String, ron::Error> { serialize_ron(SceneSerializer::new(self, registry)) } } /// Serialize a given Rust data structure into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron<S>(serialize: S) -> Result<String, ron::Error> where S: Serialize,
#[cfg(test)] mod tests { use bevy_ecs::{reflect::AppTypeRegistry, system::Command, world::World}; use bevy_hierarchy::{AddChild, Parent}; use bevy_utils::HashMap; use crate::dynamic_scene_builder::DynamicSceneBuilder; #[test] fn components_not_defined_in_scene_should_not_be_affected_by_scene_entity_map() { // Testing that scene reloading applies EntityMap correctly to MapEntities components. // First, we create a simple world with a parent and a child relationship let mut world = World::new(); world.init_resource::<AppTypeRegistry>(); world .resource_mut::<AppTypeRegistry>() .write() .register::<Parent>(); let original_parent_entity = world.spawn_empty().id(); let original_child_entity = world.spawn_empty().id(); AddChild { parent: original_parent_entity, child: original_child_entity, } .apply(&mut world); // We then write this relationship to a new scene, and then write that scene back to the // world to create another parent and child relationship let mut scene_builder = DynamicSceneBuilder::from_world(&world); scene_builder.extract_entity(original_parent_entity); scene_builder.extract_entity(original_child_entity); let scene = scene_builder.build(); let mut entity_map = HashMap::default(); scene.write_to_world(&mut world, &mut entity_map).unwrap(); let &from_scene_parent_entity = entity_map.get(&original_parent_entity).unwrap(); let &from_scene_child_entity = entity_map.get(&original_child_entity).unwrap(); // We then add the parent from the scene as a child of the original child // Hierarchy should look like: // Original Parent <- Original Child <- Scene Parent <- Scene Child AddChild { parent: original_child_entity, child: from_scene_parent_entity, } .apply(&mut world); // We then reload the scene to make sure that from_scene_parent_entity's parent component // isn't updated with the entity map, since this component isn't defined in the scene. // With bevy_hierarchy, this can cause serious errors and malformed hierarchies. scene.write_to_world(&mut world, &mut entity_map).unwrap(); assert_eq!( original_parent_entity, world .get_entity(original_child_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( original_child_entity, world .get_entity(from_scene_parent_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( from_scene_parent_entity, world .get_entity(from_scene_child_entity) .unwrap() .get::<Parent>() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") .get(), "something is wrong with the this test or the code reloading scenes since the relationship between scene entities is broken" ); } }
{ let pretty_config = ron::ser::PrettyConfig::default() .indentor(" ".to_string()) .new_line("\n".to_string()); ron::ser::to_string_pretty(&serialize, pretty_config) }
identifier_body
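This `identifier_body` row masks the whole body of `serialize_ron`. The same `ron` calls work on any `Serialize` type; a small self-contained sketch, where the `Config` struct is invented purely for illustration and a two-space indent stands in for the scene code's indentor:

```rust
use serde::Serialize;

#[derive(Serialize)]
struct Config {
    name: String,
    retries: u32,
}

fn main() -> Result<(), ron::Error> {
    // Same builder calls as serialize_ron: custom indent and "\n" newlines.
    let pretty = ron::ser::PrettyConfig::default()
        .indentor("  ".to_string())
        .new_line("\n".to_string());
    let text = ron::ser::to_string_pretty(&Config { name: "demo".into(), retries: 3 }, pretty)?;
    println!("{text}");
    Ok(())
}
```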
dynamic_scene.rs
use std::any::TypeId; use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use anyhow::Result; use bevy_ecs::{ entity::Entity, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities}, world::World, }; use bevy_reflect::{Reflect, TypePath, TypeRegistryArc, TypeUuid}; use bevy_utils::HashMap; #[cfg(feature = "serialize")] use crate::serde::SceneSerializer; use bevy_ecs::reflect::ReflectResource; #[cfg(feature = "serialize")] use serde::Serialize; /// A collection of serializable resources and dynamic entities. /// /// Each dynamic entity in the collection contains its own run-time defined set of components. /// To spawn a dynamic scene, you can use either: /// * [`SceneSpawner::spawn_dynamic`](crate::SceneSpawner::spawn_dynamic) /// * adding the [`DynamicSceneBundle`](crate::DynamicSceneBundle) to an entity /// * adding the [`Handle<DynamicScene>`](bevy_asset::Handle) to an entity (the scene will only be /// visible if the entity already has [`Transform`](bevy_transform::components::Transform) and /// [`GlobalTransform`](bevy_transform::components::GlobalTransform) components) #[derive(Default, TypeUuid, TypePath)] #[uuid = "749479b1-fb8c-4ff8-a775-623aa76014f5"] pub struct DynamicScene { pub resources: Vec<Box<dyn Reflect>>, pub entities: Vec<DynamicEntity>, } /// A reflection-powered serializable representation of an entity and its components. pub struct
{ /// The identifier of the entity, unique within a scene (and the world it may have been generated from). /// /// Components that reference this entity must consistently use this identifier. pub entity: Entity, /// A vector of boxed components that belong to the given entity and /// implement the [`Reflect`] trait. pub components: Vec<Box<dyn Reflect>>, } impl DynamicScene { /// Create a new dynamic scene from a given scene. pub fn from_scene(scene: &Scene) -> Self { Self::from_world(&scene.world) } /// Create a new dynamic scene from a given world. pub fn from_world(world: &World) -> Self { let mut builder = DynamicSceneBuilder::from_world(world); builder.extract_entities(world.iter_entities().map(|entity| entity.id())); builder.extract_resources(); builder.build() } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the provided [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) or [`Resource`](bevy_ecs::prelude::Resource) trait. pub fn write_to_world_with( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, type_registry: &AppTypeRegistry, ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); for resource in &self.resources { let registration = type_registry .get_with_name(resource.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: resource.type_name().to_string(), })?; let reflect_resource = registration.data::<ReflectResource>().ok_or_else(|| { SceneSpawnError::UnregisteredResource { type_name: resource.type_name().to_string(), } })?; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource reflect_resource.apply_or_insert(world, &**resource); } // For each component types that reference other entities, we keep track // of which entities in the scene use that component. // This is so we can update the scene-internal references to references // of the actual entities in the world. let mut scene_mappings: HashMap<TypeId, Vec<Entity>> = HashMap::default(); for scene_entity in &self.entities { // Fetch the entity with the given entity id from the `entity_map` // or spawn a new entity with a transiently unique id if there is // no corresponding entry. let entity = *entity_map .entry(scene_entity.entity) .or_insert_with(|| world.spawn_empty().id()); let entity_mut = &mut world.entity_mut(entity); // Apply/ add each component to the given entity. for component in &scene_entity.components { let registration = type_registry .get_with_name(component.type_name()) .ok_or_else(|| SceneSpawnError::UnregisteredType { type_name: component.type_name().to_string(), })?; let reflect_component = registration.data::<ReflectComponent>().ok_or_else(|| { SceneSpawnError::UnregisteredComponent { type_name: component.type_name().to_string(), } })?; // If this component references entities in the scene, track it // so we can update it to the entity in the world. if registration.data::<ReflectMapEntities>().is_some() { scene_mappings .entry(registration.type_id()) .or_insert(Vec::new()) .push(entity); } // If the entity already has the given component attached, // just apply the (possibly) new value, otherwise add the // component to the entity. 
reflect_component.apply_or_insert(entity_mut, &**component); } } // Updates references to entities in the scene to entities in the world for (type_id, entities) in scene_mappings.into_iter() { let registration = type_registry.get(type_id).expect( "we should be getting TypeId from this TypeRegistration in the first place", ); if let Some(map_entities_reflect) = registration.data::<ReflectMapEntities>() { map_entities_reflect.map_entities(world, entity_map, &entities); } } Ok(()) } /// Write the resources, the dynamic entities, and their corresponding components to the given world. /// /// This method will return a [`SceneSpawnError`] if a type either is not registered /// in the world's [`AppTypeRegistry`] resource, or doesn't reflect the /// [`Component`](bevy_ecs::component::Component) trait. pub fn write_to_world( &self, world: &mut World, entity_map: &mut HashMap<Entity, Entity>, ) -> Result<(), SceneSpawnError> { let registry = world.resource::<AppTypeRegistry>().clone(); self.write_to_world_with(world, entity_map, &registry) } // TODO: move to AssetSaver when it is implemented /// Serialize this dynamic scene into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron(&self, registry: &TypeRegistryArc) -> Result<String, ron::Error> { serialize_ron(SceneSerializer::new(self, registry)) } } /// Serialize a given Rust data structure into rust object notation (ron). #[cfg(feature = "serialize")] pub fn serialize_ron<S>(serialize: S) -> Result<String, ron::Error> where S: Serialize, { let pretty_config = ron::ser::PrettyConfig::default() .indentor(" ".to_string()) .new_line("\n".to_string()); ron::ser::to_string_pretty(&serialize, pretty_config) } #[cfg(test)] mod tests { use bevy_ecs::{reflect::AppTypeRegistry, system::Command, world::World}; use bevy_hierarchy::{AddChild, Parent}; use bevy_utils::HashMap; use crate::dynamic_scene_builder::DynamicSceneBuilder; #[test] fn components_not_defined_in_scene_should_not_be_affected_by_scene_entity_map() { // Testing that scene reloading applies EntityMap correctly to MapEntities components. 
// First, we create a simple world with a parent and a child relationship let mut world = World::new(); world.init_resource::<AppTypeRegistry>(); world .resource_mut::<AppTypeRegistry>() .write() .register::<Parent>(); let original_parent_entity = world.spawn_empty().id(); let original_child_entity = world.spawn_empty().id(); AddChild { parent: original_parent_entity, child: original_child_entity, } .apply(&mut world); // We then write this relationship to a new scene, and then write that scene back to the // world to create another parent and child relationship let mut scene_builder = DynamicSceneBuilder::from_world(&world); scene_builder.extract_entity(original_parent_entity); scene_builder.extract_entity(original_child_entity); let scene = scene_builder.build(); let mut entity_map = HashMap::default(); scene.write_to_world(&mut world, &mut entity_map).unwrap(); let &from_scene_parent_entity = entity_map.get(&original_parent_entity).unwrap(); let &from_scene_child_entity = entity_map.get(&original_child_entity).unwrap(); // We then add the parent from the scene as a child of the original child // Hierarchy should look like: // Original Parent <- Original Child <- Scene Parent <- Scene Child AddChild { parent: original_child_entity, child: from_scene_parent_entity, } .apply(&mut world); // We then reload the scene to make sure that from_scene_parent_entity's parent component // isn't updated with the entity map, since this component isn't defined in the scene. // With bevy_hierarchy, this can cause serious errors and malformed hierarchies. scene.write_to_world(&mut world, &mut entity_map).unwrap(); assert_eq!( original_parent_entity, world .get_entity(original_child_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( original_child_entity, world .get_entity(from_scene_parent_entity) .unwrap() .get::<Parent>() .unwrap() .get(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( from_scene_parent_entity, world .get_entity(from_scene_child_entity) .unwrap() .get::<Parent>() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") .get(), "something is wrong with the this test or the code reloading scenes since the relationship between scene entities is broken" ); } }
DynamicEntity
identifier_name
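Here the masked middle is only the identifier `DynamicEntity`. Hand-constructing one is rarely needed, since `DynamicSceneBuilder` produces them during extraction, but a hedged sketch shows the struct's shape (the `bevy_scene` path and the `empty_dynamic_entity` helper are assumptions; `Entity::from_raw` merely fabricates a scene-local id for illustration):

```rust
use bevy_ecs::entity::Entity;
use bevy_reflect::Reflect;
use bevy_scene::DynamicEntity;

/// Build an empty DynamicEntity by hand; components that reference this
/// entity would have to use the same scene-local id.
fn empty_dynamic_entity() -> DynamicEntity {
    DynamicEntity {
        entity: Entity::from_raw(0),
        components: Vec::<Box<dyn Reflect>>::new(),
    }
}
```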
bank.rs
#[macro_use] extern crate clap; extern crate rand; extern crate distributary; use std::sync; use std::thread; use std::time; use std::collections::HashMap; use distributary::{Blender, Base, Aggregation, JoinBuilder, Datas, DataType, Token, Mutator}; use rand::Rng; extern crate hdrsample; use hdrsample::Histogram; #[allow(dead_code)] type Put = Box<Fn(Vec<DataType>) + Send + 'static>; type TxPut = Box<Fn(Vec<DataType>, Token) -> Result<i64, ()> + Send + 'static>; #[allow(dead_code)] type Get = Box<Fn(&DataType) -> Result<Datas, ()> + Send + Sync>; type TxGet = Box<Fn(&DataType) -> Result<(Datas, Token), ()> + Send + Sync>; const NANOS_PER_SEC: u64 = 1_000_000_000; macro_rules! dur_to_ns { ($d:expr) => {{ let d = $d; d.as_secs() * NANOS_PER_SEC + d.subsec_nanos() as u64 }} } #[cfg_attr(rustfmt, rustfmt_skip)] const BENCH_USAGE: &'static str = "\ EXAMPLES: bank --avg"; pub struct Bank { transfers: Vec<Mutator>, balances: sync::Arc<Option<TxGet>>, migrate: Box<FnMut()>, } pub fn setup(num_putters: usize) -> Box<Bank> { // set up graph let mut g = Blender::new(); let transfers; let credits; let debits; let balances; let (_, balancesq) = { // migrate let mut mig = g.start_migration(); // add transfers base table transfers = mig.add_ingredient("transfers", &["src_acct", "dst_acct", "amount"], Base::default()); // add all debits debits = mig.add_ingredient("debits", &["acct_id", "total"], Aggregation::SUM.over(transfers, 2, &[0])); // add all credits credits = mig.add_ingredient("credits", &["acct_id", "total"], Aggregation::SUM.over(transfers, 2, &[1])); // add join of credits and debits; this is a hack as we don't currently have multi-parent // aggregations or arithmetic on columns. let j2 = JoinBuilder::new(vec![(credits, 0), (credits, 1), (debits, 1)]) .from(credits, vec![1, 0]) .join(debits, vec![1, 0]); balances = mig.add_ingredient("balances", &["acct_id", "credit", "debit"], j2); let balancesq = Some(mig.transactional_maintain(balances, 0)); let d = mig.add_domain(); mig.assign_domain(transfers, d); mig.assign_domain(credits, d); mig.assign_domain(debits, d); mig.assign_domain(balances, d); // start processing (mig.commit(), balancesq) }; Box::new(Bank { transfers: (0..num_putters) .into_iter() .map(|_| g.get_mutator(transfers)) .collect::<Vec<_>>(), balances: sync::Arc::new(balancesq), migrate: Box::new(move || { let mut mig = g.start_migration(); let identity = mig.add_ingredient("identity", &["acct_id", "credit", "debit"], distributary::Identity::new(balances)); let _ = mig.transactional_maintain(identity, 0); let _ = mig.commit(); }), }) } impl Bank { fn getter(&mut self) -> Box<Getter> { Box::new(self.balances.clone()) } fn putter(&mut self) -> Box<Putter> { let m = self.transfers.pop().unwrap(); let p: TxPut = Box::new(move |u: Vec<DataType>, t: Token| m.transactional_put(u, t)); Box::new(p) } } pub trait Putter: Send { fn transfer<'a>(&'a mut self) -> Box<FnMut(i64, i64, i64, Token) -> Result<i64, ()> + 'a>; } impl Putter for TxPut { fn transfer<'a>(&'a mut self) -> Box<FnMut(i64, i64, i64, Token) -> Result<i64, ()> + 'a> { Box::new(move |src, dst, amount, token| { self(vec![src.into(), dst.into(), amount.into()], token.into()) }) } } pub trait Getter: Send { fn get<'a>(&'a self) -> Box<FnMut(i64) -> Result<Option<(i64, Token)>, ()> + 'a>; } impl Getter for sync::Arc<Option<TxGet>> { fn get<'a>(&'a self) -> Box<FnMut(i64) -> Result<Option<(i64, Token)>, ()> + 'a> { Box::new(move |id| { if let Some(ref g) = *self.as_ref() { g(&id.into()).map(|(res, token)| { assert_eq!(res.len(), 1);
res.into_iter().next().map(|row| { // we only care about the first result let mut row = row.into_iter(); let _: i64 = row.next().unwrap().into(); let credit: i64 = row.next().unwrap().into(); let debit: i64 = row.next().unwrap().into(); (credit - debit, token) }) }) } else { use std::time::Duration; use std::thread; // avoid spinning thread::sleep(Duration::from_secs(1)); Err(()) } }) } } fn populate(naccounts: i64, transfers_put: &mut Box<Putter>)
fn client(i: usize, mut transfers_put: Box<Putter>, balances_get: Box<Getter>, naccounts: i64, start: time::Instant, runtime: time::Duration, verbose: bool, cdf: bool, audit: bool, transactions: &mut Vec<(i64, i64, i64)>) -> Vec<f64> { let mut count = 0; let mut committed = 0; let mut aborted = 0; let mut samples = Histogram::<u64>::new_with_bounds(1, 100000, 3).unwrap(); let mut last_reported = start; let mut throughputs = Vec::new(); let mut t_rng = rand::thread_rng(); let mut sample = || t_rng.gen_range(1, naccounts); let mut sample_pair = || -> (_, _) { let dst_acct_rnd_id = sample(); assert!(dst_acct_rnd_id > 0); let mut src_acct_rnd_id = sample(); while src_acct_rnd_id == dst_acct_rnd_id { src_acct_rnd_id = sample(); } assert!(src_acct_rnd_id > 0); (src_acct_rnd_id, dst_acct_rnd_id) }; { let mut get = balances_get.get(); let mut put = transfers_put.transfer(); while start.elapsed() < runtime { let pair = sample_pair(); let (balance, token) = get(pair.0).unwrap().unwrap(); if verbose { println!("t{} read {}: {} @ {:#?} (for {})", i, pair.0, balance, token, pair.1); } // try to make both transfers { let mut do_tx = |src, dst, amt, tkn| { let mut count_result = |res| match res { Ok(ts) => { if verbose { println!("commit @ {}", ts); } if audit { transactions.push((src, dst, amt)); } committed += 1 } Err(_) => { if verbose { println!("abort"); } aborted += 1 } }; if verbose { println!("trying {} -> {} of {}", src, dst, amt); } if cdf { let t = time::Instant::now(); count_result(put(src, dst, amt, tkn)); let t = (dur_to_ns!(t.elapsed()) / 1000) as i64; if samples.record(t).is_err() { println!("failed to record slow put ({}ns)", t); } } else { count_result(put(src, dst, amt, tkn)); } count += 1; }; if pair.0 != 0 { assert!(balance >= 0, format!("{} balance is {}", pair.0, balance)); } if balance >= 100 { do_tx(pair.0, pair.1, 100, token); } } // check if we should report if last_reported.elapsed() > time::Duration::from_secs(1) { let ts = last_reported.elapsed(); let throughput = count as f64 / (ts.as_secs() as f64 + ts.subsec_nanos() as f64 / 1_000_000_000f64); let commit_rate = committed as f64 / count as f64; let abort_rate = aborted as f64 / count as f64; println!("{:?} PUT: {:.2} {:.2} {:.2}", dur_to_ns!(start.elapsed()), throughput, commit_rate, abort_rate); throughputs.push(throughput); last_reported = time::Instant::now(); count = 0; committed = 0; aborted = 0; } } if audit { let mut target_balances = HashMap::new(); for i in 0..naccounts { target_balances.insert(i as i64, 0); } for i in 0i64..(naccounts as i64) { *target_balances.get_mut(&0).unwrap() -= 999; *target_balances.get_mut(&i).unwrap() += 999; } for &mut (src, dst, amt) in transactions { *target_balances.get_mut(&src).unwrap() -= amt; *target_balances.get_mut(&dst).unwrap() += amt; } for (account, balance) in target_balances { assert_eq!(get(account).unwrap().unwrap().0, balance); } println!("Audit found no irregularities"); } } if cdf { for (v, p, _, _) in samples.iter_percentiles(1) { println!("percentile PUT {:.2} {:.2}", v, p); } } throughputs } fn main() { use clap::{Arg, App}; let args = App::new("bank") .version("0.1") .about("Benchmarks Soup transactions and reports abort rate.") .arg(Arg::with_name("avg") .long("avg") .takes_value(false) .help("compute average throughput at the end of benchmark")) .arg(Arg::with_name("cdf") .long("cdf") .takes_value(false) .help("produce a CDF of recorded latencies for each client at the end")) .arg(Arg::with_name("naccounts") .short("a") .long("accounts") .value_name("N")
.default_value("5") .help("Number of bank accounts to prepopulate the database with")) .arg(Arg::with_name("runtime") .short("r") .long("runtime") .value_name("N") .default_value("60") .help("Benchmark runtime in seconds")) .arg(Arg::with_name("migrate") .short("m") .long("migrate") .value_name("M") .help("Perform a migration after this many seconds") .conflicts_with("stage")) .arg(Arg::with_name("threads") .short("t") .long("threads") .value_name("T") .default_value("2") .help("Number of client threads")) .arg(Arg::with_name("verbose") .short("v") .long("verbose") .takes_value(false) .help("Verbose (debugging) output")) .arg(Arg::with_name("audit") .short("A") .long("audit") .takes_value(false) .help("Audit results after benchmark completes")) .after_help(BENCH_USAGE) .get_matches(); let avg = args.is_present("avg"); let cdf = args.is_present("cdf"); let runtime = time::Duration::from_secs(value_t_or_exit!(args, "runtime", u64)); let migrate_after = args.value_of("migrate") .map(|_| value_t_or_exit!(args, "migrate", u64)) .map(time::Duration::from_secs); let naccounts = value_t_or_exit!(args, "naccounts", i64); let nthreads = value_t_or_exit!(args, "threads", usize); let verbose = args.is_present("verbose"); let audit = args.is_present("audit"); if let Some(ref migrate_after) = migrate_after { assert!(migrate_after < &runtime); } // setup db println!("Attempting to set up bank"); let mut bank = setup(nthreads); // let system settle // thread::sleep(time::Duration::new(1, 0)); let start = time::Instant::now(); // benchmark let clients = (0..nthreads) .into_iter() .map(|i| { Some({ let mut transfers_put = bank.putter(); let balances_get: Box<Getter> = bank.getter(); let mut transactions = vec![]; if i == 0 { populate(naccounts, &mut transfers_put); } thread::Builder::new() .name(format!("bank{}", i)) .spawn(move || -> Vec<f64> { client(i, transfers_put, balances_get, naccounts, start, runtime, verbose, cdf, audit, &mut transactions) }) .unwrap() }) }) .collect::<Vec<_>>(); let avg_put_throughput = |th: Vec<f64>| if avg { let sum: f64 = th.iter().sum(); println!("avg PUT: {:.2}", sum / th.len() as f64); }; if let Some(duration) = migrate_after { thread::sleep(duration); println!("----- starting migration -----"); let start = time::Instant::now(); (bank.migrate)(); let duration = start.elapsed(); let length = 1000000000u64 * duration.as_secs() + duration.subsec_nanos() as u64; println!("----- completed migration -----\nElapsed time = {} ms", 1e-6 * (length as f64)); } // clean for c in clients { if let Some(client) = c { match client.join() { Err(e) => panic!(e), Ok(th) => avg_put_throughput(th), } } } }
{ // prepopulate non-transactionally (this is okay because we add no accounts while running the // benchmark) println!("Connected. Setting up {} accounts.", naccounts); { // let accounts_put = bank.accounts.as_ref().unwrap(); let mut money_put = transfers_put.transfer(); for i in 0..naccounts { // accounts_put(vec![DataType::Number(i as i64), format!("user {}", i).into()]); money_put(0, i, 1000, Token::empty()).unwrap(); money_put(i, 0, 1, Token::empty()).unwrap(); } } println!("Done with account creation"); }
identifier_body
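The masked body above prepopulates the accounts non-transactionally before the benchmark starts. Elsewhere in `bank.rs`, the client loop reports a per-interval throughput from an operation count and an elapsed `Duration`; that arithmetic, extracted as a standalone helper for clarity (the function name is illustrative, not the benchmark's own):

```rust
use std::time::Duration;

/// Operations per second over an elapsed interval, matching the
/// arithmetic in bank.rs's once-per-second reporting loop.
fn throughput(ops: u64, elapsed: Duration) -> f64 {
    ops as f64 / (elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1_000_000_000f64)
}

fn main() {
    // 500 ops in a quarter second is 2000 ops/s.
    let t = throughput(500, Duration::from_millis(250));
    assert!((t - 2000.0).abs() < 1e-9);
}
```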
bank.rs
#[macro_use] extern crate clap; extern crate rand; extern crate distributary; use std::sync; use std::thread; use std::time; use std::collections::HashMap; use distributary::{Blender, Base, Aggregation, JoinBuilder, Datas, DataType, Token, Mutator}; use rand::Rng; extern crate hdrsample; use hdrsample::Histogram; #[allow(dead_code)] type Put = Box<Fn(Vec<DataType>) + Send +'static>; type TxPut = Box<Fn(Vec<DataType>, Token) -> Result<i64, ()> + Send +'static>; #[allow(dead_code)] type Get = Box<Fn(&DataType) -> Result<Datas, ()> + Send + Sync>; type TxGet = Box<Fn(&DataType) -> Result<(Datas, Token), ()> + Send + Sync>; const NANOS_PER_SEC: u64 = 1_000_000_000; macro_rules! dur_to_ns { ($d:expr) => {{ let d = $d; d.as_secs() * NANOS_PER_SEC + d.subsec_nanos() as u64 }} } #[cfg_attr(rustfmt, rustfmt_skip)] const BENCH_USAGE: &'static str = "\ EXAMPLES: bank --avg"; pub struct Bank { transfers: Vec<Mutator>, balances: sync::Arc<Option<TxGet>>, migrate: Box<FnMut()>, } pub fn setup(num_putters: usize) -> Box<Bank> { // set up graph let mut g = Blender::new(); let transfers; let credits; let debits; let balances; let (_, balancesq) = { // migrate let mut mig = g.start_migration(); // add transfers base table transfers = mig.add_ingredient("transfers", &["src_acct", "dst_acct", "amount"], Base::default()); // add all debits debits = mig.add_ingredient("debits", &["acct_id", "total"], Aggregation::SUM.over(transfers, 2, &[0])); // add all credits credits = mig.add_ingredient("credits", &["acct_id", "total"], Aggregation::SUM.over(transfers, 2, &[1])); // add join of credits and debits; this is a hack as we don't currently have multi-parent // aggregations or arithmetic on columns. let j2 = JoinBuilder::new(vec![(credits, 0), (credits, 1), (debits, 1)]) .from(credits, vec![1, 0]) .join(debits, vec![1, 0]); balances = mig.add_ingredient("balances", &["acct_id", "credit", "debit"], j2); let balancesq = Some(mig.transactional_maintain(balances, 0)); let d = mig.add_domain(); mig.assign_domain(transfers, d); mig.assign_domain(credits, d); mig.assign_domain(debits, d); mig.assign_domain(balances, d); // start processing (mig.commit(), balancesq) }; Box::new(Bank { transfers: (0..num_putters) .into_iter() .map(|_| g.get_mutator(transfers)) .collect::<Vec<_>>(), balances: sync::Arc::new(balancesq), migrate: Box::new(move || { let mut mig = g.start_migration(); let identity = mig.add_ingredient("identity", &["acct_id", "credit", "debit"], distributary::Identity::new(balances)); let _ = mig.transactional_maintain(identity, 0); let _ = mig.commit(); }), }) } impl Bank { fn getter(&mut self) -> Box<Getter> { Box::new(self.balances.clone()) } fn putter(&mut self) -> Box<Putter> { let m = self.transfers.pop().unwrap(); let p: TxPut = Box::new(move |u: Vec<DataType>, t: Token| m.transactional_put(u, t)); Box::new(p) } } pub trait Putter: Send { fn transfer<'a>(&'a mut self) -> Box<FnMut(i64, i64, i64, Token) -> Result<i64, ()> + 'a>; } impl Putter for TxPut { fn transfer<'a>(&'a mut self) -> Box<FnMut(i64, i64, i64, Token) -> Result<i64, ()> + 'a> { Box::new(move |src, dst, amount, token| { self(vec![src.into(), dst.into(), amount.into()], token.into()) }) } } pub trait Getter: Send { fn get<'a>(&'a self) -> Box<FnMut(i64) -> Result<Option<(i64, Token)>, ()> + 'a>; } impl Getter for sync::Arc<Option<TxGet>> { fn get<'a>(&'a self) -> Box<FnMut(i64) -> Result<Option<(i64, Token)>, ()> + 'a> { Box::new(move |id| { if let Some(ref g) = *self.as_ref() { g(&id.into()).map(|(res, token)| { assert_eq!(res.len(), 1); 
res.into_iter().next().map(|row| { // we only care about the first result let mut row = row.into_iter(); let _: i64 = row.next().unwrap().into(); let credit: i64 = row.next().unwrap().into(); let debit: i64 = row.next().unwrap().into(); (credit - debit, token) }) }) } else { use std::time::Duration; use std::thread; // avoid spinning thread::sleep(Duration::from_secs(1)); Err(()) } }) } } fn
(naccounts: i64, transfers_put: &mut Box<Putter>) { // prepopulate non-transactionally (this is okay because we add no accounts while running the // benchmark) println!("Connected. Setting up {} accounts.", naccounts); { // let accounts_put = bank.accounts.as_ref().unwrap(); let mut money_put = transfers_put.transfer(); for i in 0..naccounts { // accounts_put(vec![DataType::Number(i as i64), format!("user {}", i).into()]); money_put(0, i, 1000, Token::empty()).unwrap(); money_put(i, 0, 1, Token::empty()).unwrap(); } } println!("Done with account creation"); } fn client(i: usize, mut transfers_put: Box<Putter>, balances_get: Box<Getter>, naccounts: i64, start: time::Instant, runtime: time::Duration, verbose: bool, cdf: bool, audit: bool, transactions: &mut Vec<(i64, i64, i64)>) -> Vec<f64> { let mut count = 0; let mut committed = 0; let mut aborted = 0; let mut samples = Histogram::<u64>::new_with_bounds(1, 100000, 3).unwrap(); let mut last_reported = start; let mut throughputs = Vec::new(); let mut t_rng = rand::thread_rng(); let mut sample = || t_rng.gen_range(1, naccounts); let mut sample_pair = || -> (_, _) { let dst_acct_rnd_id = sample(); assert!(dst_acct_rnd_id > 0); let mut src_acct_rnd_id = sample(); while src_acct_rnd_id == dst_acct_rnd_id { src_acct_rnd_id = sample(); } assert!(src_acct_rnd_id > 0); (src_acct_rnd_id, dst_acct_rnd_id) }; { let mut get = balances_get.get(); let mut put = transfers_put.transfer(); while start.elapsed() < runtime { let pair = sample_pair(); let (balance, token) = get(pair.0).unwrap().unwrap(); if verbose { println!("t{} read {}: {} @ {:#?} (for {})", i, pair.0, balance, token, pair.1); } // try to make both transfers { let mut do_tx = |src, dst, amt, tkn| { let mut count_result = |res| match res { Ok(ts) => { if verbose { println!("commit @ {}", ts); } if audit { transactions.push((src, dst, amt)); } committed += 1 } Err(_) => { if verbose { println!("abort"); } aborted += 1 } }; if verbose { println!("trying {} -> {} of {}", src, dst, amt); } if cdf { let t = time::Instant::now(); count_result(put(src, dst, amt, tkn)); let t = (dur_to_ns!(t.elapsed()) / 1000) as i64; if samples.record(t).is_err() { println!("failed to record slow put ({}ns)", t); } } else { count_result(put(src, dst, amt, tkn)); } count += 1; }; if pair.0!= 0 { assert!(balance >= 0, format!("{} balance is {}", pair.0, balance)); } if balance >= 100 { do_tx(pair.0, pair.1, 100, token); } } // check if we should report if last_reported.elapsed() > time::Duration::from_secs(1) { let ts = last_reported.elapsed(); let throughput = count as f64 / (ts.as_secs() as f64 + ts.subsec_nanos() as f64 / 1_000_000_000f64); let commit_rate = committed as f64 / count as f64; let abort_rate = aborted as f64 / count as f64; println!("{:?} PUT: {:.2} {:.2} {:.2}", dur_to_ns!(start.elapsed()), throughput, commit_rate, abort_rate); throughputs.push(throughput); last_reported = time::Instant::now(); count = 0; committed = 0; aborted = 0; } } if audit { let mut target_balances = HashMap::new(); for i in 0..naccounts { target_balances.insert(i as i64, 0); } for i in 0i64..(naccounts as i64) { *target_balances.get_mut(&0).unwrap() -= 999; *target_balances.get_mut(&i).unwrap() += 999; } for &mut (src, dst, amt) in transactions { *target_balances.get_mut(&src).unwrap() -= amt; *target_balances.get_mut(&dst).unwrap() += amt; } for (account, balance) in target_balances { assert_eq!(get(account).unwrap().unwrap().0, balance); } println!("Audit found no irregularities"); } } if cdf { for (v, p, _, _) in 
samples.iter_percentiles(1) { println!("percentile PUT {:.2} {:.2}", v, p); } } throughputs } fn main() { use clap::{Arg, App}; let args = App::new("bank") .version("0.1") .about("Benchmarks Soup transactions and reports abort rate.") .arg(Arg::with_name("avg") .long("avg") .takes_value(false) .help("compute average throughput at the end of benchmark")) .arg(Arg::with_name("cdf") .long("cdf") .takes_value(false) .help("produce a CDF of recorded latencies for each client at the end")) .arg(Arg::with_name("naccounts") .short("a") .long("accounts") .value_name("N") .default_value("5") .help("Number of bank accounts to prepopulate the database with")) .arg(Arg::with_name("runtime") .short("r") .long("runtime") .value_name("N") .default_value("60") .help("Benchmark runtime in seconds")) .arg(Arg::with_name("migrate") .short("m") .long("migrate") .value_name("M") .help("Perform a migration after this many seconds") .conflicts_with("stage")) .arg(Arg::with_name("threads") .short("t") .long("threads") .value_name("T") .default_value("2") .help("Number of client threads")) .arg(Arg::with_name("verbose") .short("v") .long("verbose") .takes_value(false) .help("Verbose (debugging) output")) .arg(Arg::with_name("audit") .short("A") .long("audit") .takes_value(false) .help("Audit results after benchmark completes")) .after_help(BENCH_USAGE) .get_matches(); let avg = args.is_present("avg"); let cdf = args.is_present("cdf"); let runtime = time::Duration::from_secs(value_t_or_exit!(args, "runtime", u64)); let migrate_after = args.value_of("migrate") .map(|_| value_t_or_exit!(args, "migrate", u64)) .map(time::Duration::from_secs); let naccounts = value_t_or_exit!(args, "naccounts", i64); let nthreads = value_t_or_exit!(args, "threads", usize); let verbose = args.is_present("verbose"); let audit = args.is_present("audit"); if let Some(ref migrate_after) = migrate_after { assert!(migrate_after < &runtime); } // setup db println!("Attempting to set up bank"); let mut bank = setup(nthreads); // let system settle // thread::sleep(time::Duration::new(1, 0)); let start = time::Instant::now(); // benchmark let clients = (0..nthreads) .into_iter() .map(|i| { Some({ let mut transfers_put = bank.putter(); let balances_get: Box<Getter> = bank.getter(); let mut transactions = vec![]; if i == 0 { populate(naccounts, &mut transfers_put); } thread::Builder::new() .name(format!("bank{}", i)) .spawn(move || -> Vec<f64> { client(i, transfers_put, balances_get, naccounts, start, runtime, verbose, cdf, audit, &mut transactions) }) .unwrap() }) }) .collect::<Vec<_>>(); let avg_put_throughput = |th: Vec<f64>| if avg { let sum: f64 = th.iter().sum(); println!("avg PUT: {:.2}", sum / th.len() as f64); }; if let Some(duration) = migrate_after { thread::sleep(duration); println!("----- starting migration -----"); let start = time::Instant::now(); (bank.migrate)(); let duration = start.elapsed(); let length = 1000000000u64 * duration.as_secs() + duration.subsec_nanos() as u64; println!("----- completed migration -----\nElapsed time = {} ms", 1e-6 * (length as f64)); } // clean for c in clients { if let Some(client) = c { match client.join() { Err(e) => panic!(e), Ok(th) => avg_put_throughput(th), } } } }
populate
identifier_name
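This row masks only the function name `populate`. The `dur_to_ns!` macro that `bank.rs` defines for its timing output is equivalent to the plain function below (a sketch, not the benchmark's own code):

```rust
use std::time::Duration;

const NANOS_PER_SEC: u64 = 1_000_000_000;

/// Total nanoseconds in a Duration, as computed by dur_to_ns! in bank.rs.
fn dur_to_ns(d: Duration) -> u64 {
    d.as_secs() * NANOS_PER_SEC + d.subsec_nanos() as u64
}

fn main() {
    assert_eq!(dur_to_ns(Duration::new(2, 5)), 2_000_000_005);
}
```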
bank.rs
#[macro_use] extern crate clap; extern crate rand; extern crate distributary; use std::sync; use std::thread; use std::time; use std::collections::HashMap; use distributary::{Blender, Base, Aggregation, JoinBuilder, Datas, DataType, Token, Mutator}; use rand::Rng; extern crate hdrsample; use hdrsample::Histogram; #[allow(dead_code)] type Put = Box<Fn(Vec<DataType>) + Send +'static>; type TxPut = Box<Fn(Vec<DataType>, Token) -> Result<i64, ()> + Send +'static>; #[allow(dead_code)] type Get = Box<Fn(&DataType) -> Result<Datas, ()> + Send + Sync>; type TxGet = Box<Fn(&DataType) -> Result<(Datas, Token), ()> + Send + Sync>; const NANOS_PER_SEC: u64 = 1_000_000_000; macro_rules! dur_to_ns { ($d:expr) => {{ let d = $d; d.as_secs() * NANOS_PER_SEC + d.subsec_nanos() as u64 }} } #[cfg_attr(rustfmt, rustfmt_skip)] const BENCH_USAGE: &'static str = "\ EXAMPLES: bank --avg"; pub struct Bank { transfers: Vec<Mutator>, balances: sync::Arc<Option<TxGet>>, migrate: Box<FnMut()>, } pub fn setup(num_putters: usize) -> Box<Bank> { // set up graph let mut g = Blender::new(); let transfers; let credits; let debits; let balances; let (_, balancesq) = { // migrate let mut mig = g.start_migration(); // add transfers base table transfers = mig.add_ingredient("transfers", &["src_acct", "dst_acct", "amount"], Base::default()); // add all debits debits = mig.add_ingredient("debits", &["acct_id", "total"], Aggregation::SUM.over(transfers, 2, &[0])); // add all credits credits = mig.add_ingredient("credits", &["acct_id", "total"], Aggregation::SUM.over(transfers, 2, &[1])); // add join of credits and debits; this is a hack as we don't currently have multi-parent // aggregations or arithmetic on columns. let j2 = JoinBuilder::new(vec![(credits, 0), (credits, 1), (debits, 1)]) .from(credits, vec![1, 0]) .join(debits, vec![1, 0]); balances = mig.add_ingredient("balances", &["acct_id", "credit", "debit"], j2); let balancesq = Some(mig.transactional_maintain(balances, 0)); let d = mig.add_domain(); mig.assign_domain(transfers, d); mig.assign_domain(credits, d); mig.assign_domain(debits, d); mig.assign_domain(balances, d); // start processing (mig.commit(), balancesq) }; Box::new(Bank { transfers: (0..num_putters) .into_iter() .map(|_| g.get_mutator(transfers)) .collect::<Vec<_>>(), balances: sync::Arc::new(balancesq), migrate: Box::new(move || { let mut mig = g.start_migration(); let identity = mig.add_ingredient("identity", &["acct_id", "credit", "debit"], distributary::Identity::new(balances)); let _ = mig.transactional_maintain(identity, 0); let _ = mig.commit(); }), }) } impl Bank { fn getter(&mut self) -> Box<Getter> { Box::new(self.balances.clone()) } fn putter(&mut self) -> Box<Putter> { let m = self.transfers.pop().unwrap(); let p: TxPut = Box::new(move |u: Vec<DataType>, t: Token| m.transactional_put(u, t)); Box::new(p) } } pub trait Putter: Send { fn transfer<'a>(&'a mut self) -> Box<FnMut(i64, i64, i64, Token) -> Result<i64, ()> + 'a>; } impl Putter for TxPut { fn transfer<'a>(&'a mut self) -> Box<FnMut(i64, i64, i64, Token) -> Result<i64, ()> + 'a> { Box::new(move |src, dst, amount, token| { self(vec![src.into(), dst.into(), amount.into()], token.into()) }) } } pub trait Getter: Send { fn get<'a>(&'a self) -> Box<FnMut(i64) -> Result<Option<(i64, Token)>, ()> + 'a>; } impl Getter for sync::Arc<Option<TxGet>> { fn get<'a>(&'a self) -> Box<FnMut(i64) -> Result<Option<(i64, Token)>, ()> + 'a> { Box::new(move |id| { if let Some(ref g) = *self.as_ref() { g(&id.into()).map(|(res, token)| { assert_eq!(res.len(), 1); 
res.into_iter().next().map(|row| { // we only care about the first result let mut row = row.into_iter(); let _: i64 = row.next().unwrap().into(); let credit: i64 = row.next().unwrap().into(); let debit: i64 = row.next().unwrap().into(); (credit - debit, token) }) }) } else { use std::time::Duration; use std::thread; // avoid spinning thread::sleep(Duration::from_secs(1)); Err(()) } }) } } fn populate(naccounts: i64, transfers_put: &mut Box<Putter>) { // prepopulate non-transactionally (this is okay because we add no accounts while running the // benchmark) println!("Connected. Setting up {} accounts.", naccounts); { // let accounts_put = bank.accounts.as_ref().unwrap(); let mut money_put = transfers_put.transfer(); for i in 0..naccounts { // accounts_put(vec![DataType::Number(i as i64), format!("user {}", i).into()]); money_put(0, i, 1000, Token::empty()).unwrap(); money_put(i, 0, 1, Token::empty()).unwrap(); } } println!("Done with account creation"); } fn client(i: usize, mut transfers_put: Box<Putter>, balances_get: Box<Getter>, naccounts: i64, start: time::Instant, runtime: time::Duration, verbose: bool, cdf: bool, audit: bool, transactions: &mut Vec<(i64, i64, i64)>) -> Vec<f64> { let mut count = 0; let mut committed = 0; let mut aborted = 0; let mut samples = Histogram::<u64>::new_with_bounds(1, 100000, 3).unwrap(); let mut last_reported = start; let mut throughputs = Vec::new(); let mut t_rng = rand::thread_rng(); let mut sample = || t_rng.gen_range(1, naccounts); let mut sample_pair = || -> (_, _) { let dst_acct_rnd_id = sample(); assert!(dst_acct_rnd_id > 0); let mut src_acct_rnd_id = sample(); while src_acct_rnd_id == dst_acct_rnd_id { src_acct_rnd_id = sample(); } assert!(src_acct_rnd_id > 0); (src_acct_rnd_id, dst_acct_rnd_id) }; { let mut get = balances_get.get(); let mut put = transfers_put.transfer(); while start.elapsed() < runtime { let pair = sample_pair(); let (balance, token) = get(pair.0).unwrap().unwrap(); if verbose { println!("t{} read {}: {} @ {:#?} (for {})", i, pair.0, balance, token, pair.1); } // try to make both transfers { let mut do_tx = |src, dst, amt, tkn| { let mut count_result = |res| match res { Ok(ts) => { if verbose { println!("commit @ {}", ts); } if audit { transactions.push((src, dst, amt)); } committed += 1 } Err(_) => { if verbose { println!("abort"); } aborted += 1 } }; if verbose { println!("trying {} -> {} of {}", src, dst, amt); } if cdf { let t = time::Instant::now(); count_result(put(src, dst, amt, tkn)); let t = (dur_to_ns!(t.elapsed()) / 1000) as i64; if samples.record(t).is_err() { println!("failed to record slow put ({}ns)", t); } } else { count_result(put(src, dst, amt, tkn)); } count += 1; }; if pair.0!= 0 { assert!(balance >= 0, format!("{} balance is {}", pair.0, balance)); } if balance >= 100 { do_tx(pair.0, pair.1, 100, token); } } // check if we should report if last_reported.elapsed() > time::Duration::from_secs(1) { let ts = last_reported.elapsed(); let throughput = count as f64 / (ts.as_secs() as f64 + ts.subsec_nanos() as f64 / 1_000_000_000f64); let commit_rate = committed as f64 / count as f64; let abort_rate = aborted as f64 / count as f64; println!("{:?} PUT: {:.2} {:.2} {:.2}", dur_to_ns!(start.elapsed()), throughput, commit_rate, abort_rate); throughputs.push(throughput); last_reported = time::Instant::now(); count = 0; committed = 0; aborted = 0; } } if audit { let mut target_balances = HashMap::new(); for i in 0..naccounts { target_balances.insert(i as i64, 0); } for i in 0i64..(naccounts as i64) { 
*target_balances.get_mut(&0).unwrap() -= 999; *target_balances.get_mut(&i).unwrap() += 999; } for &mut (src, dst, amt) in transactions { *target_balances.get_mut(&src).unwrap() -= amt; *target_balances.get_mut(&dst).unwrap() += amt; } for (account, balance) in target_balances { assert_eq!(get(account).unwrap().unwrap().0, balance); } println!("Audit found no irregularities"); } } if cdf { for (v, p, _, _) in samples.iter_percentiles(1) { println!("percentile PUT {:.2} {:.2}", v, p); } } throughputs } fn main() { use clap::{Arg, App}; let args = App::new("bank") .version("0.1") .about("Benchmarks Soup transactions and reports abort rate.") .arg(Arg::with_name("avg") .long("avg") .takes_value(false) .help("compute average throughput at the end of benchmark")) .arg(Arg::with_name("cdf") .long("cdf") .takes_value(false) .help("produce a CDF of recorded latencies for each client at the end")) .arg(Arg::with_name("naccounts") .short("a") .long("accounts") .value_name("N") .default_value("5") .help("Number of bank accounts to prepopulate the database with")) .arg(Arg::with_name("runtime") .short("r") .long("runtime") .value_name("N") .default_value("60") .help("Benchmark runtime in seconds")) .arg(Arg::with_name("migrate") .short("m") .long("migrate") .value_name("M") .help("Perform a migration after this many seconds") .conflicts_with("stage")) .arg(Arg::with_name("threads") .short("t") .long("threads") .value_name("T") .default_value("2") .help("Number of client threads")) .arg(Arg::with_name("verbose") .short("v") .long("verbose") .takes_value(false) .help("Verbose (debugging) output")) .arg(Arg::with_name("audit") .short("A") .long("audit") .takes_value(false) .help("Audit results after benchmark completes")) .after_help(BENCH_USAGE) .get_matches(); let avg = args.is_present("avg"); let cdf = args.is_present("cdf"); let runtime = time::Duration::from_secs(value_t_or_exit!(args, "runtime", u64)); let migrate_after = args.value_of("migrate") .map(|_| value_t_or_exit!(args, "migrate", u64)) .map(time::Duration::from_secs); let naccounts = value_t_or_exit!(args, "naccounts", i64); let nthreads = value_t_or_exit!(args, "threads", usize); let verbose = args.is_present("verbose"); let audit = args.is_present("audit"); if let Some(ref migrate_after) = migrate_after { assert!(migrate_after < &runtime); } // setup db println!("Attempting to set up bank"); let mut bank = setup(nthreads); // let system settle // thread::sleep(time::Duration::new(1, 0)); let start = time::Instant::now(); // benchmark let clients = (0..nthreads) .into_iter() .map(|i| { Some({ let mut transfers_put = bank.putter(); let balances_get: Box<Getter> = bank.getter(); let mut transactions = vec![]; if i == 0 { populate(naccounts, &mut transfers_put); } thread::Builder::new() .name(format!("bank{}", i)) .spawn(move || -> Vec<f64> { client(i, transfers_put, balances_get, naccounts, start, runtime, verbose, cdf, audit, &mut transactions) }) .unwrap()
            })
        })
        .collect::<Vec<_>>();

    let avg_put_throughput = |th: Vec<f64>| if avg {
        let sum: f64 = th.iter().sum();
        println!("avg PUT: {:.2}", sum / th.len() as f64);
    };

    if let Some(duration) = migrate_after {
        thread::sleep(duration);
        println!("----- starting migration -----");
        let start = time::Instant::now();
        (bank.migrate)();
        let duration = start.elapsed();
        let length = 1000000000u64 * duration.as_secs() + duration.subsec_nanos() as u64;
        println!("----- completed migration -----\nElapsed time = {} ms",
                 1e-6 * (length as f64));
    }

    // clean
    for c in clients {
        if let Some(client) = c {
            match client.join() {
                Err(e) => panic!(e),
                Ok(th) => avg_put_throughput(th),
            }
        }
    }
}
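
// Editor's addition, not part of the original benchmark: a standalone sketch
// of the latency-recording pattern `client` uses when `--cdf` is given. It
// reuses only calls already present in this file (`new_with_bounds`,
// `record`, `iter_percentiles`); the function name and the loop body are
// illustrative placeholders.
#[allow(dead_code)]
fn latency_demo() {
    let mut samples = Histogram::<u64>::new_with_bounds(1, 100000, 3).unwrap();
    for _ in 0..1000 {
        let t = time::Instant::now();
        // ... the operation being timed (e.g. a transactional put) goes here ...
        let t = (dur_to_ns!(t.elapsed()) / 1000) as i64; // elapsed time in us
        if samples.record(t).is_err() {
            println!("failed to record slow put ({}ns)", t);
        }
    }
    // One (value, percentile) pair per step, as printed at the end of `client`.
    for (v, p, _, _) in samples.iter_percentiles(1) {
        println!("percentile PUT {:.2} {:.2}", v, p);
    }
}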
lib.rs
use byteorder::{NativeEndian, ReadBytesExt};
use fftw::array::AlignedVec;
use fftw::plan::*;
use fftw::types::*;
use num_complex::Complex;
use ron::de::from_reader;
use serde::Deserialize;
use std::f64::consts::PI;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;

/// A struct containing the configuration information to run the program, read
/// at runtime from a RON file.
///
/// # Examples
///
/// ```
/// let config = Config {
///     grid1_filename: String::from("/path/to/grid1"),
///     grid2_filename: String::from("/path/to/grid2"),
///     output_filename: String::from("/path/to/output"),
///     ngrid: 2048,
///     boxsize: 160.0,
/// };
/// ```
#[derive(Debug, Deserialize)]
pub struct Config {
    pub grid1_filename: String,
    pub grid2_filename: String,
    pub output_filename: String,
    pub ngrid: u32,
    pub boxsize: f32,
}

impl Config {
    /// Reads the configuration file passed as a command line option at runtime.
    ///
    /// # Examples
    ///
    /// ```
    /// let config = Config::new(env::args()).unwrap();
    /// ```
    pub fn new(mut args: std::env::Args) -> Result<Config, &'static str> {
        args.next();

        // Match command-line argument for configuration filename
        let config_filename = match args.next() {
            Some(arg) => arg,
            None => return Err("Incorrect command-line argument."),
        };

        // Open configuration file
        println!("\nReading configuration file: {}", config_filename);
        let f = match File::open(&config_filename) {
            Ok(file) => file,
            Err(_) => return Err("Unable to open configuration file."),
        };

        // Decode RON format of configuration file
        let config: Config = match from_reader(f) {
            Ok(x) => x,
            Err(_) => return Err("Unable to read configuration from file."),
        };

        // Print configuration
        println!("\ngrid1 path: {}", config.grid1_filename);
        println!("grid2 path: {}", config.grid2_filename);
        println!("output path: {}", config.output_filename);
        println!("ngrid: {} cells on a side", config.ngrid);
        println!("boxsize: {} cMpc/h", config.boxsize);

        Ok(config)
    }
}

/// A struct containing the final output vectors.
#[derive(Debug)]
pub struct Output {
    pub w: Vec<f64>,
    pub pow_spec: Vec<f64>,
    pub deltasqk: Vec<f64>,
    pub iweights: Vec<i64>,
}

impl Output {
    /// Saves the power spectrum to a formatted txt file.
    ///
    /// # Examples
    ///
    /// ```
    /// output.save_result(&config).unwrap();
    /// ```
    pub fn save_result(&self, config: &Config) -> Result<(), &'static str> {
        println!("\nSaving results to: {}", &config.output_filename);

        // Open output file
        let mut f = match File::create(&config.output_filename) {
            Ok(file) => file,
            Err(_) => return Err("Unable to open output file!"),
        };

        match writeln!(f, "# w pow_spec deltasqk iweights") {
            Ok(_) => (),
            Err(err) => {
                eprintln!("{}", err);
                return Err("Unable to save output!")
            },
        }

        let nhalf: usize = (config.ngrid / 2) as usize;
        for n in 0..nhalf {
            match writeln!(
                f,
                "{} {} {} {}",
                self.w[n], self.pow_spec[n], self.deltasqk[n], self.iweights[n]
            ) {
                Ok(_) => (),
                Err(err) => {
                    eprintln!("{}", err);
                    return Err("Unable to save output!")
                },
            }
        }

        Ok(())
    }
}

/// Loads a grid stored at `filename` (in a custom binary format) into an
/// `fftw::array::AlignedVec` object. This custom format stores the 3D grid as
/// a 1D array of values. The data should be stored as deviations from the mean,
/// i.e. delta = (x - mean(x)) / mean(x).
/// /// # Examples /// /// ``` /// let grid1 = load_grid(&config, 1).unwrap(); /// ``` pub fn load_grid(config: &Config, num: usize) -> Result<AlignedVec<c64>, &'static str> { let filename = match num { 1 => &config.grid1_filename, 2 => &config.grid2_filename, _ => return Err("Need to load either grid 1 or 2!"), }; println!("\nOpening grid from file: {}", filename); let ngrid: usize = config.ngrid as usize; // Allocate AlignedVec array to hold grid let ngrid3 = ngrid * ngrid * ngrid; let mut grid = AlignedVec::new(ngrid3); // Open binary file let f = match File::open(filename) { Ok(file) => file, Err(_) => return Err("Unable to open grid file!"), }; let mut buf_reader = BufReader::new(f); // Read in array from binary file for elem in grid.iter_mut() { let cell = match buf_reader.read_f32::<NativeEndian>() { Ok(val) => val, Err(_) => return Err("Problem reading values from file!"), }; *elem = c64::new(f64::from(cell), 0.0); } println!("Successfully read {} cells!", ngrid3); println!("Sanity print:"); grid[0..5].iter() .enumerate() .for_each(|(i, elem)| { println!("grid1[{}] = {:.3e} + {:.3e}i", i, elem.re, elem.im); }); Ok(grid) } /// Performs FFT on grids /// /// # Examples /// /// ``` /// let output: Output = correlate(&config, grid1, grid2).unwrap(); /// ``` pub fn perform_fft( config: &Config, grid1: AlignedVec<c64>, grid2: AlignedVec<c64>, ) -> Result<(AlignedVec<c64>, AlignedVec<c64>), &'static str> { println!("\nPerforming FFTs..."); let ngrid: usize = config.ngrid as usize; // Create FFTW plan let shape = [ngrid, ngrid, ngrid]; let mut plan: C2CPlan64 = match C2CPlan::aligned(&shape[..], Sign::Forward, Flag::Estimate) { Ok(p) => p, Err(_) => return Err("Unable to create FFTW plan."), }; println!("Plan created!"); // Perform FFT on grids let ngrid3 = ngrid * ngrid * ngrid; let out1 = fft_from_plan(ngrid3, grid1, &mut plan)?; println!("First grid FFT complete!"); let out2 = fft_from_plan(ngrid3, grid2, &mut plan)?; println!("Second grid FFT complete!"); // Sanity prints println!("FFTs performed... Sanity check:"); for n in 0..10 { println!("out1[{}] = {:.3e} + {:.3e}i", n, out1[n].re, out1[n].im); println!("out2[{}] = {:.3e} + {:.3e}i", n, out2[n].re, out2[n].im); } Ok((out1, out2)) } /// Use FFTW3 plan to perform FFT fn fft_from_plan( ngrid3: usize, mut grid: AlignedVec<c64>, plan: &mut C2CPlan64, ) -> Result<AlignedVec<c64>, &'static str>
{
    let mut out = AlignedVec::new(ngrid3);
    match plan.c2c(&mut grid, &mut out) {
        Ok(_) => (),
        Err(_) => return Err("Failed to FFT grid."),
    };
    Ok(out)
}

/// Calculates the cross power spectrum of the given 3D grids (note if the same
/// grid is given twice then this is the auto power spectrum).
///
/// # Examples
///
/// ```
/// let output: Output = correlate(&config, out1, out2).unwrap();
/// ```
pub fn correlate(
    config: &Config,
    out1: AlignedVec<c64>,
    out2: AlignedVec<c64>,
) -> Result<Output, &'static str> {
    println!("\nCalculating power spectrum...");
    if cfg!(feature = "ngp_correction_single") {
        println!("Correcting for NGP mass assignment of one field!");
    } else if cfg!(feature = "cic_correction_single") {
        println!("Correcting for CIC mass assignment of one field!");
    } else if cfg!(feature = "ngp_correction_both") {
        println!("Correcting for NGP mass assignment of both fields!");
    } else if cfg!(feature = "cic_correction_both") {
        println!("Correcting for CIC mass assignment of both fields!");
    }

    let ngrid: usize = config.ngrid as usize;
    let boxsize: f64 = f64::from(config.boxsize);

    // Calculate power spectrum
    let kf: f64 = 2.0 * PI / boxsize;
    let coeff: f64 = (boxsize / (2.0 * PI)).powf(2.0);
    let nhalf: usize = ngrid / 2;
    #[cfg(any(
        feature = "ngp_correction_single",
        feature = "ngp_correction_both",
        feature = "cic_correction_single",
        feature = "cic_correction_both"
    ))]
    let kny: f64 = PI * config.ngrid as f64 / boxsize;

    let mut w: Vec<f64> = Vec::with_capacity(ngrid);
    for i in 0..=nhalf {
        w.push(kf * (i as f64));
    }
    for i in (nhalf + 1)..ngrid {
        w.push(kf * ((i as isize - ngrid as isize) as f64));
    }

    let mut pow_spec: Vec<f64> = vec![0.0; ngrid];
    let mut iweights: Vec<i64> = vec![0; ngrid];
    for i in 0..ngrid {
        let iper = if i >= nhalf { ngrid - i } else { i };
        for j in 0..ngrid {
            let jper = if j >= nhalf { ngrid - j } else { j };
            for k in 0..ngrid {
                let kper = if k >= nhalf { ngrid - k } else { k };
                let r: f64 = (iper * iper + jper * jper + kper * kper) as f64;
                let m: usize = (0.5 + r.sqrt()) as usize;
                iweights[m] += 1;
                let g = w[i] * w[i] + w[j] * w[j] + w[k] * w[k];
                if g != 0.0 {
                    let scale: usize = (0.5 + (g * coeff).sqrt()) as usize;
                    let index: usize = k + ngrid * (j + ngrid * i);
                    let mut contrib: Complex<f64> =
                        out1[index] * out2[index].conj() + out1[index].conj() * out2[index];
                    #[cfg(feature = "ngp_correction_single")]
                    {
                        // Correct for Nearest-Grid-Point mass assignment
                        let wngp = sinc(PI * w[i] as f64 / (2.0 * kny))
                            * sinc(PI * w[j] as f64 / (2.0 * kny))
                            * sinc(PI * w[k] as f64 / (2.0 * kny));
                        contrib.re /= wngp;
                    }
                    #[cfg(feature = "cic_correction_single")]
                    {
                        // Correct for Cloud-in-Cell mass assignment
                        let wcic = (sinc(PI * w[i] as f64 / (2.0 * kny))
                            * sinc(PI * w[j] as f64 / (2.0 * kny))
                            * sinc(PI * w[k] as f64 / (2.0 * kny)))
                        .powi(2);
                        contrib.re /= wcic;
                    }
                    #[cfg(feature = "ngp_correction_both")]
                    {
                        // Correct for Nearest-Grid-Point mass assignment
                        let wngp = sinc(PI * w[i] as f64 / (2.0 * kny))
                            * sinc(PI * w[j] as f64 / (2.0 * kny))
                            * sinc(PI * w[k] as f64 / (2.0 * kny));
                        contrib.re /= wngp * wngp;
                    }
                    #[cfg(feature = "cic_correction_both")]
                    {
                        // Correct for Cloud-in-Cell mass assignment
                        let wcic = (sinc(PI * w[i] as f64 / (2.0 * kny))
                            * sinc(PI * w[j] as f64 / (2.0 * kny))
                            * sinc(PI * w[k] as f64 / (2.0 * kny)))
                        .powi(2);
                        contrib.re /= wcic * wcic;
                    }
                    pow_spec[scale] += contrib.re / 2.0;
                }
            }
        }
    }
    println!("Power spectrum calculated. Normalising...");

    // Normalise power spectrum
    let pisq: f64 = 2.0 * PI * PI;
    let mut deltasqk: Vec<f64> = Vec::with_capacity(nhalf);
    for i in 0..nhalf {
        pow_spec[i] *= boxsize.powi(3) / (ngrid as f64).powi(6);
        pow_spec[i] /= iweights[i] as f64;
        deltasqk.push(w[i].powf(3.0) * pow_spec[i] / pisq);
    }

    // Return final output
    Ok(Output {
        w,
        pow_spec,
        deltasqk,
        iweights,
    })
}

#[cfg(any(
    feature = "ngp_correction_single",
    feature = "ngp_correction_both",
    feature = "cic_correction_single",
    feature = "cic_correction_both"
))]
fn sinc(theta: f64) -> f64 {
    if theta < 1e-20 {
        1.0
    } else {
        theta.sin() / theta
    }
}
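
// Editor's sketch (not part of the original library): the public functions
// above compose into a single pipeline, in the order the per-function
// `# Examples` blocks suggest. The helper name `run_pipeline` is ours; every
// call below uses a signature defined in this file.
pub fn run_pipeline(config: &Config) -> Result<(), &'static str> {
    // Read both density grids from their custom binary files...
    let grid1 = load_grid(config, 1)?;
    let grid2 = load_grid(config, 2)?;
    // ...Fourier-transform them with a shared FFTW plan...
    let (out1, out2) = perform_fft(config, grid1, grid2)?;
    // ...bin the (cross) power spectrum...
    let output = correlate(config, out1, out2)?;
    // ...and write w, pow_spec, deltasqk and iweights to the output file.
    output.save_result(config)
}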
jenkins-mod-main.rs
#[macro_use] extern crate error_chain; extern crate hyper; extern crate log4rs; #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; extern crate serde_json; extern crate structopt; #[macro_use] extern crate structopt_derive; extern crate toml; use hyper::client::{Client, RedirectPolicy}; use serde_json::{Map, Value}; use std::fs::{self, File}; use std::io::{self, Read, Write}; use std::path::{Path, PathBuf}; use std::process; use structopt::StructOpt; mod errors { error_chain! { errors { } } } use errors::*; #[derive(Serialize, Deserialize, Debug)] struct FileConfig { update_center_url: String, suppress_front: String, suppress_back: String, connection_check_url_change: String, url_replace_from: String, url_replace_into: String, auto_create_output_dir: bool, modified_json_file_path: PathBuf, url_list_json_file_path: PathBuf, } #[derive(StructOpt, Debug)] #[structopt(name = "Test", about = "Test program")] struct ArgConfig { #[structopt(short = "c", long = "config", help = "File configuration path")] config_path: String, #[structopt(short = "l", long = "log-config", help = "Log configuration file path")] log_config_path: String, } // const key names const CONNECTION_CHECK_URL_KEY: &str = "connectionCheckUrl"; const CORE_KEY: &str = "core"; const PLUGINS_KEY: &str = "plugins"; const URL_KEY: &str = "url"; type MapStrVal = Map<String, Value>; fn change_connection_check_url<S: Into<String>>( resp_outer_map: &mut MapStrVal, connection_check_url_change: S, ) -> Result<()> { let connection_check_url = match resp_outer_map.get_mut(CONNECTION_CHECK_URL_KEY) { Some(connection_check_url) => connection_check_url, None => bail!(format!( "Unable to find '{}' for changing connection URL", CONNECTION_CHECK_URL_KEY )), }; let connection_check_url = match connection_check_url { &mut Value::String(ref mut connection_check_url) => connection_check_url, c => bail!(format!( "Expected '{}' to contain string value, but found content: {:?}", CONNECTION_CHECK_URL_KEY, c )), }; *connection_check_url = connection_check_url_change.into(); Ok(()) } fn replace_url_impl( url_outer: &mut Value, outer_key: &str, url_replace_from: &str, url_replace_into: &str, ) -> Result<String> { let url_outer_map = match url_outer { &mut Value::Object(ref mut url_outer_map) => url_outer_map, c => bail!(format!( "Expected '{}' to be an object, but found content: {:?}", outer_key, c )), }; let url = match url_outer_map.get_mut(URL_KEY) { Some(url) => url, None => bail!(format!( "Expected '{}' to be present for '{}'", URL_KEY, CORE_KEY )), }; let url_str = match url { &mut Value::String(ref mut url_str) => url_str, c => bail!(format!( "Expected '{}' to contain string value, but found content: {:?}", URL_KEY, c )), }; let orig_url = url_str.to_owned(); *url_str = url_str.replace(url_replace_from, url_replace_into); Ok(orig_url) } fn replace_core_url( resp_outer_map: &mut MapStrVal, url_replace_from: &str, url_replace_into: &str, ) -> Result<String> { let mut core = match resp_outer_map.get_mut(CORE_KEY) { Some(core) => core, None => bail!(format!( "Unable to find '{}' for core URL replacement", CORE_KEY )), }; replace_url_impl(&mut core, CORE_KEY, url_replace_from, url_replace_into) } fn replace_plugin_urls( resp_outer_map: &mut MapStrVal, url_replace_from: &str, url_replace_into: &str, ) -> Result<Vec<String>> { let plugins = match resp_outer_map.get_mut(PLUGINS_KEY) { Some(plugins) => plugins, None => bail!(format!( "Unable to find '{}' for core URL replacement", CORE_KEY )), }; let plugins_obj = match plugins { &mut 
Value::Object(ref mut plugins_obj) => plugins_obj, c => bail!(format!( "Expected '{}' to be of object type, but found content: {:?}", PLUGINS_KEY, c )), }; let mut orig_urls = Vec::new(); for (key, mut plugin) in plugins_obj.iter_mut() { let orig_url = replace_url_impl(plugin, key, url_replace_from, url_replace_into)?; orig_urls.push(orig_url); } Ok(orig_urls) } fn run() -> Result<()> { let arg_config = ArgConfig::from_args(); log4rs::init_file(&arg_config.log_config_path, Default::default()).chain_err(|| { format!( "Unable to initialize log4rs logger with the given config file at '{}'", arg_config.log_config_path ) })?; let config_str = { let mut config_file = File::open(&arg_config.config_path).chain_err(|| { format!( "Unable to open config file path at {:?}", arg_config.config_path ) })?; let mut s = String::new(); config_file .read_to_string(&mut s) .map(|_| s) .chain_err(|| "Unable to read config file into string")? }; let config: FileConfig = toml::from_str(&config_str).chain_err(|| { format!( "Unable to parse config as required toml format: {}", config_str )
    })?;

    info!("Completed configuration initialization!");

    // write the body here
    let mut client = Client::new();
    client.set_redirect_policy(RedirectPolicy::FollowAll);

    let mut resp = client.get(&config.update_center_url).send().chain_err(|| {
        format!(
            "Unable to perform HTTP request with URL string '{}'",
            config.update_center_url
        )
    })?;

    let mut resp_str = String::new();
    resp.read_to_string(&mut resp_str)
        .chain_err(|| "Unable to read HTTP response into string")?;
    let resp_str = resp_str;

    let trimmed_resp_str = resp_str
        .trim_left_matches(&config.suppress_front)
        .trim_right_matches(&config.suppress_back);

    // JSON parsing all the way
    let mut resp_json: Value = serde_json::from_str(trimmed_resp_str)
        .chain_err(|| "Unable to parse trimmed JSON string into JSON value.")?;

    // to stop borrowing early
    let (core_orig_url, mut plugin_urls) = {
        let mut resp_outer_map = match resp_json {
            Value::Object(ref mut resp_outer_map) => resp_outer_map,
            c => bail!(format!(
                "Expected outer most JSON to be of Object type, but found content: {:?}",
                c
            )),
        };

        change_connection_check_url(
            &mut resp_outer_map,
            config.connection_check_url_change.to_owned(),
        )?;

        let core_orig_url = replace_core_url(
            &mut resp_outer_map,
            &config.url_replace_from,
            &config.url_replace_into,
        )?;

        let plugin_urls = replace_plugin_urls(
            &mut resp_outer_map,
            &config.url_replace_from,
            &config.url_replace_into,
        )?;

        (core_orig_url, plugin_urls)
    };

    // combine both the core + plugin links
    let mut urls = vec![core_orig_url];
    urls.append(&mut plugin_urls);
    let urls = urls;

    // write the modified JSON file
    if config.auto_create_output_dir {
        let create_parent_dir_if_present = |dir_opt: Option<&Path>| {
            let dir_opt = dir_opt.and_then(|dir| {
                // ignore if the directory has already been created
                if Path::new(dir).is_dir() {
                    None
                } else {
                    Some(dir)
                }
            });

            match dir_opt {
                Some(dir) => {
                    info!("Creating directory chain: {:?}", dir);
                    fs::create_dir_all(dir)
                        .chain_err(|| format!("Unable to create directory chain: {:?}", dir))
                }
                None => Ok(()),
            }
        };

        create_parent_dir_if_present(config.modified_json_file_path.parent())?;
        create_parent_dir_if_present(config.url_list_json_file_path.parent())?;
    }

    let mut json_file = File::create(&config.modified_json_file_path)
        .chain_err(|| "Unable to open modified update-center file for writing")?;

    let serialized_json = serde_json::to_string(&resp_json)
        .chain_err(|| "Unable to convert modified JSON back into string for serialization")?;

    // need to append back the trimmed left and right sides
    json_file
        .write_fmt(format_args!(
            "{}{}{}",
            config.suppress_front, serialized_json, config.suppress_back
        ))
        .chain_err(|| "Unable to write modified serialized JSON to file")?;

    let mut urls_file = File::create(&config.url_list_json_file_path)
        .chain_err(|| "Unable to open file for writing URLs")?;

    let urls_json = serde_json::to_string_pretty(&urls)
        .chain_err(|| "Unable to convert list of URLs into pretty JSON form")?;

    urls_file
        .write_fmt(format_args!("{}", urls_json))
        .chain_err(|| "Unable to write URLs in JSON form into file")?;

    Ok(())
}

fn main() {
    match run() {
        Ok(_) => {
            println!("Program completed!");
            process::exit(0)
        }
        Err(ref e) => {
            let stderr = &mut io::stderr();
            writeln!(stderr, "Error: {}", e).expect("Unable to write error into stderr!");

            for e in e.iter().skip(1) {
                writeln!(stderr, "- Caused by: {}", e)
                    .expect("Unable to write error causes into stderr!");
            }

            process::exit(1);
        }
    }
}
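
// Editor's sketch (not part of the original program): the TOML shape that
// `FileConfig` deserializes, round-tripped through the same `toml::from_str`
// call `run()` uses. Every value is an illustrative placeholder -- in
// particular the JSONP wrapper in `suppress_front`/`suppress_back` and the
// URLs are examples, not taken from a real deployment.
#[allow(dead_code)]
fn sample_file_config() -> FileConfig {
    let sample = r#"
        update_center_url = "https://updates.example.com/update-center.json"
        suppress_front = "updateCenter.post("
        suppress_back = ");"
        connection_check_url_change = "https://mirror.example.com/"
        url_replace_from = "https://updates.example.com"
        url_replace_into = "https://mirror.example.com"
        auto_create_output_dir = true
        modified_json_file_path = "out/update-center.json"
        url_list_json_file_path = "out/urls.json"
    "#;
    toml::from_str(sample).expect("placeholder config should parse")
}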
jenkins-mod-main.rs
#[macro_use] extern crate error_chain; extern crate hyper; extern crate log4rs; #[macro_use] extern crate log; #[macro_use] extern crate serde_derive; extern crate serde_json; extern crate structopt; #[macro_use] extern crate structopt_derive; extern crate toml; use hyper::client::{Client, RedirectPolicy}; use serde_json::{Map, Value}; use std::fs::{self, File}; use std::io::{self, Read, Write}; use std::path::{Path, PathBuf}; use std::process; use structopt::StructOpt; mod errors { error_chain! { errors { } } } use errors::*; #[derive(Serialize, Deserialize, Debug)] struct FileConfig { update_center_url: String, suppress_front: String, suppress_back: String, connection_check_url_change: String, url_replace_from: String, url_replace_into: String, auto_create_output_dir: bool, modified_json_file_path: PathBuf, url_list_json_file_path: PathBuf, } #[derive(StructOpt, Debug)] #[structopt(name = "Test", about = "Test program")] struct ArgConfig { #[structopt(short = "c", long = "config", help = "File configuration path")] config_path: String, #[structopt(short = "l", long = "log-config", help = "Log configuration file path")] log_config_path: String, } // const key names const CONNECTION_CHECK_URL_KEY: &str = "connectionCheckUrl"; const CORE_KEY: &str = "core"; const PLUGINS_KEY: &str = "plugins"; const URL_KEY: &str = "url"; type MapStrVal = Map<String, Value>; fn
<S: Into<String>>( resp_outer_map: &mut MapStrVal, connection_check_url_change: S, ) -> Result<()> { let connection_check_url = match resp_outer_map.get_mut(CONNECTION_CHECK_URL_KEY) { Some(connection_check_url) => connection_check_url, None => bail!(format!( "Unable to find '{}' for changing connection URL", CONNECTION_CHECK_URL_KEY )), }; let connection_check_url = match connection_check_url { &mut Value::String(ref mut connection_check_url) => connection_check_url, c => bail!(format!( "Expected '{}' to contain string value, but found content: {:?}", CONNECTION_CHECK_URL_KEY, c )), }; *connection_check_url = connection_check_url_change.into(); Ok(()) } fn replace_url_impl( url_outer: &mut Value, outer_key: &str, url_replace_from: &str, url_replace_into: &str, ) -> Result<String> { let url_outer_map = match url_outer { &mut Value::Object(ref mut url_outer_map) => url_outer_map, c => bail!(format!( "Expected '{}' to be an object, but found content: {:?}", outer_key, c )), }; let url = match url_outer_map.get_mut(URL_KEY) { Some(url) => url, None => bail!(format!( "Expected '{}' to be present for '{}'", URL_KEY, CORE_KEY )), }; let url_str = match url { &mut Value::String(ref mut url_str) => url_str, c => bail!(format!( "Expected '{}' to contain string value, but found content: {:?}", URL_KEY, c )), }; let orig_url = url_str.to_owned(); *url_str = url_str.replace(url_replace_from, url_replace_into); Ok(orig_url) } fn replace_core_url( resp_outer_map: &mut MapStrVal, url_replace_from: &str, url_replace_into: &str, ) -> Result<String> { let mut core = match resp_outer_map.get_mut(CORE_KEY) { Some(core) => core, None => bail!(format!( "Unable to find '{}' for core URL replacement", CORE_KEY )), }; replace_url_impl(&mut core, CORE_KEY, url_replace_from, url_replace_into) } fn replace_plugin_urls( resp_outer_map: &mut MapStrVal, url_replace_from: &str, url_replace_into: &str, ) -> Result<Vec<String>> { let plugins = match resp_outer_map.get_mut(PLUGINS_KEY) { Some(plugins) => plugins, None => bail!(format!( "Unable to find '{}' for core URL replacement", CORE_KEY )), }; let plugins_obj = match plugins { &mut Value::Object(ref mut plugins_obj) => plugins_obj, c => bail!(format!( "Expected '{}' to be of object type, but found content: {:?}", PLUGINS_KEY, c )), }; let mut orig_urls = Vec::new(); for (key, mut plugin) in plugins_obj.iter_mut() { let orig_url = replace_url_impl(plugin, key, url_replace_from, url_replace_into)?; orig_urls.push(orig_url); } Ok(orig_urls) } fn run() -> Result<()> { let arg_config = ArgConfig::from_args(); log4rs::init_file(&arg_config.log_config_path, Default::default()).chain_err(|| { format!( "Unable to initialize log4rs logger with the given config file at '{}'", arg_config.log_config_path ) })?; let config_str = { let mut config_file = File::open(&arg_config.config_path).chain_err(|| { format!( "Unable to open config file path at {:?}", arg_config.config_path ) })?; let mut s = String::new(); config_file .read_to_string(&mut s) .map(|_| s) .chain_err(|| "Unable to read config file into string")? 
}; let config: FileConfig = toml::from_str(&config_str).chain_err(|| { format!( "Unable to parse config as required toml format: {}", config_str ) })?; info!("Completed configuration initialization!"); // write the body here let mut client = Client::new(); client.set_redirect_policy(RedirectPolicy::FollowAll); let mut resp = client.get(&config.update_center_url).send().chain_err(|| { format!( "Unable to perform HTTP request with URL string '{}'", config.update_center_url ) })?; let mut resp_str = String::new(); resp.read_to_string(&mut resp_str) .chain_err(|| "Unable to read HTTP response into string")?; let resp_str = resp_str; let trimmed_resp_str = resp_str .trim_left_matches(&config.suppress_front) .trim_right_matches(&config.suppress_back); // JSON parsing all the way let mut resp_json: Value = serde_json::from_str(trimmed_resp_str) .chain_err(|| "Unable to parse trimmed JSON string into JSON value.")?; // to stop borrowing early let (core_orig_url, mut plugin_urls) = { let mut resp_outer_map = match resp_json { Value::Object(ref mut resp_outer_map) => resp_outer_map, c => bail!(format!( "Expected outer most JSON to be of Object type, but found content: {:?}", c )), }; change_connection_check_url( &mut resp_outer_map, config.connection_check_url_change.to_owned(), )?; let core_orig_url = replace_core_url( &mut resp_outer_map, &config.url_replace_from, &config.url_replace_into, )?; let plugin_urls = replace_plugin_urls( &mut resp_outer_map, &config.url_replace_from, &config.url_replace_into, )?; (core_orig_url, plugin_urls) }; // combine both the core + plugin links let mut urls = vec![core_orig_url]; urls.append(&mut plugin_urls); let urls = urls; // write the modified JSON file if config.auto_create_output_dir { let create_parent_dir_if_present = |dir_opt: Option<&Path>| { let dir_opt = dir_opt.and_then(|dir| { // ignore if the directory has already been created if Path::new(dir).is_dir() { None } else { Some(dir) } }); match dir_opt { Some(dir) => { info!("Creating directory chain: {:?}", dir); fs::create_dir_all(dir) .chain_err(|| format!("Unable to create directory chain: {:?}", dir)) } None => Ok(()), } }; create_parent_dir_if_present(config.modified_json_file_path.parent())?; create_parent_dir_if_present(config.url_list_json_file_path.parent())?; } let mut json_file = File::create(&config.modified_json_file_path) .chain_err(|| "Unable to open modified update-center file for writing")?; let serialized_json = serde_json::to_string(&resp_json) .chain_err(|| "Unable to convert modified JSON back into string for serialization")?; // need to append back the trimmed left and right sides json_file .write_fmt(format_args!( "{}{}{}", config.suppress_front, serialized_json, config.suppress_back )) .chain_err(|| "Unable to write modified serialized JSON to file")?; let mut urls_file = File::create(&config.url_list_json_file_path) .chain_err(|| "Unable to open file for writing URLs")?; let urls_json = serde_json::to_string_pretty(&urls) .chain_err(|| "Unable to convert list of URLs into pretty JSON form")?; urls_file .write_fmt(format_args!("{}", urls_json)) .chain_err(|| "Unable to write URLs in JSON form into file")?; Ok(()) } fn main() { match run() { Ok(_) => { println!("Program completed!"); process::exit(0) } Err(ref e) => { let stderr = &mut io::stderr(); writeln!(stderr, "Error: {}", e).expect("Unable to write error into stderr!"); for e in e.iter().skip(1) { writeln!(stderr, "- Caused by: {}", e) .expect("Unable to write error causes into stderr!"); } process::exit(1); } } }
change_connection_check_url
identifier_name
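The run() above deserializes its settings into a FileConfig with toml::from_str, but the struct itself is defined elsewhere in the crate. A minimal sketch of what it plausibly looks like, reconstructed from the call sites; the field names come from the code above, while the exact types and the serde derive are assumptions:

use serde_derive::Deserialize;
use std::path::PathBuf;

// Hypothetical reconstruction; only the field names are attested in the code above.
#[derive(Debug, Deserialize)]
struct FileConfig {
    /// URL of the upstream update-center JSON, fetched with the redirect-following client.
    update_center_url: String,
    /// Value handed to change_connection_check_url.
    connection_check_url_change: String,
    /// Substring replacement applied to the core and plugin download URLs.
    url_replace_from: String,
    url_replace_into: String,
    /// JSONP-style wrapper trimmed off the response and re-appended on write.
    suppress_front: String,
    suppress_back: String,
    /// Create parent directories of the two output paths when missing.
    auto_create_output_dir: bool,
    modified_json_file_path: PathBuf,
    url_list_json_file_path: PathBuf,
}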
mod.rs
use core::iter::Iterator; use super::*; use crate::error_consts::*; //#[test] //mod test; #[derive(Clone)] pub struct Line { tag: char, matched: bool, text: String, } pub struct VecBuffer { saved: bool, // Chars used for tagging. No tag equates to NULL in the char buffer: Vec<Line>, clipboard: Vec<Line>, } impl VecBuffer { pub fn new() -> Self { Self{ saved: true, buffer: Vec::new(), clipboard: Vec::new(), } } } impl Buffer for VecBuffer { // Index operations, get and verify fn len(&self) -> usize { self.buffer.len() } fn get_tag(&self, tag: char) -> Result<usize, &'static str> { let mut index = 0; for line in &self.buffer[..] { if &tag == &line.tag { return Ok(index); } index += 1; } Err(NO_MATCH) } fn get_matching(&self, pattern: &str, curr_line: usize, backwards: bool) -> Result<usize, &'static str> { verify_index(self, curr_line)?; use regex::RegexBuilder; let regex = RegexBuilder::new(pattern) .multi_line(true) .build() .map_err(|_| INVALID_REGEX)?; // Figure out how far to iterate let length = if !backwards { self.buffer.len().saturating_sub(curr_line + 1) } else { curr_line }; // Since the range must be positive we subtract from bufferlen for backwards for index in 0..length { if backwards { if regex.is_match(&(self.buffer[curr_line - 1 - index].text)) { return Ok(curr_line - 1 - index) } } else { if regex.is_match(&(self.buffer[curr_line + index + 1].text)) { return Ok(curr_line + index + 1) } } } Err(NO_MATCH) } // For macro commands fn mark_matching(&mut self, pattern: &str, selection: (usize, usize), inverse: bool) -> Result<(), &'static str> { use regex::RegexBuilder; verify_selection(self, selection)?; let regex = RegexBuilder::new(pattern) .multi_line(true) .build() .map_err(|_| INVALID_REGEX)?; for index in 0..self.len() { if index >= selection.0 && index <= selection.1 { self.buffer[index].matched = regex.is_match(&(self.buffer[index].text)) ^ inverse; } else { self.buffer[index].matched = false; } } Ok(()) } fn get_marked(&mut self) -> Result<Option<usize>, &'static str> { for index in 0..
self.buffer.len() { if self.buffer[index].matched { self.buffer[index].matched = false; return Ok(Some(index)); } } Ok(None) } // Simple buffer modifications: fn tag_line(&mut self, index: usize, tag: char) -> Result<(), &'static str> { // Overwrite current char with given char self.buffer[index].tag = tag; Ok(()) } // Take an iterator over &str as data fn insert<'a>(&mut self, data: &mut dyn Iterator<Item = &'a str>, index: usize) -> Result<(), &'static str> { // Possible TODO: preallocate for the insert verify_index(self, index)?; self.saved = false; // To minimise time complexity we split the vector immediately let mut tail = self.buffer.split_off(index); // Then append the insert data for line in data { self.buffer.push(Line{tag: '\0', matched: false, text: line.to_string()}); } // And finally the cut off tail self.buffer.append(&mut tail); Ok(()) } fn cut(&mut self, selection: (usize, usize)) -> Result<(), &'static str> { verify_selection(self, selection)?; self.saved = false; let mut tail = self.buffer.split_off(selection.1 + 1); self.clipboard = self.buffer.split_off(selection.0); self.buffer.append(&mut tail); Ok(()) } fn change<'a>(&mut self, data: &mut dyn Iterator<Item = &'a str>, selection: (usize, usize)) -> Result<(), &'static str> { verify_selection(self, selection)?; self.saved = false; let mut tail = self.buffer.split_off(selection.1 + 1); self.clipboard = self.buffer.split_off(selection.0); for line in data { self.buffer.push(Line{tag: '\0', matched: false, text: line.to_string()}); } self.buffer.append(&mut tail); Ok(()) } fn mov(&mut self, selection: (usize, usize), index: usize) -> Result<(), &'static str> { verify_selection(self, selection)?; verify_index(self, index)?; // Operation varies depending on moving forward or back if index <= selection.0 { // split out the relevant parts of the buffer let mut tail = self.buffer.split_off(selection.1 + 1); let mut data = self.buffer.split_off(selection.0); let mut middle = self.buffer.split_off(index.saturating_sub(1)); // Reassemble self.buffer.append(&mut data); self.buffer.append(&mut middle); self.buffer.append(&mut tail); Ok(()) } else if index >= selection.1 { // split out the relevant parts of the buffer let mut tail = self.buffer.split_off(index); let mut middle = self.buffer.split_off(selection.1 + 1); let mut data = self.buffer.split_off(selection.0); // Reassemble self.buffer.append(&mut middle); self.buffer.append(&mut data); self.buffer.append(&mut tail); Ok(()) } else { Err(MOVE_INTO_SELF) } } fn mov_copy(&mut self, selection: (usize, usize), index: usize) -> Result<(), &'static str> { verify_selection(self, selection)?; verify_index(self, index)?; // Get the data let mut data = Vec::new(); for line in &self.buffer[selection.0..=selection.1] { data.push(line.clone()); } // Insert it, subtract one if copying to before selection let i = if index <= selection.0 { index.saturating_sub(1) } else { index }; let mut tail = self.buffer.split_off(i); self.buffer.append(&mut data); self.buffer.append(&mut tail); Ok(()) } fn join(&mut self, selection: (usize, usize)) -> Result<(), &'static str> { verify_selection(self, selection)?; // Take out the lines that should go away efficiently let mut tail = self.buffer.split_off(selection.1 + 1); let data = self.buffer.split_off(selection.0 + 1); self.buffer.append(&mut tail); // Add their contents to the line left in for line in data { self.buffer[selection.0].text.pop(); // Remove the existing newline self.buffer[selection.0].text.push_str(&line.text); // Add in the line }
Ok(()) } fn copy(&mut self, selection: (usize, usize)) -> Result<(), &'static str> { verify_selection(self, selection)?; self.clipboard = Vec::new(); // copy out each line in selection for line in &self.buffer[selection.0..=selection.1] { self.clipboard.push(line.clone()); } Ok(()) } fn paste(&mut self, index: usize) -> Result<usize, &'static str> { verify_index(self, index)?; // Cut off the tail in one go, to reduce time complexity let mut tmp = self.buffer.split_off(index); // Then append copies of all lines in clipboard for line in &self.clipboard { self.buffer.push(line.clone()); } // Finally put back the tail self.buffer.append(&mut tmp); Ok(self.clipboard.len()) } fn search_replace(&mut self, pattern: (&str, &str), selection: (usize, usize), global: bool) -> Result<(usize, usize), &'static str> { use regex::RegexBuilder; // ensure that the selection is valid verify_selection(self, selection)?; self.saved = false; // TODO: actually check if changes are made // Compile the regex used to match/extract data let regex = RegexBuilder::new(pattern.0) .multi_line(true) .build() .map_err(|_| INVALID_REGEX)?; let mut selection_after = selection; // Cut out the whole selection from buffer let mut tail = self.buffer.split_off(selection.1 + 1); let before = self.buffer.split_off(selection.0 + 1); // Save ourselves a little bit of copying/allocating let mut tmp = self.buffer.pop().unwrap(); // Then join all selected lines together for line in before { tmp.text.push_str(&line.text); } // Run the search-replace over it let mut after = if global { regex.replace_all(&tmp.text, pattern.1).to_string() } else
; // If there is no newline at the end, join next line if !after.ends_with('\n') { if tail.len() > 0 { after.push_str(&tail.remove(0).text); } else { after.push('\n'); } } // Split on newlines and add all lines to the buffer for line in after.lines() { self.buffer.push(Line{tag: '\0', matched: false, text: format!("{}\n", line)}); } // Get the end of the affected area from current bufferlen selection_after.1 = self.buffer.len(); // Then put the tail back self.buffer.append(&mut tail); Ok(selection_after) } fn read_from(&mut self, path: &str, index: Option<usize>, must_exist: bool) -> Result<usize, &'static str> { if let Some(i) = index { verify_index(self, i)?; } let data = file::read_file(path, must_exist)?; let len = data.len(); let mut iter = data.iter().map(|string| &string[..]); let i = match index { Some(i) => i, // Since .change is not safe on an empty selection and we actually just wish to delete everything None => { self.buffer.clear(); 0 }, }; self.insert(&mut iter, i)?; Ok(len) } fn write_to(&mut self, selection: Option<(usize, usize)>, path: &str, append: bool) -> Result<(), &'static str> { let data = match selection { Some(sel) => self.get_selection(sel)?, None => Box::new(self.buffer[..].iter().map(|line| &line.text[..])), }; file::write_file(path, data, append)?; if selection == Some((0, self.len().saturating_sub(1))) || selection.is_none() { self.saved = true; } Ok(()) } fn saved(&self) -> bool { self.saved } // The output command fn get_selection<'a>(&'a self, selection: (usize, usize)) -> Result<Box<dyn Iterator<Item = &'a str> + 'a>, &'static str> { verify_selection(self, selection)?; let tmp = self.buffer[selection.0..=selection.1].iter().map(|line| &line.text[..]); Ok(Box::new(tmp)) } }
{ regex.replace(&tmp.text, pattern.1).to_string() }
conditional_block
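The VecBuffer implementation above leans on verify_index and verify_selection brought in through super::*, plus error strings from error_consts. A minimal sketch of those helpers under the semantics the call sites imply — selections are inclusive (start, end) pairs, and an index may point one past the last line so data can be appended at the end; the constant texts and signatures are assumptions, and Buffer is the trait from the parent module:

// Assumed error texts; only the constant names appear in the code above.
pub const INDEX_TOO_BIG: &str = "Index out of bounds.";
pub const SELECTION_INVALID: &str = "Invalid selection.";

pub fn verify_index(buffer: &impl Buffer, index: usize) -> Result<(), &'static str> {
    // index == buffer.len() is allowed, so inserts can append after the last line.
    if index > buffer.len() {
        return Err(INDEX_TOO_BIG);
    }
    Ok(())
}

pub fn verify_selection(buffer: &impl Buffer, selection: (usize, usize)) -> Result<(), &'static str> {
    // Both endpoints are inclusive and must address existing lines.
    if selection.0 > selection.1 || selection.1 >= buffer.len() {
        return Err(SELECTION_INVALID);
    }
    Ok(())
}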
day24b.rs
#![feature(drain_filter)] use clap::Parser; use env_logger::Env; use log::{debug, info}; use std::cell::Cell; use std::fmt; use std::fs::File; use std::io::{BufRead, BufReader}; /// Advent of Code 2022, Day 24 #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Input file to read input: String, /// Part of the puzzle to solve #[arg(short, long, value_parser = clap::value_parser!(u32).range(1..=2))] part: u32, } type MapGrid = Vec<Vec<MapPos>>; type Position = (usize, usize); struct MapPos { blizzards: Vec<char>, wall: bool, } struct Map { grid: MapGrid, start: Position, exit: Position, player: Cell<Position>, // only used for rendering } impl MapPos { /// Convert a MapPosition to a char, for display purposes. pub fn to_char(&self) -> char { let nblizzards = self.blizzards.len(); match (self, nblizzards) { (MapPos { wall: true, .. }, _) => '#', (MapPos { wall: false, .. }, 0) => '.', (MapPos { wall: false, .. }, 1) => self.blizzards[0], (MapPos { wall: false, .. }, 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9) => { char::from_digit(nblizzards as u32, 10).unwrap() } _ => 'X', } } } impl Map { /// Create a new empty Map with the specified dimensions. pub fn empty((nrows, ncols): (usize, usize)) -> Map { let start = (1, 0); let exit = (ncols - 2, nrows - 1); Map { grid: (0..nrows) .map(|nrow| { (0..ncols) .map(|ncol| MapPos { blizzards: Vec::new(), wall: match (ncol, nrow) { (ncol, nrow) if (ncol, nrow) == start => false, (ncol, nrow) if (ncol, nrow) == exit => false, (ncol, _) if (ncol == 0 || ncol == ncols - 1) => true, (_, nrow) if (nrow == 0 || nrow == nrows - 1) => true, _ => false, }, }) .collect::<Vec<MapPos>>() }) .collect::<MapGrid>(), start: start, exit: exit, player: Cell::new(start), } } /// Create a new empty map with the same dimensions and position as the reference map. pub fn empty_from(map: &Map) -> Map { let mut new_map = Map::empty((map.grid.len(), map.grid[0].len())); new_map.start = map.start; new_map.exit = map.exit; new_map } /// Read a map from a file. pub fn from_file(filename: &str) -> Map { let grid = BufReader::new(File::open(filename).unwrap_or_else(|err| { panic!("Error opening {filename}: {err:?}"); })) .lines() .map(|line| { line.unwrap() .chars() .map(|c| match c.clone() { '#' => MapPos { blizzards: Vec::new(), wall: true, }, '.' => MapPos { blizzards: Vec::new(), wall: false, }, '>' | '<' | '^' | 'v' => MapPos { blizzards: Vec::from([c]), wall: false, }, _ => panic!("Unknown character encountered while reading map: {c:?}"), }) .collect::<Vec<MapPos>>() }) .collect::<MapGrid>(); let start = (1, 0); let exit = (grid[0].len() - 2, grid.len() - 1); Map { grid: grid, start: start, exit: exit, player: Cell::new(start), } } /// Calculates the next blizzard position on the map. pub fn next_blizzard_pos(&self, colnum: usize, rownum: usize, b: char) -> (usize, usize) { let (mut colnum_next, mut rownum_next) = (colnum, rownum); match b { '>' => { colnum_next += 1; if self.grid[rownum_next][colnum_next].wall { colnum_next = 1; } } '<' => { colnum_next -= 1; if self.grid[rownum_next][colnum_next].wall { colnum_next = self.grid[0].len() - 2; } } '^' => { rownum_next -= 1; if self.grid[rownum_next][colnum_next].wall { rownum_next = self.grid.len() - 2; } } 'v' => { rownum_next += 1; if self.grid[rownum_next][colnum_next].wall { rownum_next = 1; } } _ => panic!("Unknown blizzard type encountered in ({colnum}, {rownum}): {b:?}"), } (colnum_next, rownum_next) } /// Returns the map with the positions of the blizzards on the next minute.
pub fn next_minute(&self) -> Map { let mut new_map = Map::empty_from(self); // Populate empty map with blizzards. self.grid.iter().enumerate().for_each(|(rownum, row)| { row.iter() .enumerate() .filter(|(_colnum, pos)| pos.wall == false && pos.blizzards.len() > 0) .for_each(|(colnum, pos)| { pos.blizzards.iter().for_each(|b| { let (colnum_next, rownum_next) = self.next_blizzard_pos(colnum, rownum, *b); new_map.grid[rownum_next][colnum_next].blizzards.push(*b); }) }) }); new_map } /// Returns the available positions to move on the map. pub fn available_moves(&self, start: &Position) -> Vec<Position> { self.grid .iter() .enumerate() .filter(|(rownum, _row)| { let rowdist = (*rownum as i32 - start.1 as i32).abs(); rowdist <= 1 // keep adjacent and current rows }) .map(|(rownum, row)| { let rowdist = (rownum as i32 - start.1 as i32).abs(); row.iter() .enumerate() .filter(|(colnum, pos)| { let coldist = (*colnum as i32 - start.0 as i32).abs(); coldist <= 1 // keep adjacent and current columns && coldist + rowdist <= 1 // exclude diagonal neighbors && !pos.wall // exclude walls && pos.blizzards.len() == 0 // exclude positions with blizzards }) .map(|(colnum, _pos)| (colnum, rownum)) .collect::<Vec<Position>>() }) .flatten() .collect::<Vec<Position>>() } } impl fmt::Display for Map { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "\n{}", self.grid .iter() .enumerate() .map(|(rownum, row)| format!( "{}\n", row.iter() .enumerate() .map(|(colnum, pos)| { let c = pos.to_char(); match (colnum, rownum) { (colnum, rownum) if self.player.get() == (colnum, rownum) && c == '.' => '■', // current position (colnum, rownum) if self.player.get() == (colnum, rownum) && c != '.' => 'E', // indicate error (colnum, rownum) if self.exit == (colnum, rownum) && c == '.' => '✕', // exit _ => c, } }) .collect::<String>()
} fn main() { env_logger::Builder::from_env(Env::default().default_filter_or("info")) .format_timestamp(None) .init(); let args = Args::parse(); let nways: usize = match args.part { 1 => 1, // start-exit 2 => 3, // start-exit-start-exit part @ _ => panic!("Don't know how to run part {part}."), }; // Read initial map. let mut minute = 0; let map = Map::from_file(&args.input); debug!("\nMinute: {minute}\nMap:{map}"); // The way blizzards propagate, the maps will be periodic. // The period will be the lowest common multiple of the map width and map height. let nrows = map.grid.len(); let ncols = map.grid[0].len(); let period = (1..=(ncols * nrows)) .skip_while(|n| n % nrows != 0 || n % ncols != 0) .next() .unwrap(); // Compute all possible maps. let mut maps = Vec::from([map]); (1..period).for_each(|n| { let map_prev = &maps[n - 1]; let map = map_prev.next_minute(); maps.push(map); }); info!("Precomputed {} maps.", maps.len()); // Fully tracking all the possible paths until we reach the exit explodes. // For this we only keep track of the possible positions at each minute. // // Unlike the implementation in day24.rs, here we don't truncate positions // as soon as we reach the end of a crossing (way) in part 2. // This is to cover the case where two different paths for the first // crossing have times T_1 < S_1, but if we continue with the other // crossings, it will be S_1 + S_2 + S_3 < T_1 + T_2 + T_3. // // In the end, it turned out that this didn't matter. But it is not clear // if this is always the case, or it just happened to be that way for the // inputs we tried. let mut all_possible_positions = Vec::<Vec<Position>>::new(); let mut targets = Vec::<Position>::new(); for n in 0..nways { all_possible_positions.push(Vec::<Position>::new()); targets.push(if n % 2 == 0 { maps[0].exit } else { maps[0].start }); } all_possible_positions[0].push(maps[0].start); let mut done = false; while !done { minute += 1; let map = &maps[minute % period]; let npositions: usize = all_possible_positions.iter().map(|w| w.len()).sum(); info!("\nMinute: {minute}\nNumber of possible positions: {npositions}"); // Update positions for all ways. all_possible_positions = all_possible_positions .iter() .enumerate() .map(|(way, possible_positions)| { let mut possible_positions = possible_positions .iter() .map(|position| map.available_moves(position)) .flatten() .collect::<Vec<_>>(); // Keeping track of all possible positions still isn't enough. // We need to deduplicate the possible positions to keep things snappy. // Duplication arises because it is possible to reach the same position // through different paths in a set amount of time. possible_positions.sort(); possible_positions.dedup(); // After deduplication, sort possible positions by Manhattan order to // the exit. This makes the loop termination condition trivial. let target = targets[way]; possible_positions.sort_by_key(|pos| { let dx = (target.0 as i32 - pos.0 as i32).abs(); let dy = (target.1 as i32 - pos.1 as i32).abs(); dx + dy }); if possible_positions.len() > 0 { let closest = &possible_positions[0]; info!( "Way: {way}, Positions: {}, Target: {target:?}, Closest position: {closest:?}", possible_positions.len() ); } possible_positions }) .collect::<Vec<_>>(); // Move positions that reached target to the next way. let mut removed = Vec::<Position>::new(); for (way, possible_positions) in all_possible_positions.iter_mut().enumerate() { // Move removed positions from previous way to the current one.
if removed.len() > 0 { possible_positions.append(&mut removed); } // Repopulate removed. let target = &targets[way]; removed.extend(possible_positions.drain_filter(|pos| pos == target)); // Done, but finish loop first. if way == nways - 1 && removed.len() > 0 { done = true; continue; } } } info!("Exited after {minute} minutes."); }
)) .collect::<String>() .trim_end() ) }
random_line_split
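The skip_while scan in main() above is a linear search for the least common multiple of the grid dimensions; a gcd-based equivalent is sketched below. One observation (about the math, not a change to the code above): blizzards wrap inside the walls, so the true period of the blizzard layout is the lcm of the interior dimensions, lcm(nrows - 2, ncols - 2); the precomputed table stays exact either way for the first `period` minutes, since each map is derived iteratively from the previous one.

// Equivalent of the skip_while scan: the smallest n divisible by both dimensions.
fn gcd(a: usize, b: usize) -> usize {
    if b == 0 { a } else { gcd(b, a % b) }
}

fn lcm(a: usize, b: usize) -> usize {
    a / gcd(a, b) * b
}

// E.g. on the 6x8 example map the interior is 4x6, so the blizzard layout
// repeats every lcm(4, 6) = 12 minutes.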
interface.rs
//! All the nitty gritty details regarding COM interface for the shell extension //! are defined here. //! //! See: https://docs.microsoft.com/en-us/windows/win32/shell/handlers#implementing-shell-extension-handlers use com::sys::HRESULT; use guid_win::Guid; use once_cell::sync::Lazy; use std::cell::RefCell; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use widestring::WideCStr; use winapi::shared::guiddef; use winapi::shared::minwindef as win; use winapi::shared::windef; use winapi::shared::winerror; use winapi::shared::wtypesbase; use winapi::um::objidl; use winapi::um::oleidl; use winapi::um::winnt; use winapi::um::winuser; use wslscript_common::error::*; use crate::progress::ProgressWindow; /// IClassFactory GUID. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/unknwn/nn-unknwn-iclassfactory /// /// Windows requests this interface via `DllGetClassObject` to further query /// relevant COM interfaces. _com-rs_ crate implements IClassFactory automatically /// for all interfaces (?), so we don't need to worry about details. static CLASS_FACTORY_CLSID: Lazy<Guid> = Lazy::new(|| Guid::from_str("00000001-0000-0000-c000-000000000046").unwrap()); /// Semaphore to keep track of running WSL threads. /// /// DLL shall not be released if there are threads running. pub(crate) static THREAD_COUNTER: AtomicUsize = AtomicUsize::new(0); /// Handle to loaded DLL module. static mut DLL_HANDLE: win::HINSTANCE = std::ptr::null_mut(); /// DLL module entry point. /// /// See: https://docs.microsoft.com/en-us/windows/win32/dlls/dllmain #[no_mangle] extern "system" fn DllMain( hinstance: win::HINSTANCE, reason: win::DWORD, _reserved: win::LPVOID, ) -> win::BOOL { match reason { winnt::DLL_PROCESS_ATTACH => { // store module instance to global variable unsafe { DLL_HANDLE = hinstance }; // set up logging #[cfg(feature = "debug")] if let Ok(mut path) = get_module_path(hinstance) { let stem = path.file_stem().map_or_else( || "debug.log".to_string(), |s| s.to_string_lossy().into_owned(), ); path.pop(); path.push(format!("{}.log", stem)); if simple_logging::log_to_file(&path, log::LevelFilter::Debug).is_err() { unsafe { use winapi::um::winuser::*; let text = wslscript_common::wcstring(format!( "Failed to set up logging to {}", path.to_string_lossy() )); MessageBoxW( std::ptr::null_mut(), text.as_ptr(), wchar::wchz!("Error").as_ptr(), MB_OK | MB_ICONERROR | MB_SERVICE_NOTIFICATION, ); } } } log::debug!("DLL_PROCESS_ATTACH"); return win::TRUE; } winnt::DLL_PROCESS_DETACH => { log::debug!("DLL_PROCESS_DETACH"); ProgressWindow::unregister_window_class(); } winnt::DLL_THREAD_ATTACH => {} winnt::DLL_THREAD_DETACH => {} _ => {} } win::FALSE } /// Called to check whether DLL can be unloaded from memory. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllcanunloadnow #[no_mangle] extern "system" fn DllCanUnloadNow() -> HRESULT { let n = THREAD_COUNTER.load(Ordering::SeqCst); if n > 0 { log::info!("{} WSL threads running, denying DLL unload", n); winerror::S_FALSE } else { log::info!("Permitting DLL unload"); winerror::S_OK } } /// Exposes class factory. 
/// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllgetclassobject #[no_mangle] extern "system" fn DllGetClassObject( class_id: guiddef::REFCLSID, iid: guiddef::REFIID, result: *mut win::LPVOID, ) -> HRESULT { let class_guid = guid_from_ref(class_id); let interface_guid = guid_from_ref(iid); // expect our registered class ID if wslscript_common::DROP_HANDLER_CLSID.eq(&class_guid) { // expect IClassFactory interface to be requested if !CLASS_FACTORY_CLSID.eq(&interface_guid) { log::warn!("Expected IClassFactory, got {}", interface_guid); } use com::production::Class as COMClass; let cls = <Handler as COMClass>::Factory::allocate(); let rv = unsafe { cls.QueryInterface(iid as _, result as _) }; log::debug!( "QueryInterface for {} returned {}, address={:p}", interface_guid, rv, result ); return rv; } else { log::warn!("Unsupported class: {}", class_guid);
} winerror::CLASS_E_CLASSNOTAVAILABLE } /// Add in-process server keys into registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllregisterserver #[no_mangle] extern "system" fn DllRegisterServer() -> HRESULT { let hinstance = unsafe { DLL_HANDLE }; let path = match get_module_path(hinstance) { Ok(p) => p, Err(_) => return winerror::E_UNEXPECTED, }; log::debug!("DllRegisterServer for {}", path.to_string_lossy()); match wslscript_common::registry::add_server_to_registry(&path) { Ok(_) => (), Err(e) => { log::error!("Failed to register server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Remove in-process server keys from registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllunregisterserver #[no_mangle] extern "system" fn DllUnregisterServer() -> HRESULT { match wslscript_common::registry::remove_server_from_registry() { Ok(_) => (), Err(e) => { log::error!("Failed to unregister server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Convert Win32 GUID pointer to Guid struct. const fn guid_from_ref(clsid: *const guiddef::GUID) -> Guid { Guid { 0: unsafe { *clsid }, } } /// Get path to loaded DLL file. fn get_module_path(hinstance: win::HINSTANCE) -> Result<PathBuf, Error> { use std::ffi::OsString; use std::os::windows::ffi::OsStringExt; use winapi::shared::ntdef; use winapi::um::libloaderapi::GetModuleFileNameW as GetModuleFileName; let mut buf: Vec<ntdef::WCHAR> = Vec::with_capacity(win::MAX_PATH); let len = unsafe { GetModuleFileName(hinstance, buf.as_mut_ptr(), buf.capacity() as _) }; if len == 0 { return Err(wslscript_common::win32::last_error()); } unsafe { buf.set_len(len as _) }; Ok(PathBuf::from(OsString::from_wide(&buf))) } bitflags::bitflags! { /// Key state flags. #[derive(Debug)] pub struct KeyState: win::DWORD { const MK_CONTROL = winuser::MK_CONTROL as win::DWORD; const MK_SHIFT = winuser::MK_SHIFT as win::DWORD; const MK_ALT = oleidl::MK_ALT as win::DWORD; const MK_LBUTTON = winuser::MK_LBUTTON as win::DWORD; const MK_MBUTTON = winuser::MK_MBUTTON as win::DWORD; const MK_RBUTTON = winuser::MK_RBUTTON as win::DWORD; } } // COM interface declarations. // // Note that methods must be in exact order! // // See https://www.magnumdb.com/ for interface GUID's. // See https://docs.microsoft.com/en-us/windows/win32/shell/handlers for // required interfaces. com::interfaces! { // NOTE: class! macro generates IClassFactory interface automatically, // so we must directly inherit from IUnknown. 
#[uuid("81521ebe-a2d4-450b-9bf8-5c23ed8730d0")] pub unsafe interface IHandler : com::interfaces::IUnknown {} #[uuid("0000010b-0000-0000-c000-000000000046")] pub unsafe interface IPersistFile : IPersist { fn IsDirty(&self) -> HRESULT; fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, dwMode: win::DWORD, ) -> HRESULT; fn Save( &self, pszFileName: wtypesbase::LPCOLESTR, fRemember: win::BOOL, ) -> HRESULT; fn SaveCompleted( &self, pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT; fn GetCurFile( &self, ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT; } #[uuid("0000010c-0000-0000-c000-000000000046")] pub unsafe interface IPersist : com::interfaces::IUnknown { fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT; } #[uuid("00000122-0000-0000-c000-000000000046")] pub unsafe interface IDropTarget: com::interfaces::IUnknown { fn DragEnter( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragOver( &self, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragLeave(&self) -> HRESULT; fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; } } com::class! { pub class Handler: IHandler, IPersistFile(IPersist), IDropTarget { // File that is receiving the drop. target: RefCell<PathBuf> } impl IHandler for Handler { } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersistfile impl IPersistFile for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-isdirty fn IsDirty(&self) -> HRESULT { log::debug!("IPersistFile::IsDirty"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-load fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, _dwMode: win::DWORD, ) -> HRESULT { // path to the file that is being dragged over, ie. 
the registered script file let filename = unsafe { WideCStr::from_ptr_str(pszFileName) }; let path = PathBuf::from(filename.to_os_string()); log::debug!("IPersistFile::Load {}", path.to_string_lossy()); if let Ok(mut target) = self.target.try_borrow_mut() { *target = path; } else { return winerror::E_FAIL; } winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-save fn Save( &self, _pszFileName: wtypesbase::LPCOLESTR, _fRemember: win::BOOL, ) -> HRESULT { log::debug!("IPersistFile::Save"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-savecompleted fn SaveCompleted( &self, _pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT { log::debug!("IPersistFile::SaveCompleted"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-getcurfile fn GetCurFile( &self, _ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT { log::debug!("IPersistFile::GetCurFile"); winerror::E_FAIL } } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersist impl IPersist for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersist-getclassid fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT { log::debug!("IPersist::GetClassID"); let guid = wslscript_common::DROP_HANDLER_CLSID.0; unsafe { *pClassID = guid } winerror::S_OK } } // See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nn-oleidl-idroptarget impl IDropTarget for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragenter fn DragEnter( &self, _pDataObj: *const objidl::IDataObject, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragEnter"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragover fn DragOver( &self, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragOver"); log::debug!("Keys {:?}", KeyState::from_bits_truncate(_grfKeyState)); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragleave fn DragLeave(&self) -> HRESULT { log::debug!("IDropTarget::DragLeave"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-drop fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, _pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::Drop"); let target = if let Ok(target) = self.target.try_borrow() { target.clone() } else { return winerror::E_UNEXPECTED; }; let obj = unsafe { &*pDataObj }; let keys = KeyState::from_bits_truncate(grfKeyState); super::handle_dropped_files(&target, obj, keys).and_then(|_| { unsafe { *pdwEffect = oleidl::DROPEFFECT_COPY; } Ok(winerror::S_OK) }).unwrap_or_else(|e| { log::debug!("Drop failed: {}", e); winerror::E_UNEXPECTED }) } } }
random_line_split
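This interface.rs source recurs in the following rows with different spans held out. One mechanism worth sketching is the unload gate: DllCanUnloadNow consults an AtomicUsize that counts live WSL worker threads and only permits unload at zero. A self-contained sketch of that counting pattern; the RAII guard type is an illustration, not part of the extension's real API:

use std::sync::atomic::{AtomicUsize, Ordering};

// Live-thread counter, mirroring THREAD_COUNTER above.
static THREADS: AtomicUsize = AtomicUsize::new(0);

// Increments on creation, decrements on drop, so a panicking worker
// still releases its slot.
struct ThreadGuard;

impl ThreadGuard {
    fn new() -> Self {
        THREADS.fetch_add(1, Ordering::SeqCst);
        ThreadGuard
    }
}

impl Drop for ThreadGuard {
    fn drop(&mut self) {
        THREADS.fetch_sub(1, Ordering::SeqCst);
    }
}

// The DllCanUnloadNow decision: permit unload only when no workers remain.
fn can_unload() -> bool {
    THREADS.load(Ordering::SeqCst) == 0
}

fn main() {
    let guard = ThreadGuard::new();
    assert!(!can_unload());
    drop(guard);
    assert!(can_unload());
}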
interface.rs
//! All the nitty gritty details regarding COM interface for the shell extension //! are defined here. //! //! See: https://docs.microsoft.com/en-us/windows/win32/shell/handlers#implementing-shell-extension-handlers use com::sys::HRESULT; use guid_win::Guid; use once_cell::sync::Lazy; use std::cell::RefCell; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use widestring::WideCStr; use winapi::shared::guiddef; use winapi::shared::minwindef as win; use winapi::shared::windef; use winapi::shared::winerror; use winapi::shared::wtypesbase; use winapi::um::objidl; use winapi::um::oleidl; use winapi::um::winnt; use winapi::um::winuser; use wslscript_common::error::*; use crate::progress::ProgressWindow; /// IClassFactory GUID. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/unknwn/nn-unknwn-iclassfactory /// /// Windows requests this interface via `DllGetClassObject` to further query /// relevant COM interfaces. _com-rs_ crate implements IClassFactory automatically /// for all interfaces (?), so we don't need to worry about details. static CLASS_FACTORY_CLSID: Lazy<Guid> = Lazy::new(|| Guid::from_str("00000001-0000-0000-c000-000000000046").unwrap()); /// Semaphore to keep track of running WSL threads. /// /// DLL shall not be released if there are threads running. pub(crate) static THREAD_COUNTER: AtomicUsize = AtomicUsize::new(0); /// Handle to loaded DLL module. static mut DLL_HANDLE: win::HINSTANCE = std::ptr::null_mut(); /// DLL module entry point. /// /// See: https://docs.microsoft.com/en-us/windows/win32/dlls/dllmain #[no_mangle] extern "system" fn DllMain( hinstance: win::HINSTANCE, reason: win::DWORD, _reserved: win::LPVOID, ) -> win::BOOL { match reason { winnt::DLL_PROCESS_ATTACH => { // store module instance to global variable unsafe { DLL_HANDLE = hinstance }; // set up logging #[cfg(feature = "debug")] if let Ok(mut path) = get_module_path(hinstance) { let stem = path.file_stem().map_or_else( || "debug.log".to_string(), |s| s.to_string_lossy().into_owned(), ); path.pop(); path.push(format!("{}.log", stem)); if simple_logging::log_to_file(&path, log::LevelFilter::Debug).is_err() { unsafe { use winapi::um::winuser::*; let text = wslscript_common::wcstring(format!( "Failed to set up logging to {}", path.to_string_lossy() )); MessageBoxW( std::ptr::null_mut(), text.as_ptr(), wchar::wchz!("Error").as_ptr(), MB_OK | MB_ICONERROR | MB_SERVICE_NOTIFICATION, ); } } } log::debug!("DLL_PROCESS_ATTACH"); return win::TRUE; } winnt::DLL_PROCESS_DETACH => { log::debug!("DLL_PROCESS_DETACH"); ProgressWindow::unregister_window_class(); } winnt::DLL_THREAD_ATTACH => {} winnt::DLL_THREAD_DETACH => {} _ => {} } win::FALSE } /// Called to check whether DLL can be unloaded from memory. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllcanunloadnow #[no_mangle] extern "system" fn DllCanUnloadNow() -> HRESULT { let n = THREAD_COUNTER.load(Ordering::SeqCst); if n > 0 { log::info!("{} WSL threads running, denying DLL unload", n); winerror::S_FALSE } else { log::info!("Permitting DLL unload"); winerror::S_OK } } /// Exposes class factory. 
/// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllgetclassobject #[no_mangle] extern "system" fn DllGetClassObject( class_id: guiddef::REFCLSID, iid: guiddef::REFIID, result: *mut win::LPVOID, ) -> HRESULT { let class_guid = guid_from_ref(class_id); let interface_guid = guid_from_ref(iid); // expect our registered class ID if wslscript_common::DROP_HANDLER_CLSID.eq(&class_guid) { // expect IClassFactory interface to be requested if !CLASS_FACTORY_CLSID.eq(&interface_guid) { log::warn!("Expected IClassFactory, got {}", interface_guid); } use com::production::Class as COMClass; let cls = <Handler as COMClass>::Factory::allocate(); let rv = unsafe { cls.QueryInterface(iid as _, result as _) }; log::debug!( "QueryInterface for {} returned {}, address={:p}", interface_guid, rv, result ); return rv; } else
winerror::CLASS_E_CLASSNOTAVAILABLE } /// Add in-process server keys into registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllregisterserver #[no_mangle] extern "system" fn DllRegisterServer() -> HRESULT { let hinstance = unsafe { DLL_HANDLE }; let path = match get_module_path(hinstance) { Ok(p) => p, Err(_) => return winerror::E_UNEXPECTED, }; log::debug!("DllRegisterServer for {}", path.to_string_lossy()); match wslscript_common::registry::add_server_to_registry(&path) { Ok(_) => (), Err(e) => { log::error!("Failed to register server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Remove in-process server keys from registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllunregisterserver #[no_mangle] extern "system" fn DllUnregisterServer() -> HRESULT { match wslscript_common::registry::remove_server_from_registry() { Ok(_) => (), Err(e) => { log::error!("Failed to unregister server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Convert Win32 GUID pointer to Guid struct. const fn guid_from_ref(clsid: *const guiddef::GUID) -> Guid { Guid { 0: unsafe { *clsid }, } } /// Get path to loaded DLL file. fn get_module_path(hinstance: win::HINSTANCE) -> Result<PathBuf, Error> { use std::ffi::OsString; use std::os::windows::ffi::OsStringExt; use winapi::shared::ntdef; use winapi::um::libloaderapi::GetModuleFileNameW as GetModuleFileName; let mut buf: Vec<ntdef::WCHAR> = Vec::with_capacity(win::MAX_PATH); let len = unsafe { GetModuleFileName(hinstance, buf.as_mut_ptr(), buf.capacity() as _) }; if len == 0 { return Err(wslscript_common::win32::last_error()); } unsafe { buf.set_len(len as _) }; Ok(PathBuf::from(OsString::from_wide(&buf))) } bitflags::bitflags! { /// Key state flags. #[derive(Debug)] pub struct KeyState: win::DWORD { const MK_CONTROL = winuser::MK_CONTROL as win::DWORD; const MK_SHIFT = winuser::MK_SHIFT as win::DWORD; const MK_ALT = oleidl::MK_ALT as win::DWORD; const MK_LBUTTON = winuser::MK_LBUTTON as win::DWORD; const MK_MBUTTON = winuser::MK_MBUTTON as win::DWORD; const MK_RBUTTON = winuser::MK_RBUTTON as win::DWORD; } } // COM interface declarations. // // Note that methods must be in exact order! // // See https://www.magnumdb.com/ for interface GUID's. // See https://docs.microsoft.com/en-us/windows/win32/shell/handlers for // required interfaces. com::interfaces! { // NOTE: class! macro generates IClassFactory interface automatically, // so we must directly inherit from IUnknown. 
#[uuid("81521ebe-a2d4-450b-9bf8-5c23ed8730d0")] pub unsafe interface IHandler : com::interfaces::IUnknown {} #[uuid("0000010b-0000-0000-c000-000000000046")] pub unsafe interface IPersistFile : IPersist { fn IsDirty(&self) -> HRESULT; fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, dwMode: win::DWORD, ) -> HRESULT; fn Save( &self, pszFileName: wtypesbase::LPCOLESTR, fRemember: win::BOOL, ) -> HRESULT; fn SaveCompleted( &self, pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT; fn GetCurFile( &self, ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT; } #[uuid("0000010c-0000-0000-c000-000000000046")] pub unsafe interface IPersist : com::interfaces::IUnknown { fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT; } #[uuid("00000122-0000-0000-c000-000000000046")] pub unsafe interface IDropTarget: com::interfaces::IUnknown { fn DragEnter( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragOver( &self, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragLeave(&self) -> HRESULT; fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; } } com::class! { pub class Handler: IHandler, IPersistFile(IPersist), IDropTarget { // File that is receiving the drop. target: RefCell<PathBuf> } impl IHandler for Handler { } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersistfile impl IPersistFile for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-isdirty fn IsDirty(&self) -> HRESULT { log::debug!("IPersistFile::IsDirty"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-load fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, _dwMode: win::DWORD, ) -> HRESULT { // path to the file that is being dragged over, ie. 
the registered script file let filename = unsafe { WideCStr::from_ptr_str(pszFileName) }; let path = PathBuf::from(filename.to_os_string()); log::debug!("IPersistFile::Load {}", path.to_string_lossy()); if let Ok(mut target) = self.target.try_borrow_mut() { *target = path; } else { return winerror::E_FAIL; } winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-save fn Save( &self, _pszFileName: wtypesbase::LPCOLESTR, _fRemember: win::BOOL, ) -> HRESULT { log::debug!("IPersistFile::Save"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-savecompleted fn SaveCompleted( &self, _pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT { log::debug!("IPersistFile::SaveCompleted"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-getcurfile fn GetCurFile( &self, _ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT { log::debug!("IPersistFile::GetCurFile"); winerror::E_FAIL } } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersist impl IPersist for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersist-getclassid fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT { log::debug!("IPersist::GetClassID"); let guid = wslscript_common::DROP_HANDLER_CLSID.0; unsafe { *pClassID = guid } winerror::S_OK } } // See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nn-oleidl-idroptarget impl IDropTarget for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragenter fn DragEnter( &self, _pDataObj: *const objidl::IDataObject, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragEnter"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragover fn DragOver( &self, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragOver"); log::debug!("Keys {:?}", KeyState::from_bits_truncate(_grfKeyState)); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragleave fn DragLeave(&self) -> HRESULT { log::debug!("IDropTarget::DragLeave"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-drop fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, _pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::Drop"); let target = if let Ok(target) = self.target.try_borrow() { target.clone() } else { return winerror::E_UNEXPECTED; }; let obj = unsafe { &*pDataObj }; let keys = KeyState::from_bits_truncate(grfKeyState); super::handle_dropped_files(&target, obj, keys).and_then(|_| { unsafe { *pdwEffect = oleidl::DROPEFFECT_COPY; } Ok(winerror::S_OK) }).unwrap_or_else(|e| { log::debug!("Drop failed: {}", e); winerror::E_UNEXPECTED }) } } }
{ log::warn!("Unsupported class: {}", class_guid); }
conditional_block
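The held-out middle of the row above is the else branch of the CLSID check in DllGetClassObject. A winapi-free sketch of that dispatch shape, using a u128 as a stand-in GUID (the value below reuses the IHandler interface id purely as an example) and spelling out the two HRESULT constants:

// Stand-ins for the guid_win / winapi items; not the crate's real types.
const DROP_HANDLER_CLSID: u128 = 0x81521ebe_a2d4_450b_9bf8_5c23ed8730d0;
const S_OK: i32 = 0;
const CLASS_E_CLASSNOTAVAILABLE: i32 = 0x80040111u32 as i32;

fn get_class_object(requested: u128) -> i32 {
    if requested == DROP_HANDLER_CLSID {
        // ... allocate the class factory and hand it back ...
        S_OK
    } else {
        // The branch held out by this row: log, then fall through to failure.
        eprintln!("Unsupported class: {requested:x}");
        CLASS_E_CLASSNOTAVAILABLE
    }
}

fn main() {
    assert_eq!(get_class_object(DROP_HANDLER_CLSID), S_OK);
    assert_eq!(get_class_object(0), CLASS_E_CLASSNOTAVAILABLE);
}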
interface.rs
//! All the nitty gritty details regarding COM interface for the shell extension //! are defined here. //! //! See: https://docs.microsoft.com/en-us/windows/win32/shell/handlers#implementing-shell-extension-handlers use com::sys::HRESULT; use guid_win::Guid; use once_cell::sync::Lazy; use std::cell::RefCell; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use widestring::WideCStr; use winapi::shared::guiddef; use winapi::shared::minwindef as win; use winapi::shared::windef; use winapi::shared::winerror; use winapi::shared::wtypesbase; use winapi::um::objidl; use winapi::um::oleidl; use winapi::um::winnt; use winapi::um::winuser; use wslscript_common::error::*; use crate::progress::ProgressWindow; /// IClassFactory GUID. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/unknwn/nn-unknwn-iclassfactory /// /// Windows requests this interface via `DllGetClassObject` to further query /// relevant COM interfaces. _com-rs_ crate implements IClassFactory automatically /// for all interfaces (?), so we don't need to worry about details. static CLASS_FACTORY_CLSID: Lazy<Guid> = Lazy::new(|| Guid::from_str("00000001-0000-0000-c000-000000000046").unwrap()); /// Semaphore to keep track of running WSL threads. /// /// DLL shall not be released if there are threads running. pub(crate) static THREAD_COUNTER: AtomicUsize = AtomicUsize::new(0); /// Handle to loaded DLL module. static mut DLL_HANDLE: win::HINSTANCE = std::ptr::null_mut(); /// DLL module entry point. /// /// See: https://docs.microsoft.com/en-us/windows/win32/dlls/dllmain #[no_mangle] extern "system" fn DllMain( hinstance: win::HINSTANCE, reason: win::DWORD, _reserved: win::LPVOID, ) -> win::BOOL { match reason { winnt::DLL_PROCESS_ATTACH => { // store module instance to global variable unsafe { DLL_HANDLE = hinstance }; // set up logging #[cfg(feature = "debug")] if let Ok(mut path) = get_module_path(hinstance) { let stem = path.file_stem().map_or_else( || "debug.log".to_string(), |s| s.to_string_lossy().into_owned(), ); path.pop(); path.push(format!("{}.log", stem)); if simple_logging::log_to_file(&path, log::LevelFilter::Debug).is_err() { unsafe { use winapi::um::winuser::*; let text = wslscript_common::wcstring(format!( "Failed to set up logging to {}", path.to_string_lossy() )); MessageBoxW( std::ptr::null_mut(), text.as_ptr(), wchar::wchz!("Error").as_ptr(), MB_OK | MB_ICONERROR | MB_SERVICE_NOTIFICATION, ); } } } log::debug!("DLL_PROCESS_ATTACH"); return win::TRUE; } winnt::DLL_PROCESS_DETACH => { log::debug!("DLL_PROCESS_DETACH"); ProgressWindow::unregister_window_class(); } winnt::DLL_THREAD_ATTACH => {} winnt::DLL_THREAD_DETACH => {} _ => {} } win::FALSE } /// Called to check whether DLL can be unloaded from memory. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllcanunloadnow #[no_mangle] extern "system" fn DllCanUnloadNow() -> HRESULT { let n = THREAD_COUNTER.load(Ordering::SeqCst); if n > 0 { log::info!("{} WSL threads running, denying DLL unload", n); winerror::S_FALSE } else { log::info!("Permitting DLL unload"); winerror::S_OK } } /// Exposes class factory. 
/// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllgetclassobject #[no_mangle] extern "system" fn DllGetClassObject( class_id: guiddef::REFCLSID, iid: guiddef::REFIID, result: *mut win::LPVOID, ) -> HRESULT { let class_guid = guid_from_ref(class_id); let interface_guid = guid_from_ref(iid); // expect our registered class ID if wslscript_common::DROP_HANDLER_CLSID.eq(&class_guid) { // expect IClassFactory interface to be requested if !CLASS_FACTORY_CLSID.eq(&interface_guid) { log::warn!("Expected IClassFactory, got {}", interface_guid); } use com::production::Class as COMClass; let cls = <Handler as COMClass>::Factory::allocate(); let rv = unsafe { cls.QueryInterface(iid as _, result as _) }; log::debug!( "QueryInterface for {} returned {}, address={:p}", interface_guid, rv, result ); return rv; } else { log::warn!("Unsupported class: {}", class_guid); } winerror::CLASS_E_CLASSNOTAVAILABLE } /// Add in-process server keys into registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllregisterserver #[no_mangle] extern "system" fn DllRegisterServer() -> HRESULT { let hinstance = unsafe { DLL_HANDLE }; let path = match get_module_path(hinstance) { Ok(p) => p, Err(_) => return winerror::E_UNEXPECTED, }; log::debug!("DllRegisterServer for {}", path.to_string_lossy()); match wslscript_common::registry::add_server_to_registry(&path) { Ok(_) => (), Err(e) => { log::error!("Failed to register server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Remove in-process server keys from registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllunregisterserver #[no_mangle] extern "system" fn DllUnregisterServer() -> HRESULT
/// Convert Win32 GUID pointer to Guid struct. const fn guid_from_ref(clsid: *const guiddef::GUID) -> Guid { Guid { 0: unsafe { *clsid }, } } /// Get path to loaded DLL file. fn get_module_path(hinstance: win::HINSTANCE) -> Result<PathBuf, Error> { use std::ffi::OsString; use std::os::windows::ffi::OsStringExt; use winapi::shared::ntdef; use winapi::um::libloaderapi::GetModuleFileNameW as GetModuleFileName; let mut buf: Vec<ntdef::WCHAR> = Vec::with_capacity(win::MAX_PATH); let len = unsafe { GetModuleFileName(hinstance, buf.as_mut_ptr(), buf.capacity() as _) }; if len == 0 { return Err(wslscript_common::win32::last_error()); } unsafe { buf.set_len(len as _) }; Ok(PathBuf::from(OsString::from_wide(&buf))) } bitflags::bitflags! { /// Key state flags. #[derive(Debug)] pub struct KeyState: win::DWORD { const MK_CONTROL = winuser::MK_CONTROL as win::DWORD; const MK_SHIFT = winuser::MK_SHIFT as win::DWORD; const MK_ALT = oleidl::MK_ALT as win::DWORD; const MK_LBUTTON = winuser::MK_LBUTTON as win::DWORD; const MK_MBUTTON = winuser::MK_MBUTTON as win::DWORD; const MK_RBUTTON = winuser::MK_RBUTTON as win::DWORD; } } // COM interface declarations. // // Note that methods must be in exact order! // // See https://www.magnumdb.com/ for interface GUID's. // See https://docs.microsoft.com/en-us/windows/win32/shell/handlers for // required interfaces. com::interfaces! { // NOTE: class! macro generates IClassFactory interface automatically, // so we must directly inherit from IUnknown. #[uuid("81521ebe-a2d4-450b-9bf8-5c23ed8730d0")] pub unsafe interface IHandler : com::interfaces::IUnknown {} #[uuid("0000010b-0000-0000-c000-000000000046")] pub unsafe interface IPersistFile : IPersist { fn IsDirty(&self) -> HRESULT; fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, dwMode: win::DWORD, ) -> HRESULT; fn Save( &self, pszFileName: wtypesbase::LPCOLESTR, fRemember: win::BOOL, ) -> HRESULT; fn SaveCompleted( &self, pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT; fn GetCurFile( &self, ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT; } #[uuid("0000010c-0000-0000-c000-000000000046")] pub unsafe interface IPersist : com::interfaces::IUnknown { fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT; } #[uuid("00000122-0000-0000-c000-000000000046")] pub unsafe interface IDropTarget: com::interfaces::IUnknown { fn DragEnter( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragOver( &self, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragLeave(&self) -> HRESULT; fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; } } com::class! { pub class Handler: IHandler, IPersistFile(IPersist), IDropTarget { // File that is receiving the drop. target: RefCell<PathBuf> } impl IHandler for Handler { } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersistfile impl IPersistFile for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-isdirty fn IsDirty(&self) -> HRESULT { log::debug!("IPersistFile::IsDirty"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-load fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, _dwMode: win::DWORD, ) -> HRESULT { // path to the file that is being dragged over, ie. 
the registered script file let filename = unsafe { WideCStr::from_ptr_str(pszFileName) }; let path = PathBuf::from(filename.to_os_string()); log::debug!("IPersistFile::Load {}", path.to_string_lossy()); if let Ok(mut target) = self.target.try_borrow_mut() { *target = path; } else { return winerror::E_FAIL; } winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-save fn Save( &self, _pszFileName: wtypesbase::LPCOLESTR, _fRemember: win::BOOL, ) -> HRESULT { log::debug!("IPersistFile::Save"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-savecompleted fn SaveCompleted( &self, _pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT { log::debug!("IPersistFile::SaveCompleted"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-getcurfile fn GetCurFile( &self, _ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT { log::debug!("IPersistFile::GetCurFile"); winerror::E_FAIL } } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersist impl IPersist for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersist-getclassid fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT { log::debug!("IPersist::GetClassID"); let guid = wslscript_common::DROP_HANDLER_CLSID.0; unsafe { *pClassID = guid } winerror::S_OK } } // See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nn-oleidl-idroptarget impl IDropTarget for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragenter fn DragEnter( &self, _pDataObj: *const objidl::IDataObject, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragEnter"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragover fn DragOver( &self, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragOver"); log::debug!("Keys {:?}", KeyState::from_bits_truncate(_grfKeyState)); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragleave fn DragLeave(&self) -> HRESULT { log::debug!("IDropTarget::DragLeave"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-drop fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, _pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::Drop"); let target = if let Ok(target) = self.target.try_borrow() { target.clone() } else { return winerror::E_UNEXPECTED; }; let obj = unsafe { &*pDataObj }; let keys = KeyState::from_bits_truncate(grfKeyState); super::handle_dropped_files(&target, obj, keys).and_then(|_| { unsafe { *pdwEffect = oleidl::DROPEFFECT_COPY; } Ok(winerror::S_OK) }).unwrap_or_else(|e| { log::debug!("Drop failed: {}", e); winerror::E_UNEXPECTED }) } } }
{ match wslscript_common::registry::remove_server_from_registry() { Ok(_) => (), Err(e) => { log::error!("Failed to unregister server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK }
identifier_body
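Here the held-out middle is the whole body of DllUnregisterServer, which (like DllRegisterServer) just maps a registry Result into an HRESULT. A sketch of that mapping under an assumed generic error type; the constants are the standard HRESULT values, and the helper name is invented:

const S_OK: i32 = 0;
const E_UNEXPECTED: i32 = 0x8000FFFFu32 as i32;

// Collapse any error to E_UNEXPECTED after logging it, as both
// registration entry points above do.
fn to_hresult<E: std::fmt::Display>(result: Result<(), E>) -> i32 {
    match result {
        Ok(()) => S_OK,
        Err(e) => {
            eprintln!("operation failed: {e}");
            E_UNEXPECTED
        }
    }
}

fn main() {
    assert_eq!(to_hresult::<&str>(Ok(())), S_OK);
    assert_eq!(to_hresult(Err("registry key missing")), E_UNEXPECTED);
}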
interface.rs
//! All the nitty gritty details regarding COM interface for the shell extension //! are defined here. //! //! See: https://docs.microsoft.com/en-us/windows/win32/shell/handlers#implementing-shell-extension-handlers use com::sys::HRESULT; use guid_win::Guid; use once_cell::sync::Lazy; use std::cell::RefCell; use std::path::PathBuf; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, Ordering}; use widestring::WideCStr; use winapi::shared::guiddef; use winapi::shared::minwindef as win; use winapi::shared::windef; use winapi::shared::winerror; use winapi::shared::wtypesbase; use winapi::um::objidl; use winapi::um::oleidl; use winapi::um::winnt; use winapi::um::winuser; use wslscript_common::error::*; use crate::progress::ProgressWindow; /// IClassFactory GUID. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/unknwn/nn-unknwn-iclassfactory /// /// Windows requests this interface via `DllGetClassObject` to further query /// relevant COM interfaces. _com-rs_ crate implements IClassFactory automatically /// for all interfaces (?), so we don't need to worry about details. static CLASS_FACTORY_CLSID: Lazy<Guid> = Lazy::new(|| Guid::from_str("00000001-0000-0000-c000-000000000046").unwrap()); /// Semaphore to keep track of running WSL threads. /// /// DLL shall not be released if there are threads running. pub(crate) static THREAD_COUNTER: AtomicUsize = AtomicUsize::new(0); /// Handle to loaded DLL module. static mut DLL_HANDLE: win::HINSTANCE = std::ptr::null_mut(); /// DLL module entry point. /// /// See: https://docs.microsoft.com/en-us/windows/win32/dlls/dllmain #[no_mangle] extern "system" fn DllMain( hinstance: win::HINSTANCE, reason: win::DWORD, _reserved: win::LPVOID, ) -> win::BOOL { match reason { winnt::DLL_PROCESS_ATTACH => { // store module instance to global variable unsafe { DLL_HANDLE = hinstance }; // set up logging #[cfg(feature = "debug")] if let Ok(mut path) = get_module_path(hinstance) { let stem = path.file_stem().map_or_else( || "debug.log".to_string(), |s| s.to_string_lossy().into_owned(), ); path.pop(); path.push(format!("{}.log", stem)); if simple_logging::log_to_file(&path, log::LevelFilter::Debug).is_err() { unsafe { use winapi::um::winuser::*; let text = wslscript_common::wcstring(format!( "Failed to set up logging to {}", path.to_string_lossy() )); MessageBoxW( std::ptr::null_mut(), text.as_ptr(), wchar::wchz!("Error").as_ptr(), MB_OK | MB_ICONERROR | MB_SERVICE_NOTIFICATION, ); } } } log::debug!("DLL_PROCESS_ATTACH"); return win::TRUE; } winnt::DLL_PROCESS_DETACH => { log::debug!("DLL_PROCESS_DETACH"); ProgressWindow::unregister_window_class(); } winnt::DLL_THREAD_ATTACH => {} winnt::DLL_THREAD_DETACH => {} _ => {} } win::FALSE } /// Called to check whether DLL can be unloaded from memory. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllcanunloadnow #[no_mangle] extern "system" fn DllCanUnloadNow() -> HRESULT { let n = THREAD_COUNTER.load(Ordering::SeqCst); if n > 0 { log::info!("{} WSL threads running, denying DLL unload", n); winerror::S_FALSE } else { log::info!("Permitting DLL unload"); winerror::S_OK } } /// Exposes class factory. 
/// /// See: https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-dllgetclassobject #[no_mangle] extern "system" fn DllGetClassObject( class_id: guiddef::REFCLSID, iid: guiddef::REFIID, result: *mut win::LPVOID, ) -> HRESULT { let class_guid = guid_from_ref(class_id); let interface_guid = guid_from_ref(iid); // expect our registered class ID if wslscript_common::DROP_HANDLER_CLSID.eq(&class_guid) { // expect IClassFactory interface to be requested if !CLASS_FACTORY_CLSID.eq(&interface_guid) { log::warn!("Expected IClassFactory, got {}", interface_guid); } use com::production::Class as COMClass; let cls = <Handler as COMClass>::Factory::allocate(); let rv = unsafe { cls.QueryInterface(iid as _, result as _) }; log::debug!( "QueryInterface for {} returned {}, address={:p}", interface_guid, rv, result ); return rv; } else { log::warn!("Unsupported class: {}", class_guid); } winerror::CLASS_E_CLASSNOTAVAILABLE } /// Add in-process server keys into registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllregisterserver #[no_mangle] extern "system" fn
() -> HRESULT { let hinstance = unsafe { DLL_HANDLE }; let path = match get_module_path(hinstance) { Ok(p) => p, Err(_) => return winerror::E_UNEXPECTED, }; log::debug!("DllRegisterServer for {}", path.to_string_lossy()); match wslscript_common::registry::add_server_to_registry(&path) { Ok(_) => (), Err(e) => { log::error!("Failed to register server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Remove in-process server keys from registry. /// /// See: https://docs.microsoft.com/en-us/windows/win32/api/olectl/nf-olectl-dllunregisterserver #[no_mangle] extern "system" fn DllUnregisterServer() -> HRESULT { match wslscript_common::registry::remove_server_from_registry() { Ok(_) => (), Err(e) => { log::error!("Failed to unregister server: {}", e); return winerror::E_UNEXPECTED; } } winerror::S_OK } /// Convert Win32 GUID pointer to Guid struct. const fn guid_from_ref(clsid: *const guiddef::GUID) -> Guid { Guid { 0: unsafe { *clsid }, } } /// Get path to loaded DLL file. fn get_module_path(hinstance: win::HINSTANCE) -> Result<PathBuf, Error> { use std::ffi::OsString; use std::os::windows::ffi::OsStringExt; use winapi::shared::ntdef; use winapi::um::libloaderapi::GetModuleFileNameW as GetModuleFileName; let mut buf: Vec<ntdef::WCHAR> = Vec::with_capacity(win::MAX_PATH); let len = unsafe { GetModuleFileName(hinstance, buf.as_mut_ptr(), buf.capacity() as _) }; if len == 0 { return Err(wslscript_common::win32::last_error()); } unsafe { buf.set_len(len as _) }; Ok(PathBuf::from(OsString::from_wide(&buf))) } bitflags::bitflags! { /// Key state flags. #[derive(Debug)] pub struct KeyState: win::DWORD { const MK_CONTROL = winuser::MK_CONTROL as win::DWORD; const MK_SHIFT = winuser::MK_SHIFT as win::DWORD; const MK_ALT = oleidl::MK_ALT as win::DWORD; const MK_LBUTTON = winuser::MK_LBUTTON as win::DWORD; const MK_MBUTTON = winuser::MK_MBUTTON as win::DWORD; const MK_RBUTTON = winuser::MK_RBUTTON as win::DWORD; } } // COM interface declarations. // // Note that methods must be in exact order! // // See https://www.magnumdb.com/ for interface GUID's. // See https://docs.microsoft.com/en-us/windows/win32/shell/handlers for // required interfaces. com::interfaces! { // NOTE: class! macro generates IClassFactory interface automatically, // so we must directly inherit from IUnknown. 
#[uuid("81521ebe-a2d4-450b-9bf8-5c23ed8730d0")] pub unsafe interface IHandler : com::interfaces::IUnknown {} #[uuid("0000010b-0000-0000-c000-000000000046")] pub unsafe interface IPersistFile : IPersist { fn IsDirty(&self) -> HRESULT; fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, dwMode: win::DWORD, ) -> HRESULT; fn Save( &self, pszFileName: wtypesbase::LPCOLESTR, fRemember: win::BOOL, ) -> HRESULT; fn SaveCompleted( &self, pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT; fn GetCurFile( &self, ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT; } #[uuid("0000010c-0000-0000-c000-000000000046")] pub unsafe interface IPersist : com::interfaces::IUnknown { fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT; } #[uuid("00000122-0000-0000-c000-000000000046")] pub unsafe interface IDropTarget: com::interfaces::IUnknown { fn DragEnter( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragOver( &self, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; fn DragLeave(&self) -> HRESULT; fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT; } } com::class! { pub class Handler: IHandler, IPersistFile(IPersist), IDropTarget { // File that is receiving the drop. target: RefCell<PathBuf> } impl IHandler for Handler { } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersistfile impl IPersistFile for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-isdirty fn IsDirty(&self) -> HRESULT { log::debug!("IPersistFile::IsDirty"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-load fn Load( &self, pszFileName: wtypesbase::LPCOLESTR, _dwMode: win::DWORD, ) -> HRESULT { // path to the file that is being dragged over, ie. 
the registered script file let filename = unsafe { WideCStr::from_ptr_str(pszFileName) }; let path = PathBuf::from(filename.to_os_string()); log::debug!("IPersistFile::Load {}", path.to_string_lossy()); if let Ok(mut target) = self.target.try_borrow_mut() { *target = path; } else { return winerror::E_FAIL; } winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-save fn Save( &self, _pszFileName: wtypesbase::LPCOLESTR, _fRemember: win::BOOL, ) -> HRESULT { log::debug!("IPersistFile::Save"); winerror::S_FALSE } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-savecompleted fn SaveCompleted( &self, _pszFileName: wtypesbase::LPCOLESTR, ) -> HRESULT { log::debug!("IPersistFile::SaveCompleted"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersistfile-getcurfile fn GetCurFile( &self, _ppszFileName: *mut wtypesbase::LPOLESTR, ) -> HRESULT { log::debug!("IPersistFile::GetCurFile"); winerror::E_FAIL } } // See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nn-objidl-ipersist impl IPersist for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/objidl/nf-objidl-ipersist-getclassid fn GetClassID( &self, pClassID: *mut guiddef::CLSID, ) -> HRESULT { log::debug!("IPersist::GetClassID"); let guid = wslscript_common::DROP_HANDLER_CLSID.0; unsafe { *pClassID = guid } winerror::S_OK } } // See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nn-oleidl-idroptarget impl IDropTarget for Handler { /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragenter fn DragEnter( &self, _pDataObj: *const objidl::IDataObject, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragEnter"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragover fn DragOver( &self, _grfKeyState: win::DWORD, _pt: *const windef::POINTL, _pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::DragOver"); log::debug!("Keys {:?}", KeyState::from_bits_truncate(_grfKeyState)); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-dragleave fn DragLeave(&self) -> HRESULT { log::debug!("IDropTarget::DragLeave"); winerror::S_OK } /// See: https://docs.microsoft.com/en-us/windows/win32/api/oleidl/nf-oleidl-idroptarget-drop fn Drop( &self, pDataObj: *const objidl::IDataObject, grfKeyState: win::DWORD, _pt: *const windef::POINTL, pdwEffect: *mut win::DWORD, ) -> HRESULT { log::debug!("IDropTarget::Drop"); let target = if let Ok(target) = self.target.try_borrow() { target.clone() } else { return winerror::E_UNEXPECTED; }; let obj = unsafe { &*pDataObj }; let keys = KeyState::from_bits_truncate(grfKeyState); super::handle_dropped_files(&target, obj, keys).and_then(|_| { unsafe { *pdwEffect = oleidl::DROPEFFECT_COPY; } Ok(winerror::S_OK) }).unwrap_or_else(|e| { log::debug!("Drop failed: {}", e); winerror::E_UNEXPECTED }) } } }
DllRegisterServer
identifier_name
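This row holds out only the identifier DllRegisterServer. Nearby, get_module_path shows the classic Win32 wide-string pattern: a WCHAR buffer is filled by an FFI call that reports the written length, then decoded into a path. A platform-neutral sketch of the decode step, with fake_module_path standing in for GetModuleFileNameW (purely illustrative):

use std::path::PathBuf;

// Pretend FFI call: fills the buffer with UTF-16 and returns the length,
// with 0 meaning failure, like GetModuleFileNameW.
fn fake_module_path(buf: &mut [u16]) -> usize {
    let path: Vec<u16> = "C:\\wslscript\\handler.dll".encode_utf16().collect();
    let n = path.len().min(buf.len());
    buf[..n].copy_from_slice(&path[..n]);
    n
}

fn module_path() -> Option<PathBuf> {
    let mut buf = vec![0u16; 260]; // MAX_PATH
    let len = fake_module_path(&mut buf);
    if len == 0 {
        return None; // the real code surfaces GetLastError here
    }
    buf.truncate(len);
    String::from_utf16(&buf).ok().map(PathBuf::from)
}

fn main() {
    assert!(module_path().is_some());
}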
server.rs
use super::router::HttpRouter; use super::ProbeRegistration; use futures::future::BoxFuture; use futures::future::FusedFuture; use futures::future::FutureExt; use futures::lock::Mutex; use hyper::server::{ conn::{AddrIncoming, AddrStream}, Server, }; use hyper::service::Service; use hyper::Body; use hyper::Request; use hyper::Response; use std::future::Future; use std::net::SocketAddr; use std::num::NonZeroU32; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use uuid::Uuid; use slog::Logger; /* TODO Replace this with something else? */ type GenericError = Box<dyn std::error::Error + Send + Sync>; /** * Endpoint-accessible context associated with a server. * * Automatically implemented for all Send + Sync types. */ pub trait ServerContext: Send + Sync + 'static {} impl<T: 'static> ServerContext for T where T: Send + Sync {} /** * Stores shared state used by the Dropshot server. */ pub struct DropshotState<C: ServerContext> { /** caller-specific state */ pub private: C, /** static server configuration parameters */ pub config: ServerConfig, /** request router */ pub router: HttpRouter<C>, /** server-wide log handle */ pub log: Logger, /** bound local address for the server. */ pub local_addr: SocketAddr, } /** * Stores static configuration associated with the server * TODO-cleanup merge with ConfigDropshot */ pub struct ServerConfig { /** maximum allowed size of a request body */ pub request_body_max_bytes: usize, /** maximum size of any page of results */ pub page_max_nitems: NonZeroU32, /** default size for a page of results */ pub page_default_nitems: NonZeroU32, } /** * A thin wrapper around a Hyper Server object that exposes some interfaces that * we find useful. */ pub struct HttpServerStarter<C: ServerContext> { app_state: Arc<DropshotState<C>>, server: Server<AddrIncoming, ServerConnectionHandler<C>>, local_addr: SocketAddr, } impl<C: ServerContext> HttpServerStarter<C> { pub fn local_addr(&self) -> SocketAddr { self.local_addr } /** * Begins execution of the underlying Http server. */ pub fn start(self) -> HttpServer<C> { let (tx, rx) = tokio::sync::oneshot::channel::<()>(); let log_close = self.app_state.log.new(o!()); let graceful = self.server.with_graceful_shutdown(async move { rx.await.expect( "dropshot server shutting down without invoking close()", ); info!(log_close, "received request to begin graceful shutdown"); }); let join_handle = tokio::spawn(async { graceful.await }); let probe_registration = if cfg!(feature = "usdt-probes") { match usdt::register_probes() { Ok(_) => { debug!( self.app_state.log, "successfully registered DTrace USDT probes" ); ProbeRegistration::Succeeded } Err(e) => { let msg = e.to_string(); error!( self.app_state.log, "failed to register DTrace USDT probes: {}", msg ); ProbeRegistration::Failed(msg) } } } else { debug!( self.app_state.log, "DTrace USDT probes compiled out, not registering" ); ProbeRegistration::Disabled }; HttpServer { probe_registration, app_state: self.app_state, local_addr: self.local_addr, join_handle: Some(join_handle), close_channel: Some(tx), } } /** * Set up an HTTP server bound on the specified address that runs registered * handlers. You must invoke `start()` on the returned instance of * `HttpServerStarter` (and await the result) to actually start the server. * * TODO-cleanup We should be able to take a reference to the ApiDescription. * We currently can't because we need to hang onto the router. 
*/ pub fn new( config: &ConfigDropshot, api: ApiDescription<C>, private: C, log: &Logger, ) -> Result<HttpServerStarter<C>, hyper::Error> { let incoming = AddrIncoming::bind(&config.bind_address)?; let local_addr = incoming.local_addr(); /* TODO-cleanup too many Arcs? */ let app_state = Arc::new(DropshotState { private, config: ServerConfig { /* We start aggressively to ensure test coverage. */ request_body_max_bytes: config.request_body_max_bytes, page_max_nitems: NonZeroU32::new(10000).unwrap(), page_default_nitems: NonZeroU32::new(100).unwrap(), }, router: api.into_router(), log: log.new(o!("local_addr" => local_addr)), local_addr, }); for (path, method, _) in &app_state.router { debug!(app_state.log, "registered endpoint"; "method" => &method, "path" => &path ); } let make_service = ServerConnectionHandler::new(Arc::clone(&app_state)); let builder = hyper::Server::builder(incoming); let server = builder.serve(make_service); info!(app_state.log, "listening"); Ok(HttpServerStarter { app_state, server, local_addr, }) } pub fn app_private(&self) -> &C { &self.app_state.private } } /** * A running Dropshot HTTP server. * * # Panics * * Panics if dropped without invoking `close`. */ pub struct HttpServer<C: ServerContext> { probe_registration: ProbeRegistration, app_state: Arc<DropshotState<C>>, local_addr: SocketAddr, join_handle: Option<tokio::task::JoinHandle<Result<(), hyper::Error>>>, close_channel: Option<tokio::sync::oneshot::Sender<()>>, } impl<C: ServerContext> HttpServer<C> { pub fn local_addr(&self) -> SocketAddr { self.local_addr } pub fn app_private(&self) -> &C { &self.app_state.private } /** * Signals the currently running server to stop and waits for it to exit. */ pub async fn close(mut self) -> Result<(), String> { self.close_channel .take() .expect("cannot close twice") .send(()) .expect("failed to send close signal"); if let Some(handle) = self.join_handle.take() { handle .await .map_err(|error| format!("waiting for server: {}", error))? .map_err(|error| format!("server stopped: {}", error)) } else
} /** * Return the result of registering the server's DTrace USDT probes. * * See [`ProbeRegistration`] for details. */ pub fn probe_registration(&self) -> &ProbeRegistration { &self.probe_registration } } /* * For graceful termination, the `close()` function is preferred, as it can * report errors and wait for termination to complete. However, we impl * `Drop` to attempt to shut down the server to handle less clean shutdowns * (e.g., from failing tests). */ impl<C: ServerContext> Drop for HttpServer<C> { fn drop(&mut self) { if let Some(c) = self.close_channel.take() { c.send(()).expect("failed to send close signal") } } } impl<C: ServerContext> Future for HttpServer<C> { type Output = Result<(), String>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let server = Pin::into_inner(self); let mut handle = server .join_handle .take() .expect("polling a server future which has already completed"); let poll = handle.poll_unpin(cx).map(|result| { result .map_err(|error| format!("waiting for server: {}", error))? .map_err(|error| format!("server stopped: {}", error)) }); if poll.is_pending() { server.join_handle.replace(handle); } return poll; } } impl<C: ServerContext> FusedFuture for HttpServer<C> { fn is_terminated(&self) -> bool { self.join_handle.is_none() } } /** * Initial entry point for handling a new connection to the HTTP server. * This is invoked by Hyper when a new connection is accepted. This function * must return a Hyper Service object that will handle requests for this * connection. */ async fn http_connection_handle<C: ServerContext>( server: Arc<DropshotState<C>>, remote_addr: SocketAddr, ) -> Result<ServerRequestHandler<C>, GenericError> { info!(server.log, "accepted connection"; "remote_addr" => %remote_addr); Ok(ServerRequestHandler::new(server, remote_addr)) } /** * Initial entry point for handling a new request to the HTTP server. This is * invoked by Hyper when a new request is received. This function returns a * Result that either represents a valid HTTP response or an error (which will * also get turned into an HTTP response). */ async fn http_request_handle_wrap<C: ServerContext>( server: Arc<DropshotState<C>>, remote_addr: SocketAddr, request: Request<Body>, ) -> Result<Response<Body>, GenericError> { /* * This extra level of indirection makes error handling much more * straightforward, since the request handling code can simply return early * with an error and we'll treat it like an error from any of the endpoints * themselves. */ let request_id = generate_request_id(); let request_log = server.log.new(o!( "remote_addr" => remote_addr, "req_id" => request_id.clone(), "method" => request.method().as_str().to_string(), "uri" => format!("{}", request.uri()), )); trace!(request_log, "incoming request"); probes::request_start!(|| { let uri = request.uri(); crate::RequestInfo { id: request_id.clone(), local_addr: server.local_addr, remote_addr, method: request.method().to_string(), path: uri.path().to_string(), query: uri.query().map(|x| x.to_string()), } }); // Copy local address to report later during the finish probe, as the // server is passed by value to the request handler function. 
let local_addr = server.local_addr; let maybe_response = http_request_handle( server, request, &request_id, request_log.new(o!()), ) .await; let response = match maybe_response { Err(error) => { let message_external = error.external_message.clone(); let message_internal = error.internal_message.clone(); let r = error.into_response(&request_id); probes::request_finish!(|| { crate::ResponseInfo { id: request_id.clone(), local_addr, remote_addr, status_code: r.status().as_u16(), message: message_external.clone(), } }); /* TODO-debug: add request and response headers here */ info!(request_log, "request completed"; "response_code" => r.status().as_str().to_string(), "error_message_internal" => message_internal, "error_message_external" => message_external, ); r } Ok(response) => { /* TODO-debug: add request and response headers here */ info!(request_log, "request completed"; "response_code" => response.status().as_str().to_string() ); probes::request_finish!(|| { crate::ResponseInfo { id: request_id.parse().unwrap(), local_addr, remote_addr, status_code: response.status().as_u16(), message: "".to_string(), } }); response } }; Ok(response) } async fn http_request_handle<C: ServerContext>( server: Arc<DropshotState<C>>, request: Request<Body>, request_id: &str, request_log: Logger, ) -> Result<Response<Body>, HttpError> { /* * TODO-hardening: is it correct to (and do we correctly) read the entire * request body even if we decide it's too large and are going to send a 400 * response? * TODO-hardening: add a request read timeout as well so that we don't allow * this to take forever. * TODO-correctness: Do we need to dump the body on errors? */ let method = request.method(); let uri = request.uri(); let lookup_result = server.router.lookup_route(&method, uri.path().into())?; let rqctx = RequestContext { server: Arc::clone(&server), request: Arc::new(Mutex::new(request)), path_variables: lookup_result.variables, request_id: request_id.to_string(), log: request_log, }; let mut response = lookup_result.handler.handle_request(rqctx).await?; response.headers_mut().insert( HEADER_REQUEST_ID, http::header::HeaderValue::from_str(&request_id).unwrap(), ); Ok(response) } /* * This function should probably be parametrized by some name of the service * that is expected to be unique within an organization. That way, it would be * possible to determine from a given request id which service it was from. * TODO should we encode more information here? Service? Instance? Time up to * the hour? */ fn generate_request_id() -> String { format!("{}", Uuid::new_v4()) } /** * ServerConnectionHandler is a Hyper Service implementation that forwards * incoming connections to `http_connection_handle()`, providing the server * state object as an additional argument. We could use `make_service_fn` here * using a closure to capture the state object, but the resulting code is a bit * simpler without it. */ pub struct ServerConnectionHandler<C: ServerContext> { /** backend state that will be made available to the connection handler */ server: Arc<DropshotState<C>>, } impl<C: ServerContext> ServerConnectionHandler<C> { /** * Create an ServerConnectionHandler with the given state object that * will be made available to the handler. 
*/ fn new(server: Arc<DropshotState<C>>) -> Self { ServerConnectionHandler { server, } } } impl<T: ServerContext> Service<&AddrStream> for ServerConnectionHandler<T> { /* * Recall that a Service in this context is just something that takes a * request (which could be anything) and produces a response (which could be * anything). This being a connection handler, the request type is an * AddrStream (which wraps a TCP connection) and the response type is * another Service: one that accepts HTTP requests and produces HTTP * responses. */ type Response = ServerRequestHandler<T>; type Error = GenericError; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { // TODO is this right? Poll::Ready(Ok(())) } fn call(&mut self, conn: &AddrStream) -> Self::Future { /* * We're given a borrowed reference to the AddrStream, but our interface * is async (which is good, so that we can support time-consuming * operations as part of receiving requests). To avoid having to ensure * that conn's lifetime exceeds that of this async operation, we simply * copy the only useful information out of the conn: the SocketAddr. We * may want to create our own connection type to encapsulate the socket * address and any other per-connection state that we want to keep. */ let server = Arc::clone(&self.server); let remote_addr = conn.remote_addr(); Box::pin(http_connection_handle(server, remote_addr)) } } /** * ServerRequestHandler is a Hyper Service implementation that forwards * incoming requests to `http_request_handle_wrap()`, including as an argument * the backend server state object. We could use `service_fn` here using a * closure to capture the server state object, but the resulting code is a bit * simpler without all that. */ pub struct ServerRequestHandler<C: ServerContext> { /** backend state that will be made available to the request handler */ server: Arc<DropshotState<C>>, remote_addr: SocketAddr, } impl<C: ServerContext> ServerRequestHandler<C> { /** * Create a ServerRequestHandler object with the given state object that * will be provided to the handler function. */ fn new(server: Arc<DropshotState<C>>, remote_addr: SocketAddr) -> Self { ServerRequestHandler { server, remote_addr, } } } impl<C: ServerContext> Service<Request<Body>> for ServerRequestHandler<C> { type Response = Response<Body>; type Error = GenericError; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { // TODO is this right? Poll::Ready(Ok(())) } fn call(&mut self, req: Request<Body>) -> Self::Future { Box::pin(http_request_handle_wrap( Arc::clone(&self.server), self.remote_addr, req, )) } } #[cfg(test)] mod test { use super::*; // Referring to the current crate as "dropshot::" instead of "crate::" // helps the endpoint macro with module lookup. 
use crate as dropshot; use dropshot::endpoint; use dropshot::test_util::ClientTestContext; use dropshot::test_util::LogContext; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; use http::StatusCode; use hyper::Method; use futures::future::FusedFuture; #[endpoint { method = GET, path = "/handler", }] async fn handler( _rqctx: Arc<RequestContext<i32>>, ) -> Result<HttpResponseOk<u64>, HttpError> { Ok(HttpResponseOk(3)) } struct TestConfig { log_context: LogContext, } impl TestConfig { fn log(&self) -> &slog::Logger { &self.log_context.log } } fn create_test_server() -> (HttpServer<i32>, TestConfig) { let config_dropshot = ConfigDropshot::default(); let mut api = ApiDescription::new(); api.register(handler).unwrap(); let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Warn, }; let log_context = LogContext::new("test server", &config_logging); let log = &log_context.log; let server = HttpServerStarter::new(&config_dropshot, api, 0, log) .unwrap() .start(); (server, TestConfig { log_context, }) } async fn single_client_request(addr: SocketAddr, log: &slog::Logger) { let client_log = log.new(o!("http_client" => "dropshot test suite")); let client_testctx = ClientTestContext::new(addr, client_log); tokio::task::spawn(async move { let response = client_testctx .make_request( Method::GET, "/handler", None as Option<()>, StatusCode::OK, ) .await; assert!(response.is_ok()); }) .await .expect("client request failed"); } #[tokio::test] async fn test_server_run_then_close() { let (mut server, config) = create_test_server(); let client = single_client_request(server.local_addr, config.log());
{ Ok(()) }
conditional_block
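The `server.rs` rows above center on the `HttpServerStarter::new` / `start()` / `close()` lifecycle. A minimal sketch of how a caller might drive it, assuming the same `i32` private-state type the sample's tests use; `run_and_close` is an illustrative name, not part of the source:

```rust
// Start a server, let clients talk to it, then shut it down gracefully.
// `close()` waits for the server task and reports errors; relying on `Drop`
// only fires the shutdown channel and cannot surface them.
async fn run_and_close(
    config: &ConfigDropshot,
    api: ApiDescription<i32>,
    log: &slog::Logger,
) -> Result<(), String> {
    let server = HttpServerStarter::new(config, api, 0, log)
        .map_err(|e| e.to_string())?
        .start();
    // Requests may now be issued against `server.local_addr()`.
    server.close().await
}
```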
server.rs
use super::router::HttpRouter; use super::ProbeRegistration; use futures::future::BoxFuture; use futures::future::FusedFuture; use futures::future::FutureExt; use futures::lock::Mutex; use hyper::server::{ conn::{AddrIncoming, AddrStream}, Server, }; use hyper::service::Service; use hyper::Body; use hyper::Request; use hyper::Response; use std::future::Future; use std::net::SocketAddr; use std::num::NonZeroU32; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use uuid::Uuid; use slog::Logger; /* TODO Replace this with something else? */ type GenericError = Box<dyn std::error::Error + Send + Sync>; /** * Endpoint-accessible context associated with a server. * * Automatically implemented for all Send + Sync types. */ pub trait ServerContext: Send + Sync +'static {} impl<T:'static> ServerContext for T where T: Send + Sync {} /** * Stores shared state used by the Dropshot server. */ pub struct DropshotState<C: ServerContext> { /** caller-specific state */ pub private: C, /** static server configuration parameters */ pub config: ServerConfig, /** request router */ pub router: HttpRouter<C>, /** server-wide log handle */ pub log: Logger, /** bound local address for the server. */ pub local_addr: SocketAddr, } /** * Stores static configuration associated with the server * TODO-cleanup merge with ConfigDropshot */ pub struct ServerConfig { /** maximum allowed size of a request body */ pub request_body_max_bytes: usize, /** maximum size of any page of results */ pub page_max_nitems: NonZeroU32, /** default size for a page of results */ pub page_default_nitems: NonZeroU32, } /** * A thin wrapper around a Hyper Server object that exposes some interfaces that * we find useful. */ pub struct HttpServerStarter<C: ServerContext> { app_state: Arc<DropshotState<C>>, server: Server<AddrIncoming, ServerConnectionHandler<C>>, local_addr: SocketAddr, } impl<C: ServerContext> HttpServerStarter<C> { pub fn local_addr(&self) -> SocketAddr { self.local_addr } /** * Begins execution of the underlying Http server. */ pub fn start(self) -> HttpServer<C> { let (tx, rx) = tokio::sync::oneshot::channel::<()>(); let log_close = self.app_state.log.new(o!()); let graceful = self.server.with_graceful_shutdown(async move { rx.await.expect( "dropshot server shutting down without invoking close()", ); info!(log_close, "received request to begin graceful shutdown"); }); let join_handle = tokio::spawn(async { graceful.await }); let probe_registration = if cfg!(feature = "usdt-probes") { match usdt::register_probes() { Ok(_) => { debug!( self.app_state.log, "successfully registered DTrace USDT probes" ); ProbeRegistration::Succeeded
self.app_state.log, "failed to register DTrace USDT probes: {}", msg ); ProbeRegistration::Failed(msg) } } } else { debug!( self.app_state.log, "DTrace USDT probes compiled out, not registering" ); ProbeRegistration::Disabled }; HttpServer { probe_registration, app_state: self.app_state, local_addr: self.local_addr, join_handle: Some(join_handle), close_channel: Some(tx), } } /** * Set up an HTTP server bound on the specified address that runs registered * handlers. You must invoke `start()` on the returned instance of * `HttpServerStarter` (and await the result) to actually start the server. * * TODO-cleanup We should be able to take a reference to the ApiDescription. * We currently can't because we need to hang onto the router. */ pub fn new( config: &ConfigDropshot, api: ApiDescription<C>, private: C, log: &Logger, ) -> Result<HttpServerStarter<C>, hyper::Error> { let incoming = AddrIncoming::bind(&config.bind_address)?; let local_addr = incoming.local_addr(); /* TODO-cleanup too many Arcs? */ let app_state = Arc::new(DropshotState { private, config: ServerConfig { /* We start aggressively to ensure test coverage. */ request_body_max_bytes: config.request_body_max_bytes, page_max_nitems: NonZeroU32::new(10000).unwrap(), page_default_nitems: NonZeroU32::new(100).unwrap(), }, router: api.into_router(), log: log.new(o!("local_addr" => local_addr)), local_addr, }); for (path, method, _) in &app_state.router { debug!(app_state.log, "registered endpoint"; "method" => &method, "path" => &path ); } let make_service = ServerConnectionHandler::new(Arc::clone(&app_state)); let builder = hyper::Server::builder(incoming); let server = builder.serve(make_service); info!(app_state.log, "listening"); Ok(HttpServerStarter { app_state, server, local_addr, }) } pub fn app_private(&self) -> &C { &self.app_state.private } } /** * A running Dropshot HTTP server. * * # Panics * * Panics if dropped without invoking `close`. */ pub struct HttpServer<C: ServerContext> { probe_registration: ProbeRegistration, app_state: Arc<DropshotState<C>>, local_addr: SocketAddr, join_handle: Option<tokio::task::JoinHandle<Result<(), hyper::Error>>>, close_channel: Option<tokio::sync::oneshot::Sender<()>>, } impl<C: ServerContext> HttpServer<C> { pub fn local_addr(&self) -> SocketAddr { self.local_addr } pub fn app_private(&self) -> &C { &self.app_state.private } /** * Signals the currently running server to stop and waits for it to exit. */ pub async fn close(mut self) -> Result<(), String> { self.close_channel .take() .expect("cannot close twice") .send(()) .expect("failed to send close signal"); if let Some(handle) = self.join_handle.take() { handle .await .map_err(|error| format!("waiting for server: {}", error))? .map_err(|error| format!("server stopped: {}", error)) } else { Ok(()) } } /** * Return the result of registering the server's DTrace USDT probes. * * See [`ProbeRegistration`] for details. */ pub fn probe_registration(&self) -> &ProbeRegistration { &self.probe_registration } } /* * For graceful termination, the `close()` function is preferred, as it can * report errors and wait for termination to complete. However, we impl * `Drop` to attempt to shut down the server to handle less clean shutdowns * (e.g., from failing tests). 
*/ impl<C: ServerContext> Drop for HttpServer<C> { fn drop(&mut self) { if let Some(c) = self.close_channel.take() { c.send(()).expect("failed to send close signal") } } } impl<C: ServerContext> Future for HttpServer<C> { type Output = Result<(), String>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let server = Pin::into_inner(self); let mut handle = server .join_handle .take() .expect("polling a server future which has already completed"); let poll = handle.poll_unpin(cx).map(|result| { result .map_err(|error| format!("waiting for server: {}", error))? .map_err(|error| format!("server stopped: {}", error)) }); if poll.is_pending() { server.join_handle.replace(handle); } return poll; } } impl<C: ServerContext> FusedFuture for HttpServer<C> { fn is_terminated(&self) -> bool { self.join_handle.is_none() } } /** * Initial entry point for handling a new connection to the HTTP server. * This is invoked by Hyper when a new connection is accepted. This function * must return a Hyper Service object that will handle requests for this * connection. */ async fn http_connection_handle<C: ServerContext>( server: Arc<DropshotState<C>>, remote_addr: SocketAddr, ) -> Result<ServerRequestHandler<C>, GenericError> { info!(server.log, "accepted connection"; "remote_addr" => %remote_addr); Ok(ServerRequestHandler::new(server, remote_addr)) } /** * Initial entry point for handling a new request to the HTTP server. This is * invoked by Hyper when a new request is received. This function returns a * Result that either represents a valid HTTP response or an error (which will * also get turned into an HTTP response). */ async fn http_request_handle_wrap<C: ServerContext>( server: Arc<DropshotState<C>>, remote_addr: SocketAddr, request: Request<Body>, ) -> Result<Response<Body>, GenericError> { /* * This extra level of indirection makes error handling much more * straightforward, since the request handling code can simply return early * with an error and we'll treat it like an error from any of the endpoints * themselves. */ let request_id = generate_request_id(); let request_log = server.log.new(o!( "remote_addr" => remote_addr, "req_id" => request_id.clone(), "method" => request.method().as_str().to_string(), "uri" => format!("{}", request.uri()), )); trace!(request_log, "incoming request"); probes::request_start!(|| { let uri = request.uri(); crate::RequestInfo { id: request_id.clone(), local_addr: server.local_addr, remote_addr, method: request.method().to_string(), path: uri.path().to_string(), query: uri.query().map(|x| x.to_string()), } }); // Copy local address to report later during the finish probe, as the // server is passed by value to the request handler function. 
let local_addr = server.local_addr; let maybe_response = http_request_handle( server, request, &request_id, request_log.new(o!()), ) .await; let response = match maybe_response { Err(error) => { let message_external = error.external_message.clone(); let message_internal = error.internal_message.clone(); let r = error.into_response(&request_id); probes::request_finish!(|| { crate::ResponseInfo { id: request_id.clone(), local_addr, remote_addr, status_code: r.status().as_u16(), message: message_external.clone(), } }); /* TODO-debug: add request and response headers here */ info!(request_log, "request completed"; "response_code" => r.status().as_str().to_string(), "error_message_internal" => message_internal, "error_message_external" => message_external, ); r } Ok(response) => { /* TODO-debug: add request and response headers here */ info!(request_log, "request completed"; "response_code" => response.status().as_str().to_string() ); probes::request_finish!(|| { crate::ResponseInfo { id: request_id.parse().unwrap(), local_addr, remote_addr, status_code: response.status().as_u16(), message: "".to_string(), } }); response } }; Ok(response) } async fn http_request_handle<C: ServerContext>( server: Arc<DropshotState<C>>, request: Request<Body>, request_id: &str, request_log: Logger, ) -> Result<Response<Body>, HttpError> { /* * TODO-hardening: is it correct to (and do we correctly) read the entire * request body even if we decide it's too large and are going to send a 400 * response? * TODO-hardening: add a request read timeout as well so that we don't allow * this to take forever. * TODO-correctness: Do we need to dump the body on errors? */ let method = request.method(); let uri = request.uri(); let lookup_result = server.router.lookup_route(&method, uri.path().into())?; let rqctx = RequestContext { server: Arc::clone(&server), request: Arc::new(Mutex::new(request)), path_variables: lookup_result.variables, request_id: request_id.to_string(), log: request_log, }; let mut response = lookup_result.handler.handle_request(rqctx).await?; response.headers_mut().insert( HEADER_REQUEST_ID, http::header::HeaderValue::from_str(&request_id).unwrap(), ); Ok(response) } /* * This function should probably be parametrized by some name of the service * that is expected to be unique within an organization. That way, it would be * possible to determine from a given request id which service it was from. * TODO should we encode more information here? Service? Instance? Time up to * the hour? */ fn generate_request_id() -> String { format!("{}", Uuid::new_v4()) } /** * ServerConnectionHandler is a Hyper Service implementation that forwards * incoming connections to `http_connection_handle()`, providing the server * state object as an additional argument. We could use `make_service_fn` here * using a closure to capture the state object, but the resulting code is a bit * simpler without it. */ pub struct ServerConnectionHandler<C: ServerContext> { /** backend state that will be made available to the connection handler */ server: Arc<DropshotState<C>>, } impl<C: ServerContext> ServerConnectionHandler<C> { /** * Create an ServerConnectionHandler with the given state object that * will be made available to the handler. 
*/ fn new(server: Arc<DropshotState<C>>) -> Self { ServerConnectionHandler { server, } } } impl<T: ServerContext> Service<&AddrStream> for ServerConnectionHandler<T> { /* * Recall that a Service in this context is just something that takes a * request (which could be anything) and produces a response (which could be * anything). This being a connection handler, the request type is an * AddrStream (which wraps a TCP connection) and the response type is * another Service: one that accepts HTTP requests and produces HTTP * responses. */ type Response = ServerRequestHandler<T>; type Error = GenericError; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { // TODO is this right? Poll::Ready(Ok(())) } fn call(&mut self, conn: &AddrStream) -> Self::Future { /* * We're given a borrowed reference to the AddrStream, but our interface * is async (which is good, so that we can support time-consuming * operations as part of receiving requests). To avoid having to ensure * that conn's lifetime exceeds that of this async operation, we simply * copy the only useful information out of the conn: the SocketAddr. We * may want to create our own connection type to encapsulate the socket * address and any other per-connection state that we want to keep. */ let server = Arc::clone(&self.server); let remote_addr = conn.remote_addr(); Box::pin(http_connection_handle(server, remote_addr)) } } /** * ServerRequestHandler is a Hyper Service implementation that forwards * incoming requests to `http_request_handle_wrap()`, including as an argument * the backend server state object. We could use `service_fn` here using a * closure to capture the server state object, but the resulting code is a bit * simpler without all that. */ pub struct ServerRequestHandler<C: ServerContext> { /** backend state that will be made available to the request handler */ server: Arc<DropshotState<C>>, remote_addr: SocketAddr, } impl<C: ServerContext> ServerRequestHandler<C> { /** * Create a ServerRequestHandler object with the given state object that * will be provided to the handler function. */ fn new(server: Arc<DropshotState<C>>, remote_addr: SocketAddr) -> Self { ServerRequestHandler { server, remote_addr, } } } impl<C: ServerContext> Service<Request<Body>> for ServerRequestHandler<C> { type Response = Response<Body>; type Error = GenericError; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { // TODO is this right? Poll::Ready(Ok(())) } fn call(&mut self, req: Request<Body>) -> Self::Future { Box::pin(http_request_handle_wrap( Arc::clone(&self.server), self.remote_addr, req, )) } } #[cfg(test)] mod test { use super::*; // Referring to the current crate as "dropshot::" instead of "crate::" // helps the endpoint macro with module lookup. 
use crate as dropshot; use dropshot::endpoint; use dropshot::test_util::ClientTestContext; use dropshot::test_util::LogContext; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; use http::StatusCode; use hyper::Method; use futures::future::FusedFuture; #[endpoint { method = GET, path = "/handler", }] async fn handler( _rqctx: Arc<RequestContext<i32>>, ) -> Result<HttpResponseOk<u64>, HttpError> { Ok(HttpResponseOk(3)) } struct TestConfig { log_context: LogContext, } impl TestConfig { fn log(&self) -> &slog::Logger { &self.log_context.log } } fn create_test_server() -> (HttpServer<i32>, TestConfig) { let config_dropshot = ConfigDropshot::default(); let mut api = ApiDescription::new(); api.register(handler).unwrap(); let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Warn, }; let log_context = LogContext::new("test server", &config_logging); let log = &log_context.log; let server = HttpServerStarter::new(&config_dropshot, api, 0, log) .unwrap() .start(); (server, TestConfig { log_context, }) } async fn single_client_request(addr: SocketAddr, log: &slog::Logger) { let client_log = log.new(o!("http_client" => "dropshot test suite")); let client_testctx = ClientTestContext::new(addr, client_log); tokio::task::spawn(async move { let response = client_testctx .make_request( Method::GET, "/handler", None as Option<()>, StatusCode::OK, ) .await; assert!(response.is_ok()); }) .await .expect("client request failed"); } #[tokio::test] async fn test_server_run_then_close() { let (mut server, config) = create_test_server(); let client = single_client_request(server.local_addr, config.log());
} Err(e) => { let msg = e.to_string(); error!(
random_line_split
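The masked span in this row sits in the USDT error arm of `start()`, which records the outcome as a `ProbeRegistration`. A hedged sketch of consuming that value through `probe_registration()`; the three variants are the ones visible in the sample, and the match assumes there are no others:

```rust
// Illustrative only: log the probe-registration outcome after `start()`.
fn report_probes<C: ServerContext>(server: &HttpServer<C>, log: &Logger) {
    match server.probe_registration() {
        ProbeRegistration::Succeeded => debug!(log, "USDT probes are live"),
        ProbeRegistration::Failed(msg) => {
            error!(log, "running without USDT probes: {}", msg)
        }
        ProbeRegistration::Disabled => debug!(log, "USDT probes compiled out"),
    }
}
```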
server.rs
use super::router::HttpRouter; use super::ProbeRegistration; use futures::future::BoxFuture; use futures::future::FusedFuture; use futures::future::FutureExt; use futures::lock::Mutex; use hyper::server::{ conn::{AddrIncoming, AddrStream}, Server, }; use hyper::service::Service; use hyper::Body; use hyper::Request; use hyper::Response; use std::future::Future; use std::net::SocketAddr; use std::num::NonZeroU32; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use uuid::Uuid; use slog::Logger; /* TODO Replace this with something else? */ type GenericError = Box<dyn std::error::Error + Send + Sync>; /** * Endpoint-accessible context associated with a server. * * Automatically implemented for all Send + Sync types. */ pub trait ServerContext: Send + Sync +'static {} impl<T:'static> ServerContext for T where T: Send + Sync {} /** * Stores shared state used by the Dropshot server. */ pub struct DropshotState<C: ServerContext> { /** caller-specific state */ pub private: C, /** static server configuration parameters */ pub config: ServerConfig, /** request router */ pub router: HttpRouter<C>, /** server-wide log handle */ pub log: Logger, /** bound local address for the server. */ pub local_addr: SocketAddr, } /** * Stores static configuration associated with the server * TODO-cleanup merge with ConfigDropshot */ pub struct ServerConfig { /** maximum allowed size of a request body */ pub request_body_max_bytes: usize, /** maximum size of any page of results */ pub page_max_nitems: NonZeroU32, /** default size for a page of results */ pub page_default_nitems: NonZeroU32, } /** * A thin wrapper around a Hyper Server object that exposes some interfaces that * we find useful. */ pub struct HttpServerStarter<C: ServerContext> { app_state: Arc<DropshotState<C>>, server: Server<AddrIncoming, ServerConnectionHandler<C>>, local_addr: SocketAddr, } impl<C: ServerContext> HttpServerStarter<C> { pub fn local_addr(&self) -> SocketAddr { self.local_addr } /** * Begins execution of the underlying Http server. */ pub fn start(self) -> HttpServer<C> { let (tx, rx) = tokio::sync::oneshot::channel::<()>(); let log_close = self.app_state.log.new(o!()); let graceful = self.server.with_graceful_shutdown(async move { rx.await.expect( "dropshot server shutting down without invoking close()", ); info!(log_close, "received request to begin graceful shutdown"); }); let join_handle = tokio::spawn(async { graceful.await }); let probe_registration = if cfg!(feature = "usdt-probes") { match usdt::register_probes() { Ok(_) => { debug!( self.app_state.log, "successfully registered DTrace USDT probes" ); ProbeRegistration::Succeeded } Err(e) => { let msg = e.to_string(); error!( self.app_state.log, "failed to register DTrace USDT probes: {}", msg ); ProbeRegistration::Failed(msg) } } } else { debug!( self.app_state.log, "DTrace USDT probes compiled out, not registering" ); ProbeRegistration::Disabled }; HttpServer { probe_registration, app_state: self.app_state, local_addr: self.local_addr, join_handle: Some(join_handle), close_channel: Some(tx), } } /** * Set up an HTTP server bound on the specified address that runs registered * handlers. You must invoke `start()` on the returned instance of * `HttpServerStarter` (and await the result) to actually start the server. * * TODO-cleanup We should be able to take a reference to the ApiDescription. * We currently can't because we need to hang onto the router. 
*/ pub fn new( config: &ConfigDropshot, api: ApiDescription<C>, private: C, log: &Logger, ) -> Result<HttpServerStarter<C>, hyper::Error> { let incoming = AddrIncoming::bind(&config.bind_address)?; let local_addr = incoming.local_addr(); /* TODO-cleanup too many Arcs? */ let app_state = Arc::new(DropshotState { private, config: ServerConfig { /* We start aggressively to ensure test coverage. */ request_body_max_bytes: config.request_body_max_bytes, page_max_nitems: NonZeroU32::new(10000).unwrap(), page_default_nitems: NonZeroU32::new(100).unwrap(), }, router: api.into_router(), log: log.new(o!("local_addr" => local_addr)), local_addr, }); for (path, method, _) in &app_state.router { debug!(app_state.log, "registered endpoint"; "method" => &method, "path" => &path ); } let make_service = ServerConnectionHandler::new(Arc::clone(&app_state)); let builder = hyper::Server::builder(incoming); let server = builder.serve(make_service); info!(app_state.log, "listening"); Ok(HttpServerStarter { app_state, server, local_addr, }) } pub fn app_private(&self) -> &C { &self.app_state.private } } /** * A running Dropshot HTTP server. * * # Panics * * Panics if dropped without invoking `close`. */ pub struct HttpServer<C: ServerContext> { probe_registration: ProbeRegistration, app_state: Arc<DropshotState<C>>, local_addr: SocketAddr, join_handle: Option<tokio::task::JoinHandle<Result<(), hyper::Error>>>, close_channel: Option<tokio::sync::oneshot::Sender<()>>, } impl<C: ServerContext> HttpServer<C> { pub fn local_addr(&self) -> SocketAddr { self.local_addr } pub fn app_private(&self) -> &C { &self.app_state.private } /** * Signals the currently running server to stop and waits for it to exit. */ pub async fn close(mut self) -> Result<(), String> { self.close_channel .take() .expect("cannot close twice") .send(()) .expect("failed to send close signal"); if let Some(handle) = self.join_handle.take() { handle .await .map_err(|error| format!("waiting for server: {}", error))? .map_err(|error| format!("server stopped: {}", error)) } else { Ok(()) } } /** * Return the result of registering the server's DTrace USDT probes. * * See [`ProbeRegistration`] for details. */ pub fn probe_registration(&self) -> &ProbeRegistration { &self.probe_registration } } /* * For graceful termination, the `close()` function is preferred, as it can * report errors and wait for termination to complete. However, we impl * `Drop` to attempt to shut down the server to handle less clean shutdowns * (e.g., from failing tests). */ impl<C: ServerContext> Drop for HttpServer<C> { fn drop(&mut self) { if let Some(c) = self.close_channel.take() { c.send(()).expect("failed to send close signal") } } } impl<C: ServerContext> Future for HttpServer<C> { type Output = Result<(), String>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { let server = Pin::into_inner(self); let mut handle = server .join_handle .take() .expect("polling a server future which has already completed"); let poll = handle.poll_unpin(cx).map(|result| { result .map_err(|error| format!("waiting for server: {}", error))? .map_err(|error| format!("server stopped: {}", error)) }); if poll.is_pending() { server.join_handle.replace(handle); } return poll; } } impl<C: ServerContext> FusedFuture for HttpServer<C> { fn is_terminated(&self) -> bool { self.join_handle.is_none() } } /** * Initial entry point for handling a new connection to the HTTP server. * This is invoked by Hyper when a new connection is accepted. 
This function * must return a Hyper Service object that will handle requests for this * connection. */ async fn http_connection_handle<C: ServerContext>( server: Arc<DropshotState<C>>, remote_addr: SocketAddr, ) -> Result<ServerRequestHandler<C>, GenericError> { info!(server.log, "accepted connection"; "remote_addr" => %remote_addr); Ok(ServerRequestHandler::new(server, remote_addr)) } /** * Initial entry point for handling a new request to the HTTP server. This is * invoked by Hyper when a new request is received. This function returns a * Result that either represents a valid HTTP response or an error (which will * also get turned into an HTTP response). */ async fn http_request_handle_wrap<C: ServerContext>( server: Arc<DropshotState<C>>, remote_addr: SocketAddr, request: Request<Body>, ) -> Result<Response<Body>, GenericError> { /* * This extra level of indirection makes error handling much more * straightforward, since the request handling code can simply return early * with an error and we'll treat it like an error from any of the endpoints * themselves. */ let request_id = generate_request_id(); let request_log = server.log.new(o!( "remote_addr" => remote_addr, "req_id" => request_id.clone(), "method" => request.method().as_str().to_string(), "uri" => format!("{}", request.uri()), )); trace!(request_log, "incoming request"); probes::request_start!(|| { let uri = request.uri(); crate::RequestInfo { id: request_id.clone(), local_addr: server.local_addr, remote_addr, method: request.method().to_string(), path: uri.path().to_string(), query: uri.query().map(|x| x.to_string()), } }); // Copy local address to report later during the finish probe, as the // server is passed by value to the request handler function. let local_addr = server.local_addr; let maybe_response = http_request_handle( server, request, &request_id, request_log.new(o!()), ) .await; let response = match maybe_response { Err(error) => { let message_external = error.external_message.clone(); let message_internal = error.internal_message.clone(); let r = error.into_response(&request_id); probes::request_finish!(|| { crate::ResponseInfo { id: request_id.clone(), local_addr, remote_addr, status_code: r.status().as_u16(), message: message_external.clone(), } }); /* TODO-debug: add request and response headers here */ info!(request_log, "request completed"; "response_code" => r.status().as_str().to_string(), "error_message_internal" => message_internal, "error_message_external" => message_external, ); r } Ok(response) => { /* TODO-debug: add request and response headers here */ info!(request_log, "request completed"; "response_code" => response.status().as_str().to_string() ); probes::request_finish!(|| { crate::ResponseInfo { id: request_id.parse().unwrap(), local_addr, remote_addr, status_code: response.status().as_u16(), message: "".to_string(), } }); response } }; Ok(response) } async fn http_request_handle<C: ServerContext>( server: Arc<DropshotState<C>>, request: Request<Body>, request_id: &str, request_log: Logger, ) -> Result<Response<Body>, HttpError> { /* * TODO-hardening: is it correct to (and do we correctly) read the entire * request body even if we decide it's too large and are going to send a 400 * response? * TODO-hardening: add a request read timeout as well so that we don't allow * this to take forever. * TODO-correctness: Do we need to dump the body on errors? 
*/ let method = request.method(); let uri = request.uri(); let lookup_result = server.router.lookup_route(&method, uri.path().into())?; let rqctx = RequestContext { server: Arc::clone(&server), request: Arc::new(Mutex::new(request)), path_variables: lookup_result.variables, request_id: request_id.to_string(), log: request_log, }; let mut response = lookup_result.handler.handle_request(rqctx).await?; response.headers_mut().insert( HEADER_REQUEST_ID, http::header::HeaderValue::from_str(&request_id).unwrap(), ); Ok(response) } /* * This function should probably be parametrized by some name of the service * that is expected to be unique within an organization. That way, it would be * possible to determine from a given request id which service it was from. * TODO should we encode more information here? Service? Instance? Time up to * the hour? */ fn generate_request_id() -> String { format!("{}", Uuid::new_v4()) } /** * ServerConnectionHandler is a Hyper Service implementation that forwards * incoming connections to `http_connection_handle()`, providing the server * state object as an additional argument. We could use `make_service_fn` here * using a closure to capture the state object, but the resulting code is a bit * simpler without it. */ pub struct ServerConnectionHandler<C: ServerContext> { /** backend state that will be made available to the connection handler */ server: Arc<DropshotState<C>>, } impl<C: ServerContext> ServerConnectionHandler<C> { /** * Create an ServerConnectionHandler with the given state object that * will be made available to the handler. */ fn new(server: Arc<DropshotState<C>>) -> Self { ServerConnectionHandler { server, } } } impl<T: ServerContext> Service<&AddrStream> for ServerConnectionHandler<T> { /* * Recall that a Service in this context is just something that takes a * request (which could be anything) and produces a response (which could be * anything). This being a connection handler, the request type is an * AddrStream (which wraps a TCP connection) and the response type is * another Service: one that accepts HTTP requests and produces HTTP * responses. */ type Response = ServerRequestHandler<T>; type Error = GenericError; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { // TODO is this right? Poll::Ready(Ok(())) } fn call(&mut self, conn: &AddrStream) -> Self::Future { /* * We're given a borrowed reference to the AddrStream, but our interface * is async (which is good, so that we can support time-consuming * operations as part of receiving requests). To avoid having to ensure * that conn's lifetime exceeds that of this async operation, we simply * copy the only useful information out of the conn: the SocketAddr. We * may want to create our own connection type to encapsulate the socket * address and any other per-connection state that we want to keep. */ let server = Arc::clone(&self.server); let remote_addr = conn.remote_addr(); Box::pin(http_connection_handle(server, remote_addr)) } } /** * ServerRequestHandler is a Hyper Service implementation that forwards * incoming requests to `http_request_handle_wrap()`, including as an argument * the backend server state object. We could use `service_fn` here using a * closure to capture the server state object, but the resulting code is a bit * simpler without all that. 
*/ pub struct ServerRequestHandler<C: ServerContext> { /** backend state that will be made available to the request handler */ server: Arc<DropshotState<C>>, remote_addr: SocketAddr, } impl<C: ServerContext> ServerRequestHandler<C> { /** * Create a ServerRequestHandler object with the given state object that * will be provided to the handler function. */ fn new(server: Arc<DropshotState<C>>, remote_addr: SocketAddr) -> Self { ServerRequestHandler { server, remote_addr, } } } impl<C: ServerContext> Service<Request<Body>> for ServerRequestHandler<C> { type Response = Response<Body>; type Error = GenericError; type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>; fn poll_ready( &mut self, _cx: &mut Context<'_>, ) -> Poll<Result<(), Self::Error>> { // TODO is this right? Poll::Ready(Ok(())) } fn call(&mut self, req: Request<Body>) -> Self::Future { Box::pin(http_request_handle_wrap( Arc::clone(&self.server), self.remote_addr, req, )) } } #[cfg(test)] mod test { use super::*; // Referring to the current crate as "dropshot::" instead of "crate::" // helps the endpoint macro with module lookup. use crate as dropshot; use dropshot::endpoint; use dropshot::test_util::ClientTestContext; use dropshot::test_util::LogContext; use dropshot::ConfigLogging; use dropshot::ConfigLoggingLevel; use dropshot::HttpError; use dropshot::HttpResponseOk; use dropshot::RequestContext; use http::StatusCode; use hyper::Method; use futures::future::FusedFuture; #[endpoint { method = GET, path = "/handler", }] async fn
( _rqctx: Arc<RequestContext<i32>>, ) -> Result<HttpResponseOk<u64>, HttpError> { Ok(HttpResponseOk(3)) } struct TestConfig { log_context: LogContext, } impl TestConfig { fn log(&self) -> &slog::Logger { &self.log_context.log } } fn create_test_server() -> (HttpServer<i32>, TestConfig) { let config_dropshot = ConfigDropshot::default(); let mut api = ApiDescription::new(); api.register(handler).unwrap(); let config_logging = ConfigLogging::StderrTerminal { level: ConfigLoggingLevel::Warn, }; let log_context = LogContext::new("test server", &config_logging); let log = &log_context.log; let server = HttpServerStarter::new(&config_dropshot, api, 0, log) .unwrap() .start(); (server, TestConfig { log_context, }) } async fn single_client_request(addr: SocketAddr, log: &slog::Logger) { let client_log = log.new(o!("http_client" => "dropshot test suite")); let client_testctx = ClientTestContext::new(addr, client_log); tokio::task::spawn(async move { let response = client_testctx .make_request( Method::GET, "/handler", None as Option<()>, StatusCode::OK, ) .await; assert!(response.is_ok()); }) .await .expect("client request failed"); } #[tokio::test] async fn test_server_run_then_close() { let (mut server, config) = create_test_server(); let client = single_client_request(server.local_addr, config.log());
handler
identifier_name
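The comments in this row point out that the hand-written `ServerConnectionHandler` / `ServerRequestHandler` types could be replaced with hyper's closure helpers. A hedged sketch of that alternative as it might appear inside `HttpServerStarter::new`, where `app_state` and `incoming` are in scope; the `make_service_fn` / `service_fn` calls assume the hyper 0.13/0.14-era API the sample targets:

```rust
use hyper::service::{make_service_fn, service_fn};

// Each accepted connection clones the shared state and captures the peer
// address, mirroring what the hand-written Service impls do.
let state = Arc::clone(&app_state);
let make_service = make_service_fn(move |conn: &AddrStream| {
    let shared = Arc::clone(&state);
    let remote_addr = conn.remote_addr();
    async move {
        Ok::<_, GenericError>(service_fn(move |req: Request<Body>| {
            http_request_handle_wrap(Arc::clone(&shared), remote_addr, req)
        }))
    }
});
let server = hyper::Server::builder(incoming).serve(make_service);
```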
kflash.rs
//! Kendryte K210 UART ISP, based on [`kflash.py`] //! (https://github.com/sipeed/kflash.py) use anyhow::Result; use crc::{crc32, Hasher32}; use std::{future::Future, marker::Unpin, path::Path, pin::Pin, sync::Mutex, time::Duration}; use tokio::{ io::{AsyncBufRead, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufStream}, task::spawn_blocking, time::delay_for, }; use tokio_serial::{Serial, SerialPort, SerialPortSettings}; use super::{ demux::Demux, serial::{choose_serial, ChooseSerialError}, slip, Arch, DebugProbe, DynAsyncRead, Target, }; use crate::utils::retry_on_fail; /// Maix development boards based on Kendryte K210, download by UART ISP pub struct Maix; impl Target for Maix { fn target_arch(&self) -> Arch { Arch::RV64GC } fn cargo_features(&self) -> &[&str] { &[ "output-k210-uart", "interrupt-k210", "board-maix", "r3_port_riscv/maintain-pie", ] } fn memory_layout_script(&self) -> String { r#" MEMORY { RAM : ORIGIN = 0x80000000, LENGTH = 6M } REGION_ALIAS("REGION_TEXT", RAM); REGION_ALIAS("REGION_RODATA", RAM); REGION_ALIAS("REGION_DATA", RAM); REGION_ALIAS("REGION_BSS", RAM); REGION_ALIAS("REGION_HEAP", RAM); REGION_ALIAS("REGION_STACK", RAM); _hart_stack_size = 1K; "# .to_owned() } fn connect(&self) -> Pin<Box<dyn Future<Output = Result<Box<dyn DebugProbe>>>>> { Box::pin(async { KflashDebugProbe::new().await.map(|x| Box::new(x) as _) }) } } #[derive(thiserror::Error, Debug)] enum OpenError { #[error("Error while choosing the serial port to use")] ChooseSerial(#[source] ChooseSerialError), #[error("Error while opening the serial port '{0}'")] Serial(String, #[source] anyhow::Error), #[error( "Please provide a board name by `MAIX_BOARD` environment variable. \ Valid values: {0:?}" )] NoBoardName(Vec<&'static str>), #[error("Unknown board name: '{0}'")] UnknownBoardName(String), #[error("Communication error")] Communication(#[source] CommunicationError), } #[derive(thiserror::Error, Debug)] enum CommunicationError { #[error("Error while controlling the serial port")] Serial(#[source] tokio_serial::Error), #[error("Error while reading from or writing to the serial port")] SerialIo( #[source] #[from] std::io::Error, ), #[error("Protocol error")] FrameExtractor(#[source] slip::FrameExtractorProtocolError), #[error("Timeout while waiting for a response")] Timeout, #[error("Received an ISP error response {0:?}.")] RemoteError(IspReasonCode), #[error("Received a malformed response.")] MalformedResponse, } impl From<slip::FrameExtractorError> for CommunicationError { fn from(e: slip::FrameExtractorError) -> Self { match e { slip::FrameExtractorError::Io(e) => Self::SerialIo(e), slip::FrameExtractorError::Protocol(e) => Self::FrameExtractor(e), } } } const COMM_TIMEOUT: Duration = Duration::from_secs(3); struct KflashDebugProbe { serial: BufStream<Serial>, isp_boot_cmds: &'static [BootCmd], } impl KflashDebugProbe { async fn new() -> anyhow::Result<Self> { // Choose the ISP sequence specific to a target board let board = match std::env::var("MAIX_BOARD") { Ok(x) => Ok(x), Err(std::env::VarError::NotPresent) => { let valid_board_names = ISP_BOOT_CMDS.iter().map(|x| x.0).collect(); Err(OpenError::NoBoardName(valid_board_names)) } Err(std::env::VarError::NotUnicode(_)) => Err(OpenError::UnknownBoardName( "<invalid UTF-8 string>".to_owned(), )), }?; let isp_boot_cmds = ISP_BOOT_CMDS .iter() .find(|x| x.0 == board) .ok_or_else(|| OpenError::UnknownBoardName(board.clone()))? 
.1; let serial = spawn_blocking(|| { let dev = choose_serial().map_err(OpenError::ChooseSerial)?; Serial::from_path( &dev, &SerialPortSettings { baud_rate: 115200, timeout: std::time::Duration::from_secs(60), ..Default::default() }, ) .map_err(|e| OpenError::Serial(dev, e.into())) }) .await .unwrap()?; let serial = BufStream::new(serial); // Put the device into ISP mode. Fail fast if this was unsuccessful. let serial_m = Mutex::new(serial); retry_on_fail(|| async { maix_enter_isp_mode(&mut serial_m.try_lock().unwrap(), isp_boot_cmds).await }) .await .map_err(OpenError::Communication)?; let serial = serial_m.into_inner().unwrap(); let probe = Self { serial, isp_boot_cmds, }; Ok(probe) } } #[derive(thiserror::Error, Debug)] enum RunError { #[error("{0}")] ProcessElf( #[source] #[from] ProcessElfError, ), #[error("{0}")] Communication( #[source] #[from] CommunicationError, ), } impl DebugProbe for KflashDebugProbe { fn program_and_get_output( &mut self, exe: &Path, ) -> Pin<Box<dyn Future<Output = Result<DynAsyncRead<'_>>> + '_>> { let exe = exe.to_owned(); Box::pin(async move { // Extract loadable sections let LoadableCode { regions, entry } = read_elf(&exe).await.map_err(RunError::ProcessElf)?; // Put the device into ISP mode. let serial_m = Mutex::new(&mut self.serial); let isp_boot_cmds = self.isp_boot_cmds; retry_on_fail(|| async { maix_enter_isp_mode(*serial_m.try_lock().unwrap(), isp_boot_cmds).await }) .await .map_err(RunError::Communication)?; drop(serial_m); // Program the executable image for (i, region) in regions.iter().enumerate() { log::debug!("Programming the region {} of {}", i + 1, regions.len()); if region.1 < 0x80000000 { log::debug!( "Starting address (0x{:x}) is out of range, ignoring", region.1 ); continue; } flash_dataframe(&mut self.serial, &region.0, region.1 as u32).await?; } // Boot the program log::debug!("Booting from 0x{:08x}", entry); boot(&mut self.serial, entry as u32).await?; // Now, pass the channel to the caller Ok(Box::pin(Demux::new(&mut self.serial)) as _) }) } } #[derive(Debug)] enum BootCmd { Dtr(bool), Rts(bool), Delay, } const ISP_BOOT_CMDS: &[(&str, &[BootCmd])] = &[ // `reset_to_isp_kd233` ( "kd233", &[ BootCmd::Dtr(false), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Dtr(true), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Rts(true), BootCmd::Dtr(false), BootCmd::Delay, ], ), // `reset_to_isp_dan` ( "dan", &[ BootCmd::Dtr(false), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Dtr(false), BootCmd::Rts(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, ], ), // `reset_to_isp_goD` ( "god", &[ BootCmd::Dtr(true), BootCmd::Rts(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, ], ), // `reset_to_boot_maixgo` ( "maixgo", &[ BootCmd::Dtr(false), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(false), BootCmd::Delay, ], ), ]; async fn maix_enter_isp_mode( serial: &mut BufStream<Serial>, cmds: &[BootCmd], ) -> Result<(), CommunicationError> { let t = Duration::from_millis(100); let serial_inner = serial.get_mut(); log::debug!("Trying to put the chip into ISP mode"); for cmd in cmds { log::trace!("Performing the command {:?}", cmd); match cmd { BootCmd::Dtr(b) => { serial_inner .write_data_terminal_ready(*b) .map_err(CommunicationError::Serial)?; } BootCmd::Rts(b) => { serial_inner .write_request_to_send(*b) .map_err(CommunicationError::Serial)?; } BootCmd::Delay
=> { delay_for(t).await; } } } // Clear any stale data in the receive buffer read_to_end_and_discard_for_some_time(serial).await?; // Send a greeting command log::trace!("Sending a greeting command"); slip::write_frame( serial, &[ 0xc2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ], ) .await .map_err(CommunicationError::SerialIo)?; serial.flush().await.map_err(CommunicationError::SerialIo)?; // Wait for a response log::trace!("Waiting for a response"); match tokio::time::timeout(COMM_TIMEOUT, slip::read_frame(serial)).await { Ok(Ok(frame)) => { log::trace!( "Received a packet: {:?} The chip probably successfully entered ISP mode", frame ); } Ok(Err(e)) => return Err(e.into()), Err(_) => return Err(CommunicationError::Timeout), } Ok(()) } async fn flash_dataframe( serial: &mut (impl AsyncBufRead + AsyncWrite + Unpin), data: &[u8], address: u32, ) -> Result<(), CommunicationError> { const CHUNK_LEN: usize = 1024; let mut buffer = [0u8; 8 + CHUNK_LEN]; buffer[0] = 0xc3; for (i, chunk) in data.chunks(CHUNK_LEN).enumerate() { let chunk_addr = address + (i * CHUNK_LEN) as u32; log::debug!( "Programming the range {:?}/{:?} at 0x{:x} ({}%)", (i * CHUNK_LEN)..(i * CHUNK_LEN + chunk.len()), data.len(), chunk_addr, i * CHUNK_LEN * 100 / data.len(), ); let mut error = None; for _ in 0..16 { buffer[0..][..4].copy_from_slice(&chunk_addr.to_le_bytes()); buffer[4..][..4].copy_from_slice(&(chunk.len() as u32).to_le_bytes()); buffer[8..][..chunk.len()].copy_from_slice(chunk); // Send a frame log::trace!("Sending a write command"); write_request(serial, 0xc3, &buffer[..8 + chunk.len()]) .await .map_err(CommunicationError::SerialIo)?; serial.flush().await.map_err(CommunicationError::SerialIo)?; // Wait for a response let response = match tokio::time::timeout(COMM_TIMEOUT, slip::read_frame(serial)).await { Ok(Ok(frame)) => frame, Ok(Err(e)) => return Err(e.into()), Err(_) => return Err(CommunicationError::Timeout), }; let reason: Option<IspReasonCode> = response.get(1).cloned().map(Into::into); match reason { Some(IspReasonCode::Ok) => { error = None; break; } Some(x) => { error = Some(CommunicationError::RemoteError(x)); } None => { error = Some(CommunicationError::MalformedResponse); } } log::trace!("Got {:?}. 
Retrying...", reason); } if let Some(error) = error { return Err(error); } } Ok(()) } async fn boot( serial: &mut (impl AsyncWrite + Unpin), address: u32, ) -> Result<(), CommunicationError> { let mut buffer = [0u8; 8]; buffer[..4].copy_from_slice(&address.to_le_bytes()); // Send a frame log::trace!("Sending a boot command"); write_request(serial, 0xc5, &buffer) .await .map_err(CommunicationError::SerialIo)?; serial.flush().await.map_err(CommunicationError::SerialIo)?; Ok(()) } async fn write_request( serial: &mut (impl AsyncWrite + Unpin), cmd: u8, req_payload: &[u8], ) -> std::io::Result<()> { let mut frame_payload = vec![0u8; req_payload.len() + 8]; frame_payload[0] = cmd; frame_payload[8..].copy_from_slice(req_payload); let mut digest = crc32::Digest::new_with_initial(crc32::IEEE, 0); digest.write(&req_payload); let crc = digest.sum32(); frame_payload[4..][..4].copy_from_slice(&crc.to_le_bytes()); slip::write_frame(serial, &frame_payload).await } #[derive(Debug, Copy, Clone)] enum IspReasonCode { Default, Ok, BadDataLen, BadDataChecksum, InvalidCommand, BadInitialization, BadExec, Unknown(u8), } impl From<u8> for IspReasonCode { fn from(x: u8) -> Self { match x { 0x00 => Self::Default, 0xe0 => Self::Ok, 0xe1 => Self::BadDataLen, 0xe2 => Self::BadDataChecksum, 0xe3 => Self::InvalidCommand, 0xe4 => Self::BadInitialization, 0xe5 => Self::BadExec, x => Self::Unknown(x), } } } async fn read_to_end_and_discard_for_some_time( reader: &mut (impl AsyncRead + Unpin), ) -> std::io::Result<()> { log::trace!("Starting discarding stale data in the receive buffer"); match tokio::time::timeout(Duration::from_millis(100), read_to_end_and_discard(reader)).await { // FIXME: This match arm is really unreachable because `Infallible` is // uninhabited. Waiting for `exhaustive_patterns` feature // <https://github.com/rust-lang/rust/issues/51085> Ok(Ok(_)) => unreachable!(), Ok(Err(e)) => Err(e), Err(_) => Ok(()), } } async fn
( reader: &mut (impl AsyncRead + Unpin), ) -> std::io::Result<std::convert::Infallible> { let mut buf = [0u8; 256]; loop { let num_bytes = reader.read(&mut buf).await?; log::trace!("Discarding {} byte(s)", num_bytes); } } #[derive(thiserror::Error, Debug)] enum ProcessElfError { #[error("Couldn't read the ELF file: {0}")] Read(#[source] std::io::Error), #[error("Couldn't parse the ELF file: {0}")] Parse(#[source] goblin::error::Error), } struct LoadableCode { /// The regions to be loaded onto the target. regions: Vec<(Vec<u8>, u64)>, /// The entry point. entry: u64, } /// Read the specified ELF file and return regions to be loaded onto the target. async fn read_elf(exe: &Path) -> Result<LoadableCode, ProcessElfError> { let elf_bytes = tokio::fs::read(&exe).await.map_err(ProcessElfError::Read)?; let elf = goblin::elf::Elf::parse(&elf_bytes).map_err(ProcessElfError::Parse)?; let regions = elf .program_headers .iter() .filter_map(|ph| { if ph.p_type == goblin::elf32::program_header::PT_LOAD && ph.p_filesz > 0 { Some(( elf_bytes[ph.p_offset as usize..][..ph.p_filesz as usize].to_vec(), ph.p_paddr, )) } else { None } }) .collect(); Ok(LoadableCode { regions, entry: elf.entry, }) }
read_to_end_and_discard
identifier_name
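`write_request` in this row fixes the on-wire layout of an ISP command frame before SLIP encoding: the command code in byte 0, a little-endian CRC-32 (IEEE) of the payload in bytes 4..8, and the payload from byte 8 onward. A hedged restatement of just that layout; `build_isp_frame` is an illustrative name, not part of the source:

```rust
use crc::{crc32, Hasher32};

// Mirrors the frame layout used by `write_request` above: byte 0 = command,
// bytes 1..4 = zero padding, bytes 4..8 = LE CRC-32 of the payload,
// bytes 8.. = payload. The finished frame is then SLIP-framed on the wire.
fn build_isp_frame(cmd: u8, payload: &[u8]) -> Vec<u8> {
    let mut frame = vec![0u8; payload.len() + 8];
    frame[0] = cmd;
    let mut digest = crc32::Digest::new_with_initial(crc32::IEEE, 0);
    digest.write(payload);
    frame[4..8].copy_from_slice(&digest.sum32().to_le_bytes());
    frame[8..].copy_from_slice(payload);
    frame
}
```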
kflash.rs
//! Kendryte K210 UART ISP, based on [`kflash.py`] //! (https://github.com/sipeed/kflash.py) use anyhow::Result; use crc::{crc32, Hasher32}; use std::{future::Future, marker::Unpin, path::Path, pin::Pin, sync::Mutex, time::Duration}; use tokio::{ io::{AsyncBufRead, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufStream}, task::spawn_blocking, time::delay_for, }; use tokio_serial::{Serial, SerialPort, SerialPortSettings}; use super::{ demux::Demux, serial::{choose_serial, ChooseSerialError}, slip, Arch, DebugProbe, DynAsyncRead, Target, }; use crate::utils::retry_on_fail; /// Maix development boards based on Kendryte K210, download by UART ISP pub struct Maix; impl Target for Maix { fn target_arch(&self) -> Arch { Arch::RV64GC } fn cargo_features(&self) -> &[&str] { &[ "output-k210-uart", "interrupt-k210", "board-maix", "r3_port_riscv/maintain-pie", ] } fn memory_layout_script(&self) -> String { r#" MEMORY { RAM : ORIGIN = 0x80000000, LENGTH = 6M } REGION_ALIAS("REGION_TEXT", RAM); REGION_ALIAS("REGION_RODATA", RAM); REGION_ALIAS("REGION_DATA", RAM); REGION_ALIAS("REGION_BSS", RAM); REGION_ALIAS("REGION_HEAP", RAM); REGION_ALIAS("REGION_STACK", RAM); _hart_stack_size = 1K; "# .to_owned() } fn connect(&self) -> Pin<Box<dyn Future<Output = Result<Box<dyn DebugProbe>>>>> { Box::pin(async { KflashDebugProbe::new().await.map(|x| Box::new(x) as _) }) } } #[derive(thiserror::Error, Debug)] enum OpenError { #[error("Error while choosing the serial port to use")] ChooseSerial(#[source] ChooseSerialError), #[error("Error while opening the serial port '{0}'")] Serial(String, #[source] anyhow::Error), #[error( "Please provide a board name by `MAIX_BOARD` environment variable. \ Valid values: {0:?}" )] NoBoardName(Vec<&'static str>), #[error("Unknown board name: '{0}'")] UnknownBoardName(String), #[error("Communication error")] Communication(#[source] CommunicationError), } #[derive(thiserror::Error, Debug)] enum CommunicationError { #[error("Error while controlling the serial port")] Serial(#[source] tokio_serial::Error), #[error("Error while reading from or writing to the serial port")] SerialIo( #[source] #[from] std::io::Error, ), #[error("Protocol error")] FrameExtractor(#[source] slip::FrameExtractorProtocolError), #[error("Timeout while waiting for a response")] Timeout, #[error("Received an ISP error response {0:?}.")] RemoteError(IspReasonCode), #[error("Received a malformed response.")] MalformedResponse, } impl From<slip::FrameExtractorError> for CommunicationError { fn from(e: slip::FrameExtractorError) -> Self { match e { slip::FrameExtractorError::Io(e) => Self::SerialIo(e), slip::FrameExtractorError::Protocol(e) => Self::FrameExtractor(e), } } } const COMM_TIMEOUT: Duration = Duration::from_secs(3); struct KflashDebugProbe { serial: BufStream<Serial>, isp_boot_cmds: &'static [BootCmd], } impl KflashDebugProbe { async fn new() -> anyhow::Result<Self> { // Choose the ISP sequence specific to a target board let board = match std::env::var("MAIX_BOARD") { Ok(x) => Ok(x), Err(std::env::VarError::NotPresent) => { let valid_board_names = ISP_BOOT_CMDS.iter().map(|x| x.0).collect(); Err(OpenError::NoBoardName(valid_board_names)) } Err(std::env::VarError::NotUnicode(_)) => Err(OpenError::UnknownBoardName( "<invalid UTF-8 string>".to_owned(), )), }?; let isp_boot_cmds = ISP_BOOT_CMDS .iter() .find(|x| x.0 == board) .ok_or_else(|| OpenError::UnknownBoardName(board.clone()))? 
.1; let serial = spawn_blocking(|| { let dev = choose_serial().map_err(OpenError::ChooseSerial)?; Serial::from_path( &dev, &SerialPortSettings { baud_rate: 115200, timeout: std::time::Duration::from_secs(60), ..Default::default() }, ) .map_err(|e| OpenError::Serial(dev, e.into())) }) .await .unwrap()?; let serial = BufStream::new(serial); // Put the device into ISP mode. Fail fast if this was unsuccessful. let serial_m = Mutex::new(serial); retry_on_fail(|| async { maix_enter_isp_mode(&mut serial_m.try_lock().unwrap(), isp_boot_cmds).await }) .await .map_err(OpenError::Communication)?; let serial = serial_m.into_inner().unwrap(); let probe = Self { serial, isp_boot_cmds, }; Ok(probe) } } #[derive(thiserror::Error, Debug)] enum RunError { #[error("{0}")] ProcessElf( #[source] #[from] ProcessElfError, ), #[error("{0}")] Communication( #[source] #[from] CommunicationError, ), } impl DebugProbe for KflashDebugProbe { fn program_and_get_output( &mut self, exe: &Path, ) -> Pin<Box<dyn Future<Output = Result<DynAsyncRead<'_>>> + '_>> { let exe = exe.to_owned(); Box::pin(async move { // Extract loadable sections let LoadableCode { regions, entry } = read_elf(&exe).await.map_err(RunError::ProcessElf)?; // Put the device into ISP mode. let serial_m = Mutex::new(&mut self.serial); let isp_boot_cmds = self.isp_boot_cmds; retry_on_fail(|| async { maix_enter_isp_mode(*serial_m.try_lock().unwrap(), isp_boot_cmds).await }) .await .map_err(RunError::Communication)?; drop(serial_m); // Program the executable image for (i, region) in regions.iter().enumerate() { log::debug!("Programming the region {} of {}", i + 1, regions.len()); if region.1 < 0x80000000 { log::debug!( "Starting address (0x{:x}) is out of range, ignoring", region.1 ); continue; } flash_dataframe(&mut self.serial, &region.0, region.1 as u32).await?; } // Boot the program log::debug!("Booting from 0x{:08x}", entry); boot(&mut self.serial, entry as u32).await?; // Now, pass the channel to the caller Ok(Box::pin(Demux::new(&mut self.serial)) as _) }) } } #[derive(Debug)] enum BootCmd { Dtr(bool), Rts(bool), Delay, } const ISP_BOOT_CMDS: &[(&str, &[BootCmd])] = &[ // `reset_to_isp_kd233` ( "kd233", &[ BootCmd::Dtr(false), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Dtr(true), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Rts(true), BootCmd::Dtr(false), BootCmd::Delay, ], ), // `reset_to_isp_dan` ( "dan", &[ BootCmd::Dtr(false), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Dtr(false), BootCmd::Rts(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, ], ), // `reset_to_isp_goD` ( "god", &[ BootCmd::Dtr(true), BootCmd::Rts(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, ], ), // `reset_to_boot_maixgo` ( "maixgo", &[ BootCmd::Dtr(false), BootCmd::Rts(false), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(true), BootCmd::Delay, BootCmd::Rts(false), BootCmd::Dtr(false), BootCmd::Delay, ], ), ]; async fn maix_enter_isp_mode( serial: &mut BufStream<Serial>, cmds: &[BootCmd], ) -> Result<(), CommunicationError> { let t = Duration::from_millis(100); let serial_inner = serial.get_mut(); log::debug!("Trying to put the chip into ISP mode"); for cmd in cmds { log::trace!("Performing the command {:?}", cmd); match cmd { BootCmd::Dtr(b) => { serial_inner .write_data_terminal_ready(*b) .map_err(CommunicationError::Serial)?; } BootCmd::Rts(b) => { serial_inner .write_request_to_send(*b) .map_err(CommunicationError::Serial)?; } BootCmd::Delay
=> { delay_for(t).await; } } } // Clear any stale data in the receive buffer read_to_end_and_discard_for_some_time(serial).await?; // Send a greeting command log::trace!("Sending a greeting command"); slip::write_frame( serial, &[ 0xc2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ], ) .await .map_err(CommunicationError::SerialIo)?; serial.flush().await.map_err(CommunicationError::SerialIo)?; // Wait for a response log::trace!("Waiting for a response"); match tokio::time::timeout(COMM_TIMEOUT, slip::read_frame(serial)).await { Ok(Ok(frame)) => { log::trace!( "Received a packet: {:?} The chip probably successfully entered ISP mode", frame ); } Ok(Err(e)) => return Err(e.into()), Err(_) => return Err(CommunicationError::Timeout), } Ok(()) } async fn flash_dataframe( serial: &mut (impl AsyncBufRead + AsyncWrite + Unpin), data: &[u8], address: u32, ) -> Result<(), CommunicationError> { const CHUNK_LEN: usize = 1024; let mut buffer = [0u8; 8 + CHUNK_LEN]; buffer[0] = 0xc3; for (i, chunk) in data.chunks(CHUNK_LEN).enumerate() { let chunk_addr = address + (i * CHUNK_LEN) as u32; log::debug!( "Programming the range {:?}/{:?} at 0x{:x} ({}%)", (i * CHUNK_LEN)..(i * CHUNK_LEN + chunk.len()), data.len(), chunk_addr, i * CHUNK_LEN * 100 / data.len(), ); let mut error = None; for _ in 0..16 { buffer[0..][..4].copy_from_slice(&chunk_addr.to_le_bytes()); buffer[4..][..4].copy_from_slice(&(chunk.len() as u32).to_le_bytes()); buffer[8..][..chunk.len()].copy_from_slice(chunk); // Send a frame log::trace!("Sending a write command"); write_request(serial, 0xc3, &buffer[..8 + chunk.len()]) .await .map_err(CommunicationError::SerialIo)?; serial.flush().await.map_err(CommunicationError::SerialIo)?; // Wait for a response let response = match tokio::time::timeout(COMM_TIMEOUT, slip::read_frame(serial)).await { Ok(Ok(frame)) => frame, Ok(Err(e)) => return Err(e.into()), Err(_) => return Err(CommunicationError::Timeout), }; let reason: Option<IspReasonCode> = response.get(1).cloned().map(Into::into); match reason { Some(IspReasonCode::Ok) => { error = None; break; } Some(x) => { error = Some(CommunicationError::RemoteError(x)); } None => { error = Some(CommunicationError::MalformedResponse); } } log::trace!("Got {:?}. Retrying...", reason); } if let Some(error) = error { return Err(error); } } Ok(()) } async fn boot( serial: &mut (impl AsyncWrite + Unpin), address: u32, ) -> Result<(), CommunicationError> { let mut buffer = [0u8; 8]; buffer[..4].copy_from_slice(&address.to_le_bytes()); // Send a frame log::trace!("Sending a boot command"); write_request(serial, 0xc5, &buffer) .await .map_err(CommunicationError::SerialIo)?; serial.flush().await.map_err(CommunicationError::SerialIo)?; Ok(()) } async fn write_request( serial: &mut (impl AsyncWrite + Unpin), cmd: u8, req_payload: &[u8], ) -> std::io::Result<()> { let mut frame_payload = vec![0u8; req_payload.len() + 8]; frame_payload[0] = cmd; frame_payload[8..].copy_from_slice(req_payload); let mut digest = crc32::Digest::new_with_initial(crc32::IEEE, 0); digest.write(&req_payload); let crc = digest.sum32(); frame_payload[4..][..4].copy_from_slice(&crc.to_le_bytes()); slip::write_frame(serial, &frame_payload).await } #[derive(Debug, Copy, Clone)] enum IspReasonCode { Default, Ok, BadDataLen,
BadInitialization, BadExec, Unknown(u8), } impl From<u8> for IspReasonCode { fn from(x: u8) -> Self { match x { 0x00 => Self::Default, 0xe0 => Self::Ok, 0xe1 => Self::BadDataLen, 0xe2 => Self::BadDataChecksum, 0xe3 => Self::InvalidCommand, 0xe4 => Self::BadInitialization, 0xe5 => Self::BadExec, x => Self::Unknown(x), } } } async fn read_to_end_and_discard_for_some_time( reader: &mut (impl AsyncRead + Unpin), ) -> std::io::Result<()> { log::trace!("Starting discarding stale data in the receive buffer"); match tokio::time::timeout(Duration::from_millis(100), read_to_end_and_discard(reader)).await { // FIXME: This match arm is really unreachable because `Infallible` is // uninhabited. Waiting for `exhaustive_patterns` feature // <https://github.com/rust-lang/rust/issues/51085> Ok(Ok(_)) => unreachable!(), Ok(Err(e)) => Err(e), Err(_) => Ok(()), } } async fn read_to_end_and_discard( reader: &mut (impl AsyncRead + Unpin), ) -> std::io::Result<std::convert::Infallible> { let mut buf = [0u8; 256]; loop { let num_bytes = reader.read(&mut buf).await?; log::trace!("Discarding {} byte(s)", num_bytes); } } #[derive(thiserror::Error, Debug)] enum ProcessElfError { #[error("Couldn't read the ELF file: {0}")] Read(#[source] std::io::Error), #[error("Couldn't parse the ELF file: {0}")] Parse(#[source] goblin::error::Error), } struct LoadableCode { /// The regions to be loaded onto the target. regions: Vec<(Vec<u8>, u64)>, /// The entry point. entry: u64, } /// Read the specified ELF file and return regions to be loaded onto the target. async fn read_elf(exe: &Path) -> Result<LoadableCode, ProcessElfError> { let elf_bytes = tokio::fs::read(&exe).await.map_err(ProcessElfError::Read)?; let elf = goblin::elf::Elf::parse(&elf_bytes).map_err(ProcessElfError::Parse)?; let regions = elf .program_headers .iter() .filter_map(|ph| { if ph.p_type == goblin::elf32::program_header::PT_LOAD && ph.p_filesz > 0 { Some(( elf_bytes[ph.p_offset as usize..][..ph.p_filesz as usize].to_vec(), ph.p_paddr, )) } else { None } }) .collect(); Ok(LoadableCode { regions, entry: elf.entry, }) }
BadDataChecksum, InvalidCommand,
random_line_split
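// Illustration: the probe code above frames every ISP packet through an
// internal `slip` module. Assuming it implements standard SLIP (RFC 1055),
// a minimal encoder sketch could look like the following; the real module's
// exact behavior (delimiter placement, error handling) is an assumption,
// not something confirmed by the source above.
const SLIP_END: u8 = 0xC0; // frame delimiter
const SLIP_ESC: u8 = 0xDB; // escape introducer
const SLIP_ESC_END: u8 = 0xDC; // escaped form of 0xC0 inside a frame
const SLIP_ESC_ESC: u8 = 0xDD; // escaped form of 0xDB inside a frame

fn slip_encode(payload: &[u8]) -> Vec<u8> {
    let mut out = vec![SLIP_END]; // open the frame
    for &b in payload {
        match b {
            SLIP_END => out.extend_from_slice(&[SLIP_ESC, SLIP_ESC_END]),
            SLIP_ESC => out.extend_from_slice(&[SLIP_ESC, SLIP_ESC_ESC]),
            other => out.push(other),
        }
    }
    out.push(SLIP_END); // close the frame
    out
}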
semaphore.rs
use std::{fmt, mem}; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::Ordering::{Relaxed, Acquire}; use crate::state::ReleaseState::Unlocked; use crate::state::AcquireState::{Available, Queued}; use std::fmt::{Debug, Formatter}; use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState}; use std::cell::UnsafeCell; use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc}; use std::marker::{PhantomPinned, PhantomData}; use crate::waker::AtomicWaker; use std::ptr::null; use std::sync::Arc; use crate::atomic::Atomic; use std::mem::size_of; use crate::release::ReleaseAction; #[allow(unused_imports)] // used by docs use crate::errors::PoisonError; /// An async weighted semaphore. See [crate documentation](index.html) for usage. // This implementation encodes state (the available counter, acquire queue, and cancel queue) into // multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize // by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting // to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise // the lock is marked dirty to indicate that there is additional work for the lock owner to do. pub struct Semaphore { // The number of available permits or the back of the queue (without next edges). pub(crate) acquire: Atomic<AcquireState>, // A number of releasable permits, and the state of the current release lock. pub(crate) release: Atomic<ReleaseState>, // The front of the queue (with next edges). pub(crate) front: UnsafeCell<*const Waiter>, // The last node swapped from AcquireState (with next edges). pub(crate) middle: UnsafeCell<*const Waiter>, // A stack of nodes that are cancelling. pub(crate) next_cancel: Atomic<*const Waiter>, } unsafe impl Sync for Semaphore {} unsafe impl Send for Semaphore {} impl UnwindSafe for Semaphore {} impl RefUnwindSafe for Semaphore {} impl Debug for Semaphore { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self.acquire.load(Relaxed) { Available(available) => write!(f, "Semaphore::Ready({:?})", available)?, Queued(_) => match self.release.load(Relaxed) { Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?, _ => write!(f, "Semaphore::Unknown")?, }, }; Ok(()) } } impl Semaphore { /// The maximum number of permits that can be made available. This is slightly smaller than /// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the /// semaphore. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard}; /// struct ReadWriteLock(Semaphore); /// impl ReadWriteLock { /// fn new() -> Self { /// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE)) /// } /// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers. /// async fn read(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(1).await.unwrap() /// } /// // The writer acquires all the permits, preventing any concurrent writers or readers. The /// // first-in-first-out priority policy prevents writer starvation. /// async fn write(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap() /// } /// } /// ``` pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1; /// Create a new semaphore with an initial number of permits. 
/// # Examples /// ``` /// use async_weighted_semaphore::Semaphore; /// let semaphore = Semaphore::new(1024); /// ``` pub fn new(initial: usize) -> Self { Semaphore { acquire: Atomic::new(Available(Permits::new(initial))), release: Atomic::new(Unlocked(Permits::new(0))), front: UnsafeCell::new(null()), middle: UnsafeCell::new(null()), next_cancel: Atomic::new(null()), } } /// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available. /// Then consume the requested permits and return a [`SemaphoreGuard`]. /// # Errors /// Returns [`PoisonError`] if the semaphore is poisoned. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) { /// let guard = semaphore.acquire(1).await.unwrap(); /// future.await /// } /// ``` pub fn acquire(&self, amount: usize) -> AcquireFuture { AcquireFuture(UnsafeCell::new(Waiter { semaphore: self, step: UnsafeCell::new(AcquireStep::Entering), waker: unsafe { AtomicWaker::new() }, amount, next: UnsafeCell::new(null()), prev: UnsafeCell::new(null()), next_cancel: UnsafeCell::new(null()), }), PhantomData, PhantomPinned) } /// Like [acquire](#method.acquire), but fails if the call would block. /// # Errors /// * Returns [`TryAcquireError::Poisoned`] if the semaphore is poisoned. /// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can /// occur if there are insufficient available permits or if there is another pending call to acquire. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) { /// if semaphore.try_acquire(1).is_ok() { /// future.await /// } /// } /// ``` pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> { let mut current = self.acquire.load(Acquire); loop { match current { Queued(_) => return Err(TryAcquireError::WouldBlock), Available(available) => { let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?; if available < amount { return Err(TryAcquireError::WouldBlock); } if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount)))
} } } } /// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, SendError}; /// // Limit size of a producer-consumer queue /// async fn send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), SendError<T>>{ /// match semaphore.acquire_arc(1).await { /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(PoisonError) => Err(SendError(message)), /// Ok(guard) => match sender.send((guard, message)).await{ /// Err(SendError((guard, message))) => Err(SendError(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc { AcquireFutureArc { arc: self.clone(), inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) }, } } /// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`, /// [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, TrySendError}; /// // Limit size of a producer-consumer queue /// async fn try_send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), TrySendError<T>>{ /// match semaphore.try_acquire_arc(1) { /// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)), /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)), /// Ok(guard) => match sender.try_send((guard, message)) { /// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)), /// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> { let guard = self.try_acquire(amount)?; let result = SemaphoreGuardArc::new(self.clone(), amount); guard.forget(); Ok(result) } /// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire) /// that can succeed with the additional permits. Calling `release` often makes sense after calling /// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that /// are available for processing. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// use async_channel::{Receiver, RecvError}; /// // Limit size of a producer-consumer queue /// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{ /// let result = recv.recv().await?; /// // Note that this only guards elements in the queue, not those being processed after the /// // queue. /// semaphore.release(1); /// Ok(result) /// } /// ``` pub fn release(&self, amount: usize) { unsafe { ReleaseAction { sem: self, releasable: Permits::new(amount) }.release(); } } /// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately. /// This can be used to unblock pending acquires when the guarded operation would fail anyway. 
/// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// # use std::sync::Arc; /// # use async_std::sync::Mutex; /// use async_channel::{Receiver, RecvError}; /// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){ /// while let Ok(x) = receiver.recv().await { /// println!("{:?}", x); /// semaphore.release(1); /// } /// // There will be no more calls to recv, so unblock all senders. /// semaphore.poison(); /// } /// ``` pub fn poison(&self) { unsafe { ReleaseAction { sem: self, releasable: Permits::poison() }.release(); } } }
{ return Ok(SemaphoreGuard::new(self, amount)); }
conditional_block
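// Illustrative sketch (with std atomics) of the "acquire the release lock or
// mark it dirty" protocol described in the semaphore's header comment above.
// This is not the crate's actual code -- the real `ReleaseState` also carries
// a permit count -- it only shows the handoff shape: a releaser either becomes
// the lock owner and does the work, or leaves a dirty mark that the current
// owner is guaranteed to observe before unlocking.
use std::sync::atomic::{AtomicU8, Ordering};

const UNLOCKED: u8 = 0;
const LOCKED: u8 = 1;
const LOCKED_DIRTY: u8 = 2; // locked, with additional work pending

struct ReleaseLock(AtomicU8);

impl ReleaseLock {
    fn release(&self, mut do_work: impl FnMut()) {
        // Atomically either take the lock or mark it dirty.
        let mut cur = self.0.load(Ordering::Relaxed);
        loop {
            let next = if cur == UNLOCKED { LOCKED } else { LOCKED_DIRTY };
            match self.0.compare_exchange_weak(cur, next, Ordering::AcqRel, Ordering::Acquire) {
                Ok(_) => break,
                Err(actual) => cur = actual,
            }
        }
        if cur != UNLOCKED {
            return; // another releaser owns the lock and will see the dirty mark
        }
        loop {
            do_work();
            // Unlock, unless someone marked the lock dirty while we worked;
            // in that case reclaim it and run the pending work as well.
            match self.0.compare_exchange(LOCKED, UNLOCKED, Ordering::AcqRel, Ordering::Acquire) {
                Ok(_) => return,
                Err(_) => self.0.store(LOCKED, Ordering::Relaxed),
            }
        }
    }
}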
semaphore.rs
use std::{fmt, mem}; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::Ordering::{Relaxed, Acquire}; use crate::state::ReleaseState::Unlocked; use crate::state::AcquireState::{Available, Queued}; use std::fmt::{Debug, Formatter}; use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState}; use std::cell::UnsafeCell; use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc}; use std::marker::{PhantomPinned, PhantomData}; use crate::waker::AtomicWaker; use std::ptr::null; use std::sync::Arc; use crate::atomic::Atomic; use std::mem::size_of; use crate::release::ReleaseAction; #[allow(unused_imports)] // used by docs use crate::errors::PoisonError; /// An async weighted semaphore. See [crate documentation](index.html) for usage. // This implementation encodes state (the available counter, acquire queue, and cancel queue) into // multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize // by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting // to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise // the lock is marked dirty to indicate that there is additional work for the lock owner to do. pub struct Semaphore { // The number of available permits or the back of the queue (without next edges). pub(crate) acquire: Atomic<AcquireState>, // A number of releasable permits, and the state of the current release lock. pub(crate) release: Atomic<ReleaseState>, // The front of the queue (with next edges). pub(crate) front: UnsafeCell<*const Waiter>, // The last node swapped from AcquireState (with next edges). pub(crate) middle: UnsafeCell<*const Waiter>, // A stack of nodes that are cancelling. pub(crate) next_cancel: Atomic<*const Waiter>, } unsafe impl Sync for Semaphore {} unsafe impl Send for Semaphore {} impl UnwindSafe for Semaphore {} impl RefUnwindSafe for Semaphore {} impl Debug for Semaphore { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self.acquire.load(Relaxed) { Available(available) => write!(f, "Semaphore::Ready({:?})", available)?, Queued(_) => match self.release.load(Relaxed) { Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?, _ => write!(f, "Semaphore::Unknown")?, }, }; Ok(()) } } impl Semaphore { /// The maximum number of permits that can be made available. This is slightly smaller than /// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the /// semaphore. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard}; /// struct ReadWriteLock(Semaphore); /// impl ReadWriteLock { /// fn new() -> Self { /// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE)) /// } /// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers. /// async fn read(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(1).await.unwrap() /// } /// // The writer acquires all the permits, preventing any concurrent writers or readers. The /// // first-in-first-out priority policy prevents writer starvation. /// async fn write(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap() /// } /// } /// ``` pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1; /// Create a new semaphore with an initial number of permits. 
/// # Examples /// ``` /// use async_weighted_semaphore::Semaphore; /// let semaphore = Semaphore::new(1024); /// ``` pub fn new(initial: usize) -> Self { Semaphore { acquire: Atomic::new(Available(Permits::new(initial))), release: Atomic::new(Unlocked(Permits::new(0))), front: UnsafeCell::new(null()), middle: UnsafeCell::new(null()), next_cancel: Atomic::new(null()), } } /// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available. /// Then consume the requested permits and return a [`SemaphoreGuard`]. /// # Errors /// Returns [`PoisonError`] if the semaphore is poisoned. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) { /// let guard = semaphore.acquire(1).await.unwrap(); /// future.await /// } /// ``` pub fn acquire(&self, amount: usize) -> AcquireFuture { AcquireFuture(UnsafeCell::new(Waiter { semaphore: self, step: UnsafeCell::new(AcquireStep::Entering), waker: unsafe { AtomicWaker::new() }, amount, next: UnsafeCell::new(null()), prev: UnsafeCell::new(null()), next_cancel: UnsafeCell::new(null()), }), PhantomData, PhantomPinned) } /// Like [acquire](#method.acquire), but fails if the call would block. /// # Errors /// * Returns [`TryAcquireError::Poisoned`] if the semaphore is poisoned. /// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can /// occur if there are insufficient available permits or if there is another pending call to acquire. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) { /// if semaphore.try_acquire(1).is_ok() { /// future.await /// } /// } /// ``` pub fn
(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> { let mut current = self.acquire.load(Acquire); loop { match current { Queued(_) => return Err(TryAcquireError::WouldBlock), Available(available) => { let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?; if available < amount { return Err(TryAcquireError::WouldBlock); } if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) { return Ok(SemaphoreGuard::new(self, amount)); } } } } } /// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, SendError}; /// // Limit size of a producer-consumer queue /// async fn send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), SendError<T>>{ /// match semaphore.acquire_arc(1).await { /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(PoisonError) => Err(SendError(message)), /// Ok(guard) => match sender.send((guard, message)).await{ /// Err(SendError((guard, message))) => Err(SendError(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc { AcquireFutureArc { arc: self.clone(), inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) }, } } /// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`, /// [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, TrySendError}; /// // Limit size of a producer-consumer queue /// async fn try_send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), TrySendError<T>>{ /// match semaphore.try_acquire_arc(1) { /// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)), /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)), /// Ok(guard) => match sender.try_send((guard, message)) { /// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)), /// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> { let guard = self.try_acquire(amount)?; let result = SemaphoreGuardArc::new(self.clone(), amount); guard.forget(); Ok(result) } /// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire) /// that can succeed with the additional permits. Calling `release` often makes sense after calling /// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that /// are available for processing. 
/// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// use async_channel::{Receiver, RecvError}; /// // Limit size of a producer-consumer queue /// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{ /// let result = recv.recv().await?; /// // Note that this only guards elements in the queue, not those being processed after the /// // queue. /// semaphore.release(1); /// Ok(result) /// } /// ``` pub fn release(&self, amount: usize) { unsafe { ReleaseAction { sem: self, releasable: Permits::new(amount) }.release(); } } /// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately. /// This can be used to unblock pending acquires when the guarded operation would fail anyway. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// # use std::sync::Arc; /// # use async_std::sync::Mutex; /// use async_channel::{Receiver, RecvError}; /// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){ /// while let Ok(x) = receiver.recv().await { /// println!("{:?}", x); /// semaphore.release(1); /// } /// // There will be no more calls to recv, so unblock all senders. /// semaphore.poison(); /// } /// ``` pub fn poison(&self) { unsafe { ReleaseAction { sem: self, releasable: Permits::poison() }.release(); } } }
try_acquire
identifier_name
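// The `try_acquire` above is a weak compare-exchange retry loop. A sketch of
// the same pattern on a plain `AtomicUsize` counter (the crate's `Atomic` and
// `Permits` wrappers are internal, so this is an illustration, not its code):
use std::sync::atomic::{AtomicUsize, Ordering};

fn try_take(available: &AtomicUsize, amount: usize) -> bool {
    let mut cur = available.load(Ordering::Acquire);
    loop {
        if cur < amount {
            return false; // not enough permits: the real semaphore returns WouldBlock
        }
        match available.compare_exchange_weak(
            cur,
            cur - amount,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            Ok(_) => return true, // we now own `amount` permits
            Err(actual) => cur = actual, // raced with another caller; retry
        }
    }
}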
semaphore.rs
use std::{fmt, mem}; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::Ordering::{Relaxed, Acquire}; use crate::state::ReleaseState::Unlocked; use crate::state::AcquireState::{Available, Queued}; use std::fmt::{Debug, Formatter}; use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState}; use std::cell::UnsafeCell; use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc}; use std::marker::{PhantomPinned, PhantomData}; use crate::waker::AtomicWaker; use std::ptr::null; use std::sync::Arc; use crate::atomic::Atomic; use std::mem::size_of; use crate::release::ReleaseAction; #[allow(unused_imports)] // used by docs use crate::errors::PoisonError; /// An async weighted semaphore. See [crate documentation](index.html) for usage. // This implementation encodes state (the available counter, acquire queue, and cancel queue) into // multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize // by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting // to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise // the lock is marked dirty to indicate that there is additional work for the lock owner to do. pub struct Semaphore { // The number of available permits or the back of the queue (without next edges). pub(crate) acquire: Atomic<AcquireState>, // A number of releasable permits, and the state of the current release lock. pub(crate) release: Atomic<ReleaseState>, // The front of the queue (with next edges). pub(crate) front: UnsafeCell<*const Waiter>, // The last node swapped from AcquireState (with next edges). pub(crate) middle: UnsafeCell<*const Waiter>, // A stack of nodes that are cancelling. pub(crate) next_cancel: Atomic<*const Waiter>, } unsafe impl Sync for Semaphore {} unsafe impl Send for Semaphore {} impl UnwindSafe for Semaphore {} impl RefUnwindSafe for Semaphore {} impl Debug for Semaphore { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result
} impl Semaphore { /// The maximum number of permits that can be made available. This is slightly smaller than /// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the /// semaphore. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard}; /// struct ReadWriteLock(Semaphore); /// impl ReadWriteLock { /// fn new() -> Self { /// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE)) /// } /// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers. /// async fn read(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(1).await.unwrap() /// } /// // The writer acquires all the permits, preventing any concurrent writers or readers. The /// // first-in-first-out priority policy prevents writer starvation. /// async fn write(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap() /// } /// } /// ``` pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1; /// Create a new semaphore with an initial number of permits. /// # Examples /// ``` /// use async_weighted_semaphore::Semaphore; /// let semaphore = Semaphore::new(1024); /// ``` pub fn new(initial: usize) -> Self { Semaphore { acquire: Atomic::new(Available(Permits::new(initial))), release: Atomic::new(Unlocked(Permits::new(0))), front: UnsafeCell::new(null()), middle: UnsafeCell::new(null()), next_cancel: Atomic::new(null()), } } /// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available. /// Then consume the requested permits and return a [`SemaphoreGuard`]. /// # Errors /// Returns [`PoisonError`] if the semaphore is poisoned. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) { /// let guard = semaphore.acquire(1).await.unwrap(); /// future.await /// } /// ``` pub fn acquire(&self, amount: usize) -> AcquireFuture { AcquireFuture(UnsafeCell::new(Waiter { semaphore: self, step: UnsafeCell::new(AcquireStep::Entering), waker: unsafe { AtomicWaker::new() }, amount, next: UnsafeCell::new(null()), prev: UnsafeCell::new(null()), next_cancel: UnsafeCell::new(null()), }), PhantomData, PhantomPinned) } /// Like [acquire](#method.acquire), but fails if the call would block. /// # Errors /// * Returns [`TryAcquireError::Poisoned`] if the semaphore is poisoned. /// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can /// occur if there are insufficient available permits or if there is another pending call to acquire. 
/// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) { /// if semaphore.try_acquire(1).is_ok() { /// future.await /// } /// } /// ``` pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> { let mut current = self.acquire.load(Acquire); loop { match current { Queued(_) => return Err(TryAcquireError::WouldBlock), Available(available) => { let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?; if available < amount { return Err(TryAcquireError::WouldBlock); } if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) { return Ok(SemaphoreGuard::new(self, amount)); } } } } } /// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, SendError}; /// // Limit size of a producer-consumer queue /// async fn send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), SendError<T>>{ /// match semaphore.acquire_arc(1).await { /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(PoisonError) => Err(SendError(message)), /// Ok(guard) => match sender.send((guard, message)).await{ /// Err(SendError((guard, message))) => Err(SendError(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc { AcquireFutureArc { arc: self.clone(), inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) }, } } /// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`, /// [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, TrySendError}; /// // Limit size of a producer-consumer queue /// async fn try_send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), TrySendError<T>>{ /// match semaphore.try_acquire_arc(1) { /// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)), /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)), /// Ok(guard) => match sender.try_send((guard, message)) { /// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)), /// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> { let guard = self.try_acquire(amount)?; let result = SemaphoreGuardArc::new(self.clone(), amount); guard.forget(); Ok(result) } /// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire) /// that can succeed with the additional permits. Calling `release` often makes sense after calling /// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that /// are available for processing. 
/// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// use async_channel::{Receiver, RecvError}; /// // Limit size of a producer-consumer queue /// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{ /// let result = recv.recv().await?; /// // Note that this only guards elements in the queue, not those being processed after the /// // queue. /// semaphore.release(1); /// Ok(result) /// } /// ``` pub fn release(&self, amount: usize) { unsafe { ReleaseAction { sem: self, releasable: Permits::new(amount) }.release(); } } /// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately. /// This can be used to unblock pending acquires when the guarded operation would fail anyway. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// # use std::sync::Arc; /// # use async_std::sync::Mutex; /// use async_channel::{Receiver, RecvError}; /// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){ /// while let Ok(x) = receiver.recv().await { /// println!("{:?}", x); /// semaphore.release(1); /// } /// // There will be no more calls to recv, so unblock all senders. /// semaphore.poison(); /// } /// ``` pub fn poison(&self) { unsafe { ReleaseAction { sem: self, releasable: Permits::poison() }.release(); } } }
{ match self.acquire.load(Relaxed) { Available(available) => write!(f, "Semaphore::Ready({:?})", available)?, Queued(_) => match self.release.load(Relaxed) { Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?, _ => write!(f, "Semaphore::Unknown")?, }, }; Ok(()) }
identifier_body
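// Worked check of the MAX_AVAILABLE constant defined above: it keeps the top
// three bits of a usize free (presumably so the counter can be packed into the
// same word as other state -- that reading is an assumption, not confirmed).
fn main() {
    let max = (1usize << (std::mem::size_of::<usize>() * 8 - 3)) - 1;
    // On a 64-bit target this is 2^61 - 1 = 0x1fff_ffff_ffff_ffff.
    #[cfg(target_pointer_width = "64")]
    assert_eq!(max, 0x1fff_ffff_ffff_ffff);
    println!("MAX_AVAILABLE = {:#x}", max);
}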
semaphore.rs
use std::{fmt, mem}; use std::panic::{RefUnwindSafe, UnwindSafe}; use std::sync::atomic::Ordering::{Relaxed, Acquire}; use crate::state::ReleaseState::Unlocked; use crate::state::AcquireState::{Available, Queued}; use std::fmt::{Debug, Formatter}; use crate::state::{AcquireStep, Waiter, Permits, AcquireState, ReleaseState}; use std::cell::UnsafeCell; use crate::{AcquireFuture, TryAcquireError, SemaphoreGuard, AcquireFutureArc, SemaphoreGuardArc}; use std::marker::{PhantomPinned, PhantomData}; use crate::waker::AtomicWaker; use std::ptr::null; use std::sync::Arc; use crate::atomic::Atomic; use std::mem::size_of; use crate::release::ReleaseAction; #[allow(unused_imports)] // used by docs use crate::errors::PoisonError; /// An async weighted semaphore. See [crate documentation](index.html) for usage. // This implementation encodes state (the available counter, acquire queue, and cancel queue) into // multiple atomic variables and linked lists. Concurrent acquires (and concurrent cancels) synchronize // by pushing onto a stack with an atomic swap. Releases synchronize with other operations by attempting // to acquire a lock. If the lock is successfully acquired, the release can proceed. Otherwise // the lock is marked dirty to indicate that there is additional work for the lock owner to do. pub struct Semaphore { // The number of available permits or the back of the queue (without next edges). pub(crate) acquire: Atomic<AcquireState>, // A number of releasable permits, and the state of the current release lock. pub(crate) release: Atomic<ReleaseState>, // The front of the queue (with next edges). pub(crate) front: UnsafeCell<*const Waiter>, // The last node swapped from AcquireState (with next edges). pub(crate) middle: UnsafeCell<*const Waiter>, // A stack of nodes that are cancelling. pub(crate) next_cancel: Atomic<*const Waiter>, } unsafe impl Sync for Semaphore {} unsafe impl Send for Semaphore {} impl UnwindSafe for Semaphore {} impl RefUnwindSafe for Semaphore {} impl Debug for Semaphore { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self.acquire.load(Relaxed) { Available(available) => write!(f, "Semaphore::Ready({:?})", available)?, Queued(_) => match self.release.load(Relaxed) { Unlocked(available) => write!(f, "Semaphore::Blocked({:?})", available)?, _ => write!(f, "Semaphore::Unknown")?, }, }; Ok(()) } } impl Semaphore { /// The maximum number of permits that can be made available. This is slightly smaller than /// [`usize::MAX`]. If the number of available permits exceeds this number, it may poison the /// semaphore. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, SemaphoreGuard}; /// struct ReadWriteLock(Semaphore); /// impl ReadWriteLock { /// fn new() -> Self { /// ReadWriteLock(Semaphore::new(Semaphore::MAX_AVAILABLE)) /// } /// // Acquire one permit, allowing up to MAX_AVAILABLE concurrent readers. /// async fn read(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(1).await.unwrap() /// } /// // The writer acquires all the permits, preventing any concurrent writers or readers. The /// // first-in-first-out priority policy prevents writer starvation. /// async fn write(&self) -> SemaphoreGuard<'_> { /// self.0.acquire(Semaphore::MAX_AVAILABLE).await.unwrap() /// } /// } /// ``` pub const MAX_AVAILABLE: usize = (1 << (size_of::<usize>() * 8 - 3)) - 1; /// Create a new semaphore with an initial number of permits. 
/// # Examples /// ``` /// use async_weighted_semaphore::Semaphore; /// let semaphore = Semaphore::new(1024); /// ``` pub fn new(initial: usize) -> Self { Semaphore { acquire: Atomic::new(Available(Permits::new(initial))), release: Atomic::new(Unlocked(Permits::new(0))), front: UnsafeCell::new(null()), middle: UnsafeCell::new(null()), next_cancel: Atomic::new(null()), } } /// Wait until there are no older pending calls to [acquire](#method.acquire) and at least `amount` permits available. /// Then consume the requested permits and return a [`SemaphoreGuard`]. /// # Errors /// Returns [`PoisonError`] if the semaphore is poisoned. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn limit_concurrency(semaphore: &Semaphore, future: impl Future<Output=()>) { /// let guard = semaphore.acquire(1).await.unwrap(); /// future.await /// } /// ``` pub fn acquire(&self, amount: usize) -> AcquireFuture { AcquireFuture(UnsafeCell::new(Waiter { semaphore: self, step: UnsafeCell::new(AcquireStep::Entering), waker: unsafe { AtomicWaker::new() }, amount, next: UnsafeCell::new(null()), prev: UnsafeCell::new(null()), next_cancel: UnsafeCell::new(null()), }), PhantomData, PhantomPinned) } /// Like [acquire](#method.acquire), but fails if the call would block. /// # Errors /// * Returns [`TryAcquireError::Poisoned`] if the semaphore is poisoned. /// * Returns [`TryAcquireError::WouldBlock`] if a call to `acquire` would have blocked. This can /// occur if there are insufficient available permits or if there is another pending call to acquire. /// # Examples /// ``` /// # use futures::executor::block_on; /// # use std::future::Future; /// use async_weighted_semaphore::Semaphore; /// async fn run_if_safe(semaphore: &Semaphore, future: impl Future<Output=()>) { /// if semaphore.try_acquire(1).is_ok() { /// future.await /// } /// } /// ``` pub fn try_acquire(&self, amount: usize) -> Result<SemaphoreGuard, TryAcquireError> { let mut current = self.acquire.load(Acquire); loop { match current { Queued(_) => return Err(TryAcquireError::WouldBlock), Available(available) => { let available = available.into_usize().ok_or(TryAcquireError::Poisoned)?; if available < amount { return Err(TryAcquireError::WouldBlock); } if self.acquire.cmpxchg_weak_acqrel(&mut current, Available(Permits::new(available - amount))) { return Ok(SemaphoreGuard::new(self, amount)); } } } } } /// Like [acquire](#method.acquire), but takes an [`Arc`] `<Semaphore>` and returns a guard that is `'static`, [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, PoisonError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, SendError}; /// // Limit size of a producer-consumer queue /// async fn send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), SendError<T>>{ /// match semaphore.acquire_arc(1).await { /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(PoisonError) => Err(SendError(message)), /// Ok(guard) => match sender.send((guard, message)).await{ /// Err(SendError((guard, message))) => Err(SendError(message)), /// Ok(()) => Ok(()) /// } /// } /// }
pub fn acquire_arc(self: &Arc<Self>, amount: usize) -> AcquireFutureArc { AcquireFutureArc { arc: self.clone(), inner: unsafe { mem::transmute::<AcquireFuture, AcquireFuture>(self.acquire(amount)) }, } } /// Like [try_acquire](#method.try_acquire), but takes an [`Arc`] `<Semaphore>`, and returns a guard that is `'static`, /// [`Send`] and [`Sync`]. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError, SemaphoreGuardArc}; /// # use std::sync::Arc; /// use async_channel::{Sender, TrySendError}; /// // Limit size of a producer-consumer queue /// async fn try_send<T>(semaphore: &Arc<Semaphore>, /// sender: &Sender<(SemaphoreGuardArc, T)>, /// message: T /// ) -> Result<(), TrySendError<T>>{ /// match semaphore.try_acquire_arc(1) { /// Err(TryAcquireError::WouldBlock) => Err(TrySendError::Full(message)), /// // A semaphore can be poisoned to prevent deadlock when a channel closes. /// Err(TryAcquireError::Poisoned) => Err(TrySendError::Closed(message)), /// Ok(guard) => match sender.try_send((guard, message)) { /// Err(TrySendError::Closed((guard, message))) => Err(TrySendError::Closed(message)), /// Err(TrySendError::Full((guard, message))) => Err(TrySendError::Full(message)), /// Ok(()) => Ok(()) /// } /// } /// } /// ``` pub fn try_acquire_arc(self: &Arc<Self>, amount: usize) -> Result<SemaphoreGuardArc, TryAcquireError> { let guard = self.try_acquire(amount)?; let result = SemaphoreGuardArc::new(self.clone(), amount); guard.forget(); Ok(result) } /// Return `amount` permits to the semaphore. This will eventually wake any calls to [acquire](#method.acquire) /// that can succeed with the additional permits. Calling `release` often makes sense after calling /// [`SemaphoreGuard::forget`] or when using the semaphore to signal the number of elements that /// are available for processing. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// use async_channel::{Receiver, RecvError}; /// // Limit size of a producer-consumer queue /// async fn recv<T>(semaphore: &Semaphore, recv: &Receiver<T>) -> Result<T, RecvError>{ /// let result = recv.recv().await?; /// // Note that this only guards elements in the queue, not those being processed after the /// // queue. /// semaphore.release(1); /// Ok(result) /// } /// ``` pub fn release(&self, amount: usize) { unsafe { ReleaseAction { sem: self, releasable: Permits::new(amount) }.release(); } } /// Poison the semaphore, causing all pending and future calls to `acquire` to fail immediately. /// This can be used to unblock pending acquires when the guarded operation would fail anyway. /// # Examples /// ``` /// # use async_weighted_semaphore::{Semaphore, TryAcquireError}; /// # use std::sync::Arc; /// # use async_std::sync::Mutex; /// use async_channel::{Receiver, RecvError}; /// async fn consume(semaphore: &Semaphore, receiver: Receiver<usize>){ /// while let Ok(x) = receiver.recv().await { /// println!("{:?}", x); /// semaphore.release(1); /// } /// // There will be no more calls to recv, so unblock all senders. /// semaphore.poison(); /// } /// ``` pub fn poison(&self) { unsafe { ReleaseAction { sem: self, releasable: Permits::poison() }.release(); } } }
/// ```
random_line_split
14.rs
// --- Day 14: Disk Defragmentation --- // Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible. // The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes. // A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1). // The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127. // The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary. // Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones: // ##.#.#..--> //.#.#.#.# //....#.#. // #.#.##.# //.##.#... // ##..#..# //.#...#.. // ##.#.##.--> // | | // V V // In this example, 8108 squares are used across the entire 128x128 grid. // Given your actual key string, how many squares are used? // Your puzzle input is jxqlasbh. #![feature(conservative_impl_trait)] #![feature(entry_and_modify)] // #![feature(nll)] extern crate advent2017; use advent2017::knot::{Knot}; use std::io::Cursor; use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; /// Given any Binary, return the bits of the binary /// representation of the type (msb first) as a `Vec<bool>`, true wherever the bit is set. 
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> { let s = format!("{:04b}", num); s.chars() .map(|c| c == '1') .collect::<Vec<bool>>() } /// Given a string representing a hexadecimal number, /// where each character of the string is a hexadecimal digit representing 4 binary bits, /// return a bitfield of the unsigned binary representation of that number, /// msb at index 0 fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> { (0..hex.len()) .map(|i| &hex[i..i+1]) .map(|slice| u8::from_str_radix(slice, 16).unwrap()) .flat_map(|num| num_to_bits(num)) .collect::<Vec<bool>>() } fn hashes(seed: &str) -> Vec<String> { (0..128) .map(|i| format!("{}-{}", seed, i)) .map(|plaintext| { let mut knot = Knot::new(); knot.hash(Cursor::new(plaintext)) }) .collect() } fn bitcount_hash(hash: &str) -> u32 { let mut bitsum = 0; for j in 0..32 { let slice = &hash[j..j+1]; let num = u32::from_str_radix(slice, 16).unwrap(); bitsum += num.count_ones(); } bitsum } fn count_hash_seed(s: &str) -> u32 { let mut bitsum = 0; for hash in hashes(&s) { bitsum += bitcount_hash(&hash); } bitsum } fn part_one() { let input = "jxqlasbh"; println!("{}: {}", input, count_hash_seed(&input)); } // --- Part Two --- // Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region. // In the example above, the following nine regions are visible, each marked with a distinct digit: // 11.2.3..--> //.1.2.3.4 //....5.6. // 7.8.55.9 //.88.5... // 88..5..8 //.8...8.. // 88.8.88.--> // | | // V V // Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present. // How many regions are present given your key string? fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> { let mut grid = Vec::with_capacity(128); for hash in hashes(hash_seed) { grid.push(hex_to_bits(&hash)); } grid } /// make a single scan through the grid // At each position, if the cell is filled, look in each cardinal direction for adjacent clusters // If at least one is found, merge this element and all clusters that it is touching into the // cluster with the lowest id that was found. // If none are found, then start a new cluster on this cell. 
type ClusterId = i32; #[derive(Debug)] struct Loc(usize, usize); type CGrid = Vec<Vec<CellState>>; type CMap = HashMap<ClusterId, Vec<Loc>>; #[derive(PartialEq, Eq, Debug, Clone)] enum CellState { Unclaimed, Empty, Id(ClusterId) } struct Clusters { grid: CGrid, index: CMap, next_id: ClusterId } impl Clusters { fn new(size: u32) -> Self { let mut grid : Vec<Vec<CellState>> = Vec::new(); for _ in 0..size { let mut row = vec![]; for _ in 0..size { row.push(CellState::Unclaimed); } grid.push(row); } Clusters { grid, index: HashMap::new(), next_id: 0 } } fn print_small(&self, window_size: usize) { for row in self.grid.iter().take(window_size) { println!("\n{}", row.iter().take(window_size).map(|c| match c { &CellState::Id(id) => format!("{:4}", id), &CellState::Empty => " .".to_string(), &CellState::Unclaimed => " ?".to_string() }) .collect::<Vec<String>>() .join(" ")); } } fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) { self.grid[i][j] = CellState::Id(id); } fn new_cluster(&mut self, loc: Loc) { let id = self.next_id; self.next_id += 1; self.add_to_cluster(loc, id); } fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) { self.add_grid(&loc, id); match self.index.entry(id) { Occupied(mut e) => { e.get_mut().push(loc); } Vacant(e) => { e.insert(vec![loc]); } } } fn set_empty(&mut self, Loc(i, j): Loc) { self.grid[i][j] = CellState::Empty; } fn state(&self, &Loc(i, j): &Loc) -> CellState { self.grid[i][j].clone() } fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) { if dest == *other { return; } if let Some(mut locs) = self.index.remove(&other) { for loc in locs.iter() { self.add_grid(&loc, dest); } self.index.entry(dest) .and_modify(|f| f.append(&mut locs)) .or_insert_with(|| locs ); } } } fn
(size: u32, occupied: &Vec<Vec<bool>>) { for row in occupied.iter().take(size as usize) { println!("\n{}", row.iter().take(size as usize).map(|c| match c { &true => "#", &false => ".", }) .collect::<Vec<&str>>() .join(" ")); } } /* This algorithm makes one pass through the grid, left to right, top to bottom. At each cell, if the cell is occupied, it checks all neighboring cells for any that belong to a cluster. The current cell and all of its cluster neighbors are merged into the lowest-id cluster that it finds. If the cell is occupied but has no neighbors that belong to a cluster, a new cluster is started. */ fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 { let size = 128; let mut clusters = Clusters::new(size); let len = clusters.grid.len(); // print_small_grid(10, &occupied); for i in 0..len { let jlen = clusters.grid[i].len(); for j in 0..jlen { if occupied[i][j] { let mut adj_clusters = vec![]; for o in [-1, 1].iter() { let it = (i as i64) + *o; let jt = (j as i64) + *o; if it >= 0 && it < len as i64 { let loc = Loc(it as usize, j); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } if jt >= 0 && jt < jlen as i64 { let loc = Loc(i, jt as usize); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } } if adj_clusters.len() > 0 { let min = adj_clusters.iter().min().unwrap(); for id in adj_clusters.iter() { clusters.merge_clusters(*min, &id); } clusters.add_to_cluster(Loc(i, j), *min); } else { clusters.new_cluster(Loc(i, j)); } } else { clusters.set_empty(Loc(i, j)) } } } // clusters.print_small(10); clusters.index.keys().len() as u32 } fn part_two() { let grid = make_grid("jxqlasbh"); let count = count_clusters(&grid); println!("14-2: {} clusters in {}", count, "jxqlasbh"); } fn main() { part_one(); part_two(); } #[cfg(test)] mod tests { use count_hash_seed; use hex_to_bits; use count_clusters; use make_grid; #[test] fn test_count_clusters() { assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242); } #[test] fn test_count_hash_seed() { assert_eq!(count_hash_seed("flqrgnkx"), 8108); } #[test] fn test_hex_to_bits() { for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() { let actual = hex_to_bits(letter); let actual_binary_string = actual .iter() .map(|b| if *b { '1' } else { '0' }).collect::<String>(); let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap(); assert_eq!(actual_value, expected_value as u8); } } }
print_small_grid
identifier_name
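// Quick check of the per-digit popcount that `bitcount_hash` above relies on,
// using the puzzle's own example: a hash starting "a0c2017" expands to
// 1010 0000 1100 0010 0000 0001 0111, i.e. 2+0+2+1+0+1+3 = 9 used squares
// across those first seven digits.
fn main() {
    let total: u32 = "a0c2017"
        .chars()
        .map(|c| c.to_digit(16).unwrap().count_ones())
        .sum();
    assert_eq!(total, 9);
}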
14.rs
// --- Day 14: Disk Defragmentation --- // Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible. // The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes. // A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1). // The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127. // The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits correspond to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary. // Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones: // ##.#.#..--> //.#.#.#.# //....#.#. // #.#.##.# //.##.#... // ##..#..# //.#...#.. // ##.#.##.--> // | | // V V // In this example, 8108 squares are used across the entire 128x128 grid. // Given your actual key string, how many squares are used? // Your puzzle input is jxqlasbh. #![feature(conservative_impl_trait)] #![feature(entry_and_modify)] // #![feature(nll)] extern crate advent2017; use advent2017::knot::{Knot}; use std::io::Cursor; use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; /// Given any Binary, return the bits of the binary /// representation of the type (msb first) as a `Vec<bool>`, true wherever the bit is set. 
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> { let s = format!("{:04b}", num); s.chars() .map(|c| c == '1') .collect::<Vec<bool>>() } /// Given a string representing a hexadecimal number, /// where each character of the string is a hexadecimal digit representing 4 binary bits, /// return a bitfield of the unsigned binary representation of that number, /// msb at index 0 fn hex_to_bits<'a>(hex: &'a str) -> Vec<bool> { (0..hex.len()) .map(|i| &hex[i..i+1]) .map(|slice| u8::from_str_radix(slice, 16).unwrap()) .flat_map(|num| num_to_bits(num)) .collect::<Vec<bool>>() } fn hashes(seed: &str) -> Vec<String> { (0..128) .map(|i| format!("{}-{}", seed, i)) .map(|plaintext| { let mut knot = Knot::new(); knot.hash(Cursor::new(plaintext)) }) .collect() } fn bitcount_hash(hash: &str) -> u32 { let mut bitsum = 0; for j in 0..32 { let slice = &hash[j..j+1]; let num = u32::from_str_radix(slice, 16).unwrap(); bitsum += num.count_ones(); } bitsum } fn count_hash_seed(s: &str) -> u32 { let mut bitsum = 0; for hash in hashes(&s) { bitsum += bitcount_hash(&hash); } bitsum } fn part_one() { let input = "jxqlasbh"; println!("{}: {}", input, count_hash_seed(&input)); } // --- Part Two --- // Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region. // In the example above, the following nine regions are visible, each marked with a distinct digit: // 11.2.3..--> //.1.2.3.4 //....5.6. // 7.8.55.9 //.88.5... // 88..5..8 //.8...8.. // 88.8.88.--> // | | // V V // Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present. // How many regions are present given your key string? fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> { let mut grid = Vec::with_capacity(128); for hash in hashes(hash_seed) { grid.push(hex_to_bits(&hash)); } grid } /// make a single scan through the grid // At each position, if the cell is filled, look in each cardinal direction for adjacent clusters // If at least one is found, merge this element and all clusters that it is touching into the // cluster with the lowest id that was found. // If none are found, then start a new cluster on this cell. 
type ClusterId = i32; #[derive(Debug)] struct Loc(usize, usize); type CGrid = Vec<Vec<CellState>>; type CMap = HashMap<ClusterId, Vec<Loc>>; #[derive(PartialEq, Eq, Debug, Clone)] enum CellState { Unclaimed, Empty, Id(ClusterId) } struct Clusters { grid: CGrid, index: CMap, next_id: ClusterId } impl Clusters { fn new(size: u32) -> Self { let mut grid : Vec<Vec<CellState>> = Vec::new(); for _ in 0..size { let mut row = vec![]; for _ in 0..size { row.push(CellState::Unclaimed); } grid.push(row); } Clusters { grid, index: HashMap::new(), next_id: 0 } } fn print_small(&self, window_size: usize) { for row in self.grid.iter().take(window_size) { println!("\n{}", row.iter().take(window_size).map(|c| match c { &CellState::Id(id) => format!("{:4}", id), &CellState::Empty => " .".to_string(), &CellState::Unclaimed => " ?".to_string() }) .collect::<Vec<String>>() .join(" ")); } } fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) { self.grid[i][j] = CellState::Id(id); } fn new_cluster(&mut self, loc: Loc) { let id = self.next_id; self.next_id += 1; self.add_to_cluster(loc, id); } fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) { self.add_grid(&loc, id); match self.index.entry(id) { Occupied(mut e) => { e.get_mut().push(loc); } Vacant(e) => { e.insert(vec![loc]); } } } fn set_empty(&mut self, Loc(i, j): Loc) { self.grid[i][j] = CellState::Empty; } fn state(&self, &Loc(i, j): &Loc) -> CellState { self.grid[i][j].clone() } fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) { if dest == *other { return; } if let Some(mut locs) = self.index.remove(&other) { for loc in locs.iter() { self.add_grid(&loc, dest); } self.index.entry(dest) .and_modify(|f| f.append(&mut locs)) .or_insert_with(|| locs ); } } } fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) { for row in occupied.iter().take(size as usize) { println!("\n{}", row.iter().take(size as usize).map(|c| match c { &true => "#", &false => ".", }) .collect::<Vec<&str>>() .join(" ")); } } /* This algorithm makes one pass through the grid, left to right, top to bottom. At each cell, if the cell is occupied, it checks all neighboring cells for any that belong to a cluster. The current cell and all of its cluster neighbors are merged into the lowest-id cluster that it finds. If the cell is occupied but has no neighbors that belong to a cluster, a new cluster is started. */ fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 { let size = 128; let mut clusters = Clusters::new(size); let len = clusters.grid.len(); // print_small_grid(10, &occupied); for i in 0..len { let jlen = clusters.grid[i].len(); for j in 0..jlen { if occupied[i][j] { let mut adj_clusters = vec![]; for o in [-1, 1].iter() { let it = (i as i64) + *o; let jt = (j as i64) + *o; if it >= 0 && it < len as i64 { let loc = Loc(it as usize, j); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } if jt >= 0 && jt < jlen as i64 { let loc = Loc(i, jt as usize); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } } if adj_clusters.len() > 0 { let min = adj_clusters.iter().min().unwrap(); for id in adj_clusters.iter() { clusters.merge_clusters(*min, &id); } clusters.add_to_cluster(Loc(i, j), *min); } else
} else { clusters.set_empty(Loc(i, j)) } } } // clusters.print_small(10); clusters.index.keys().len() as u32 } fn part_two() { let grid = make_grid("jxqlasbh"); let count = count_clusters(&grid); println!("14-2: {} clusters in {}", count, "jxqlasbh"); } fn main() { part_one(); part_two(); } #[cfg(test)] mod tests { use count_hash_seed; use hex_to_bits; use count_clusters; use make_grid; #[test] fn test_count_clusters() { assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242); } #[test] fn test_count_hash_seed() { assert_eq!(count_hash_seed("flqrgnkx"), 8108); } #[test] fn test_hex_to_bits() { for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() { let actual = hex_to_bits(letter); let actual_binary_string = actual .iter() .map(|b| if *b { '1' } else { '0' }).collect::<String>(); let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap(); assert_eq!(actual_value, expected_value as u8); } } }
{ clusters.new_cluster(Loc(i, j)); }
conditional_block
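The conditional_block row above completes the merge-on-scan region counter from 14.rs. As a cross-check of the same idea, here is a minimal, self-contained sketch that counts 4-neighbour regions on a bool grid with a plain flood fill instead of the single-pass cluster merge; count_regions, demo, and the expected count of 3 are invented for illustration and are not part of 14.rs:

// Flood-fill region counting over a bool grid: every unvisited used cell
// starts a new region, and a stack-based fill claims the whole region.
fn count_regions(grid: &[Vec<bool>]) -> u32 {
    let rows = grid.len();
    let mut seen = vec![vec![false; grid.first().map_or(0, Vec::len)]; rows];
    let mut regions = 0;
    for i in 0..rows {
        for j in 0..grid[i].len() {
            if !grid[i][j] || seen[i][j] { continue; }
            regions += 1;
            let mut stack = vec![(i, j)];
            while let Some((r, c)) = stack.pop() {
                if seen[r][c] { continue; }
                seen[r][c] = true;
                // push the four cardinal neighbours that are in bounds and used
                if r > 0 && grid[r - 1][c] { stack.push((r - 1, c)); }
                if r + 1 < rows && grid[r + 1][c] { stack.push((r + 1, c)); }
                if c > 0 && grid[r][c - 1] { stack.push((r, c - 1)); }
                if c + 1 < grid[r].len() && grid[r][c + 1] { stack.push((r, c + 1)); }
            }
        }
    }
    regions
}

fn main() {
    let demo = vec![
        vec![true, true, false, true],
        vec![false, false, false, true],
        vec![true, false, true, true],
    ];
    // regions: {(0,0),(0,1)}, {(0,3),(1,3),(2,2),(2,3)}, {(2,0)}
    assert_eq!(count_regions(&demo), 3);
    println!("{} regions", count_regions(&demo));
}

Either strategy yields the same count; the single-pass merge in 14.rs avoids revisiting cells at the cost of keeping a cluster index it must merge on contact.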
14.rs
// --- Day 14: Disk Defragmentation --- // Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible. // The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes. // A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1). // The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127. // The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits corresponds to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary. // Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones: // ##.#.#..--> // .#.#.#.# // ....#.#. // #.#.##.# // .##.#... // ##..#..# // .#...#.. // ##.#.##.--> // | | // V V // In this example, 8108 squares are used across the entire 128x128 grid. // Given your actual key string, how many squares are used? // Your puzzle input is jxqlasbh. #![feature(conservative_impl_trait)] #![feature(entry_and_modify)] // #![feature(nll)] extern crate advent2017; use advent2017::knot::{Knot}; use std::io::Cursor; use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; /// Given any Binary, return a vector of the bits of the value's 4-bit binary /// representation (msb first), true wherever the bit is set. fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> { let s = format!("{:04b}", num); s.chars() .map(|c| c == '1') .collect::<Vec<bool>>() } /// Given a string representing a hexadecimal number, /// where each character of the string is a hexadecimal digit representing 4 binary bits, /// return a bitfield of the unsigned binary representation of that number, /// msb at index 0 fn hex_to_bits(hex: &str) -> Vec<bool> { (0..hex.len()) .map(|i| &hex[i..i+1]) .map(|slice| u8::from_str_radix(slice, 16).unwrap()) .flat_map(num_to_bits) .collect::<Vec<bool>>() } fn hashes(seed: &str) -> Vec<String>
fn bitcount_hash(hash: &str) -> u32 { let mut bitsum = 0; for j in 0..32 { let slice = &hash[j..j+1]; let num = u32::from_str_radix(slice, 16).unwrap(); bitsum += num.count_ones(); } bitsum } fn count_hash_seed(s: &str) -> u32 { let mut bitsum = 0; for hash in hashes(&s) { bitsum += bitcount_hash(&hash); } bitsum } fn part_one() { let input = "jxqlasbh"; println!("{}: {}", input, count_hash_seed(&input)); } // --- Part Two --- // Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region. // In the example above, the following nine regions are visible, each marked with a distinct digit: // 11.2.3..--> // .1.2.3.4 // ....5.6. // 7.8.55.9 // .88.5... // 88..5..8 // .8...8.. // 88.8.88.--> // | | // V V // Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present. // How many regions are present given your key string? fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> { let mut grid = Vec::with_capacity(128); for hash in hashes(hash_seed) { grid.push(hex_to_bits(&hash)); } grid } /// make a single scan through the grid // At each position, if the cell is filled, look in each cardinal direction for adjacent clusters // If at least one is found, merge this element and all clusters that it is touching into the // cluster with the lowest id that was found. // If none are found, then start a new cluster on this cell. type ClusterId = i32; #[derive(Debug)] struct Loc(usize, usize); type CGrid = Vec<Vec<CellState>>; type CMap = HashMap<ClusterId, Vec<Loc>>; #[derive(PartialEq, Eq, Debug, Clone)] enum CellState { Unclaimed, Empty, Id(ClusterId) } struct Clusters { grid: CGrid, index: CMap, next_id: ClusterId } impl Clusters { fn new(size: u32) -> Self { let mut grid: Vec<Vec<CellState>> = Vec::new(); for _ in 0..size { let mut row = vec![]; for _ in 0..size { row.push(CellState::Unclaimed); } grid.push(row); } Clusters { grid, index: HashMap::new(), next_id: 0 } } fn print_small(&self, window_size: usize) { for row in self.grid.iter().take(window_size) { println!("\n{}", row.iter().take(window_size).map(|c| match c { &CellState::Id(id) => format!("{:4}", id), &CellState::Empty => " .".to_string(), &CellState::Unclaimed => " ?".to_string() }) .collect::<Vec<String>>() .join(" ")); } } fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) { self.grid[i][j] = CellState::Id(id); } fn new_cluster(&mut self, loc: Loc) { let id = self.next_id; self.next_id += 1; self.add_to_cluster(loc, id); } fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) { self.add_grid(&loc, id); match self.index.entry(id) { Occupied(mut e) => { e.get_mut().push(loc); } Vacant(e) => { e.insert(vec![loc]); } } } fn set_empty(&mut self, Loc(i, j): Loc) { self.grid[i][j] = CellState::Empty; } fn state(&self, &Loc(i, j): &Loc) -> CellState { self.grid[i][j].clone() } fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) { if dest == *other { return; } if let Some(mut locs) = self.index.remove(other) { for loc in locs.iter() { self.add_grid(loc, dest); } self.index.entry(dest) .and_modify(|f| f.append(&mut locs)) .or_insert_with(|| locs ); } } } fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) { for row in occupied.iter().take(size as usize) { println!("\n{}", row.iter().take(size as usize).map(|c| match c { &true => "#", &false => ".", }) .collect::<Vec<&str>>() .join(" ")); } } /* This algorithm makes one pass through the grid, left to right, top to bottom. At each cell, if the cell is occupied, it checks all neighboring cells for any that belong to a cluster. Then the current cell and all of its cluster neighbors are merged into the lowest-id cluster that it finds. If the cell is occupied but has no neighbors that belong to clusters, a new cluster is started. */ fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 { let size = 128; let mut clusters = Clusters::new(size); let len = clusters.grid.len(); // print_small_grid(10, &occupied); for i in 0..len { let jlen = clusters.grid[i].len(); for j in 0..jlen { if occupied[i][j] { let mut adj_clusters = vec![]; for o in [-1, 1].iter() { let it = (i as i64) + *o; let jt = (j as i64) + *o; if it >= 0 && it < len as i64 { let loc = Loc(it as usize, j); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } if jt >= 0 && jt < jlen as i64 { let loc = Loc(i, jt as usize); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } } if !adj_clusters.is_empty() { let min = *adj_clusters.iter().min().unwrap(); for id in adj_clusters.iter() { clusters.merge_clusters(min, id); } clusters.add_to_cluster(Loc(i, j), min); } else { clusters.new_cluster(Loc(i, j)); } } else { clusters.set_empty(Loc(i, j)) } } } // clusters.print_small(10); clusters.index.keys().len() as u32 } fn part_two() { let grid = make_grid("jxqlasbh"); let count = count_clusters(&grid); println!("14-2: {} clusters in {}", count, "jxqlasbh"); } fn main() { part_one(); part_two(); } #[cfg(test)] mod tests { use count_hash_seed; use hex_to_bits; use count_clusters; use make_grid; #[test] fn test_count_clusters() { assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242); } #[test] fn test_count_hash_seed() { assert_eq!(count_hash_seed("flqrgnkx"), 8108); } #[test] fn test_hex_to_bits() { for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() { let actual = hex_to_bits(letter); let actual_binary_string = actual .iter() .map(|b| if *b { '1' } else { '0' }).collect::<String>(); let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap(); assert_eq!(actual_value, expected_value as u8); } } }
{ (0..128) .map(|i| format!("{}-{}", seed, i)) .map(|plaintext| { let mut knot = Knot::new(); knot.hash(Cursor::new(plaintext)) }) .collect() }
identifier_body
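This identifier_body row fills in the body of hashes(). A detail worth noting about the part-one tally it feeds: the expanded bitfield is never actually needed for counting, since summing count_ones over the hex digits equals counting the trues that hex_to_bits would produce. A small standalone sketch of that shortcut; used_squares is an illustrative name, not a function in 14.rs, and "a0c2017" is just the example prefix from the puzzle text:

// Each hex digit contributes 4 bits, so the used-square count of a hash
// is the sum of per-digit popcounts.
fn used_squares(hash: &str) -> u32 {
    hash.chars()
        .map(|c| c.to_digit(16).expect("hex digit").count_ones())
        .sum()
}

fn main() {
    assert_eq!(used_squares("0"), 0);
    assert_eq!(used_squares("f"), 4);
    // a=1010, 0=0000, c=1100, 2=0010, 0=0000, 1=0001, 7=0111
    assert_eq!(used_squares("a0c2017"), 2 + 0 + 2 + 1 + 0 + 1 + 3);
    println!("ok");
}

The file keeps the full bitfield anyway because part two needs the per-cell grid, not just the count.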
14.rs
// --- Day 14: Disk Defragmentation --- // Suddenly, a scheduled job activates the system's disk defragmenter. Were the situation different, you might sit and watch it for a while, but today, you just don't have that kind of time. It's soaking up valuable system resources that are needed elsewhere, and so the only option is to help it finish its task as soon as possible. // The disk in question consists of a 128x128 grid; each square of the grid is either free or used. On this disk, the state of the grid is tracked by the bits in a sequence of knot hashes. // A total of 128 knot hashes are calculated, each corresponding to a single row in the grid; each hash contains 128 bits which correspond to individual grid squares. Each bit of a hash indicates whether that square is free (0) or used (1). // The hash inputs are a key string (your puzzle input), a dash, and a number from 0 to 127 corresponding to the row. For example, if your key string were flqrgnkx, then the first row would be given by the bits of the knot hash of flqrgnkx-0, the second row from the bits of the knot hash of flqrgnkx-1, and so on until the last row, flqrgnkx-127. // The output of a knot hash is traditionally represented by 32 hexadecimal digits; each of these digits corresponds to 4 bits, for a total of 4 * 32 = 128 bits. To convert to bits, turn each hexadecimal digit to its equivalent binary value, high-bit first: 0 becomes 0000, 1 becomes 0001, e becomes 1110, f becomes 1111, and so on; a hash that begins with a0c2017... in hexadecimal would begin with 10100000110000100000000101110000... in binary. // Continuing this process, the first 8 rows and columns for key flqrgnkx appear as follows, using # to denote used squares, and . to denote free ones: // ##.#.#..--> // .#.#.#.# // ....#.#. // #.#.##.# // .##.#... // ##..#..# // .#...#.. // ##.#.##.--> // | | // V V // In this example, 8108 squares are used across the entire 128x128 grid. // Given your actual key string, how many squares are used? // Your puzzle input is jxqlasbh. #![feature(conservative_impl_trait)] #![feature(entry_and_modify)] // #![feature(nll)] extern crate advent2017; use advent2017::knot::{Knot}; use std::io::Cursor; use std::collections::HashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; /// Given any Binary, return a vector of the bits of the value's 4-bit binary /// representation (msb first), true wherever the bit is set.
fn num_to_bits<T: std::fmt::Binary>(num: T) -> Vec<bool> { let s = format!("{:04b}", num); s.chars() .map(|c| c == '1') .collect::<Vec<bool>>() } /// Given a string representing a hexadecimal number, /// where each character of the string is a hexadecimal digit representing 4 binary bits, /// return a bitfield of the unsigned binary representation of that number, /// msb at index 0 fn hex_to_bits(hex: &str) -> Vec<bool> { (0..hex.len()) .map(|i| &hex[i..i+1]) .map(|slice| u8::from_str_radix(slice, 16).unwrap()) .flat_map(num_to_bits) .collect::<Vec<bool>>() } fn hashes(seed: &str) -> Vec<String> { (0..128) .map(|i| format!("{}-{}", seed, i)) .map(|plaintext| { let mut knot = Knot::new(); knot.hash(Cursor::new(plaintext)) }) .collect() } fn bitcount_hash(hash: &str) -> u32 { let mut bitsum = 0; for j in 0..32 { let slice = &hash[j..j+1]; let num = u32::from_str_radix(slice, 16).unwrap(); bitsum += num.count_ones(); } bitsum } fn count_hash_seed(s: &str) -> u32 { let mut bitsum = 0; for hash in hashes(&s) { bitsum += bitcount_hash(&hash); } bitsum } fn part_one() { let input = "jxqlasbh"; println!("{}: {}", input, count_hash_seed(&input)); } // --- Part Two --- // Now, all the defragmenter needs to know is the number of regions. A region is a group of used squares that are all adjacent, not including diagonals. Every used square is in exactly one region: lone used squares form their own isolated regions, while several adjacent squares all count as a single region. // In the example above, the following nine regions are visible, each marked with a distinct digit: // 11.2.3..--> // .1.2.3.4 // ....5.6. // 7.8.55.9 // .88.5... // 88..5..8 // .8...8.. // 88.8.88.--> // | | // V V // Of particular interest is the region marked 8; while it does not appear contiguous in this small view, all of the squares marked 8 are connected when considering the whole 128x128 grid. In total, in this example, 1242 regions are present. // How many regions are present given your key string? fn make_grid(hash_seed: &str) -> Vec<Vec<bool>> { let mut grid = Vec::with_capacity(128); for hash in hashes(hash_seed) { grid.push(hex_to_bits(&hash)); } grid } /// make a single scan through the grid // At each position, if the cell is filled, look in each cardinal direction for adjacent clusters // If at least one is found, merge this element and all clusters that it is touching into the // cluster with the lowest id that was found. // If none are found, then start a new cluster on this cell. type ClusterId = i32; #[derive(Debug)] struct Loc(usize, usize); type CGrid = Vec<Vec<CellState>>; type CMap = HashMap<ClusterId, Vec<Loc>>; #[derive(PartialEq, Eq, Debug, Clone)] enum CellState { Unclaimed, Empty, Id(ClusterId) } struct Clusters { grid: CGrid, index: CMap, next_id: ClusterId } impl Clusters { fn new(size: u32) -> Self { let mut grid: Vec<Vec<CellState>> = Vec::new(); for _ in 0..size { let mut row = vec![]; for _ in 0..size { row.push(CellState::Unclaimed); } grid.push(row); } Clusters { grid, index: HashMap::new(), next_id: 0 } } fn print_small(&self, window_size: usize) { for row in self.grid.iter().take(window_size) { println!("\n{}", row.iter().take(window_size).map(|c| match c { &CellState::Id(id) => format!("{:4}", id), &CellState::Empty => " .".to_string(), &CellState::Unclaimed => " ?".to_string() }) .collect::<Vec<String>>() .join(" ")); } } fn add_grid(&mut self, &Loc(i, j): &Loc, id: ClusterId) { self.grid[i][j] = CellState::Id(id);
} fn new_cluster(&mut self, loc: Loc) { let id = self.next_id; self.next_id += 1; self.add_to_cluster(loc, id); } fn add_to_cluster(&mut self, loc: Loc, id: ClusterId) { self.add_grid(&loc, id); match self.index.entry(id) { Occupied(mut e) => { e.get_mut().push(loc); } Vacant(e) => { e.insert(vec![loc]); } } } fn set_empty(&mut self, Loc(i, j): Loc) { self.grid[i][j] = CellState::Empty; } fn state(&self, &Loc(i, j): &Loc) -> CellState { self.grid[i][j].clone() } fn merge_clusters(&mut self, dest: ClusterId, other: &ClusterId) { if dest == *other { return; } if let Some(mut locs) = self.index.remove(other) { for loc in locs.iter() { self.add_grid(loc, dest); } self.index.entry(dest) .and_modify(|f| f.append(&mut locs)) .or_insert_with(|| locs ); } } } fn print_small_grid(size: u32, occupied: &Vec<Vec<bool>>) { for row in occupied.iter().take(size as usize) { println!("\n{}", row.iter().take(size as usize).map(|c| match c { &true => "#", &false => ".", }) .collect::<Vec<&str>>() .join(" ")); } } /* This algorithm makes one pass through the grid, left to right, top to bottom. At each cell, if the cell is occupied, it checks all neighboring cells for any that belong to a cluster. Then the current cell and all of its cluster neighbors are merged into the lowest-id cluster that it finds. If the cell is occupied but has no neighbors that belong to clusters, a new cluster is started. */ fn count_clusters(occupied: &Vec<Vec<bool>>) -> u32 { let size = 128; let mut clusters = Clusters::new(size); let len = clusters.grid.len(); // print_small_grid(10, &occupied); for i in 0..len { let jlen = clusters.grid[i].len(); for j in 0..jlen { if occupied[i][j] { let mut adj_clusters = vec![]; for o in [-1, 1].iter() { let it = (i as i64) + *o; let jt = (j as i64) + *o; if it >= 0 && it < len as i64 { let loc = Loc(it as usize, j); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } if jt >= 0 && jt < jlen as i64 { let loc = Loc(i, jt as usize); if let CellState::Id(id) = clusters.state(&loc) { adj_clusters.push(id); } } } if !adj_clusters.is_empty() { let min = *adj_clusters.iter().min().unwrap(); for id in adj_clusters.iter() { clusters.merge_clusters(min, id); } clusters.add_to_cluster(Loc(i, j), min); } else { clusters.new_cluster(Loc(i, j)); } } else { clusters.set_empty(Loc(i, j)) } } } // clusters.print_small(10); clusters.index.keys().len() as u32 } fn part_two() { let grid = make_grid("jxqlasbh"); let count = count_clusters(&grid); println!("14-2: {} clusters in {}", count, "jxqlasbh"); } fn main() { part_one(); part_two(); } #[cfg(test)] mod tests { use count_hash_seed; use hex_to_bits; use count_clusters; use make_grid; #[test] fn test_count_clusters() { assert_eq!(count_clusters(&make_grid("flqrgnkx")), 1242); } #[test] fn test_count_hash_seed() { assert_eq!(count_hash_seed("flqrgnkx"), 8108); } #[test] fn test_hex_to_bits() { for (expected_value, letter) in ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"].iter().enumerate() { let actual = hex_to_bits(letter); let actual_binary_string = actual .iter() .map(|b| if *b { '1' } else { '0' }).collect::<String>(); let actual_value = u8::from_str_radix(&actual_binary_string, 2).unwrap(); assert_eq!(actual_value, expected_value as u8); } } }
random_line_split
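This random_line_split row again carries the whole of 14.rs, whose first step is fanning one seed out into 128 per-row hash inputs of the form "seed-N". A standalone sketch of that fan-out follows; fake_hash is a made-up deterministic placeholder so the sketch runs on its own, since the real code uses advent2017::knot::Knot, which is not reproduced here:

// Build the 128 row keys "seed-0" .. "seed-127" that feed the hasher.
fn row_keys(seed: &str) -> Vec<String> {
    (0..128).map(|i| format!("{}-{}", seed, i)).collect()
}

// Stand-in hasher: NOT a knot hash, just deterministic and hex-shaped
// (32 hex digits, like the real output).
fn fake_hash(key: &str) -> String {
    format!(
        "{:032x}",
        key.bytes()
            .fold(0u128, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u128))
    )
}

fn main() {
    let keys = row_keys("flqrgnkx");
    assert_eq!(keys.len(), 128);
    assert_eq!(keys[0], "flqrgnkx-0");
    assert_eq!(keys[127], "flqrgnkx-127");
    assert_eq!(fake_hash(&keys[0]).len(), 32); // 32 hex digits, as a knot hash would be
}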
types.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use serde::{ de::{Deserializer, Error}, Deserialize, Serialize, }; use serde_repr::{Deserialize_repr, Serialize_repr}; use uuid::Uuid; use crate::payment_command::Actor; /// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request, /// used for tracking requests and debugging. Responses must have the same string in the /// X-REQUEST-ID header value as the requests they correspond to. pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID"; /// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP /// request sender must use the compliance key of the VASP account linked with this address to sign /// the request JWS body, and the request receiver uses this address to find the request sender's /// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The /// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP /// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS. pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS"; #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] enum ObjectType { CommandRequestObject, CommandResponseObject, PaymentCommand, } impl ObjectType { fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandRequestObject) } fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandResponseObject) } fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::PaymentCommand) } fn deserialize_variant<'de, D: Deserializer<'de>>( d: D, variant: Self, ) -> Result<Self, D::Error> { let object_type = Self::deserialize(d)?; if object_type == variant { Ok(object_type) } else { Err(D::Error::custom(format_args!("expected {:?}", variant))) } } } #[derive(Deserialize, Serialize)] pub struct CommandRequestObject { #[serde(deserialize_with = "ObjectType::deserialize_request")] #[serde(rename = "_ObjectType")] object_type: ObjectType, #[serde(flatten)] command: Command, cid: Uuid, } impl CommandRequestObject { pub fn new(command: Command, cid: Uuid) -> Self { Self { object_type: ObjectType::CommandRequestObject, command, cid, } } pub fn command(&self) -> &Command { &self.command } pub fn cid(&self) -> Uuid { self.cid } pub fn into_parts(self) -> (Command, Uuid) { (self.command, self.cid) } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum CommandStatus { Success, Failure, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct CommandResponseObject { #[serde(deserialize_with = "ObjectType::deserialize_response")] #[serde(rename = "_ObjectType")] object_type: ObjectType, status: CommandStatus, #[serde(skip_serializing_if = "Option::is_none")] error: Option<OffChainError>, #[serde(skip_serializing_if = "Option::is_none")] cid: Option<Uuid>, } impl CommandResponseObject { pub fn new(status: CommandStatus) -> Self { Self { object_type: ObjectType::CommandResponseObject, status, error: None, cid: None, } } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum OffChainErrorType { #[serde(rename = "command_error")] Command, #[serde(rename = "protocol_error")] Protocol, } // https://dip.diem.com/dip-1/#list-of-error-codes #[derive(Debug, 
PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ErrorCode { // // HTTP Header Validation Error Codes // /// One of the following potential errors: /// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the /// command object. All command objects should have a field that is the request sender’s /// address. /// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value. /// * Could not find the compliance key of the onchain account found by the /// `X-REQUEST-SENDER-ADDRESS` header value. /// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a /// valid ED25519 public key. /// * `X-REQUEST-ID` is not a valid UUID format. InvalidHttpHeader, /// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`. MissingHttpHeader, // // JWS Validation Error Codes // /// Invalid JWS format (compact) or protected header InvalidJws, /// JWS signature verification failed InvalidJwsSignature, // // Request Object Validation Error Codes // /// Request content is not valid JSON InvalidJson, /// Object is not valid, type does not match. /// The Command request/response object json is not an object, or the command object type does /// not match command_type. InvalidObject, /// Either: /// * Missing required field /// * An optional field is required to be set for a specific state, e.g. PaymentObject requires /// the sender's kyc_data (which is an optional field for PaymentActorObject) when the sender initiates /// the PaymentObject. MissingField, /// A field is unknown for an object. UnknownField, /// Invalid/unsupported command_type. UnknownCommandType, /// * Invalid / unknown enum field values. /// * UUID field value does not match UUID format. /// * Payment actor address is not a valid DIP-5 account identifier. /// * Currency field value is not a valid Diem currency code for the connected network. InvalidFieldValue, /// The HTTP request sender is not the right actor to send the payment object. For example, if /// the actor receiver sends a new command with a payment object change that should be done by /// the actor sender. InvalidCommandProducer, /// Could not find a command by reference_id for a non-initial state command object; for example, /// the actor receiver received a payment command object whose actor sender status is /// `ready_for_settlement`, but the receiver could not find any command object by the reference id. InvalidInitialOrPriorNotFound, /// PaymentActionObject#amount is under the travel rule threshold, so no KYC is needed for the /// transaction NoKycNeeded, /// Either: /// * Field recipient_signature value is not hex-encoded bytes. /// * Field recipient_signature value is an invalid signature. InvalidRecipientSignature, /// * The DIP-5 account identifier address in the command object is not the HTTP request sender’s /// address or receiver’s address. For a payment object it is sender.address or /// receiver.address. /// * Could not find an on-chain account by a DIP-5 account identifier address in the command object /// address. UnknownAddress, /// * Command object is in conflict with another different command object by cid; likely a cid /// is reused for a different command object. /// * Failed to acquire a lock for the command object by the reference_id. Conflict, /// Field payment.action.currency value is a valid Diem currency code, but it is not supported /// or acceptable by the receiver VASP. UnsupportedCurrency, /// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is /// aborted instead of ready_for_settlement. InvalidOriginalPaymentReferenceId, /// Overwrite of a write-once/immutable field value: /// * Overwrite a field that can only be written once. /// * Overwrite an immutable field (a field that can only be set in the initial command object), e.g. /// `original_payment_reference_id`. /// * Overwrite the opponent payment actor's fields. InvalidOverwrite, /// We only allow one actor action at a time, and the next states for a given command object /// state are limited to specific states. This error indicates the new payment object state is /// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B, /// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond /// to this error code if VASP B sends payment object state SSOFT. InvalidTransition, #[serde(other)] /// Unknown Error Code Unknown, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct OffChainError { #[serde(rename = "type")] error_type: OffChainErrorType, #[serde(skip_serializing_if = "Option::is_none")] field: Option<String>, code: ErrorCode, #[serde(skip_serializing_if = "Option::is_none")] message: Option<String>, } #[derive(Deserialize, Serialize)] #[serde(tag = "command_type", content = "command")] pub enum Command { PaymentCommand(PaymentCommandObject), FundPullPreApprovalCommand, } #[derive(Deserialize, Serialize)] pub struct PaymentCommandObject { #[serde(deserialize_with = "ObjectType::deserialize_payment")] #[serde(rename = "_ObjectType")] object_type: ObjectType, payment: PaymentObject, } impl PaymentCommandObject { pub fn new(paym
: PaymentObject) -> Self { Self { object_type: ObjectType::PaymentCommand, payment, } } pub fn payment(&self) -> &PaymentObject { &self.payment } pub fn into_payment(self) -> PaymentObject { self.payment } } /// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It /// also includes the status of the actor, indicates missing information or willingness to settle /// or abort the payment, and the Know-Your-Customer information of the customer involved in the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActorObject { /// Address of the sender/receiver account. Addresses may be single use or valid for a limited /// time, and therefore VASPs should not rely on them remaining stable across time or different /// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the /// address of the VASP as well as the specific user's subaddress. They should be no longer /// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account /// identifier" section in DIP-5 for format. pub address: Box<str>, /// The KYC data for this account. This field is optional but immutable once it is set. pub kyc_data: Option<KycDataObject>, /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory for this respective actor (either sender or receiver side) and /// mutable. Note that in the first request (which is initiated by the sender), the receiver /// status should be set to `None`. pub status: StatusObject, /// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP /// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New /// `metadata` elements may be appended to the `metadata` list via subsequent commands on an /// object. #[serde(skip_serializing_if = "Vec::is_empty", default)] pub metadata: Vec<String>, /// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC /// data which can be used to clear the soft-match. It is suggested that this data be JSON, /// XML, or another human-readable form. pub additional_kyc_data: Option<String>, } impl PaymentActorObject { pub fn status(&self) -> &StatusObject { &self.status } pub fn kyc_data(&self) -> Option<&KycDataObject> { self.kyc_data.as_ref() } pub fn additional_kyc_data(&self) -> Option<&str> { self.additional_kyc_data.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { if self.address != prior.address { return Err(WriteOnceError); } if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data { return Err(WriteOnceError); } if !self.metadata.starts_with(&prior.metadata) { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ActionType { Charge, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActionObject { /// Amount of the transfer. Base units are the same as for on-chain transactions for this /// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars, /// then “1” equals the same amount here. For any currency, the on-chain mapping must be used /// for amounts. pub amount: u64, /// One of the supported on-chain currency types - ex. XUS, etc.
// TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject pub currency: String, /// Populated in the request. This value indicates the requested action to perform, and the /// only valid value is charge. pub action: ActionType, /// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment /// Command was created. pub timestamp: u64, } /// Some fields are immutable after they are defined once. Others can be updated multiple times /// (see below). Updating immutable fields with a different value results in a Command error. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentObject { /// Information about the sender in this payment pub sender: PaymentActorObject, /// Information about the receiver in this payment pub receiver: PaymentActorObject, /// Unique reference ID of this payment on the payment initiator VASP (the VASP which /// originally created this payment Object). This value should be globally unique. This field /// is mandatory on payment creation and immutable after that. We recommend using a 128-bit /// UUID according to RFC4122 with "-"'s included. pub reference_id: Uuid, /// Used to refer to an old payment known to the other VASP. For example, used for refunds. The /// reference ID of the original payment will be placed into this field. This field is /// mandatory on refund and immutable. pub originial_payment_reference_id: Option<Uuid>, /// Signature of the recipient of this transaction encoded in hex. This is signed with the /// compliance key of the recipient VASP and is used for on-chain attestation from the /// recipient party. This may be omitted on blockchains which do not require on-chain /// attestation. pub recipient_signature: Option<String>, /// Amount of cryptocurrency + currency type (XUS, etc.) + type of action to take. This field is mandatory and immutable pub action: PaymentActionObject, /// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length /// of 255 characters. This field is optional but can only be written once. pub description: Option<String>, } impl PaymentObject { pub fn sender(&self) -> &PaymentActorObject { &self.sender } pub fn receiver(&self) -> &PaymentActorObject { &self.receiver } pub fn reference_id(&self) -> Uuid { self.reference_id } pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject { match actor { Actor::Sender => self.sender(), Actor::Receiver => self.receiver(), } } pub fn recipient_signature(&self) -> Option<&str> { self.recipient_signature.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { self.sender.validate_write_once_fields(&prior.sender)?; self.receiver.validate_write_once_fields(&prior.receiver)?; if self.reference_id != prior.reference_id { return Err(WriteOnceError); } if self.originial_payment_reference_id != prior.originial_payment_reference_id { return Err(WriteOnceError); } if self.action != prior.action { return Err(WriteOnceError); } if prior.description.is_some() && prior.description != self.description { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct StatusObject { /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory for this respective actor (either sender or receiver side) and /// mutable.
pub status: Status, /// In the case of an `abort` status, this field may be used to describe the reason for the /// abort. Represents the error code of the corresponding error. pub abort_code: Option<AbortCode>, /// Additional details about this error. To be used only when `abort_code` is populated. pub abort_message: Option<String>, } impl StatusObject { pub fn status(&self) -> Status { self.status } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum Status { /// No status is yet set from this actor. None, /// KYC data about the subaddresses is required by this actor. NeedsKycData, /// Transaction is ready for settlement according to this actor (i.e. the required /// signatures/KYC data has been provided). ReadyForSettlement, /// Indicates the actor wishes to abort this payment, instead of settling it. Abort, /// Actor's KYC data resulted in a soft-match; request additional KYC data. SoftMatch, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum AbortCode { /// The payment is rejected. It should not be used in the `original_payment_reference_id` field /// of a new payment Rejected, } /// Represents a national ID. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct NationalIdObject { /// Indicates the national ID value - for example, a social security number pub id_value: String, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Indicates the type of the ID #[serde(rename = "type")] pub id_type: Option<String>, } /// Represents a physical address #[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)] pub struct AddressObject { /// The city, district, suburb, town, or village pub city: Option<String>, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Address line 1 pub line1: Option<String>, /// Address line 2 - apartment, unit, etc. pub line2: Option<String>, /// ZIP or postal code pub postal_code: Option<String>, /// State, county, province, region. pub state: Option<String>, } /// A `KycDataObject` represents the required information for a single subaddress. Proof of /// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory /// fields are `payload_version` and `type`. All other fields are optional from the point of view of /// the protocol -- however they may need to be included for another VASP to be ready to settle the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct KycDataObject { /// Version identifier to allow modifications to the KYC data Object without needing to bump the /// version of the entire API set. Set to 1 payload_version: KycDataObjectVersion, pub kyc_data_type: KycDataObjectType, /// Legal given name of the user for which this KYC data Object applies. pub given_name: Option<String>, /// Legal surname of the user for which this KYC data Object applies. pub surname: Option<String>, /// Physical address data for this account pub address: Option<AddressObject>, /// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date /// format: https://en.wikipedia.org/wiki/ISO_8601 pub dob: Option<String>, /// Place of birth for this user.
line1 and line2 fields should not be populated for this usage /// of the address Object pub place_of_birth: Option<String>, /// National ID information for the holder of this account pub national_id: Option<NationalIdObject>, /// Name of the legal entity. Used when subaddress represents a legal entity rather than an /// individual. KYCDataObject should only include one of legal_entity_name OR /// given_name/surname pub legal_entity_name: Option<String>, } impl KycDataObject { pub fn new_entity() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Entity, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } pub fn new_individual() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Individual, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum KycDataObjectType { Individual, Entity, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)] #[repr(u8)] pub enum KycDataObjectVersion { V1 = 1, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct WriteOnceError; #[cfg(test)] mod tests { use super::{KycDataObjectType, KycDataObjectVersion}; use serde_json::json; #[test] fn kyc_data_object_type() { use KycDataObjectType::*; let variants = [(Individual, "individual"), (Entity, "entity")]; for (variant, s) in &variants { let json = json! { s }; assert_eq!(serde_json::to_value(variant).unwrap(), json); assert_eq!( serde_json::from_value::<KycDataObjectType>(json).unwrap(), *variant ); } let invalid = json! { "Organization" }; serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err(); } #[test] fn kyc_data_object_version() { let v1_json = json! { 1 }; let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap(); assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json); let invalid_version = json! { 52 }; serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err(); let invalid_type = json! { "1" }; serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err(); } }
ent
identifier_name
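This identifier_name row completes the `payment` parameter of PaymentCommandObject::new in types.rs. The file's `_ObjectType` guard, where ObjectType::deserialize_variant rejects any tag but the expected one, is the pattern worth isolating. A minimal self-contained sketch of it follows; Tag, PingRequest, and deserialize_ping are invented for the sketch, which assumes serde (with the derive feature) and serde_json as dependencies:

// Deserialize the tag first, then reject anything but the expected variant,
// mirroring ObjectType::deserialize_variant in types.rs.
use serde::de::{Deserializer, Error};
use serde::Deserialize;

#[derive(Debug, PartialEq, Deserialize)]
enum Tag { Ping }

fn deserialize_ping<'de, D: Deserializer<'de>>(d: D) -> Result<Tag, D::Error> {
    let tag = Tag::deserialize(d)?;
    if tag == Tag::Ping { Ok(tag) } else { Err(D::Error::custom("expected Ping")) }
}

#[derive(Debug, Deserialize)]
struct PingRequest {
    #[serde(rename = "_ObjectType", deserialize_with = "deserialize_ping")]
    tag: Tag,
    payload: String,
}

fn main() {
    let ok: PingRequest =
        serde_json::from_str(r#"{"_ObjectType":"Ping","payload":"hi"}"#).unwrap();
    assert_eq!(ok.payload, "hi");
    // A wrong or unknown tag fails at deserialization time, not later.
    let bad = serde_json::from_str::<PingRequest>(r#"{"_ObjectType":"Pong","payload":"hi"}"#);
    assert!(bad.is_err());
}

Putting the check inside deserialize_with means a mis-tagged request is rejected before any command logic runs, which is why types.rs wires the guard into every request, response, and payment object.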
types.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use serde::{ de::{Deserializer, Error}, Deserialize, Serialize, }; use serde_repr::{Deserialize_repr, Serialize_repr}; use uuid::Uuid; use crate::payment_command::Actor; /// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request, /// used for tracking requests and debugging. Responses must have the same string in the /// X-REQUEST-ID header value as the requests they correspond to. pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID"; /// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP /// request sender must use the compliance key of the VASP account linked with this address to sign /// the request JWS body, and the request receiver uses this address to find the request sender's /// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The /// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP /// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS. pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS"; #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] enum ObjectType { CommandRequestObject, CommandResponseObject, PaymentCommand, } impl ObjectType { fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandRequestObject) } fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandResponseObject) } fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::PaymentCommand) } fn deserialize_variant<'de, D: Deserializer<'de>>( d: D, variant: Self, ) -> Result<Self, D::Error> { let object_type = Self::deserialize(d)?; if object_type == variant { Ok(object_type) } else
} } #[derive(Deserialize, Serialize)] pub struct CommandRequestObject { #[serde(deserialize_with = "ObjectType::deserialize_request")] #[serde(rename = "_ObjectType")] object_type: ObjectType, #[serde(flatten)] command: Command, cid: Uuid, } impl CommandRequestObject { pub fn new(command: Command, cid: Uuid) -> Self { Self { object_type: ObjectType::CommandRequestObject, command, cid, } } pub fn command(&self) -> &Command { &self.command } pub fn cid(&self) -> Uuid { self.cid } pub fn into_parts(self) -> (Command, Uuid) { (self.command, self.cid) } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum CommandStatus { Success, Failure, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct CommandResponseObject { #[serde(deserialize_with = "ObjectType::deserialize_response")] #[serde(rename = "_ObjectType")] object_type: ObjectType, status: CommandStatus, #[serde(skip_serializing_if = "Option::is_none")] error: Option<OffChainError>, #[serde(skip_serializing_if = "Option::is_none")] cid: Option<Uuid>, } impl CommandResponseObject { pub fn new(status: CommandStatus) -> Self { Self { object_type: ObjectType::CommandResponseObject, status, error: None, cid: None, } } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum OffChainErrorType { #[serde(rename = "command_error")] Command, #[serde(rename = "protocol_error")] Protocol, } // https://dip.diem.com/dip-1/#list-of-error-codes #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ErrorCode { // // HTTP Header Validation Error Codes // /// One of the following potential errors: /// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the /// command object. All command objects should have a field that is the request sender’s /// address. /// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value. /// * Could not find the compliance key of the onchain account found by the /// `X-REQUEST-SENDER-ADDRESS` header value. /// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a /// valid ED25519 public key. /// * `X-REQUEST-ID` is not a valid UUID format. InvalidHttpHeader, /// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`. MissingHttpHeader, // // JWS Validation Error Codes# // /// Invalid JWS format (compact) or protected header InvalidJws, /// JWS signature verification failed InvalidJwsSignature, // // Request Object Validation Error Codes# // /// Request content is not valid Json InvalidJson, /// Object is not valid, type does not match /// The Command request/response object json is not an object, or the command object type does /// not match command_type. InvalidObject, /// Either: /// * Missing required field /// * An optional field is required to be set for a specific state, e.g. PaymentObject requires /// sender's kyc_data (which is an optional field for PaymentActorObject) when sender init /// the PaymentObject. MissingField, /// A field is unknown for an object. UnknownField, /// Invalid/unsupported command_type. UnknownCommandType, /// * Invalid / unknown enum field values. /// * UUID field value does not match UUID format. /// * Payment actor address is not a valid DIP-5 account identifier. /// * Currency field value is not a valid Diem currency code for the connected network. InvalidFieldValue, /// The HTTP request sender is not the right actor to send the payment object. 
For example, if /// the actor receiver sends a new command with payment object change that should be done by /// actor sender. InvalidCommandProducer, /// could not find command by reference_id for a non-initial state command object; for example, /// actor receiver received a payment command object that actor sender status is /// `ready_for_settlement`, but receiver could not find any command object by the reference id. InvalidInitialOrPriorNotFound, /// PaymentActionObject#amount is under travel rule threshold, no kyc needed for the /// transaction NoKycNeeded, /// Either: /// * Field recipient_signature value is not hex-encoded bytes. /// * Field recipient_signature value is an invalid signature. InvalidRecipientSignature, /// * The DIP-5 account identifier address in the command object is not HTTP request sender’s /// address or receiver’s address. For payment object it is sender.address or /// receiver.address. /// * Could not find on-chain account by an DIP-5 account identifier address in command object /// address. UnknownAddress, /// * Command object is in conflict with another different command object by cid, likely a cid /// is reused for different command object. /// * Failed to acquire lock for the command object by the reference_id. Conflict, /// Field payment.action.currency value is a valid Diem currency code, but it is not supported /// or acceptable by the receiver VASP. UnsupportedCurrency, /// * Could not find data by the original_payment_reference_id if the sender set it. /// * The status of the original payment object found by original_payment_reference_id is /// aborted instead of ready_for_settlement. InvalidOriginalPaymentReferenceId, /// Overwrite a write-once/immutable field value /// * Overwrite a field that can only be written once. /// * Overwrite an immutable field (field can only be set in initial command object), e.g. /// `original_payment_reference_id`). /// * Overwrite opponent payment actor's fields. InvalidOverwrite, /// As we only allow one actor action at a time, and the next states for a given command object /// state are limited to specific states. This error indicates the new payment object state is /// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B, /// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond /// to this error code if VASP B sends payment object state SSOFT. InvalidTransition, #[serde(other)] /// Unknown Error Code Unknown, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct OffChainError { #[serde(rename = "type")] error_type: OffChainErrorType, #[serde(skip_serializing_if = "Option::is_none")] field: Option<String>, code: ErrorCode, #[serde(skip_serializing_if = "Option::is_none")] message: Option<String>, } #[derive(Deserialize, Serialize)] #[serde(tag = "command_type", content = "command")] pub enum Command { PaymentCommand(PaymentCommandObject), FundPullPreApprovalCommand, } #[derive(Deserialize, Serialize)] pub struct PaymentCommandObject { #[serde(deserialize_with = "ObjectType::deserialize_payment")] #[serde(rename = "_ObjectType")] object_type: ObjectType, payment: PaymentObject, } impl PaymentCommandObject { pub fn new(payment: PaymentObject) -> Self { Self { object_type: ObjectType::PaymentCommand, payment, } } pub fn payment(&self) -> &PaymentObject { &self.payment } pub fn into_payment(self) -> PaymentObject { self.payment } } /// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. 
It /// also includes the status of the actor, indicates missing information or willingness to settle /// or abort the payment, and the Know-Your-Customer information of the customer involved in the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActorObject { /// Address of the sender/receiver account. Addresses may be single use or valid for a limited /// time, and therefore VASPs should not rely on them remaining stable across time or different /// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the /// address of the VASP as well as the specific user's subaddress. They should be no longer /// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account /// identifier" section in DIP-5 for format. pub address: Box<str>, /// The KYC data for this account. This field is optional but immutable once it is set. pub kyc_data: Option<KycDataObject>, /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory by this respective actor (either sender or receiver side) and /// mutable. Note that in the first request (which is initiated by the sender), the receiver /// status should be set to `None`. pub status: StatusObject, /// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP /// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New /// `metadata` elements may be appended to the `metadata` list via subsequent commands on an /// object. #[serde(skip_serializing_if = "Vec::is_empty", default)] pub metadata: Vec<String>, /// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC /// data which can be used to clear the soft-match. It is suggested that this data be JSON, /// XML, or another human-readable form. pub additional_kyc_data: Option<String>, } impl PaymentActorObject { pub fn status(&self) -> &StatusObject { &self.status } pub fn kyc_data(&self) -> Option<&KycDataObject> { self.kyc_data.as_ref() } pub fn additional_kyc_data(&self) -> Option<&str> { self.additional_kyc_data.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { if self.address!= prior.address { return Err(WriteOnceError); } if prior.kyc_data.is_some() && prior.kyc_data!= self.kyc_data { return Err(WriteOnceError); } if!self.metadata.starts_with(&prior.metadata) { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ActionType { Charge, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActionObject { /// Amount of the transfer. Base units are the same as for on-chain transactions for this /// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars, /// then “1” equals the same amount here. For any currency, the on-chain mapping must be used /// for amounts. pub amount: u64, /// One of the supported on-chain currency types - ex. XUS, etc. // TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject pub currency: String, /// Populated in the request. This value indicates the requested action to perform, and the /// only valid value is charge. 
pub action: ActionType, /// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment /// Command was created. pub timestamp: u64, } /// Some fields are immutable after they are defined once. Others can be updated multiple times /// (see below). Updating immutable fields with a different value results in a Command error. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentObject { /// Information about the sender in this payment pub sender: PaymentActorObject, /// Information about the receiver in this payment pub receiver: PaymentActorObject, /// Unique reference ID of this payment on the payment initiator VASP (the VASP which /// originally created this payment Object). This value should be globally unique. This field /// is mandatory on payment creation and immutable after that. We recommend using a 128 bits /// long UUID according to RFC4122 with "-"'s included. pub reference_id: Uuid, /// Used to refer an old payment known to the other VASP. For example, used for refunds. The /// reference ID of the original payment will be placed into this field. This field is /// mandatory on refund and immutable pub originial_payment_reference_id: Option<Uuid>, /// Signature of the recipient of this transaction encoded in hex. The is signed with the /// compliance key of the recipient VASP and is used for on-chain attestation from the /// recipient party. This may be omitted on blockchains which do not require on-chain /// attestation. pub recipient_signature: Option<String>, /// Number of cryptocurrency + currency type (XUS, etc.)1 + type of action to take. This field is mandatory and immutable pub action: PaymentActionObject, /// Description of the payment. To be displayed to the user. Unicode utf-8 encoded max length /// of 255 characters. This field is optional but can only be written once. pub description: Option<String>, } impl PaymentObject { pub fn sender(&self) -> &PaymentActorObject { &self.sender } pub fn receiver(&self) -> &PaymentActorObject { &self.receiver } pub fn reference_id(&self) -> Uuid { self.reference_id } pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject { match actor { Actor::Sender => self.sender(), Actor::Receiver => self.receiver(), } } pub fn recipient_signature(&self) -> Option<&str> { self.recipient_signature.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { self.sender.validate_write_once_fields(&prior.sender)?; self.receiver.validate_write_once_fields(&prior.receiver)?; if self.reference_id!= prior.reference_id { return Err(WriteOnceError); } if self.originial_payment_reference_id!= prior.originial_payment_reference_id { return Err(WriteOnceError); } if self.action!= prior.action { return Err(WriteOnceError); } if prior.description.is_some() && prior.description!= self.description { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct StatusObject { /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory by this respective actor (either sender or receiver side) and /// mutable. pub status: Status, /// In the case of an `abort` status, this field may be used to describe the reason for the /// abort. Represents the error code of the corresponding error. 
pub abort_code: Option<AbortCode>, /// Additional details about this error. To be used only when `abort_code` is populated. pub abort_message: Option<String>, } impl StatusObject { pub fn status(&self) -> Status { self.status } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum Status { /// No status is yet set from this actor. None, /// KYC data about the subaddresses is required by this actor. NeedsKycData, /// Transaction is ready for settlement according to this actor (i.e. the required /// signatures/KYC data have been provided). ReadyForSettlement, /// Indicates the actor wishes to abort this payment, instead of settling it. Abort, /// Actor's KYC data resulted in a soft-match, request additional KYC data. SoftMatch, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum AbortCode { /// The payment is rejected. It should not be used in the `original_payment_reference_id` field /// of a new payment. Rejected, } /// Represents a national ID. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct NationalIdObject { /// Indicates the national ID value - for example, a social security number pub id_value: String, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Indicates the type of the ID #[serde(rename = "type")] pub id_type: Option<String>, } /// Represents a physical address #[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)] pub struct AddressObject { /// The city, district, suburb, town, or village pub city: Option<String>, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Address line 1 pub line1: Option<String>, /// Address line 2 - apartment, unit, etc. pub line2: Option<String>, /// ZIP or postal code pub postal_code: Option<String>, /// State, county, province, region. pub state: Option<String>, } /// A `KycDataObject` represents the required information for a single subaddress. Proof of /// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory /// fields are `payload_version` and `type`. All other fields are optional from the point of view of /// the protocol -- however they may need to be included for another VASP to be ready to settle the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct KycDataObject { /// Version identifier to allow modifications to KYC data Object without needing to bump /// version of entire API set. Set to 1 payload_version: KycDataObjectVersion, pub kyc_data_type: KycDataObjectType, /// Legal given name of the user for which this KYC data Object applies. pub given_name: Option<String>, /// Legal surname of the user for which this KYC data Object applies. pub surname: Option<String>, /// Physical address data for this account pub address: Option<AddressObject>, /// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date /// format: https://en.wikipedia.org/wiki/ISO_8601 pub dob: Option<String>, /// Place of birth for this user. line1 and line2 fields should not be populated for this usage /// of the address Object pub place_of_birth: Option<String>, /// National ID information for the holder of this account pub national_id: Option<NationalIdObject>, /// Name of the legal entity. Used when subaddress represents a legal entity rather than an /// individual.
KYCDataObject should only include one of legal_entity_name OR /// given_name/surname pub legal_entity_name: Option<String>, } impl KycDataObject { pub fn new_entity() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Entity, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } pub fn new_individual() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Individual, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum KycDataObjectType { Individual, Entity, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)] #[repr(u8)] pub enum KycDataObjectVersion { V1 = 1, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct WriteOnceError; #[cfg(test)] mod tests { use super::{KycDataObjectType, KycDataObjectVersion}; use serde_json::json; #[test] fn kyc_data_object_type() { use KycDataObjectType::*; let variants = [(Individual, "individual"), (Entity, "entity")]; for (variant, s) in &variants { let json = json! { s }; assert_eq!(serde_json::to_value(variant).unwrap(), json); assert_eq!( serde_json::from_value::<KycDataObjectType>(json).unwrap(), *variant ); } let invalid = json! { "Organization" }; serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err(); } #[test] fn kyc_data_object_version() { let v1_json = json! { 1 }; let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap(); assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json); let invalid_version = json! { 52 }; serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err(); let invalid_type = json! { "1" }; serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err(); } }
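To make the write-once semantics in the file above concrete, here is a minimal, hypothetical sketch (not part of the original source; it assumes the definitions above are importable under an `offchain::types` module path) showing `validate_write_once_fields` accepting an append-only `metadata` update while rejecting a changed `address`:

// Sketch only: `offchain::types` is an assumed module path for the types above.
use offchain::types::{PaymentActorObject, Status, StatusObject, WriteOnceError};

fn demo_write_once() -> Result<(), WriteOnceError> {
    let prior = PaymentActorObject {
        address: "dm1pexampleaddr".into(), // hypothetical bech32-style identifier
        kyc_data: None,
        status: StatusObject { status: Status::None, abort_code: None, abort_message: None },
        metadata: vec!["v1".to_owned()],
        additional_kyc_data: None,
    };

    // Appending to `metadata` keeps the prior list as a prefix, so this passes.
    let mut updated = prior.clone();
    updated.metadata.push("v2".to_owned());
    updated.validate_write_once_fields(&prior)?;

    // Changing the immutable `address` is rejected.
    let mut tampered = prior.clone();
    tampered.address = "dm1potheraddr".into();
    assert_eq!(tampered.validate_write_once_fields(&prior), Err(WriteOnceError));
    Ok(())
}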
{ Err(D::Error::custom(format_args!("expected {:?}", variant))) }
conditional_block
types.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use serde::{ de::{Deserializer, Error}, Deserialize, Serialize, }; use serde_repr::{Deserialize_repr, Serialize_repr}; use uuid::Uuid; use crate::payment_command::Actor; /// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request, /// used for tracking requests and debugging. Responses must have the same string in the /// X-REQUEST-ID header value as the requests they correspond to. pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID"; /// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP /// request sender must use the compliance key of the VASP account linked with this address to sign /// the request JWS body, and the request receiver uses this address to find the request sender's /// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The /// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP /// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS. pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS"; #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] enum ObjectType { CommandRequestObject, CommandResponseObject, PaymentCommand, } impl ObjectType { fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandRequestObject) } fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandResponseObject) } fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::PaymentCommand) } fn deserialize_variant<'de, D: Deserializer<'de>>( d: D, variant: Self, ) -> Result<Self, D::Error> { let object_type = Self::deserialize(d)?; if object_type == variant { Ok(object_type) } else { Err(D::Error::custom(format_args!("expected {:?}", variant))) } } } #[derive(Deserialize, Serialize)] pub struct CommandRequestObject { #[serde(deserialize_with = "ObjectType::deserialize_request")] #[serde(rename = "_ObjectType")] object_type: ObjectType, #[serde(flatten)] command: Command, cid: Uuid, } impl CommandRequestObject { pub fn new(command: Command, cid: Uuid) -> Self { Self { object_type: ObjectType::CommandRequestObject, command, cid, } } pub fn command(&self) -> &Command { &self.command } pub fn cid(&self) -> Uuid { self.cid } pub fn into_parts(self) -> (Command, Uuid) { (self.command, self.cid) } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum CommandStatus { Success, Failure, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct CommandResponseObject { #[serde(deserialize_with = "ObjectType::deserialize_response")] #[serde(rename = "_ObjectType")] object_type: ObjectType, status: CommandStatus, #[serde(skip_serializing_if = "Option::is_none")] error: Option<OffChainError>, #[serde(skip_serializing_if = "Option::is_none")] cid: Option<Uuid>, } impl CommandResponseObject { pub fn new(status: CommandStatus) -> Self { Self { object_type: ObjectType::CommandResponseObject, status, error: None, cid: None, } } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum OffChainErrorType { #[serde(rename = "command_error")] Command, #[serde(rename = "protocol_error")] Protocol, } // https://dip.diem.com/dip-1/#list-of-error-codes #[derive(Debug, 
PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ErrorCode { // // HTTP Header Validation Error Codes // /// One of the following potential errors: /// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the /// command object. All command objects should have a field that is the request sender’s /// address. /// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value. /// * Could not find the compliance key of the onchain account found by the /// `X-REQUEST-SENDER-ADDRESS` header value. /// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a /// valid ED25519 public key. /// * `X-REQUEST-ID` is not a valid UUID format. InvalidHttpHeader, /// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`. MissingHttpHeader, // // JWS Validation Error Codes // /// Invalid JWS format (compact) or protected header InvalidJws, /// JWS signature verification failed InvalidJwsSignature, // // Request Object Validation Error Codes // /// Request content is not valid JSON InvalidJson, /// Object is not valid, type does not match. /// The Command request/response object json is not an object, or the command object type does /// not match command_type. InvalidObject, /// Either: /// * Missing required field /// * An optional field is required to be set for a specific state, e.g. PaymentObject requires /// the sender's kyc_data (which is an optional field for PaymentActorObject) when the sender /// initializes the PaymentObject. MissingField, /// A field is unknown for an object. UnknownField, /// Invalid/unsupported command_type. UnknownCommandType, /// * Invalid / unknown enum field values. /// * UUID field value does not match UUID format. /// * Payment actor address is not a valid DIP-5 account identifier. /// * Currency field value is not a valid Diem currency code for the connected network. InvalidFieldValue, /// The HTTP request sender is not the right actor to send the payment object. For example, if /// the receiver actor sends a new command with a payment object change that should be made by /// the sender actor. InvalidCommandProducer, /// Could not find a command by reference_id for a non-initial state command object; for example, /// the receiver actor received a payment command object whose sender actor status is /// `ready_for_settlement`, but the receiver could not find any command object by the reference id. InvalidInitialOrPriorNotFound, /// PaymentActionObject#amount is under the travel rule threshold, so no KYC is needed for the /// transaction NoKycNeeded, /// Either: /// * Field recipient_signature value is not hex-encoded bytes. /// * Field recipient_signature value is an invalid signature. InvalidRecipientSignature, /// * The DIP-5 account identifier address in the command object is not the HTTP request sender’s /// address or receiver’s address. For the payment object it is sender.address or /// receiver.address. /// * Could not find an on-chain account by a DIP-5 account identifier address in the command /// object address. UnknownAddress, /// * Command object is in conflict with another different command object by cid; likely a cid /// was reused for a different command object. /// * Failed to acquire lock for the command object by the reference_id. Conflict, /// Field payment.action.currency value is a valid Diem currency code, but it is not supported /// or acceptable by the receiver VASP. UnsupportedCurrency, /// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is /// aborted instead of ready_for_settlement. InvalidOriginalPaymentReferenceId, /// Overwrite of a write-once/immutable field value: /// * Overwrite a field that can only be written once. /// * Overwrite an immutable field (a field that can only be set in the initial command object, /// e.g. `original_payment_reference_id`). /// * Overwrite the opponent payment actor's fields. InvalidOverwrite, /// Only one actor action is allowed at a time, and the next states for a given command object /// state are limited to specific states. This error indicates the new payment object state is /// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B, /// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond /// with this error code if VASP B sends payment object state SSOFT. InvalidTransition, #[serde(other)] /// Unknown Error Code Unknown, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct OffChainError { #[serde(rename = "type")] error_type: OffChainErrorType, #[serde(skip_serializing_if = "Option::is_none")] field: Option<String>, code: ErrorCode, #[serde(skip_serializing_if = "Option::is_none")] message: Option<String>, } #[derive(Deserialize, Serialize)] #[serde(tag = "command_type", content = "command")] pub enum Command { PaymentCommand(PaymentCommandObject), FundPullPreApprovalCommand, } #[derive(Deserialize, Serialize)] pub struct PaymentCommandObject { #[serde(deserialize_with = "ObjectType::deserialize_payment")] #[serde(rename = "_ObjectType")] object_type: ObjectType, payment: PaymentObject, } impl PaymentCommandObject { pub fn new(payment: PaymentObject) -> Self { Self { object_type: ObjectType::PaymentCommand, payment, } } pub fn payment(&self) -> &PaymentObject {
b fn into_payment(self) -> PaymentObject { self.payment } } /// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It /// also includes the status of the actor, indicates missing information or willingness to settle /// or abort the payment, and the Know-Your-Customer information of the customer involved in the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActorObject { /// Address of the sender/receiver account. Addresses may be single use or valid for a limited /// time, and therefore VASPs should not rely on them remaining stable across time or different /// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the /// address of the VASP as well as the specific user's subaddress. They should be no longer /// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account /// identifier" section in DIP-5 for format. pub address: Box<str>, /// The KYC data for this account. This field is optional but immutable once it is set. pub kyc_data: Option<KycDataObject>, /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory by this respective actor (either sender or receiver side) and /// mutable. Note that in the first request (which is initiated by the sender), the receiver /// status should be set to `None`. pub status: StatusObject, /// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP /// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New /// `metadata` elements may be appended to the `metadata` list via subsequent commands on an /// object. #[serde(skip_serializing_if = "Vec::is_empty", default)] pub metadata: Vec<String>, /// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC /// data which can be used to clear the soft-match. It is suggested that this data be JSON, /// XML, or another human-readable form. pub additional_kyc_data: Option<String>, } impl PaymentActorObject { pub fn status(&self) -> &StatusObject { &self.status } pub fn kyc_data(&self) -> Option<&KycDataObject> { self.kyc_data.as_ref() } pub fn additional_kyc_data(&self) -> Option<&str> { self.additional_kyc_data.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { if self.address != prior.address { return Err(WriteOnceError); } if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data { return Err(WriteOnceError); } if !self.metadata.starts_with(&prior.metadata) { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ActionType { Charge, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActionObject { /// Amount of the transfer. Base units are the same as for on-chain transactions for this /// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars, /// then “1” equals the same amount here. For any currency, the on-chain mapping must be used /// for amounts. pub amount: u64, /// One of the supported on-chain currency types - ex. XUS, etc. // TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject pub currency: String, /// Populated in the request.
This value indicates the requested action to perform, and the /// only valid value is charge. pub action: ActionType, /// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment /// Command was created. pub timestamp: u64, } /// Some fields are immutable after they are defined once. Others can be updated multiple times /// (see below). Updating immutable fields with a different value results in a Command error. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentObject { /// Information about the sender in this payment pub sender: PaymentActorObject, /// Information about the receiver in this payment pub receiver: PaymentActorObject, /// Unique reference ID of this payment on the payment initiator VASP (the VASP which /// originally created this payment Object). This value should be globally unique. This field /// is mandatory on payment creation and immutable after that. We recommend using a 128-bit /// UUID according to RFC4122 with "-"'s included. pub reference_id: Uuid, /// Used to refer to an old payment known to the other VASP. For example, used for refunds. The /// reference ID of the original payment will be placed into this field. This field is /// mandatory on refund and immutable. pub original_payment_reference_id: Option<Uuid>, /// Signature of the recipient of this transaction encoded in hex. It is signed with the /// compliance key of the recipient VASP and is used for on-chain attestation from the /// recipient party. This may be omitted on blockchains which do not require on-chain /// attestation. pub recipient_signature: Option<String>, /// Number of cryptocurrency + currency type (XUS, etc.) + type of action to take. This field is mandatory and immutable pub action: PaymentActionObject, /// Description of the payment. To be displayed to the user. Unicode utf-8 encoded, max length /// of 255 characters. This field is optional but can only be written once. pub description: Option<String>, } impl PaymentObject { pub fn sender(&self) -> &PaymentActorObject { &self.sender } pub fn receiver(&self) -> &PaymentActorObject { &self.receiver } pub fn reference_id(&self) -> Uuid { self.reference_id } pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject { match actor { Actor::Sender => self.sender(), Actor::Receiver => self.receiver(), } } pub fn recipient_signature(&self) -> Option<&str> { self.recipient_signature.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { self.sender.validate_write_once_fields(&prior.sender)?; self.receiver.validate_write_once_fields(&prior.receiver)?; if self.reference_id != prior.reference_id { return Err(WriteOnceError); } if self.original_payment_reference_id != prior.original_payment_reference_id { return Err(WriteOnceError); } if self.action != prior.action { return Err(WriteOnceError); } if prior.description.is_some() && prior.description != self.description { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct StatusObject { /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory by this respective actor (either sender or receiver side) and /// mutable. pub status: Status, /// In the case of an `abort` status, this field may be used to describe the reason for the /// abort.
Represents the error code of the corresponding error. pub abort_code: Option<AbortCode>, /// Additional details about this error. To be used only when `abort_code` is populated. pub abort_message: Option<String>, } impl StatusObject { pub fn status(&self) -> Status { self.status } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum Status { /// No status is yet set from this actor. None, /// KYC data about the subaddresses is required by this actor. NeedsKycData, /// Transaction is ready for settlement according to this actor (i.e. the required /// signatures/KYC data have been provided). ReadyForSettlement, /// Indicates the actor wishes to abort this payment, instead of settling it. Abort, /// Actor's KYC data resulted in a soft-match, request additional KYC data. SoftMatch, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum AbortCode { /// The payment is rejected. It should not be used in the `original_payment_reference_id` field /// of a new payment. Rejected, } /// Represents a national ID. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct NationalIdObject { /// Indicates the national ID value - for example, a social security number pub id_value: String, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Indicates the type of the ID #[serde(rename = "type")] pub id_type: Option<String>, } /// Represents a physical address #[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)] pub struct AddressObject { /// The city, district, suburb, town, or village pub city: Option<String>, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Address line 1 pub line1: Option<String>, /// Address line 2 - apartment, unit, etc. pub line2: Option<String>, /// ZIP or postal code pub postal_code: Option<String>, /// State, county, province, region. pub state: Option<String>, } /// A `KycDataObject` represents the required information for a single subaddress. Proof of /// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory /// fields are `payload_version` and `type`. All other fields are optional from the point of view of /// the protocol -- however they may need to be included for another VASP to be ready to settle the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct KycDataObject { /// Version identifier to allow modifications to KYC data Object without needing to bump /// version of entire API set. Set to 1 payload_version: KycDataObjectVersion, pub kyc_data_type: KycDataObjectType, /// Legal given name of the user for which this KYC data Object applies. pub given_name: Option<String>, /// Legal surname of the user for which this KYC data Object applies. pub surname: Option<String>, /// Physical address data for this account pub address: Option<AddressObject>, /// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date /// format: https://en.wikipedia.org/wiki/ISO_8601 pub dob: Option<String>, /// Place of birth for this user. line1 and line2 fields should not be populated for this usage /// of the address Object pub place_of_birth: Option<String>, /// National ID information for the holder of this account pub national_id: Option<NationalIdObject>, /// Name of the legal entity.
Used when subaddress represents a legal entity rather than an /// individual. KYCDataObject should only include one of legal_entity_name OR /// given_name/surname pub legal_entity_name: Option<String>, } impl KycDataObject { pub fn new_entity() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Entity, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } pub fn new_individual() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Individual, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum KycDataObjectType { Individual, Entity, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)] #[repr(u8)] pub enum KycDataObjectVersion { V1 = 1, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct WriteOnceError; #[cfg(test)] mod tests { use super::{KycDataObjectType, KycDataObjectVersion}; use serde_json::json; #[test] fn kyc_data_object_type() { use KycDataObjectType::*; let variants = [(Individual, "individual"), (Entity, "entity")]; for (variant, s) in &variants { let json = json! { s }; assert_eq!(serde_json::to_value(variant).unwrap(), json); assert_eq!( serde_json::from_value::<KycDataObjectType>(json).unwrap(), *variant ); } let invalid = json! { "Organization" }; serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err(); } #[test] fn kyc_data_object_version() { let v1_json = json! { 1 }; let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap(); assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json); let invalid_version = json! { 52 }; serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err(); let invalid_type = json! { "1" }; serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err(); } }
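One behavior worth illustrating from the file above is the `_ObjectType` tag check: each `deserialize_*` helper funnels into `deserialize_variant`, which rejects a payload whose tag names a different object type. A rough sketch (the `offchain::types` module path is assumed, not part of the original source):

use offchain::types::CommandResponseObject; // assumed module path
use serde_json::json;

fn demo_object_type_guard() {
    // The tag matches `CommandResponseObject`, so this deserializes.
    let ok = json!({ "_ObjectType": "CommandResponseObject", "status": "success" });
    assert!(serde_json::from_value::<CommandResponseObject>(ok).is_ok());

    // A request tag fails the response deserializer with "expected CommandResponseObject".
    let mismatched = json!({ "_ObjectType": "CommandRequestObject", "status": "success" });
    assert!(serde_json::from_value::<CommandResponseObject>(mismatched).is_err());
}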
&self.payment } pu
identifier_body
types.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use serde::{ de::{Deserializer, Error}, Deserialize, Serialize, }; use serde_repr::{Deserialize_repr, Serialize_repr}; use uuid::Uuid; use crate::payment_command::Actor; /// A header set with a unique UUID (according to RFC4122 with "-"'s included) for the request, /// used for tracking requests and debugging. Responses must have the same string in the /// X-REQUEST-ID header value as the requests they correspond to. pub const REQUEST_ID_HEADER: &str = "X-REQUEST-ID"; /// A header with the HTTP request sender's VASP DIP-5 address used in the command object. The HTTP /// request sender must use the compliance key of the VASP account linked with this address to sign /// the request JWS body, and the request receiver uses this address to find the request sender's /// compliance key to verify the JWS signature. For example: VASP A transfers funds to VASP B. The /// HTTP request A sends to B contains X-REQUEST-SENDER-ADDRESS as VASP A's address. An HTTP /// request B sends to A should contain VASP B's address as X-REQUEST-SENDER-ADDRESS. pub const REQUEST_SENDER_ADDRESS: &str = "X-REQUEST-SENDER-ADDRESS"; #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize)] enum ObjectType { CommandRequestObject, CommandResponseObject, PaymentCommand, } impl ObjectType { fn deserialize_request<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandRequestObject) } fn deserialize_response<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::CommandResponseObject) } fn deserialize_payment<'de, D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> { Self::deserialize_variant(d, Self::PaymentCommand) } fn deserialize_variant<'de, D: Deserializer<'de>>( d: D, variant: Self, ) -> Result<Self, D::Error> { let object_type = Self::deserialize(d)?; if object_type == variant { Ok(object_type) } else { Err(D::Error::custom(format_args!("expected {:?}", variant))) } } } #[derive(Deserialize, Serialize)] pub struct CommandRequestObject { #[serde(deserialize_with = "ObjectType::deserialize_request")] #[serde(rename = "_ObjectType")] object_type: ObjectType, #[serde(flatten)] command: Command, cid: Uuid, } impl CommandRequestObject { pub fn new(command: Command, cid: Uuid) -> Self { Self { object_type: ObjectType::CommandRequestObject, command, cid, } } pub fn command(&self) -> &Command { &self.command } pub fn cid(&self) -> Uuid { self.cid } pub fn into_parts(self) -> (Command, Uuid) { (self.command, self.cid) } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum CommandStatus { Success, Failure, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct CommandResponseObject { #[serde(deserialize_with = "ObjectType::deserialize_response")] #[serde(rename = "_ObjectType")] object_type: ObjectType, status: CommandStatus, #[serde(skip_serializing_if = "Option::is_none")] error: Option<OffChainError>, #[serde(skip_serializing_if = "Option::is_none")] cid: Option<Uuid>, } impl CommandResponseObject { pub fn new(status: CommandStatus) -> Self { Self { object_type: ObjectType::CommandResponseObject, status, error: None, cid: None, } } } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum OffChainErrorType { #[serde(rename = "command_error")] Command, #[serde(rename = "protocol_error")] Protocol, } // https://dip.diem.com/dip-1/#list-of-error-codes #[derive(Debug, 
PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ErrorCode { // // HTTP Header Validation Error Codes // /// One of the following potential errors: /// * `X-REQUEST-SENDER-ADDRESS` header value is not the request sender’s address in the /// command object. All command objects should have a field that is the request sender’s /// address. /// * Could not find Diem's onchain account by the `X-REQUEST-SENDER-ADDRESS` header value. /// * Could not find the compliance key of the onchain account found by the /// `X-REQUEST-SENDER-ADDRESS` header value. /// * The compliance key found from the onchain account by `X-REQUEST-SENDER-ADDRESS` is not a /// valid ED25519 public key. /// * `X-REQUEST-ID` is not a valid UUID format. InvalidHttpHeader, /// Missing HTTP header `X-REQUEST-ID` or `X-REQUEST-SENDER-ADDRESS`. MissingHttpHeader, // // JWS Validation Error Codes // /// Invalid JWS format (compact) or protected header InvalidJws, /// JWS signature verification failed InvalidJwsSignature, // // Request Object Validation Error Codes // /// Request content is not valid JSON InvalidJson, /// Object is not valid, type does not match. /// The Command request/response object json is not an object, or the command object type does /// not match command_type. InvalidObject, /// Either: /// * Missing required field /// * An optional field is required to be set for a specific state, e.g. PaymentObject requires /// the sender's kyc_data (which is an optional field for PaymentActorObject) when the sender /// initializes the PaymentObject. MissingField, /// A field is unknown for an object. UnknownField, /// Invalid/unsupported command_type. UnknownCommandType, /// * Invalid / unknown enum field values. /// * UUID field value does not match UUID format. /// * Payment actor address is not a valid DIP-5 account identifier. /// * Currency field value is not a valid Diem currency code for the connected network. InvalidFieldValue, /// The HTTP request sender is not the right actor to send the payment object. For example, if /// the receiver actor sends a new command with a payment object change that should be made by /// the sender actor. InvalidCommandProducer, /// Could not find a command by reference_id for a non-initial state command object; for example, /// the receiver actor received a payment command object whose sender actor status is /// `ready_for_settlement`, but the receiver could not find any command object by the reference id. InvalidInitialOrPriorNotFound, /// PaymentActionObject#amount is under the travel rule threshold, so no KYC is needed for the /// transaction NoKycNeeded, /// Either: /// * Field recipient_signature value is not hex-encoded bytes. /// * Field recipient_signature value is an invalid signature. InvalidRecipientSignature, /// * The DIP-5 account identifier address in the command object is not the HTTP request sender’s /// address or receiver’s address. For the payment object it is sender.address or /// receiver.address. /// * Could not find an on-chain account by a DIP-5 account identifier address in the command /// object address. UnknownAddress, /// * Command object is in conflict with another different command object by cid; likely a cid /// was reused for a different command object. /// * Failed to acquire lock for the command object by the reference_id. Conflict, /// Field payment.action.currency value is a valid Diem currency code, but it is not supported /// or acceptable by the receiver VASP. UnsupportedCurrency, /// * Could not find data by the original_payment_reference_id if the sender set it.
/// * The status of the original payment object found by original_payment_reference_id is /// aborted instead of ready_for_settlement. InvalidOriginalPaymentReferenceId, /// Overwrite of a write-once/immutable field value: /// * Overwrite a field that can only be written once. /// * Overwrite an immutable field (a field that can only be set in the initial command object, /// e.g. `original_payment_reference_id`). /// * Overwrite the opponent payment actor's fields. InvalidOverwrite, /// Only one actor action is allowed at a time, and the next states for a given command object /// state are limited to specific states. This error indicates the new payment object state is /// not valid according to the current object state. For example: VASP A sends RSOFT to VASP B, /// VASP B should send the next payment object with ABORT, or SSOFTSEND; VASP A should respond /// with this error code if VASP B sends payment object state SSOFT. InvalidTransition, #[serde(other)] /// Unknown Error Code Unknown, } #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct OffChainError { #[serde(rename = "type")] error_type: OffChainErrorType, #[serde(skip_serializing_if = "Option::is_none")] field: Option<String>, code: ErrorCode, #[serde(skip_serializing_if = "Option::is_none")] message: Option<String>, } #[derive(Deserialize, Serialize)] #[serde(tag = "command_type", content = "command")] pub enum Command { PaymentCommand(PaymentCommandObject), FundPullPreApprovalCommand, } #[derive(Deserialize, Serialize)] pub struct PaymentCommandObject { #[serde(deserialize_with = "ObjectType::deserialize_payment")] #[serde(rename = "_ObjectType")] object_type: ObjectType, payment: PaymentObject, } impl PaymentCommandObject { pub fn new(payment: PaymentObject) -> Self { Self { object_type: ObjectType::PaymentCommand, payment, } } pub fn payment(&self) -> &PaymentObject { &self.payment } pub fn into_payment(self) -> PaymentObject { self.payment } } /// A `PaymentActorObject` represents a participant in a payment - either sender or receiver. It /// also includes the status of the actor, indicates missing information or willingness to settle /// or abort the payment, and the Know-Your-Customer information of the customer involved in the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActorObject { /// Address of the sender/receiver account. Addresses may be single use or valid for a limited /// time, and therefore VASPs should not rely on them remaining stable across time or different /// VASP addresses. The addresses are encoded using bech32. The bech32 address encodes both the /// address of the VASP as well as the specific user's subaddress. They should be no longer /// than 80 characters. Mandatory and immutable. For Diem addresses, refer to the "account /// identifier" section in DIP-5 for format. pub address: Box<str>, /// The KYC data for this account. This field is optional but immutable once it is set. pub kyc_data: Option<KycDataObject>, /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory by this respective actor (either sender or receiver side) and /// mutable. Note that in the first request (which is initiated by the sender), the receiver /// status should be set to `None`.
pub status: StatusObject, /// Can be specified by the respective VASP to hold metadata that the sender/receiver VASP /// wishes to associate with this payment. It may be set to an empty list (i.e. `[]`). New /// `metadata` elements may be appended to the `metadata` list via subsequent commands on an /// object. #[serde(skip_serializing_if = "Vec::is_empty", default)] pub metadata: Vec<String>, /// Freeform KYC data. If a soft-match occurs, this field can be used to specify additional KYC /// data which can be used to clear the soft-match. It is suggested that this data be JSON, /// XML, or another human-readable form. pub additional_kyc_data: Option<String>, } impl PaymentActorObject { pub fn status(&self) -> &StatusObject { &self.status } pub fn kyc_data(&self) -> Option<&KycDataObject> { self.kyc_data.as_ref() } pub fn additional_kyc_data(&self) -> Option<&str> { self.additional_kyc_data.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { if self.address != prior.address { return Err(WriteOnceError); } if prior.kyc_data.is_some() && prior.kyc_data != self.kyc_data { return Err(WriteOnceError); } if !self.metadata.starts_with(&prior.metadata) { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum ActionType { Charge, } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentActionObject { /// Amount of the transfer. Base units are the same as for on-chain transactions for this /// currency. For example, if DiemUSD is represented on-chain where “1” equals 1e-6 dollars, /// then “1” equals the same amount here. For any currency, the on-chain mapping must be used /// for amounts. pub amount: u64, /// One of the supported on-chain currency types - ex. XUS, etc. // TODO Should be an enum per https://dip.diem.com/dip-1/#paymentactionobject pub currency: String, /// Populated in the request. This value indicates the requested action to perform, and the /// only valid value is charge. pub action: ActionType, /// [Unix time](https://en.wikipedia.org/wiki/Unix_time) indicating the time that the payment /// Command was created. pub timestamp: u64, } /// Some fields are immutable after they are defined once. Others can be updated multiple times /// (see below). Updating immutable fields with a different value results in a Command error. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct PaymentObject { /// Information about the sender in this payment pub sender: PaymentActorObject, /// Information about the receiver in this payment pub receiver: PaymentActorObject, /// Unique reference ID of this payment on the payment initiator VASP (the VASP which /// originally created this payment Object). This value should be globally unique. This field /// is mandatory on payment creation and immutable after that. We recommend using a 128-bit /// UUID according to RFC4122 with "-"'s included. pub reference_id: Uuid, /// Used to refer to an old payment known to the other VASP. For example, used for refunds. The /// reference ID of the original payment will be placed into this field. This field is /// mandatory on refund and immutable. pub original_payment_reference_id: Option<Uuid>, /// Signature of the recipient of this transaction encoded in hex. It is signed with the /// compliance key of the recipient VASP and is used for on-chain attestation from the /// recipient party.
This may be omitted on blockchains which do not require on-chain /// attestation. pub recipient_signature: Option<String>, /// Number of cryptocurrency + currency type (XUS, etc.) + type of action to take. This field is mandatory and immutable pub action: PaymentActionObject, /// Description of the payment. To be displayed to the user. Unicode utf-8 encoded, max length /// of 255 characters. This field is optional but can only be written once. pub description: Option<String>, } impl PaymentObject { pub fn sender(&self) -> &PaymentActorObject { &self.sender } pub fn receiver(&self) -> &PaymentActorObject { &self.receiver } pub fn reference_id(&self) -> Uuid { self.reference_id } pub fn actor_object_by_actor(&self, actor: Actor) -> &PaymentActorObject { match actor { Actor::Sender => self.sender(), Actor::Receiver => self.receiver(), } } pub fn recipient_signature(&self) -> Option<&str> { self.recipient_signature.as_deref() } pub fn validate_write_once_fields(&self, prior: &Self) -> Result<(), WriteOnceError> { self.sender.validate_write_once_fields(&prior.sender)?; self.receiver.validate_write_once_fields(&prior.receiver)?; if self.reference_id != prior.reference_id { return Err(WriteOnceError); } if self.original_payment_reference_id != prior.original_payment_reference_id { return Err(WriteOnceError); } if self.action != prior.action { return Err(WriteOnceError); } if prior.description.is_some() && prior.description != self.description { return Err(WriteOnceError); } Ok(()) } } #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct StatusObject { /// Status of the payment from the perspective of this actor. This field can only be set by the /// respective sender/receiver VASP and represents the status on the sender/receiver VASP side. /// This field is mandatory by this respective actor (either sender or receiver side) and /// mutable. pub status: Status, /// In the case of an `abort` status, this field may be used to describe the reason for the /// abort. Represents the error code of the corresponding error. pub abort_code: Option<AbortCode>, /// Additional details about this error. To be used only when `abort_code` is populated. pub abort_message: Option<String>, } impl StatusObject { pub fn status(&self) -> Status { self.status } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum Status { /// No status is yet set from this actor. None, /// KYC data about the subaddresses is required by this actor. NeedsKycData, /// Transaction is ready for settlement according to this actor (i.e. the required /// signatures/KYC data have been provided). ReadyForSettlement, /// Indicates the actor wishes to abort this payment, instead of settling it. Abort, /// Actor's KYC data resulted in a soft-match, request additional KYC data. SoftMatch, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum AbortCode { /// The payment is rejected. It should not be used in the `original_payment_reference_id` field /// of a new payment. Rejected, } /// Represents a national ID. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct NationalIdObject { /// Indicates the national ID value - for example, a social security number pub id_value: String,
/// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Indicates the type of the ID #[serde(rename = "type")] pub id_type: Option<String>, } /// Represents a physical address #[derive(Clone, Debug, Default, PartialEq, Deserialize, Serialize)] pub struct AddressObject { /// The city, district, suburb, town, or village pub city: Option<String>, /// Two-letter country code (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) pub country: Option<String>, /// Address line 1 pub line1: Option<String>, /// Address line 2 - apartment, unit, etc. pub line2: Option<String>, /// ZIP or postal code pub postal_code: Option<String>, /// State, county, province, region. pub state: Option<String>, } /// A `KycDataObject` represents the required information for a single subaddress. Proof of /// non-repudiation is provided by the signatures included in the JWS payloads. The only mandatory /// fields are `payload_version` and `type`. All other fields are optional from the point of view of /// the protocol -- however they may need to be included for another VASP to be ready to settle the /// payment. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct KycDataObject { /// Version identifier to allow modifications to KYC data Object without needing to bump /// version of entire API set. Set to 1 payload_version: KycDataObjectVersion, pub kyc_data_type: KycDataObjectType, /// Legal given name of the user for which this KYC data Object applies. pub given_name: Option<String>, /// Legal surname of the user for which this KYC data Object applies. pub surname: Option<String>, /// Physical address data for this account pub address: Option<AddressObject>, /// Date of birth for the holder of this account. Specified as an ISO 8601 calendar date /// format: https://en.wikipedia.org/wiki/ISO_8601 pub dob: Option<String>, /// Place of birth for this user. line1 and line2 fields should not be populated for this usage /// of the address Object pub place_of_birth: Option<String>, /// National ID information for the holder of this account pub national_id: Option<NationalIdObject>, /// Name of the legal entity. Used when subaddress represents a legal entity rather than an /// individual. 
KYCDataObject should only include one of legal_entity_name OR /// given_name/surname pub legal_entity_name: Option<String>, } impl KycDataObject { pub fn new_entity() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Entity, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } pub fn new_individual() -> Self { Self { payload_version: KycDataObjectVersion::V1, kyc_data_type: KycDataObjectType::Individual, given_name: None, surname: None, address: None, dob: None, place_of_birth: None, national_id: None, legal_entity_name: None, } } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "snake_case")] pub enum KycDataObjectType { Individual, Entity, } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize_repr, Serialize_repr)] #[repr(u8)] pub enum KycDataObjectVersion { V1 = 1, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct WriteOnceError; #[cfg(test)] mod tests { use super::{KycDataObjectType, KycDataObjectVersion}; use serde_json::json; #[test] fn kyc_data_object_type() { use KycDataObjectType::*; let variants = [(Individual, "individual"), (Entity, "entity")]; for (variant, s) in &variants { let json = json! { s }; assert_eq!(serde_json::to_value(variant).unwrap(), json); assert_eq!( serde_json::from_value::<KycDataObjectType>(json).unwrap(), *variant ); } let invalid = json! { "Organization" }; serde_json::from_value::<KycDataObjectType>(invalid).unwrap_err(); } #[test] fn kyc_data_object_version() { let v1_json = json! { 1 }; let v1: KycDataObjectVersion = serde_json::from_value(v1_json.clone()).unwrap(); assert_eq!(serde_json::to_value(&v1).unwrap(), v1_json); let invalid_version = json! { 52 }; serde_json::from_value::<KycDataObjectVersion>(invalid_version).unwrap_err(); let invalid_type = json! { "1" }; serde_json::from_value::<KycDataObjectVersion>(invalid_type).unwrap_err(); } }
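As a quick illustration of the constructors above (a sketch only; the `offchain::types` module path is assumed): `new_individual` pins the private `payload_version` to `V1`, which `Serialize_repr` emits as the bare integer `1`, while `kyc_data_type` serializes in snake case:

use offchain::types::KycDataObject; // assumed module path

fn demo_kyc_serialization() {
    let mut kyc = KycDataObject::new_individual();
    kyc.given_name = Some("Ada".to_owned());
    kyc.surname = Some("Lovelace".to_owned());

    let value = serde_json::to_value(&kyc).unwrap();
    assert_eq!(value["payload_version"], 1); // Serialize_repr => plain integer, not a string
    assert_eq!(value["kyc_data_type"], "individual"); // rename_all = "snake_case"
}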
random_line_split
goto_definition.rs
use super::NavigationTarget; use crate::def::{AstPtr, Expr, Literal, ResolveResult}; use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath}; use nix_interop::FLAKE_FILE; use syntax::ast::{self, AstNode}; use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken}; #[derive(Debug, Clone, PartialEq, Eq)] pub enum GotoDefinitionResult { Path(VfsPath), Targets(Vec<NavigationTarget>), } pub(crate) fn goto_definition( db: &dyn DefDatabase, FilePos { file_id, pos }: FilePos, ) -> Option<GotoDefinitionResult> { let parse = db.parse(file_id); let tok = best_token_at_offset(&parse.syntax_node(), pos)?; // Special case for goto flake inputs. if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) { return Some(ret); } let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Ref(n) => Some(AstPtr::new(n.syntax())), ast::Name(n) => Some(AstPtr::new(n.syntax())), ast::Literal(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let source_map = db.source_map(file_id); let expr_id = source_map.expr_for_node(ptr)?; // Special case for goto-path. if tok.kind() == SyntaxKind::PATH { let module = db.module(file_id); let Expr::Literal(Literal::Path(path)) = &module[expr_id] else { return None; }; let path = path.resolve(db)?; return Some(GotoDefinitionResult::Path(path)); } let name_res = db.name_resolution(file_id); let targets = match name_res.get(expr_id)? { &ResolveResult::Definition(name) => source_map .nodes_for_name(name) .filter_map(|ptr| { let name_node = ptr.to_node(&parse.syntax_node()); let full_node = name_node.ancestors().find(|n| { matches!( n.kind(), SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT ) })?; Some(NavigationTarget { file_id, focus_range: name_node.text_range(), full_range: full_node.text_range(), }) }) .collect(), ResolveResult::WithExprs(withs) => { withs .iter() .filter_map(|&with_expr| { // with expr; body // ^--^ focus // ^--------^ full let with_node = source_map .node_for_expr(with_expr) .expect("WithExprs must be valid") .to_node(&parse.syntax_node()); let with_node = ast::With::cast(with_node).expect("WithExprs must be valid"); let with_token_range = with_node.with_token()?.text_range(); let with_header_end = with_node .semicolon_token() .map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range()); let with_header = with_token_range.cover(with_header_end); Some(NavigationTarget { file_id, focus_range: with_token_range, full_range: with_header, }) }) .collect() } // Currently builtin names cannot "goto-definition". ResolveResult::Builtin(_) => return None, }; Some(GotoDefinitionResult::Targets(targets)) } fn goto_flake_input( db: &dyn DefDatabase, file: FileId, tok: SyntaxToken, ) -> Option<GotoDefinitionResult> { let module_kind = db.module_kind(file); let ModuleKind::FlakeNix { explicit_inputs, param_inputs, .. } = &*module_kind else { return None; }; let flake_info = db.source_root_flake_info(db.file_source_root(file))?; let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Attr(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let module = db.module(file); let source_map = db.source_map(file); let name_id = source_map.name_for_node(ptr)?; let name_str = &*module[name_id].text; if explicit_inputs.get(name_str) == Some(&name_id) || param_inputs.get(name_str) == Some(&name_id) { let target = flake_info .input_store_paths .get(name_str)? 
.join(FLAKE_FILE)?; return Some(GotoDefinitionResult::Path(target)); } None } #[cfg(test)] mod tests { use super::*; use crate::base::SourceDatabase; use crate::tests::TestDB; use expect_test::{expect, Expect}; #[track_caller] fn check_no(fixture: &str)
#[track_caller] fn check(fixture: &str, expect: Expect) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); let mut got = match goto_definition(&db, f[0]).expect("No definition") { GotoDefinitionResult::Path(path) => format!("file://{}", path.display()), GotoDefinitionResult::Targets(targets) => { assert!(!targets.is_empty()); targets .into_iter() .map(|target| { assert!(target.full_range.contains_range(target.focus_range)); let src = db.file_content(target.file_id); let mut full = src[target.full_range].to_owned(); let relative_focus = target.focus_range - target.full_range.start(); full.insert(relative_focus.end().into(), '>'); full.insert(relative_focus.start().into(), '<'); full }) .collect::<Vec<_>>() .join("\n") } }; // Prettify. if got.contains('\n') { got += "\n"; } expect.assert_eq(&got); } #[test] fn not_found() { check_no("$0a"); check_no("b: $0a"); } #[test] fn invalid_position() { check_no("1 $0+ 2"); check_no("wi$0th 1; 2"); } #[test] fn lambda_param() { check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]); check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]); check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]); check("a: ({ x? $0a }@a: a) 1", expect!["{ x? a }@<a>: a"]); check("a: ({ x? $0x }@a: a) 1", expect!["{ <x>? x }@a: a"]); } #[test] fn with_env() { check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]); check( "with 1; let a = 1; in with 2; $0b", expect![[r#" <with> 2; <with> 1; "#]], ); } #[test] fn bindings() { check( "let a = a; in rec { inherit a; b = $0a; }", expect!["inherit <a>;"], ); check( "let a = a; in rec { inherit $0a; b = a; }", expect!["<a> = a;"], ); check( "let a = $0a; in rec { inherit a; b = a; }", expect!["<a> = a;"], ); } #[test] fn left_and_right() { check("let a = 1; in $0a ", expect!["<a> = 1;"]); check("let a = 1; in a$0 ", expect!["<a> = 1;"]); check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]); check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]); } #[test] fn merged_binding() { check( "let a.a = 1; a.b = 2; a = { c = 3; }; in $0a", expect![[r#" <a>.a = 1; <a>.b = 2; <a> = { c = 3; }; "#]], ); check( "rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }", expect![[r#" <a> = { a = 1; }; <a> = { a = 2; }; "#]], ); } #[test] fn builtin() { check("let true = 1; in $0true && false", expect!["<true> = 1;"]); check_no("let true = 1; in true && $0false"); } #[test] fn path() { check("1 + $0./.", expect!["file:///"]); check( " #- /default.nix import $0./bar.nix #- /bar.nix hello ", expect!["file:///bar.nix"], ); } #[test] fn flake_input() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs.$0nixpkgs.url = "github:NixOS/nixpkgs"; inputs.nix.url = "github:NixOS/nix"; output = {... }: { }; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // Flake input in string form. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs = { nixpkgs = { url = "github:NixOS/nixpkgs"; }; "n$0ix" = { url = "github:NixOS/nix"; }; }; output = {... }: { }; } "#, expect!["file:///nix/store/oooo/flake.nix"], ); // Not a flake input. check_no( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; inputs'.$0nixpkgs.no = 42; } "#, ); // Not a flake input. 
check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; outputs = { nixpkgs,... }: $0nixpkgs; "#, expect!["{ <nixpkgs>,... }: nixpkgs"], ); } #[test] fn flake_output_pat() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { outputs = { $0nixpkgs,... }: nixpkgs; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // `self` in parameter is not an input. check_no( r#" #- /flake.nix input:self=/nix/store/eeee { outputs = { $0self,... }: self; } "#, ); } }
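The two `GotoDefinitionResult` variants above need slightly different handling downstream: `Path` points at a whole file (goto-path and flake inputs), while `Targets` carries precise ranges. A rough sketch of flattening both into `(FileId, Option<TextRange>)` pairs; the `resolve` callback is hypothetical, `TextRange` is assumed to be the range type behind `NavigationTarget`, and the field access mirrors how the tests above construct targets:

use syntax::TextRange; // assumed re-export, the type returned by `text_range()` above

fn to_locations(
    result: GotoDefinitionResult,
    resolve: impl Fn(&VfsPath) -> Option<FileId>, // hypothetical path-to-file lookup
) -> Vec<(FileId, Option<TextRange>)> {
    match result {
        // Whole-file target: there is no narrower range to focus.
        GotoDefinitionResult::Path(path) => {
            resolve(&path).map(|file| (file, None)).into_iter().collect()
        }
        // Name resolutions: keep the narrow focus range of each target.
        GotoDefinitionResult::Targets(targets) => targets
            .into_iter()
            .map(|t| (t.file_id, Some(t.focus_range)))
            .collect(),
    }
}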
{ let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); assert_eq!(goto_definition(&db, f[0]), None); }
identifier_body
goto_definition.rs
use super::NavigationTarget; use crate::def::{AstPtr, Expr, Literal, ResolveResult}; use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath}; use nix_interop::FLAKE_FILE; use syntax::ast::{self, AstNode}; use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken}; #[derive(Debug, Clone, PartialEq, Eq)] pub enum GotoDefinitionResult { Path(VfsPath), Targets(Vec<NavigationTarget>), } pub(crate) fn goto_definition( db: &dyn DefDatabase, FilePos { file_id, pos }: FilePos, ) -> Option<GotoDefinitionResult> { let parse = db.parse(file_id); let tok = best_token_at_offset(&parse.syntax_node(), pos)?; // Special case for goto flake inputs. if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) { return Some(ret); } let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Ref(n) => Some(AstPtr::new(n.syntax())), ast::Name(n) => Some(AstPtr::new(n.syntax())), ast::Literal(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let source_map = db.source_map(file_id); let expr_id = source_map.expr_for_node(ptr)?; // Special case for goto-path. if tok.kind() == SyntaxKind::PATH { let module = db.module(file_id); let Expr::Literal(Literal::Path(path)) = &module[expr_id] else { return None; }; let path = path.resolve(db)?; return Some(GotoDefinitionResult::Path(path)); } let name_res = db.name_resolution(file_id); let targets = match name_res.get(expr_id)? { &ResolveResult::Definition(name) => source_map .nodes_for_name(name) .filter_map(|ptr| { let name_node = ptr.to_node(&parse.syntax_node()); let full_node = name_node.ancestors().find(|n| { matches!( n.kind(), SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT ) })?; Some(NavigationTarget { file_id, focus_range: name_node.text_range(), full_range: full_node.text_range(), }) }) .collect(), ResolveResult::WithExprs(withs) =>
full_range: with_header, }) }) .collect() } // Currently builtin names cannot "goto-definition". ResolveResult::Builtin(_) => return None, }; Some(GotoDefinitionResult::Targets(targets)) } fn goto_flake_input( db: &dyn DefDatabase, file: FileId, tok: SyntaxToken, ) -> Option<GotoDefinitionResult> { let module_kind = db.module_kind(file); let ModuleKind::FlakeNix { explicit_inputs, param_inputs, .. } = &*module_kind else { return None; }; let flake_info = db.source_root_flake_info(db.file_source_root(file))?; let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Attr(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let module = db.module(file); let source_map = db.source_map(file); let name_id = source_map.name_for_node(ptr)?; let name_str = &*module[name_id].text; if explicit_inputs.get(name_str) == Some(&name_id) || param_inputs.get(name_str) == Some(&name_id) { let target = flake_info .input_store_paths .get(name_str)? .join(FLAKE_FILE)?; return Some(GotoDefinitionResult::Path(target)); } None } #[cfg(test)] mod tests { use super::*; use crate::base::SourceDatabase; use crate::tests::TestDB; use expect_test::{expect, Expect}; #[track_caller] fn check_no(fixture: &str) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); assert_eq!(goto_definition(&db, f[0]), None); } #[track_caller] fn check(fixture: &str, expect: Expect) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); let mut got = match goto_definition(&db, f[0]).expect("No definition") { GotoDefinitionResult::Path(path) => format!("file://{}", path.display()), GotoDefinitionResult::Targets(targets) => { assert!(!targets.is_empty()); targets .into_iter() .map(|target| { assert!(target.full_range.contains_range(target.focus_range)); let src = db.file_content(target.file_id); let mut full = src[target.full_range].to_owned(); let relative_focus = target.focus_range - target.full_range.start(); full.insert(relative_focus.end().into(), '>'); full.insert(relative_focus.start().into(), '<'); full }) .collect::<Vec<_>>() .join("\n") } }; // Prettify. if got.contains('\n') { got += "\n"; } expect.assert_eq(&got); } #[test] fn not_found() { check_no("$0a"); check_no("b: $0a"); } #[test] fn invalid_position() { check_no("1 $0+ 2"); check_no("wi$0th 1; 2"); } #[test] fn lambda_param() { check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]); check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]); check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]); check("a: ({ x? $0a }@a: a) 1", expect!["{ x? a }@<a>: a"]); check("a: ({ x? $0x }@a: a) 1", expect!["{ <x>? 
x }@a: a"]); } #[test] fn with_env() { check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]); check( "with 1; let a = 1; in with 2; $0b", expect![[r#" <with> 2; <with> 1; "#]], ); } #[test] fn bindings() { check( "let a = a; in rec { inherit a; b = $0a; }", expect!["inherit <a>;"], ); check( "let a = a; in rec { inherit $0a; b = a; }", expect!["<a> = a;"], ); check( "let a = $0a; in rec { inherit a; b = a; }", expect!["<a> = a;"], ); } #[test] fn left_and_right() { check("let a = 1; in $0a ", expect!["<a> = 1;"]); check("let a = 1; in a$0 ", expect!["<a> = 1;"]); check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]); check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]); } #[test] fn merged_binding() { check( "let a.a = 1; a.b = 2; a = { c = 3; }; in $0a", expect![[r#" <a>.a = 1; <a>.b = 2; <a> = { c = 3; }; "#]], ); check( "rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }", expect![[r#" <a> = { a = 1; }; <a> = { a = 2; }; "#]], ); } #[test] fn builtin() { check("let true = 1; in $0true && false", expect!["<true> = 1;"]); check_no("let true = 1; in true && $0false"); } #[test] fn path() { check("1 + $0./.", expect!["file:///"]); check( " #- /default.nix import $0./bar.nix #- /bar.nix hello ", expect!["file:///bar.nix"], ); } #[test] fn flake_input() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs.$0nixpkgs.url = "github:NixOS/nixpkgs"; inputs.nix.url = "github:NixOS/nix"; output = {... }: { }; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // Flake input in string form. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs = { nixpkgs = { url = "github:NixOS/nixpkgs"; }; "n$0ix" = { url = "github:NixOS/nix"; }; }; output = {... }: { }; } "#, expect!["file:///nix/store/oooo/flake.nix"], ); // Not a flake input. check_no( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; inputs'.$0nixpkgs.no = 42; } "#, ); // Not a flake input. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; outputs = { nixpkgs,... }: $0nixpkgs; "#, expect!["{ <nixpkgs>,... }: nixpkgs"], ); } #[test] fn flake_output_pat() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { outputs = { $0nixpkgs,... }: nixpkgs; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // `self` in parameter is no an input. check_no( r#" #- /flake.nix input:self=/nix/store/eeee { outputs = { $0self,... }: self; } "#, ); } }
{ withs .iter() .filter_map(|&with_expr| { // with expr; body // ^--^ focus // ^--------^ full let with_node = source_map .node_for_expr(with_expr) .expect("WithExprs must be valid") .to_node(&parse.syntax_node()); let with_node = ast::With::cast(with_node).expect("WithExprs must be valid"); let with_token_range = with_node.with_token()?.text_range(); let with_header_end = with_node .semicolon_token() .map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range()); let with_header = with_token_range.cover(with_header_end); Some(NavigationTarget { file_id, focus_range: with_token_range,
conditional_block
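The conditional block above builds a navigation target for a `with` expression whose focus is the `with` keyword and whose full range covers through the terminating semicolon. A sketch of that range arithmetic, with a hypothetical `Span` type standing in for `text-size`'s TextRange:

#[derive(Debug, Clone, Copy)]
struct Span { start: usize, end: usize }

impl Span {
    // Equivalent of TextRange::cover: the smallest span containing both.
    fn cover(self, other: Span) -> Span {
        Span { start: self.start.min(other.start), end: self.end.max(other.end) }
    }
}

fn main() {
    let src = "with pkgs; body";
    let with_kw = Span { start: 0, end: 4 };  // `with` keyword -> focus
    let semi = Span { start: 9, end: 10 };    // `;` token
    let header = with_kw.cover(semi);         // -> full range
    assert_eq!(&src[header.start..header.end], "with pkgs;");
    println!("focus = {:?}, full = {:?}", with_kw, header);
}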
goto_definition.rs
use super::NavigationTarget; use crate::def::{AstPtr, Expr, Literal, ResolveResult}; use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath}; use nix_interop::FLAKE_FILE; use syntax::ast::{self, AstNode}; use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken}; #[derive(Debug, Clone, PartialEq, Eq)] pub enum GotoDefinitionResult { Path(VfsPath), Targets(Vec<NavigationTarget>), } pub(crate) fn goto_definition( db: &dyn DefDatabase, FilePos { file_id, pos }: FilePos, ) -> Option<GotoDefinitionResult> { let parse = db.parse(file_id); let tok = best_token_at_offset(&parse.syntax_node(), pos)?; // Special case for goto flake inputs. if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) { return Some(ret); } let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Ref(n) => Some(AstPtr::new(n.syntax())), ast::Name(n) => Some(AstPtr::new(n.syntax())), ast::Literal(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let source_map = db.source_map(file_id); let expr_id = source_map.expr_for_node(ptr)?; // Special case for goto-path. if tok.kind() == SyntaxKind::PATH { let module = db.module(file_id); let Expr::Literal(Literal::Path(path)) = &module[expr_id] else { return None; }; let path = path.resolve(db)?; return Some(GotoDefinitionResult::Path(path)); } let name_res = db.name_resolution(file_id); let targets = match name_res.get(expr_id)? { &ResolveResult::Definition(name) => source_map .nodes_for_name(name) .filter_map(|ptr| { let name_node = ptr.to_node(&parse.syntax_node()); let full_node = name_node.ancestors().find(|n| { matches!( n.kind(), SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT ) })?; Some(NavigationTarget { file_id, focus_range: name_node.text_range(), full_range: full_node.text_range(), }) }) .collect(), ResolveResult::WithExprs(withs) => { withs .iter() .filter_map(|&with_expr| { // with expr; body // ^--^ focus // ^--------^ full let with_node = source_map .node_for_expr(with_expr) .expect("WithExprs must be valid") .to_node(&parse.syntax_node()); let with_node = ast::With::cast(with_node).expect("WithExprs must be valid"); let with_token_range = with_node.with_token()?.text_range(); let with_header_end = with_node .semicolon_token() .map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range()); let with_header = with_token_range.cover(with_header_end); Some(NavigationTarget { file_id, focus_range: with_token_range, full_range: with_header, }) }) .collect() } // Currently builtin names cannot "goto-definition". ResolveResult::Builtin(_) => return None, }; Some(GotoDefinitionResult::Targets(targets)) } fn goto_flake_input( db: &dyn DefDatabase, file: FileId, tok: SyntaxToken, ) -> Option<GotoDefinitionResult> { let module_kind = db.module_kind(file); let ModuleKind::FlakeNix { explicit_inputs, param_inputs, .. } = &*module_kind else { return None; }; let flake_info = db.source_root_flake_info(db.file_source_root(file))?; let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Attr(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let module = db.module(file); let source_map = db.source_map(file); let name_id = source_map.name_for_node(ptr)?; let name_str = &*module[name_id].text; if explicit_inputs.get(name_str) == Some(&name_id) || param_inputs.get(name_str) == Some(&name_id) { let target = flake_info .input_store_paths .get(name_str)? 
.join(FLAKE_FILE)?; return Some(GotoDefinitionResult::Path(target)); } None } #[cfg(test)] mod tests { use super::*; use crate::base::SourceDatabase; use crate::tests::TestDB; use expect_test::{expect, Expect}; #[track_caller] fn check_no(fixture: &str) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); assert_eq!(goto_definition(&db, f[0]), None); } #[track_caller] fn
(fixture: &str, expect: Expect) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); let mut got = match goto_definition(&db, f[0]).expect("No definition") { GotoDefinitionResult::Path(path) => format!("file://{}", path.display()), GotoDefinitionResult::Targets(targets) => { assert!(!targets.is_empty()); targets .into_iter() .map(|target| { assert!(target.full_range.contains_range(target.focus_range)); let src = db.file_content(target.file_id); let mut full = src[target.full_range].to_owned(); let relative_focus = target.focus_range - target.full_range.start(); full.insert(relative_focus.end().into(), '>'); full.insert(relative_focus.start().into(), '<'); full }) .collect::<Vec<_>>() .join("\n") } }; // Prettify. if got.contains('\n') { got += "\n"; } expect.assert_eq(&got); } #[test] fn not_found() { check_no("$0a"); check_no("b: $0a"); } #[test] fn invalid_position() { check_no("1 $0+ 2"); check_no("wi$0th 1; 2"); } #[test] fn lambda_param() { check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]); check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]); check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]); check("a: ({ x? $0a }@a: a) 1", expect!["{ x? a }@<a>: a"]); check("a: ({ x? $0x }@a: a) 1", expect!["{ <x>? x }@a: a"]); } #[test] fn with_env() { check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]); check( "with 1; let a = 1; in with 2; $0b", expect![[r#" <with> 2; <with> 1; "#]], ); } #[test] fn bindings() { check( "let a = a; in rec { inherit a; b = $0a; }", expect!["inherit <a>;"], ); check( "let a = a; in rec { inherit $0a; b = a; }", expect!["<a> = a;"], ); check( "let a = $0a; in rec { inherit a; b = a; }", expect!["<a> = a;"], ); } #[test] fn left_and_right() { check("let a = 1; in $0a ", expect!["<a> = 1;"]); check("let a = 1; in a$0 ", expect!["<a> = 1;"]); check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]); check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]); } #[test] fn merged_binding() { check( "let a.a = 1; a.b = 2; a = { c = 3; }; in $0a", expect![[r#" <a>.a = 1; <a>.b = 2; <a> = { c = 3; }; "#]], ); check( "rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }", expect![[r#" <a> = { a = 1; }; <a> = { a = 2; }; "#]], ); } #[test] fn builtin() { check("let true = 1; in $0true && false", expect!["<true> = 1;"]); check_no("let true = 1; in true && $0false"); } #[test] fn path() { check("1 + $0./.", expect!["file:///"]); check( " #- /default.nix import $0./bar.nix #- /bar.nix hello ", expect!["file:///bar.nix"], ); } #[test] fn flake_input() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs.$0nixpkgs.url = "github:NixOS/nixpkgs"; inputs.nix.url = "github:NixOS/nix"; output = {... }: { }; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // Flake input in string form. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs = { nixpkgs = { url = "github:NixOS/nixpkgs"; }; "n$0ix" = { url = "github:NixOS/nix"; }; }; output = {... }: { }; } "#, expect!["file:///nix/store/oooo/flake.nix"], ); // Not a flake input. check_no( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; inputs'.$0nixpkgs.no = 42; } "#, ); // Not a flake input. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; outputs = { nixpkgs,... 
}: $0nixpkgs; "#, expect!["{ <nixpkgs>,... }: nixpkgs"], ); } #[test] fn flake_output_pat() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { outputs = { $0nixpkgs,... }: nixpkgs; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // `self` in parameter is no an input. check_no( r#" #- /flake.nix input:self=/nix/store/eeee { outputs = { $0self,... }: self; } "#, ); } }
check
identifier_name
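`goto_flake_input` in this file resolves an attribute name to an input's store path only when that exact name node is registered as an explicit or pattern-parameter input, then appends the flake file name. A standalone approximation with plain std types standing in for the salsa database handles (the `u32` name ids and the literal "flake.nix" are assumptions for illustration):

use std::collections::HashMap;
use std::path::PathBuf;

fn flake_input_target(
    name: &str,
    explicit_inputs: &HashMap<String, u32>,
    param_inputs: &HashMap<String, u32>,
    input_store_paths: &HashMap<String, PathBuf>,
    name_id: u32,
) -> Option<PathBuf> {
    // Only resolve when this exact name node is registered as an input.
    if explicit_inputs.get(name) == Some(&name_id)
        || param_inputs.get(name) == Some(&name_id)
    {
        // "flake.nix" plays the role of nix_interop::FLAKE_FILE.
        return Some(input_store_paths.get(name)?.join("flake.nix"));
    }
    None
}

fn main() {
    let explicit = HashMap::from([("nixpkgs".to_owned(), 1u32)]);
    let params = HashMap::new();
    let stores = HashMap::from([("nixpkgs".to_owned(), PathBuf::from("/nix/store/eeee"))]);
    let target = flake_input_target("nixpkgs", &explicit, &params, &stores, 1);
    assert_eq!(target, Some(PathBuf::from("/nix/store/eeee/flake.nix")));
}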
goto_definition.rs
use super::NavigationTarget; use crate::def::{AstPtr, Expr, Literal, ResolveResult}; use crate::{DefDatabase, FileId, FilePos, ModuleKind, VfsPath}; use nix_interop::FLAKE_FILE; use syntax::ast::{self, AstNode}; use syntax::{best_token_at_offset, match_ast, SyntaxKind, SyntaxToken}; #[derive(Debug, Clone, PartialEq, Eq)] pub enum GotoDefinitionResult { Path(VfsPath), Targets(Vec<NavigationTarget>), } pub(crate) fn goto_definition( db: &dyn DefDatabase, FilePos { file_id, pos }: FilePos, ) -> Option<GotoDefinitionResult> { let parse = db.parse(file_id); let tok = best_token_at_offset(&parse.syntax_node(), pos)?; // Special case for goto flake inputs. if let Some(ret) = goto_flake_input(db, file_id, tok.clone()) { return Some(ret); } let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Ref(n) => Some(AstPtr::new(n.syntax())), ast::Name(n) => Some(AstPtr::new(n.syntax())), ast::Literal(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let source_map = db.source_map(file_id); let expr_id = source_map.expr_for_node(ptr)?; // Special case for goto-path. if tok.kind() == SyntaxKind::PATH { let module = db.module(file_id); let Expr::Literal(Literal::Path(path)) = &module[expr_id] else { return None; }; let path = path.resolve(db)?; return Some(GotoDefinitionResult::Path(path)); } let name_res = db.name_resolution(file_id); let targets = match name_res.get(expr_id)? { &ResolveResult::Definition(name) => source_map .nodes_for_name(name) .filter_map(|ptr| { let name_node = ptr.to_node(&parse.syntax_node()); let full_node = name_node.ancestors().find(|n| { matches!( n.kind(), SyntaxKind::LAMBDA | SyntaxKind::ATTR_PATH_VALUE | SyntaxKind::INHERIT ) })?; Some(NavigationTarget { file_id, focus_range: name_node.text_range(), full_range: full_node.text_range(), }) }) .collect(), ResolveResult::WithExprs(withs) => { withs .iter() .filter_map(|&with_expr| { // with expr; body // ^--^ focus // ^--------^ full let with_node = source_map .node_for_expr(with_expr) .expect("WithExprs must be valid") .to_node(&parse.syntax_node()); let with_node = ast::With::cast(with_node).expect("WithExprs must be valid"); let with_token_range = with_node.with_token()?.text_range(); let with_header_end = with_node .semicolon_token() .map_or_else(|| with_node.syntax().text_range(), |tok| tok.text_range()); let with_header = with_token_range.cover(with_header_end); Some(NavigationTarget { file_id, focus_range: with_token_range, full_range: with_header, }) }) .collect() } // Currently builtin names cannot "goto-definition". ResolveResult::Builtin(_) => return None, }; Some(GotoDefinitionResult::Targets(targets)) } fn goto_flake_input( db: &dyn DefDatabase, file: FileId, tok: SyntaxToken, ) -> Option<GotoDefinitionResult> { let module_kind = db.module_kind(file); let ModuleKind::FlakeNix { explicit_inputs, param_inputs, .. } = &*module_kind else { return None; }; let flake_info = db.source_root_flake_info(db.file_source_root(file))?; let ptr = tok.parent_ancestors().find_map(|node| { match_ast! { match node { ast::Attr(n) => Some(AstPtr::new(n.syntax())), _ => None, } } })?; let module = db.module(file); let source_map = db.source_map(file); let name_id = source_map.name_for_node(ptr)?; let name_str = &*module[name_id].text; if explicit_inputs.get(name_str) == Some(&name_id) || param_inputs.get(name_str) == Some(&name_id) { let target = flake_info .input_store_paths .get(name_str)? 
.join(FLAKE_FILE)?; return Some(GotoDefinitionResult::Path(target)); } None } #[cfg(test)] mod tests { use super::*; use crate::base::SourceDatabase; use crate::tests::TestDB; use expect_test::{expect, Expect}; #[track_caller] fn check_no(fixture: &str) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); assert_eq!(goto_definition(&db, f[0]), None); } #[track_caller] fn check(fixture: &str, expect: Expect) { let (db, f) = TestDB::from_fixture(fixture).unwrap(); assert_eq!(f.markers().len(), 1, "Missing markers"); let mut got = match goto_definition(&db, f[0]).expect("No definition") { GotoDefinitionResult::Path(path) => format!("file://{}", path.display()), GotoDefinitionResult::Targets(targets) => { assert!(!targets.is_empty()); targets .into_iter() .map(|target| { assert!(target.full_range.contains_range(target.focus_range)); let src = db.file_content(target.file_id); let mut full = src[target.full_range].to_owned(); let relative_focus = target.focus_range - target.full_range.start(); full.insert(relative_focus.end().into(), '>'); full.insert(relative_focus.start().into(), '<'); full }) .collect::<Vec<_>>() .join("\n") } }; // Prettify. if got.contains('\n') { got += "\n"; } expect.assert_eq(&got); } #[test] fn not_found() { check_no("$0a"); check_no("b: $0a"); } #[test] fn invalid_position() { check_no("1 $0+ 2"); check_no("wi$0th 1; 2"); } #[test] fn lambda_param() { check("a: (a: (a $0a)) 1", expect!["<a>: (a a)"]); check("x: (a: (a $0x)) 1", expect!["<x>: (a: (a x)) 1"]); check("a: (a@{ x }: (a $0a)) 1", expect!["<a>@{ x }: (a a)"]); check("a: ({ x? $0a }@a: a) 1", expect!["{ x? a }@<a>: a"]); check("a: ({ x? $0x }@a: a) 1", expect!["{ <x>? x }@a: a"]); } #[test] fn with_env() { check("with 1; let a = 1; in with 2; $0a", expect!["<a> = 1;"]); check( "with 1; let a = 1; in with 2; $0b", expect![[r#" <with> 2; <with> 1; "#]], ); } #[test] fn bindings() { check( "let a = a; in rec { inherit a; b = $0a; }",
expect!["<a> = a;"], ); check( "let a = $0a; in rec { inherit a; b = a; }", expect!["<a> = a;"], ); } #[test] fn left_and_right() { check("let a = 1; in $0a ", expect!["<a> = 1;"]); check("let a = 1; in a$0 ", expect!["<a> = 1;"]); check("let a = 1; in 0+$0a+0", expect!["<a> = 1;"]); check("let a = 1; in 0+a$0+0", expect!["<a> = 1;"]); } #[test] fn merged_binding() { check( "let a.a = 1; a.b = 2; a = { c = 3; }; in $0a", expect![[r#" <a>.a = 1; <a>.b = 2; <a> = { c = 3; }; "#]], ); check( "rec { b = $0a; a = { a = 1; }; a = { a = 2; }; }", expect![[r#" <a> = { a = 1; }; <a> = { a = 2; }; "#]], ); } #[test] fn builtin() { check("let true = 1; in $0true && false", expect!["<true> = 1;"]); check_no("let true = 1; in true && $0false"); } #[test] fn path() { check("1 + $0./.", expect!["file:///"]); check( " #- /default.nix import $0./bar.nix #- /bar.nix hello ", expect!["file:///bar.nix"], ); } #[test] fn flake_input() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs.$0nixpkgs.url = "github:NixOS/nixpkgs"; inputs.nix.url = "github:NixOS/nix"; output = {... }: { }; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // Flake input in string form. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee input:nix=/nix/store/oooo { description = "Hello flake"; inputs = { nixpkgs = { url = "github:NixOS/nixpkgs"; }; "n$0ix" = { url = "github:NixOS/nix"; }; }; output = {... }: { }; } "#, expect!["file:///nix/store/oooo/flake.nix"], ); // Not a flake input. check_no( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; inputs'.$0nixpkgs.no = 42; } "#, ); // Not a flake input. check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { description = "Hello flake"; inputs.nixpkgs.url = "github:NixOS/nixpkgs"; outputs = { nixpkgs,... }: $0nixpkgs; "#, expect!["{ <nixpkgs>,... }: nixpkgs"], ); } #[test] fn flake_output_pat() { check( r#" #- /flake.nix input:nixpkgs=/nix/store/eeee { outputs = { $0nixpkgs,... }: nixpkgs; } "#, expect!["file:///nix/store/eeee/flake.nix"], ); // `self` in parameter is no an input. check_no( r#" #- /flake.nix input:self=/nix/store/eeee { outputs = { $0self,... }: self; } "#, ); } }
expect!["inherit <a>;"], ); check( "let a = a; in rec { inherit $0a; b = a; }",
random_line_split
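The fixtures above mark the cursor position with `$0`, which the TestDB fixture parser strips while recording its offset. A minimal sketch of the single-marker case (the real parser also handles multiple files and multiple markers, so this is a simplification):

fn extract_marker(fixture: &str) -> Option<(String, usize)> {
    let pos = fixture.find("$0")?;
    let mut cleaned = fixture.to_owned();
    // Remove the two marker bytes, keeping the recorded cursor offset.
    cleaned.replace_range(pos..pos + 2, "");
    Some((cleaned, pos))
}

fn main() {
    let (src, pos) = extract_marker("let a = 1; in $0a").unwrap();
    assert_eq!(src, "let a = 1; in a");
    assert_eq!(pos, 14); // byte offset of the cursor after stripping
}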
amqp.rs
// Copyright 2020-2021, The Tremor Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #![cfg_attr(coverage, no_coverage)] //! # AMQP Offramp //! //! The `amqp` offramp allows producing events to an amqp broker. use crate::channel::{bounded, Receiver}; use crate::sink::prelude::*; use halfbrown::HashMap; use lapin::{ options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel, Connection, ConnectionProperties, PromiseChain, }; use serde::Deserialize; use std::{fmt, time::Instant}; use tremor_common::url::TremorUrl; #[derive(Deserialize, Debug, Clone)] pub(crate) struct Config { pub(crate) amqp_addr: String, #[serde(default = "Default::default")] routing_key: String, #[serde(default = "Default::default")] exchange: String, publish_options: BasicPublishOptions, // headers to use for the messages #[serde(default = "Default::default")] pub(crate) headers: HashMap<String, Vec<String>>, } impl Config { async fn channel(&self) -> PromiseChain<Channel> { match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await { Ok(connection) => connection.create_channel(), Err(error) => PromiseChain::new_with_data(Err(error)), } } } impl ConfigImpl for Config {} /// Amqp offramp connector pub(crate) struct Amqp { sink_url: TremorUrl, config: Config, postprocessors: Postprocessors, reply_channel: Sender<sink::Reply>, channel: Option<Channel>, error_rx: Receiver<()>, error_tx: Sender<()>, } impl fmt::Debug for Amqp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[Sink::{}] RoutingKey: {}", &self.sink_url, self.config.routing_key ) } } pub(crate) struct Builder {} impl offramp::Builder for Builder { fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> { if let Some(config) = config { let config: Config = Config::new(config)?; let (dummy_tx, _) = bounded(1); let (error_tx, error_rx) = bounded(qsize()); Ok(SinkManager::new_box(Amqp { sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy config, postprocessors: vec![], reply_channel: dummy_tx, channel: None, error_rx, error_tx, })) } else { Err("Amqp offramp requires a config".into()) } } } impl Amqp { async fn handle_channel(&mut self) -> Result<Option<&Channel>> { while let Ok(()) = self.error_rx.try_recv() { self.channel = None; } if self.channel.is_none() { match self.config.channel().await.await { Ok(channel) => self.channel = Some(channel), Err(error) => return Err(error.into()), } } return Ok(self.channel.as_ref()); } } #[async_trait::async_trait] impl Sink for Amqp { async fn on_event( &mut self, _input: &str, codec: &mut dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, event: Event, ) -> ResultVec { self.handle_channel().await?; let ingest_ns = event.ingest_ns; let processing_start = Instant::now(); /* // evaluate here to avoid borrowing again while borrowed. 
let config_reply = self.config.reply.as_deref(); let op_meta = &event.op_meta; self.merged_meta.merge(op_meta.clone()); */ let insight_event = event.insight_ack(); if let Some(channel) = &mut self.channel { for (value, _) in event.value_meta_iter() { let encoded = codec.encode(value)?; let processed = postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?; //let headers = meta.get("nats").and_then(|v| v.get_object("headers")); for payload in processed { /* // prepare message reply let message_reply = reply.or(config_reply); */ // prepare message headers let properties = BasicProperties::default(); /* let mut key_val: Vec<(&str, &str)> = Vec::with_capacity( self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(), ); for (key, val) in &self.config.headers { for ele in val.iter() { key_val.push((key.as_str(), ele.as_str())); } } if let Some(headers) = headers { for (key, val) in headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?))) { for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) { key_val.push((key, ele)); } } } let message_headers = if key_val.is_empty() { None } else { Some(Headers::from_iter(key_val)) }; */ let publish_result = channel .basic_publish( self.config.exchange.as_str(), self.config.routing_key.as_str(), self.config.publish_options, payload, properties, ) .await? .await?; match publish_result { Confirmation::NotRequested | Confirmation::Ack(_) => { if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Ack; // we hopefully enver wait more then u64... if we do we got // bigger problems #[allow(clippy::cast_possible_truncation)] let time = processing_start.elapsed().as_millis() as u64; let mut m = Object::with_capacity(1); m.insert("time".into(), time.into()); insight.data = (Value::null(), m).into(); self.reply_channel .send(sink::Reply::Insight(insight.clone())) .await?; } } Confirmation::Nack(err) => { if let Some(e) = err { error!( "[Sink::{}] failed to send message: {} {}", &self.sink_url, e.reply_code, e.reply_text ); } else { error!( "[Sink::{}] failed to send message: unknown error", &self.sink_url ); } if self.error_tx.send(()).await.is_err() { error!( "[Sink::{}] Error notifying the system about amqp error", &self.sink_url ); } if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Fail; self.reply_channel .send(sink::Reply::Response(ERR, insight)) .await?; } } } } } } Ok(Vec::new()) } fn default_codec(&self) -> &str { "json" } #[allow(clippy::too_many_arguments)] async fn init( &mut self, _sink_uid: u64, sink_url: &TremorUrl, _codec: &dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, processors: Processors<'_>, _is_linked: bool, reply_channel: Sender<Reply>, ) -> Result<()> { self.handle_channel().await?; self.postprocessors = make_postprocessors(processors.post)?; self.reply_channel = reply_channel; self.sink_url = sink_url.clone(); Ok(()) } async fn
(&mut self, _signal: Event) -> ResultVec { //self.drain_fatal_errors()?; Ok(Vec::new()) } fn is_active(&self) -> bool { true } fn auto_ack(&self) -> bool { false } async fn terminate(&mut self) { if let Some(channel) = self.channel.as_ref() { if let Err(e) = channel.close(0, "terminating sink").await { error!("[Sink] Failed to close channel: {}", e); } if let Err(e) = channel.wait_for_confirms().await { error!("[Sink] Failed to close channel: {}", e); }; } /*if self.channel.in_flight_count() > 0 { // wait a second in order to flush messages. let wait_secs = 1; info!( "[Sink::{}] Flushing messages. Waiting for {} seconds.", wait_secs, &self.sink_url ); self.channel.flush(Duration::from_secs(1)); }*/ info!("[Sink::{}] Terminating.", &self.sink_url); } }
on_signal
identifier_name
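`on_event` above maps lapin's publish confirmations onto circuit-breaker actions: acknowledge on NotRequested/Ack, fail on Nack. A standalone sketch of that mapping with local enums mirroring the names (an assumption; the real types live in lapin's publisher_confirm module and tremor's sink prelude):

#[derive(Debug)]
enum Confirmation { NotRequested, Ack, Nack(Option<String>) }

#[derive(Debug, PartialEq)]
enum CbAction { Ack, Fail }

fn cb_for(confirm: &Confirmation) -> CbAction {
    match confirm {
        // Broker accepted (or confirms were never requested): ack upstream
        // so transactional events can be committed.
        Confirmation::NotRequested | Confirmation::Ack => CbAction::Ack,
        // Broker refused the message: fail the event so it can be retried.
        Confirmation::Nack(_) => CbAction::Fail,
    }
}

fn main() {
    assert_eq!(cb_for(&Confirmation::Ack), CbAction::Ack);
    assert_eq!(cb_for(&Confirmation::Nack(None)), CbAction::Fail);
}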
amqp.rs
// Copyright 2020-2021, The Tremor Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #![cfg_attr(coverage, no_coverage)] //! # AMQP Offramp //! //! The `amqp` offramp allows producing events to an amqp broker. use crate::channel::{bounded, Receiver}; use crate::sink::prelude::*; use halfbrown::HashMap; use lapin::{ options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel, Connection, ConnectionProperties, PromiseChain, }; use serde::Deserialize; use std::{fmt, time::Instant}; use tremor_common::url::TremorUrl; #[derive(Deserialize, Debug, Clone)] pub(crate) struct Config { pub(crate) amqp_addr: String, #[serde(default = "Default::default")] routing_key: String, #[serde(default = "Default::default")] exchange: String, publish_options: BasicPublishOptions, // headers to use for the messages #[serde(default = "Default::default")] pub(crate) headers: HashMap<String, Vec<String>>, } impl Config { async fn channel(&self) -> PromiseChain<Channel> { match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await { Ok(connection) => connection.create_channel(), Err(error) => PromiseChain::new_with_data(Err(error)),
impl ConfigImpl for Config {} /// Amqp offramp connector pub(crate) struct Amqp { sink_url: TremorUrl, config: Config, postprocessors: Postprocessors, reply_channel: Sender<sink::Reply>, channel: Option<Channel>, error_rx: Receiver<()>, error_tx: Sender<()>, } impl fmt::Debug for Amqp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[Sink::{}] RoutingKey: {}", &self.sink_url, self.config.routing_key ) } } pub(crate) struct Builder {} impl offramp::Builder for Builder { fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> { if let Some(config) = config { let config: Config = Config::new(config)?; let (dummy_tx, _) = bounded(1); let (error_tx, error_rx) = bounded(qsize()); Ok(SinkManager::new_box(Amqp { sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy config, postprocessors: vec![], reply_channel: dummy_tx, channel: None, error_rx, error_tx, })) } else { Err("Amqp offramp requires a config".into()) } } } impl Amqp { async fn handle_channel(&mut self) -> Result<Option<&Channel>> { while let Ok(()) = self.error_rx.try_recv() { self.channel = None; } if self.channel.is_none() { match self.config.channel().await.await { Ok(channel) => self.channel = Some(channel), Err(error) => return Err(error.into()), } } return Ok(self.channel.as_ref()); } } #[async_trait::async_trait] impl Sink for Amqp { async fn on_event( &mut self, _input: &str, codec: &mut dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, event: Event, ) -> ResultVec { self.handle_channel().await?; let ingest_ns = event.ingest_ns; let processing_start = Instant::now(); /* // evaluate here to avoid borrowing again while borrowed. let config_reply = self.config.reply.as_deref(); let op_meta = &event.op_meta; self.merged_meta.merge(op_meta.clone()); */ let insight_event = event.insight_ack(); if let Some(channel) = &mut self.channel { for (value, _) in event.value_meta_iter() { let encoded = codec.encode(value)?; let processed = postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?; //let headers = meta.get("nats").and_then(|v| v.get_object("headers")); for payload in processed { /* // prepare message reply let message_reply = reply.or(config_reply); */ // prepare message headers let properties = BasicProperties::default(); /* let mut key_val: Vec<(&str, &str)> = Vec::with_capacity( self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(), ); for (key, val) in &self.config.headers { for ele in val.iter() { key_val.push((key.as_str(), ele.as_str())); } } if let Some(headers) = headers { for (key, val) in headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?))) { for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) { key_val.push((key, ele)); } } } let message_headers = if key_val.is_empty() { None } else { Some(Headers::from_iter(key_val)) }; */ let publish_result = channel .basic_publish( self.config.exchange.as_str(), self.config.routing_key.as_str(), self.config.publish_options, payload, properties, ) .await? .await?; match publish_result { Confirmation::NotRequested | Confirmation::Ack(_) => { if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Ack; // we hopefully enver wait more then u64... 
if we do we got // bigger problems #[allow(clippy::cast_possible_truncation)] let time = processing_start.elapsed().as_millis() as u64; let mut m = Object::with_capacity(1); m.insert("time".into(), time.into()); insight.data = (Value::null(), m).into(); self.reply_channel .send(sink::Reply::Insight(insight.clone())) .await?; } } Confirmation::Nack(err) => { if let Some(e) = err { error!( "[Sink::{}] failed to send message: {} {}", &self.sink_url, e.reply_code, e.reply_text ); } else { error!( "[Sink::{}] failed to send message: unknown error", &self.sink_url ); } if self.error_tx.send(()).await.is_err() { error!( "[Sink::{}] Error notifying the system about amqp error", &self.sink_url ); } if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Fail; self.reply_channel .send(sink::Reply::Response(ERR, insight)) .await?; } } } } } } Ok(Vec::new()) } fn default_codec(&self) -> &str { "json" } #[allow(clippy::too_many_arguments)] async fn init( &mut self, _sink_uid: u64, sink_url: &TremorUrl, _codec: &dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, processors: Processors<'_>, _is_linked: bool, reply_channel: Sender<Reply>, ) -> Result<()> { self.handle_channel().await?; self.postprocessors = make_postprocessors(processors.post)?; self.reply_channel = reply_channel; self.sink_url = sink_url.clone(); Ok(()) } async fn on_signal(&mut self, _signal: Event) -> ResultVec { //self.drain_fatal_errors()?; Ok(Vec::new()) } fn is_active(&self) -> bool { true } fn auto_ack(&self) -> bool { false } async fn terminate(&mut self) { if let Some(channel) = self.channel.as_ref() { if let Err(e) = channel.close(0, "terminating sink").await { error!("[Sink] Failed to close channel: {}", e); } if let Err(e) = channel.wait_for_confirms().await { error!("[Sink] Failed to close channel: {}", e); }; } /*if self.channel.in_flight_count() > 0 { // wait a second in order to flush messages. let wait_secs = 1; info!( "[Sink::{}] Flushing messages. Waiting for {} seconds.", wait_secs, &self.sink_url ); self.channel.flush(Duration::from_secs(1)); }*/ info!("[Sink::{}] Terminating.", &self.sink_url); } }
} } }
random_line_split
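The `Config` struct above leans on serde defaults so that only `amqp_addr` is mandatory in the offramp configuration. A runnable sketch of the same pattern, assuming `serde` (with the derive feature) and `serde_json` as dependencies and using the `#[serde(default)]` shorthand:

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Config {
    amqp_addr: String,
    #[serde(default)]
    routing_key: String, // falls back to "" when absent
    #[serde(default)]
    exchange: String,    // falls back to "" when absent
}

fn main() {
    let cfg: Config =
        serde_json::from_str(r#"{ "amqp_addr": "amqp://localhost:5672" }"#).unwrap();
    assert_eq!(cfg.routing_key, "");
    assert_eq!(cfg.exchange, "");
    println!("{:?}", cfg);
}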
amqp.rs
// Copyright 2020-2021, The Tremor Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #![cfg_attr(coverage, no_coverage)] //! # AMQP Offramp //! //! The `amqp` offramp allows producing events to an amqp broker. use crate::channel::{bounded, Receiver}; use crate::sink::prelude::*; use halfbrown::HashMap; use lapin::{ options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel, Connection, ConnectionProperties, PromiseChain, }; use serde::Deserialize; use std::{fmt, time::Instant}; use tremor_common::url::TremorUrl; #[derive(Deserialize, Debug, Clone)] pub(crate) struct Config { pub(crate) amqp_addr: String, #[serde(default = "Default::default")] routing_key: String, #[serde(default = "Default::default")] exchange: String, publish_options: BasicPublishOptions, // headers to use for the messages #[serde(default = "Default::default")] pub(crate) headers: HashMap<String, Vec<String>>, } impl Config { async fn channel(&self) -> PromiseChain<Channel> { match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await { Ok(connection) => connection.create_channel(), Err(error) => PromiseChain::new_with_data(Err(error)), } } } impl ConfigImpl for Config {} /// Amqp offramp connector pub(crate) struct Amqp { sink_url: TremorUrl, config: Config, postprocessors: Postprocessors, reply_channel: Sender<sink::Reply>, channel: Option<Channel>, error_rx: Receiver<()>, error_tx: Sender<()>, } impl fmt::Debug for Amqp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[Sink::{}] RoutingKey: {}", &self.sink_url, self.config.routing_key ) } } pub(crate) struct Builder {} impl offramp::Builder for Builder { fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> { if let Some(config) = config { let config: Config = Config::new(config)?; let (dummy_tx, _) = bounded(1); let (error_tx, error_rx) = bounded(qsize()); Ok(SinkManager::new_box(Amqp { sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy config, postprocessors: vec![], reply_channel: dummy_tx, channel: None, error_rx, error_tx, })) } else { Err("Amqp offramp requires a config".into()) } } } impl Amqp { async fn handle_channel(&mut self) -> Result<Option<&Channel>>
} #[async_trait::async_trait] impl Sink for Amqp { async fn on_event( &mut self, _input: &str, codec: &mut dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, event: Event, ) -> ResultVec { self.handle_channel().await?; let ingest_ns = event.ingest_ns; let processing_start = Instant::now(); /* // evaluate here to avoid borrowing again while borrowed. let config_reply = self.config.reply.as_deref(); let op_meta = &event.op_meta; self.merged_meta.merge(op_meta.clone()); */ let insight_event = event.insight_ack(); if let Some(channel) = &mut self.channel { for (value, _) in event.value_meta_iter() { let encoded = codec.encode(value)?; let processed = postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?; //let headers = meta.get("nats").and_then(|v| v.get_object("headers")); for payload in processed { /* // prepare message reply let message_reply = reply.or(config_reply); */ // prepare message headers let properties = BasicProperties::default(); /* let mut key_val: Vec<(&str, &str)> = Vec::with_capacity( self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(), ); for (key, val) in &self.config.headers { for ele in val.iter() { key_val.push((key.as_str(), ele.as_str())); } } if let Some(headers) = headers { for (key, val) in headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?))) { for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) { key_val.push((key, ele)); } } } let message_headers = if key_val.is_empty() { None } else { Some(Headers::from_iter(key_val)) }; */ let publish_result = channel .basic_publish( self.config.exchange.as_str(), self.config.routing_key.as_str(), self.config.publish_options, payload, properties, ) .await? .await?; match publish_result { Confirmation::NotRequested | Confirmation::Ack(_) => { if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Ack; // we hopefully enver wait more then u64... 
if we do we got // bigger problems #[allow(clippy::cast_possible_truncation)] let time = processing_start.elapsed().as_millis() as u64; let mut m = Object::with_capacity(1); m.insert("time".into(), time.into()); insight.data = (Value::null(), m).into(); self.reply_channel .send(sink::Reply::Insight(insight.clone())) .await?; } } Confirmation::Nack(err) => { if let Some(e) = err { error!( "[Sink::{}] failed to send message: {} {}", &self.sink_url, e.reply_code, e.reply_text ); } else { error!( "[Sink::{}] failed to send message: unknown error", &self.sink_url ); } if self.error_tx.send(()).await.is_err() { error!( "[Sink::{}] Error notifying the system about amqp error", &self.sink_url ); } if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Fail; self.reply_channel .send(sink::Reply::Response(ERR, insight)) .await?; } } } } } } Ok(Vec::new()) } fn default_codec(&self) -> &str { "json" } #[allow(clippy::too_many_arguments)] async fn init( &mut self, _sink_uid: u64, sink_url: &TremorUrl, _codec: &dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, processors: Processors<'_>, _is_linked: bool, reply_channel: Sender<Reply>, ) -> Result<()> { self.handle_channel().await?; self.postprocessors = make_postprocessors(processors.post)?; self.reply_channel = reply_channel; self.sink_url = sink_url.clone(); Ok(()) } async fn on_signal(&mut self, _signal: Event) -> ResultVec { //self.drain_fatal_errors()?; Ok(Vec::new()) } fn is_active(&self) -> bool { true } fn auto_ack(&self) -> bool { false } async fn terminate(&mut self) { if let Some(channel) = self.channel.as_ref() { if let Err(e) = channel.close(0, "terminating sink").await { error!("[Sink] Failed to close channel: {}", e); } if let Err(e) = channel.wait_for_confirms().await { error!("[Sink] Failed to close channel: {}", e); }; } /*if self.channel.in_flight_count() > 0 { // wait a second in order to flush messages. let wait_secs = 1; info!( "[Sink::{}] Flushing messages. Waiting for {} seconds.", wait_secs, &self.sink_url ); self.channel.flush(Duration::from_secs(1)); }*/ info!("[Sink::{}] Terminating.", &self.sink_url); } }
{ while let Ok(()) = self.error_rx.try_recv() { self.channel = None; } if self.channel.is_none() { match self.config.channel().await.await { Ok(channel) => self.channel = Some(channel), Err(error) => return Err(error.into()), } } return Ok(self.channel.as_ref()); }
identifier_body
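`handle_channel` above implements a lazy reconnect: drain any queued error notifications, drop the stale channel, and rebuild it only when it is next needed. A synchronous sketch of that pattern, with a String standing in for lapin::Channel and std's mpsc standing in for the async error channel (both assumptions for illustration):

struct Sink {
    channel: Option<String>,               // stands in for lapin::Channel
    errors: std::sync::mpsc::Receiver<()>, // stands in for the async error_rx
}

impl Sink {
    fn handle_channel(&mut self) -> &str {
        // Any queued error notification invalidates the cached channel.
        while self.errors.try_recv().is_ok() {
            self.channel = None;
        }
        // Rebuild it lazily, only when it is actually needed again.
        self.channel.get_or_insert_with(|| "fresh-channel".to_owned())
    }
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    let mut sink = Sink { channel: Some("stale".to_owned()), errors: rx };
    tx.send(()).unwrap(); // simulate a broker error reported by the sink
    assert_eq!(sink.handle_channel(), "fresh-channel");
}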
amqp.rs
// Copyright 2020-2021, The Tremor Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #![cfg_attr(coverage, no_coverage)] //! # AMQP Offramp //! //! The `amqp` offramp allows producing events to an amqp broker. use crate::channel::{bounded, Receiver}; use crate::sink::prelude::*; use halfbrown::HashMap; use lapin::{ options::BasicPublishOptions, publisher_confirm::Confirmation, BasicProperties, Channel, Connection, ConnectionProperties, PromiseChain, }; use serde::Deserialize; use std::{fmt, time::Instant}; use tremor_common::url::TremorUrl; #[derive(Deserialize, Debug, Clone)] pub(crate) struct Config { pub(crate) amqp_addr: String, #[serde(default = "Default::default")] routing_key: String, #[serde(default = "Default::default")] exchange: String, publish_options: BasicPublishOptions, // headers to use for the messages #[serde(default = "Default::default")] pub(crate) headers: HashMap<String, Vec<String>>, } impl Config { async fn channel(&self) -> PromiseChain<Channel> { match Connection::connect(&self.amqp_addr, ConnectionProperties::default()).await { Ok(connection) => connection.create_channel(), Err(error) => PromiseChain::new_with_data(Err(error)), } } } impl ConfigImpl for Config {} /// Amqp offramp connector pub(crate) struct Amqp { sink_url: TremorUrl, config: Config, postprocessors: Postprocessors, reply_channel: Sender<sink::Reply>, channel: Option<Channel>, error_rx: Receiver<()>, error_tx: Sender<()>, } impl fmt::Debug for Amqp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "[Sink::{}] RoutingKey: {}", &self.sink_url, self.config.routing_key ) } } pub(crate) struct Builder {} impl offramp::Builder for Builder { fn from_config(&self, config: &Option<OpConfig>) -> Result<Box<dyn Offramp>> { if let Some(config) = config { let config: Config = Config::new(config)?; let (dummy_tx, _) = bounded(1); let (error_tx, error_rx) = bounded(qsize()); Ok(SinkManager::new_box(Amqp { sink_url: TremorUrl::from_offramp_id("amqp")?, // dummy config, postprocessors: vec![], reply_channel: dummy_tx, channel: None, error_rx, error_tx, })) } else { Err("Amqp offramp requires a config".into()) } } } impl Amqp { async fn handle_channel(&mut self) -> Result<Option<&Channel>> { while let Ok(()) = self.error_rx.try_recv() { self.channel = None; } if self.channel.is_none() { match self.config.channel().await.await { Ok(channel) => self.channel = Some(channel), Err(error) => return Err(error.into()), } } return Ok(self.channel.as_ref()); } } #[async_trait::async_trait] impl Sink for Amqp { async fn on_event( &mut self, _input: &str, codec: &mut dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, event: Event, ) -> ResultVec { self.handle_channel().await?; let ingest_ns = event.ingest_ns; let processing_start = Instant::now(); /* // evaluate here to avoid borrowing again while borrowed. 
let config_reply = self.config.reply.as_deref(); let op_meta = &event.op_meta; self.merged_meta.merge(op_meta.clone()); */ let insight_event = event.insight_ack(); if let Some(channel) = &mut self.channel { for (value, _) in event.value_meta_iter() { let encoded = codec.encode(value)?; let processed = postprocess(self.postprocessors.as_mut_slice(), ingest_ns, encoded)?; //let headers = meta.get("nats").and_then(|v| v.get_object("headers")); for payload in processed { /* // prepare message reply let message_reply = reply.or(config_reply); */ // prepare message headers let properties = BasicProperties::default(); /* let mut key_val: Vec<(&str, &str)> = Vec::with_capacity( self.config.headers.len() + headers.map(HashMap::len).unwrap_or_default(), ); for (key, val) in &self.config.headers { for ele in val.iter() { key_val.push((key.as_str(), ele.as_str())); } } if let Some(headers) = headers { for (key, val) in headers.iter().filter_map(|(k, v)| Some((k, v.as_array()?))) { for ele in val.iter().filter_map(value_trait::ValueAccess::as_str) { key_val.push((key, ele)); } } } let message_headers = if key_val.is_empty() { None } else { Some(Headers::from_iter(key_val)) }; */ let publish_result = channel .basic_publish( self.config.exchange.as_str(), self.config.routing_key.as_str(), self.config.publish_options, payload, properties, ) .await? .await?; match publish_result { Confirmation::NotRequested | Confirmation::Ack(_) =>
Confirmation::Nack(err) => { if let Some(e) = err { error!( "[Sink::{}] failed to send message: {} {}", &self.sink_url, e.reply_code, e.reply_text ); } else { error!( "[Sink::{}] failed to send message: unknown error", &self.sink_url ); } if self.error_tx.send(()).await.is_err() { error!( "[Sink::{}] Error notifying the system about amqp error", &self.sink_url ); } if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Fail; self.reply_channel .send(sink::Reply::Response(ERR, insight)) .await?; } } } } } } Ok(Vec::new()) } fn default_codec(&self) -> &str { "json" } #[allow(clippy::too_many_arguments)] async fn init( &mut self, _sink_uid: u64, sink_url: &TremorUrl, _codec: &dyn Codec, _codec_map: &HashMap<String, Box<dyn Codec>>, processors: Processors<'_>, _is_linked: bool, reply_channel: Sender<Reply>, ) -> Result<()> { self.handle_channel().await?; self.postprocessors = make_postprocessors(processors.post)?; self.reply_channel = reply_channel; self.sink_url = sink_url.clone(); Ok(()) } async fn on_signal(&mut self, _signal: Event) -> ResultVec { //self.drain_fatal_errors()?; Ok(Vec::new()) } fn is_active(&self) -> bool { true } fn auto_ack(&self) -> bool { false } async fn terminate(&mut self) { if let Some(channel) = self.channel.as_ref() { if let Err(e) = channel.close(0, "terminating sink").await { error!("[Sink] Failed to close channel: {}", e); } if let Err(e) = channel.wait_for_confirms().await { error!("[Sink] Failed to close channel: {}", e); }; } /*if self.channel.in_flight_count() > 0 { // wait a second in order to flush messages. let wait_secs = 1; info!( "[Sink::{}] Flushing messages. Waiting for {} seconds.", wait_secs, &self.sink_url ); self.channel.flush(Duration::from_secs(1)); }*/ info!("[Sink::{}] Terminating.", &self.sink_url); } }
{ if event.transactional { let mut insight = insight_event.clone(); insight.cb = CbAction::Ack; // we hopefully never wait more than u64 ... if we do we've got // bigger problems #[allow(clippy::cast_possible_truncation)] let time = processing_start.elapsed().as_millis() as u64; let mut m = Object::with_capacity(1); m.insert("time".into(), time.into()); insight.data = (Value::null(), m).into(); self.reply_channel .send(sink::Reply::Insight(insight.clone())) .await?; } }
conditional_block
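The ack branch above attaches the publish latency to the insight event, truncating the u128 from `as_millis()` down to u64, which is what the `cast_possible_truncation` allow documents. The measurement in isolation:

use std::time::Instant;

fn main() {
    let processing_start = Instant::now();
    // ... the basic_publish round trip would happen here ...
    #[allow(clippy::cast_possible_truncation)]
    let time = processing_start.elapsed().as_millis() as u64;
    println!("publish took {} ms", time);
}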
mod.rs
::{Error, ErrorKind, Result}; use crate::status::StatusBackend; pub mod cached_itarbundle; pub mod dirbundle; pub mod filesystem; pub mod format_cache; pub mod memory; pub mod setup; pub mod stack; pub mod stdstreams; pub mod zipbundle; pub trait InputFeatures: Read { fn get_size(&mut self) -> Result<usize>; fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>; } /// What kind of source an input file ultimately came from. We keep track of /// this in order to be able to emit Makefile-style dependencies for input /// files. Right now, we only provide enough options to achieve this goal; we /// could add more. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum InputOrigin { /// This file lives on the filesystem and might change under us. (That is /// it is not a cached bundle file.) Filesystem, /// This file was never used as an input. NotInput, /// This file is none of the above. Other, } /// Input handles are basically Read objects with a few extras. We don't /// require the standard io::Seek because we need to provide a dummy /// implementation for GZip streams, which we wouldn't be allowed to do /// because both the trait and the target struct are outside of our crate. /// /// An important role for the InputHandle struct is computing a cryptographic /// digest of the input file. The driver uses this information in order to /// figure out if the TeX engine needs rerunning. TeX makes our life more /// difficult, though, since it has somewhat funky file access patterns. LaTeX /// file opens work by opening a file and immediately closing it, which tests /// whether the file exists, and then by opening it again for real. Under the /// hood, XeTeX reads a couple of bytes from each file upon open to sniff its /// encoding. So we can't just stream data from `read()` calls into the SHA2 /// computer, since we end up seeking and reading redundant data. /// /// The current system maintains some internal state that, so far, helps us Do /// The Right Thing given all this. If there's a seek on the file, we give up /// on our digest computation. But if there's a seek back to the file /// beginning, we are open to the possibility of restarting the computation. /// But if nothing is ever read from the file, we once again give up on the /// computation. The `ExecutionState` code then has further pieces that track /// access to nonexistent files, which we treat as being equivalent to an /// existing empty file for these purposes. pub struct InputHandle { name: OsString, inner: Box<dyn InputFeatures>, /// Indicates that the file cannot be written to (provided by a read-only IoProvider) and /// therefore it is useless to compute the digest. 
read_only: bool, digest: digest::DigestComputer, origin: InputOrigin, ever_read: bool, did_unhandled_seek: bool, } impl InputHandle { pub fn new<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: false, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn new_read_only<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: true, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } pub fn origin(&self) -> InputOrigin { self.origin } /// Consumes the object and returns the underlying readable handle that /// it references. pub fn into_inner(self) -> Box<dyn InputFeatures> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// read. No digest is returned if there was ever a seek on the input /// stream, since in that case the results will not be reliable. We also /// return None if the stream was never read, which is another common /// TeX access pattern: files are opened, immediately closed, and then /// opened again. Finally, no digest is returned if the file is marked read-only. pub fn into_name_digest(self) -> (OsString, Option<DigestData>) { if self.did_unhandled_seek ||!self.ever_read || self.read_only { (self.name, None) } else { (self.name, Some(DigestData::from(self.digest))) } } pub fn getc(&mut self) -> Result<u8> { let mut byte = [0u8; 1]; if self.read(&mut byte[..1])? == 0 { // EOF return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into()); } Ok(byte[0]) } } impl Read for InputHandle { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.ever_read = true; let n = self.inner.read(buf)?; if!self.read_only { self.digest.input(&buf[..n]); } Ok(n) } } impl InputFeatures for InputHandle { fn get_size(&mut self) -> Result<usize> { self.inner.get_size() } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { match pos { SeekFrom::Start(0) => { // As described above, there is a common pattern in TeX file // accesses: read a few bytes to sniff, then go back to the // beginning. We should tidy up the I/O to just buffer instead // of seeking, but in the meantime, we can handle this. self.digest = Default::default(); self.ever_read = false; } SeekFrom::Current(0) => { // Noop. This must *not* clear the ungetc buffer for our // current PDF startxref/xref parsing code to work. } _ => { self.did_unhandled_seek = true; } } let offset = self.inner.try_seek(pos)?; Ok(offset) } } pub struct OutputHandle { name: OsString, inner: Box<dyn Write>, digest: digest::DigestComputer, } impl OutputHandle { pub fn new<T:'static + Write>(name: &OsStr, inner: T) -> OutputHandle { OutputHandle { name: name.to_os_string(), inner: Box::new(inner), digest: digest::create(), } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } /// Consumes the object and returns the underlying writable handle that /// it references. pub fn into_inner(self) -> Box<dyn Write> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// written. 
pub fn into_name_digest(self) -> (OsString, DigestData) { (self.name, DigestData::from(self.digest)) } } impl Write for OutputHandle { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let n = self.inner.write(buf)?; self.digest.input(&buf[..n]); Ok(n) } fn flush(&mut self) -> io::Result<()> { self.inner.flush() } } // An Io provider is a source of handles. One wrinkle is that it's good to be // able to distinguish between unavailability of a given name and error // accessing it. We take file paths as OsStrs, although since we parse input // files as Unicode it may not be possible to actually express zany // non-Unicode Unix paths inside the engine. #[derive(Debug)] pub enum OpenResult<T> { Ok(T), NotAvailable, Err(Error), } impl<T> OpenResult<T> { pub fn unwrap(self) -> T { match self { OpenResult::Ok(t) => t, _ => panic!("expected an open file"), } } /// Returns true if this result is of the NotAvailable variant. pub fn is_not_available(&self) -> bool { if let OpenResult::NotAvailable = *self { true } else { false } } /// Convert this object into a plain Result, erroring if the item was not available. pub fn must_exist(self) -> Result<T> { match self { OpenResult::Ok(t) => Ok(t), OpenResult::Err(e) => Err(e), OpenResult::NotAvailable => { Err(io::Error::new(io::ErrorKind::NotFound, "not found").into()) } } } } /// A hack to allow casting of Bundles to IoProviders. /// /// The code that sets up the I/O stack is handed a reference to a Bundle /// trait object. For the actual I/O, it needs to convert this to an /// IoProvider trait object. [According to /// StackExchange](https://stackoverflow.com/a/28664881/3760486), the /// following pattern is the least-bad way to achieve the necessary upcasting. pub trait AsIoProviderMut { /// Represent this value as an IoProvider trait object. fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider; } impl<T: IoProvider> AsIoProviderMut for T { fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider { self } } /// A trait for types that can read or write files needed by the TeX engine. pub trait IoProvider: AsIoProviderMut { fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, _name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> {
/// Open the "primary" input file, which in the context of TeX is the main /// input that it's given. When the build is being done using the /// filesystem and the input is a file on the filesystem, this function /// isn't necesssarily that important, but those conditions don't always /// hold. fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open a format file with the specified name. Format files have a /// specialized entry point because IOProviders may wish to handle them /// specially: namely, to munge the filename to one that includes the /// current version of the Tectonic engine, since the format contents /// depend sensitively on the engine internals. fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { self.input_open_name(name, status) } /// Save an a format dump in some way that this provider may be able to /// recover in the future. This awkward interface is needed for to write /// formats with their special munged file names. fn write_format( &mut self, _name: &str, _data: &[u8], _status: &mut dyn StatusBackend, ) -> Result<()> { Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into()) } } impl<P: IoProvider +?Sized> IoProvider for Box<P> { fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> { (**self).output_open_name(name) } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { (**self).output_open_stdout() } fn input_open_name( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_name(name, status) } fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { (**self).input_open_primary(status) } fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_format(name, status) } fn write_format( &mut self, name: &str, data: &[u8], status: &mut dyn StatusBackend, ) -> Result<()> { (**self).write_format(name, data, status) } } /// A special IoProvider that can make TeX format files. /// /// A “bundle” is expected to contain a large number of TeX support files — /// for instance, a compilation of a TeXLive distribution. In terms of the /// software architecture, though, what is special about a bundle is that one /// can generate one or more TeX format files from its contents without /// reference to any other I/O resources. pub trait Bundle: IoProvider { /// Get a cryptographic digest summarizing this bundle’s contents. /// /// The digest summarizes the exact contents of every file in the bundle. /// It is computed from the sorted names and SHA256 digests of the /// component files [as implemented in the script /// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138) /// in the `tectonic-staging` module. /// /// The default implementation gets the digest from a file name /// `SHA256SUM`, which is expected to contain the digest in hex-encoded /// format. fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) { OpenResult::Ok(h) => { let mut text = String::new(); h.take(64).read_to_string(&mut text)?; text } OpenResult::NotAvailable => { // Broken or un-cacheable backend. 
return Err(ErrorKind::Msg( "itar-format bundle does not provide needed SHA256SUM file".to_owned(), ) .into()); } OpenResult::Err(e) => { return Err(e); } }; Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data")) } } impl<B: Bundle +?Sized> Bundle for Box<B> { fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { (**self).get_digest(status) } } // Some generically helpful InputFeatures impls impl<R: Read> InputFeatures for GzDecoder<R> { fn get_size(&mut self) -> Result<usize> { Err(ErrorKind::NotSizeable.into()) } fn try_seek(&mut self, _: SeekFrom) -> Result<u64> { Err(ErrorKind::NotSeekable.into()) } } impl InputFeatures for Cursor<Vec<u8>> { fn get_size(&mut self) -> Result<usize> { Ok(self.get_ref().len()) } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { Ok(self.seek(pos)?) } } // Reexports pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo}; pub use self::memory::MemoryIo; pub use self::setup::{IoSetup, IoSetupBuilder}; pub use self::stack::IoStack; pub use self::stdstreams::GenuineStdoutIo; // Helpful. pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> { use std::io::ErrorKind::NotFound; match File::open(path) { Ok(f) => OpenResult::Ok(f), Err(e) => { if e.kind() == NotFound { OpenResult::NotAvailable } else { OpenResult::Err(e.into()) } } } } /// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`, /// or extra separators '/' so that it is of the form /// /// ```text /// path/to/my/file.txt ///../../path/to/parent/dir/file.txt /// /absolute/path/to/file.txt /// ``` /// /// Does not strip whitespace. /// /// Returns `None` if the path refers to a parent of the root. fn try_normalize_tex_path(path: &str) -> Option<String> { use std::iter::repeat; if path.is_empty() { return Some("".into()); } let mut r = Vec::new(); let mut parent_level = 0; let mut has_root = false; // TODO: We need to handle a prefix on Windows (i.e. "C:"). for (i, c) in path.split('/').enumerate() { match c { "" if i == 0 => { has_root = true; r.push(""); } "" | "." => {} ".." => { match r.pop() { // about to pop the root Some("") => return None, None => parent_level += 1, _ => {} } } _ => r.push(c), } } let r = repeat("..") .take(parent_level) .chain(r.into_iter()) // No `join` on `Iterator`. .collect::<Vec<_>>() .join("/"); if r.is_empty() { if has_root { Some("/".into()) } else { Some(".".into()) } } else { Some(r) } } /// Normalize a TeX path if possible, otherwise return the original path. /// /// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.), /// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically. /// /// TODO: This function should operate on `&str` someday, but we need to transition the internals /// away from `OsStr/OsString` before that can happen. fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> { if let Some(t) = path .to_str() .and_then(try_normalize_tex_path) .map(OsString::from) { Cow::Owned(t) } else { Cow::Borrowed(path) } } // Helper for testing. FIXME: I want this to be conditionally compiled with // #[cfg(test)] but things break if I do that. 
pub mod testing { use super::*; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::path::{Path, PathBuf}; pub struct SingleInputFileIo { name: OsString, full_path: PathBuf, } impl SingleInputFileIo { pub fn new(path: &Path) -> SingleInputFileIo { let p = path.to_path_buf(); SingleInputFileIo { name: p.file_name().unwrap().to_os_string(), full_path: p, } } } impl IoProvider for SingleInputFileIo { fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { if name == self.name { OpenResult::Ok(InputHandle::new( name, File::open(&self.full_path).unwrap(), InputOrigin::Filesystem, )) } else { OpenResult::NotAvailable } } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_try_normalize_tex_path() { // edge cases assert_eq!(try_normalize_tex_path(""), Some("".into())); assert_eq!(try_normalize_tex_path("/"), Some("/".into())); assert_eq!(try_normalize_tex_path("//"), Some("/".into())); assert_eq!(try_normalize_tex_path("."), Some(".".into())); assert_eq!(try_normalize_tex_path("./"), Some(".".into())); assert_eq!(try_normalize_tex_path(".."), Some("..".into())); assert_eq!(try_normalize_tex_path("././/./"), Some(".".into())); assert_eq!(try_normalize_tex_path("/././/."), Some("/".into())); assert_eq!( try_normalize_tex_path("my/path/file.txt"), Some("my/path/file.txt".into()) ); // preserve spaces assert_eq!( try_normalize_tex_path(" my/pa th/file.txt "), Some(" my/pa th/file.txt ".into()) ); assert_eq!( try_normalize_tex_path("/my/path/file.txt"), Some("/my/path/file.txt".into()) ); assert_eq!( try_normalize_tex_path("./my///path/././file.txt"),
OpenResult::NotAvailable }
identifier_body
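A minimal usage sketch (the file name and contents are hypothetical, not from the source) of the digest state machine documented on InputHandle above: rewinding to the start restarts the digest, so into_name_digest still returns Some, whereas any other seek would abandon the computation. Cursor<Vec<u8>> works here because this module implements InputFeatures for it.

fn digest_rewind_sketch() {
    use std::ffi::OsStr;
    use std::io::{Cursor, Read, SeekFrom};

    let data = Cursor::new(b"\\input preamble".to_vec());
    let mut h = InputHandle::new(OsStr::new("demo.tex"), data, InputOrigin::Other);

    // Sniff a couple of bytes, as XeTeX does on open.
    let mut sniff = [0u8; 2];
    h.read(&mut sniff).unwrap();

    // Seek back to the beginning: the digest restarts instead of being abandoned.
    h.try_seek(SeekFrom::Start(0)).unwrap();

    // Read everything; the digest now covers the full stream.
    let mut all = Vec::new();
    h.read_to_end(&mut all).unwrap();

    let (name, digest) = h.into_name_digest();
    assert_eq!(name.as_os_str(), OsStr::new("demo.tex"));
    assert!(digest.is_some()); // would be None after an unhandled seek
}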
mod.rs
errors::{Error, ErrorKind, Result}; use crate::status::StatusBackend; pub mod cached_itarbundle; pub mod dirbundle; pub mod filesystem; pub mod format_cache; pub mod memory; pub mod setup; pub mod stack; pub mod stdstreams; pub mod zipbundle; pub trait InputFeatures: Read { fn get_size(&mut self) -> Result<usize>; fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>; } /// What kind of source an input file ultimately came from. We keep track of /// this in order to be able to emit Makefile-style dependencies for input /// files. Right now, we only provide enough options to achieve this goal; we /// could add more. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum InputOrigin { /// This file lives on the filesystem and might change under us. (That is /// it is not a cached bundle file.) Filesystem, /// This file was never used as an input. NotInput, /// This file is none of the above. Other, } /// Input handles are basically Read objects with a few extras. We don't /// require the standard io::Seek because we need to provide a dummy /// implementation for GZip streams, which we wouldn't be allowed to do /// because both the trait and the target struct are outside of our crate. /// /// An important role for the InputHandle struct is computing a cryptographic /// digest of the input file. The driver uses this information in order to /// figure out if the TeX engine needs rerunning. TeX makes our life more /// difficult, though, since it has somewhat funky file access patterns. LaTeX /// file opens work by opening a file and immediately closing it, which tests /// whether the file exists, and then by opening it again for real. Under the /// hood, XeTeX reads a couple of bytes from each file upon open to sniff its /// encoding. So we can't just stream data from `read()` calls into the SHA2 /// computer, since we end up seeking and reading redundant data. /// /// The current system maintains some internal state that, so far, helps us Do /// The Right Thing given all this. If there's a seek on the file, we give up /// on our digest computation. But if there's a seek back to the file /// beginning, we are open to the possibility of restarting the computation. /// But if nothing is ever read from the file, we once again give up on the /// computation. The `ExecutionState` code then has further pieces that track /// access to nonexistent files, which we treat as being equivalent to an /// existing empty file for these purposes. pub struct InputHandle { name: OsString, inner: Box<dyn InputFeatures>, /// Indicates that the file cannot be written to (provided by a read-only IoProvider) and /// therefore it is useless to compute the digest. 
read_only: bool, digest: digest::DigestComputer, origin: InputOrigin, ever_read: bool, did_unhandled_seek: bool, } impl InputHandle { pub fn new<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: false, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn new_read_only<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: true, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } pub fn origin(&self) -> InputOrigin { self.origin } /// Consumes the object and returns the underlying readable handle that /// it references. pub fn into_inner(self) -> Box<dyn InputFeatures> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// read. No digest is returned if there was ever a seek on the input /// stream, since in that case the results will not be reliable. We also /// return None if the stream was never read, which is another common /// TeX access pattern: files are opened, immediately closed, and then /// opened again. Finally, no digest is returned if the file is marked read-only. pub fn into_name_digest(self) -> (OsString, Option<DigestData>) { if self.did_unhandled_seek ||!self.ever_read || self.read_only { (self.name, None) } else { (self.name, Some(DigestData::from(self.digest))) } } pub fn getc(&mut self) -> Result<u8> { let mut byte = [0u8; 1]; if self.read(&mut byte[..1])? == 0 { // EOF return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into()); } Ok(byte[0]) } } impl Read for InputHandle { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.ever_read = true; let n = self.inner.read(buf)?; if!self.read_only { self.digest.input(&buf[..n]); } Ok(n) } } impl InputFeatures for InputHandle { fn get_size(&mut self) -> Result<usize> { self.inner.get_size() } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { match pos { SeekFrom::Start(0) => { // As described above, there is a common pattern in TeX file // accesses: read a few bytes to sniff, then go back to the // beginning. We should tidy up the I/O to just buffer instead // of seeking, but in the meantime, we can handle this. self.digest = Default::default(); self.ever_read = false; } SeekFrom::Current(0) => { // Noop. This must *not* clear the ungetc buffer for our // current PDF startxref/xref parsing code to work. } _ => { self.did_unhandled_seek = true; } } let offset = self.inner.try_seek(pos)?; Ok(offset) } } pub struct OutputHandle { name: OsString, inner: Box<dyn Write>, digest: digest::DigestComputer, } impl OutputHandle { pub fn new<T:'static + Write>(name: &OsStr, inner: T) -> OutputHandle { OutputHandle { name: name.to_os_string(), inner: Box::new(inner), digest: digest::create(), } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } /// Consumes the object and returns the underlying writable handle that /// it references. pub fn into_inner(self) -> Box<dyn Write> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// written. 
pub fn into_name_digest(self) -> (OsString, DigestData) { (self.name, DigestData::from(self.digest)) } } impl Write for OutputHandle { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let n = self.inner.write(buf)?; self.digest.input(&buf[..n]); Ok(n) } fn flush(&mut self) -> io::Result<()> { self.inner.flush() } } // An Io provider is a source of handles. One wrinkle is that it's good to be // able to distinguish between unavailability of a given name and error // accessing it. We take file paths as OsStrs, although since we parse input // files as Unicode it may not be possible to actually express zany // non-Unicode Unix paths inside the engine. #[derive(Debug)] pub enum OpenResult<T> { Ok(T), NotAvailable, Err(Error), } impl<T> OpenResult<T> { pub fn unwrap(self) -> T { match self { OpenResult::Ok(t) => t, _ => panic!("expected an open file"), } } /// Returns true if this result is of the NotAvailable variant. pub fn is_not_available(&self) -> bool { if let OpenResult::NotAvailable = *self { true } else { false } } /// Convert this object into a plain Result, erroring if the item was not available. pub fn must_exist(self) -> Result<T> { match self { OpenResult::Ok(t) => Ok(t), OpenResult::Err(e) => Err(e), OpenResult::NotAvailable => { Err(io::Error::new(io::ErrorKind::NotFound, "not found").into()) } } } } /// A hack to allow casting of Bundles to IoProviders. /// /// The code that sets up the I/O stack is handed a reference to a Bundle /// trait object. For the actual I/O, it needs to convert this to an /// IoProvider trait object. [According to /// StackExchange](https://stackoverflow.com/a/28664881/3760486), the /// following pattern is the least-bad way to achieve the necessary upcasting. pub trait AsIoProviderMut { /// Represent this value as an IoProvider trait object. fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider; } impl<T: IoProvider> AsIoProviderMut for T { fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider { self } } /// A trait for types that can read or write files needed by the TeX engine. pub trait IoProvider: AsIoProviderMut { fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, _name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open the "primary" input file, which in the context of TeX is the main /// input that it's given. When the build is being done using the /// filesystem and the input is a file on the filesystem, this function /// isn't necessarily that important, but those conditions don't always /// hold. fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open a format file with the specified name. Format files have a /// specialized entry point because IoProviders may wish to handle them /// specially: namely, to munge the filename to one that includes the /// current version of the Tectonic engine, since the format contents /// depend sensitively on the engine internals. fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { self.input_open_name(name, status) } /// Save a format dump in some way that this provider may be able to /// recover in the future. This awkward interface is needed to write /// formats with their special munged file names. 
fn write_format( &mut self, _name: &str, _data: &[u8], _status: &mut dyn StatusBackend, ) -> Result<()> { Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into()) } } impl<P: IoProvider + ?Sized> IoProvider for Box<P> { fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> { (**self).output_open_name(name) } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { (**self).output_open_stdout() } fn input_open_name( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_name(name, status) } fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { (**self).input_open_primary(status) } fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_format(name, status) } fn write_format( &mut self, name: &str, data: &[u8], status: &mut dyn StatusBackend, ) -> Result<()> { (**self).write_format(name, data, status) } } /// A special IoProvider that can make TeX format files. /// /// A “bundle” is expected to contain a large number of TeX support files — /// for instance, a compilation of a TeXLive distribution. In terms of the /// software architecture, though, what is special about a bundle is that one /// can generate one or more TeX format files from its contents without /// reference to any other I/O resources. pub trait Bundle: IoProvider { /// Get a cryptographic digest summarizing this bundle’s contents. /// /// The digest summarizes the exact contents of every file in the bundle. /// It is computed from the sorted names and SHA256 digests of the /// component files [as implemented in the script /// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138) /// in the `tectonic-staging` module. /// /// The default implementation gets the digest from a file named /// `SHA256SUM`, which is expected to contain the digest in hex-encoded /// format. fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) { OpenResult::Ok(h) => { let mut text = String::new(); h.take(64).read_to_string(&mut text)?; text } OpenResult::NotAvailable => { // Broken or un-cacheable backend. return Err(ErrorKind::Msg( "itar-format bundle does not provide needed SHA256SUM file".to_owned(), ) .into()); } OpenResult::Err(e) => { return Err(e); } }; Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data")) } } impl<B: Bundle + ?Sized> Bundle for Box<B> { fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { (**self).get_digest(status) } } // Some generically helpful InputFeatures impls impl<R: Read> InputFeatures for GzDecoder<R> { fn get_size(&mut self) -> Result<usize> { Err(ErrorKind::NotSizeable.into()) } fn try_seek(&mut self, _: SeekFrom) -> Result<u64> { Err(ErrorKind::NotSeekable.into()) } } impl InputFeatures for Cursor<Vec<u8>> { fn get_size(&mut self) -> Result<usize> { Ok(self.get_ref().len()) } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { Ok(self.seek(pos)?) } } // Reexports pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo}; pub use self::memory::MemoryIo; pub use self::setup::{IoSetup, IoSetupBuilder}; pub use self::stack::IoStack; pub use self::stdstreams::GenuineStdoutIo; // Helpful. 
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> { use std::io::ErrorKind::NotFound; match File::open(path) { Ok(f) => OpenResult::Ok(f), Err(e) => { if e.kind() == NotFound { OpenResult::NotAvailable } else { OpenResult::Err(e.into()) } } } } /// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`, /// or extra separators '/' so that it is of the form /// /// ```text /// path/to/my/file.txt ///../../path/to/parent/dir/file.txt /// /absolute/path/to/file.txt /// ``` /// /// Does not strip whitespace. /// /// Returns `None` if the path refers to a parent of the root. fn try_normalize_tex_path(path: &str) -> Option<String> { use std::iter::repeat; if path.is_empty() { return Some("".into()); } let mut r = Vec::new(); let mut parent_level = 0; let mut has_root = false; // TODO: We need to handle a prefix on Windows (i.e. "C:"). for (i, c) in path.split('/').enumerate() { match c { "" if i == 0 => { has_root = true; r.push(""); } "" | "." => {} ".." => { match r.pop() { // about to pop the root Some("") => return None, None => parent_level += 1,
_ => r.push(c), } } let r = repeat("..") .take(parent_level) .chain(r.into_iter()) // No `join` on `Iterator`. .collect::<Vec<_>>() .join("/"); if r.is_empty() { if has_root { Some("/".into()) } else { Some(".".into()) } } else { Some(r) } } /// Normalize a TeX path if possible, otherwise return the original path. /// /// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.), /// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically. /// /// TODO: This function should operate on `&str` someday, but we need to transition the internals /// away from `OsStr/OsString` before that can happen. fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> { if let Some(t) = path .to_str() .and_then(try_normalize_tex_path) .map(OsString::from) { Cow::Owned(t) } else { Cow::Borrowed(path) } } // Helper for testing. FIXME: I want this to be conditionally compiled with // #[cfg(test)] but things break if I do that. pub mod testing { use super::*; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::path::{Path, PathBuf}; pub struct SingleInputFileIo { name: OsString, full_path: PathBuf, } impl SingleInputFileIo { pub fn new(path: &Path) -> SingleInputFileIo { let p = path.to_path_buf(); SingleInputFileIo { name: p.file_name().unwrap().to_os_string(), full_path: p, } } } impl IoProvider for SingleInputFileIo { fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { if name == self.name { OpenResult::Ok(InputHandle::new( name, File::open(&self.full_path).unwrap(), InputOrigin::Filesystem, )) } else { OpenResult::NotAvailable } } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_try_normalize_tex_path() { // edge cases assert_eq!(try_normalize_tex_path(""), Some("".into())); assert_eq!(try_normalize_tex_path("/"), Some("/".into())); assert_eq!(try_normalize_tex_path("//"), Some("/".into())); assert_eq!(try_normalize_tex_path("."), Some(".".into())); assert_eq!(try_normalize_tex_path("./"), Some(".".into())); assert_eq!(try_normalize_tex_path(".."), Some("..".into())); assert_eq!(try_normalize_tex_path("././/./"), Some(".".into())); assert_eq!(try_normalize_tex_path("/././/."), Some("/".into())); assert_eq!( try_normalize_tex_path("my/path/file.txt"), Some("my/path/file.txt".into()) ); // preserve spaces assert_eq!( try_normalize_tex_path(" my/pa th/file.txt "), Some(" my/pa th/file.txt ".into()) ); assert_eq!( try_normalize_tex_path("/my/path/file.txt"), Some("/my/path/file.txt".into()) ); assert_eq!( try_normalize_tex_path("./my///path/././file.txt"),
_ => {} } }
random_line_split
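A short worked sketch of the `..` bookkeeping in try_normalize_tex_path above: a `..` cancels the previous kept segment when one exists, bumps parent_level when the walk has already climbed past the start, and returns None when it would pop the root. The inputs are illustrative.

fn dotdot_sketch() {
    // ".." cancels the preceding segment "b".
    assert_eq!(try_normalize_tex_path("a/b/../c"), Some("a/c".into()));
    // Nothing left to cancel: parent_level records the climb instead.
    assert_eq!(try_normalize_tex_path("../a"), Some("../a".into()));
    // Popping the root "" means the path escapes the root entirely.
    assert_eq!(try_normalize_tex_path("/.."), None);
}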
mod.rs
::{Error, ErrorKind, Result}; use crate::status::StatusBackend; pub mod cached_itarbundle; pub mod dirbundle; pub mod filesystem; pub mod format_cache; pub mod memory; pub mod setup; pub mod stack; pub mod stdstreams; pub mod zipbundle; pub trait InputFeatures: Read { fn get_size(&mut self) -> Result<usize>; fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>; } /// What kind of source an input file ultimately came from. We keep track of /// this in order to be able to emit Makefile-style dependencies for input /// files. Right now, we only provide enough options to achieve this goal; we /// could add more. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum InputOrigin { /// This file lives on the filesystem and might change under us. (That is /// it is not a cached bundle file.) Filesystem, /// This file was never used as an input. NotInput, /// This file is none of the above. Other, } /// Input handles are basically Read objects with a few extras. We don't /// require the standard io::Seek because we need to provide a dummy /// implementation for GZip streams, which we wouldn't be allowed to do /// because both the trait and the target struct are outside of our crate. /// /// An important role for the InputHandle struct is computing a cryptographic /// digest of the input file. The driver uses this information in order to /// figure out if the TeX engine needs rerunning. TeX makes our life more /// difficult, though, since it has somewhat funky file access patterns. LaTeX /// file opens work by opening a file and immediately closing it, which tests /// whether the file exists, and then by opening it again for real. Under the /// hood, XeTeX reads a couple of bytes from each file upon open to sniff its /// encoding. So we can't just stream data from `read()` calls into the SHA2 /// computer, since we end up seeking and reading redundant data. /// /// The current system maintains some internal state that, so far, helps us Do /// The Right Thing given all this. If there's a seek on the file, we give up /// on our digest computation. But if there's a seek back to the file /// beginning, we are open to the possibility of restarting the computation. /// But if nothing is ever read from the file, we once again give up on the /// computation. The `ExecutionState` code then has further pieces that track /// access to nonexistent files, which we treat as being equivalent to an /// existing empty file for these purposes. pub struct InputHandle { name: OsString, inner: Box<dyn InputFeatures>, /// Indicates that the file cannot be written to (provided by a read-only IoProvider) and /// therefore it is useless to compute the digest. 
read_only: bool, digest: digest::DigestComputer, origin: InputOrigin, ever_read: bool, did_unhandled_seek: bool, } impl InputHandle { pub fn new<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: false, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn new_read_only<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: true, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } pub fn origin(&self) -> InputOrigin { self.origin } /// Consumes the object and returns the underlying readable handle that /// it references. pub fn into_inner(self) -> Box<dyn InputFeatures> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// read. No digest is returned if there was ever a seek on the input /// stream, since in that case the results will not be reliable. We also /// return None if the stream was never read, which is another common /// TeX access pattern: files are opened, immediately closed, and then /// opened again. Finally, no digest is returned if the file is marked read-only. pub fn into_name_digest(self) -> (OsString, Option<DigestData>) { if self.did_unhandled_seek ||!self.ever_read || self.read_only { (self.name, None) } else { (self.name, Some(DigestData::from(self.digest))) } } pub fn getc(&mut self) -> Result<u8> { let mut byte = [0u8; 1]; if self.read(&mut byte[..1])? == 0 { // EOF return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into()); } Ok(byte[0]) } } impl Read for InputHandle { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.ever_read = true; let n = self.inner.read(buf)?; if!self.read_only { self.digest.input(&buf[..n]); } Ok(n) } } impl InputFeatures for InputHandle { fn get_size(&mut self) -> Result<usize> { self.inner.get_size() } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { match pos { SeekFrom::Start(0) => { // As described above, there is a common pattern in TeX file // accesses: read a few bytes to sniff, then go back to the // beginning. We should tidy up the I/O to just buffer instead // of seeking, but in the meantime, we can handle this. self.digest = Default::default(); self.ever_read = false; } SeekFrom::Current(0) => { // Noop. This must *not* clear the ungetc buffer for our // current PDF startxref/xref parsing code to work. } _ => { self.did_unhandled_seek = true; } } let offset = self.inner.try_seek(pos)?; Ok(offset) } } pub struct OutputHandle { name: OsString, inner: Box<dyn Write>, digest: digest::DigestComputer, } impl OutputHandle { pub fn new<T:'static + Write>(name: &OsStr, inner: T) -> OutputHandle { OutputHandle { name: name.to_os_string(), inner: Box::new(inner), digest: digest::create(), } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } /// Consumes the object and returns the underlying writable handle that /// it references. pub fn into_inner(self) -> Box<dyn Write> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// written. 
pub fn into_name_digest(self) -> (OsString, DigestData) { (self.name, DigestData::from(self.digest)) } } impl Write for OutputHandle { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let n = self.inner.write(buf)?; self.digest.input(&buf[..n]); Ok(n) } fn flush(&mut self) -> io::Result<()> { self.inner.flush() } } // An Io provider is a source of handles. One wrinkle is that it's good to be // able to distinguish between unavailability of a given name and error // accessing it. We take file paths as OsStrs, although since we parse input // files as Unicode it may not be possible to actually express zany // non-Unicode Unix paths inside the engine. #[derive(Debug)] pub enum OpenResult<T> { Ok(T), NotAvailable, Err(Error), } impl<T> OpenResult<T> { pub fn unwrap(self) -> T { match self { OpenResult::Ok(t) => t, _ => panic!("expected an open file"), } } /// Returns true if this result is of the NotAvailable variant. pub fn is_not_available(&self) -> bool { if let OpenResult::NotAvailable = *self { true } else { false } } /// Convert this object into a plain Result, erroring if the item was not available. pub fn must_exist(self) -> Result<T> { match self { OpenResult::Ok(t) => Ok(t), OpenResult::Err(e) => Err(e), OpenResult::NotAvailable => { Err(io::Error::new(io::ErrorKind::NotFound, "not found").into()) } } } } /// A hack to allow casting of Bundles to IoProviders. /// /// The code that sets up the I/O stack is handed a reference to a Bundle /// trait object. For the actual I/O, it needs to convert this to an /// IoProvider trait object. [According to /// StackExchange](https://stackoverflow.com/a/28664881/3760486), the /// following pattern is the least-bad way to achieve the necessary upcasting. pub trait AsIoProviderMut { /// Represent this value as an IoProvider trait object. fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider; } impl<T: IoProvider> AsIoProviderMut for T { fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider { self } } /// A trait for types that can read or write files needed by the TeX engine. pub trait IoProvider: AsIoProviderMut { fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, _name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open the "primary" input file, which in the context of TeX is the main /// input that it's given. When the build is being done using the /// filesystem and the input is a file on the filesystem, this function /// isn't necessarily that important, but those conditions don't always /// hold. fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open a format file with the specified name. Format files have a /// specialized entry point because IoProviders may wish to handle them /// specially: namely, to munge the filename to one that includes the /// current version of the Tectonic engine, since the format contents /// depend sensitively on the engine internals. fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { self.input_open_name(name, status) } /// Save a format dump in some way that this provider may be able to /// recover in the future. This awkward interface is needed to write /// formats with their special munged file names. 
fn write_format( &mut self, _name: &str, _data: &[u8], _status: &mut dyn StatusBackend, ) -> Result<()> { Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into()) } } impl<P: IoProvider + ?Sized> IoProvider for Box<P> { fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> { (**self).output_open_name(name) } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { (**self).output_open_stdout() } fn input_open_name( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_name(name, status) } fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { (**self).input_open_primary(status) } fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_format(name, status) } fn write_format( &mut self, name: &str, data: &[u8], status: &mut dyn StatusBackend, ) -> Result<()> { (**self).write_format(name, data, status) } } /// A special IoProvider that can make TeX format files. /// /// A “bundle” is expected to contain a large number of TeX support files — /// for instance, a compilation of a TeXLive distribution. In terms of the /// software architecture, though, what is special about a bundle is that one /// can generate one or more TeX format files from its contents without /// reference to any other I/O resources. pub trait Bundle: IoProvider { /// Get a cryptographic digest summarizing this bundle’s contents. /// /// The digest summarizes the exact contents of every file in the bundle. /// It is computed from the sorted names and SHA256 digests of the /// component files [as implemented in the script /// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138) /// in the `tectonic-staging` module. /// /// The default implementation gets the digest from a file named /// `SHA256SUM`, which is expected to contain the digest in hex-encoded /// format. fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) { OpenResult::Ok(h) => { let mut text = String::new(); h.take(64).read_to_string(&mut text)?; text } OpenResult::NotAvailable => { // Broken or un-cacheable backend. return Err(ErrorKind::Msg( "itar-format bundle does not provide needed SHA256SUM file".to_owned(), ) .into()); } OpenResult::Err(e) => { return Err(e); } }; Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data")) } } impl<B: Bundle + ?Sized> Bundle for Box<B> { fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { (**self).get_digest(status) } } // Some generically helpful InputFeatures impls impl<R: Read> InputFeatures for GzDecoder<R> { fn get_size(&mut self) -> Result<usize> { Err(ErrorKind::NotSizeable.into()) } fn try_seek(&mut self, _: SeekFrom) -> Result<u64> { Err(ErrorKind::NotSeekable.into()) } } impl InputFeatures for Cursor<Vec<u8>> { fn get_size(&mut self) -> Result<usize> { Ok(self.get_ref().len()) } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { Ok(self.seek(pos)?) } } // Reexports pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo}; pub use self::memory::MemoryIo; pub use self::setup::{IoSetup, IoSetupBuilder}; pub use self::stack::IoStack; pub use self::stdstreams::GenuineStdoutIo; // Helpful. 
pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> { use std::io::ErrorKind::NotFound; match File::open(path) { Ok(f) => OpenResult::Ok(f), Err(e) => { if e.kind() == NotFound { OpenResult::NotAvailable } else {
} } /// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`, /// or extra separators '/' so that it is of the form /// /// ```text /// path/to/my/file.txt ///../../path/to/parent/dir/file.txt /// /absolute/path/to/file.txt /// ``` /// /// Does not strip whitespace. /// /// Returns `None` if the path refers to a parent of the root. fn try_normalize_tex_path(path: &str) -> Option<String> { use std::iter::repeat; if path.is_empty() { return Some("".into()); } let mut r = Vec::new(); let mut parent_level = 0; let mut has_root = false; // TODO: We need to handle a prefix on Windows (i.e. "C:"). for (i, c) in path.split('/').enumerate() { match c { "" if i == 0 => { has_root = true; r.push(""); } "" | "." => {} ".." => { match r.pop() { // about to pop the root Some("") => return None, None => parent_level += 1, _ => {} } } _ => r.push(c), } } let r = repeat("..") .take(parent_level) .chain(r.into_iter()) // No `join` on `Iterator`. .collect::<Vec<_>>() .join("/"); if r.is_empty() { if has_root { Some("/".into()) } else { Some(".".into()) } } else { Some(r) } } /// Normalize a TeX path if possible, otherwise return the original path. /// /// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.), /// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically. /// /// TODO: This function should operate on `&str` someday, but we need to transition the internals /// away from `OsStr/OsString` before that can happen. fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> { if let Some(t) = path .to_str() .and_then(try_normalize_tex_path) .map(OsString::from) { Cow::Owned(t) } else { Cow::Borrowed(path) } } // Helper for testing. FIXME: I want this to be conditionally compiled with // #[cfg(test)] but things break if I do that. 
pub mod testing { use super::*; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::path::{Path, PathBuf}; pub struct SingleInputFileIo { name: OsString, full_path: PathBuf, } impl SingleInputFileIo { pub fn new(path: &Path) -> SingleInputFileIo { let p = path.to_path_buf(); SingleInputFileIo { name: p.file_name().unwrap().to_os_string(), full_path: p, } } } impl IoProvider for SingleInputFileIo { fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { if name == self.name { OpenResult::Ok(InputHandle::new( name, File::open(&self.full_path).unwrap(), InputOrigin::Filesystem, )) } else { OpenResult::NotAvailable } } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_try_normalize_tex_path() { // edge cases assert_eq!(try_normalize_tex_path(""), Some("".into())); assert_eq!(try_normalize_tex_path("/"), Some("/".into())); assert_eq!(try_normalize_tex_path("//"), Some("/".into())); assert_eq!(try_normalize_tex_path("."), Some(".".into())); assert_eq!(try_normalize_tex_path("./"), Some(".".into())); assert_eq!(try_normalize_tex_path(".."), Some("..".into())); assert_eq!(try_normalize_tex_path("././/./"), Some(".".into())); assert_eq!(try_normalize_tex_path("/././/."), Some("/".into())); assert_eq!( try_normalize_tex_path("my/path/file.txt"), Some("my/path/file.txt".into()) ); // preserve spaces assert_eq!( try_normalize_tex_path(" my/pa th/file.txt "), Some(" my/pa th/file.txt ".into()) ); assert_eq!( try_normalize_tex_path("/my/path/file.txt"), Some("/my/path/file.txt".into()) ); assert_eq!( try_normalize_tex_path("./my///path/././file.txt"),
OpenResult::Err(e.into()) } }
conditional_block
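A hedged sketch of a custom read-only provider built only from pieces shown above; the struct name and fields are invented for illustration. Because the handle is created with new_read_only, reads skip the digest computation and into_name_digest returns None for it.

struct StaticFileIo {
    name: std::ffi::OsString,
    bytes: Vec<u8>,
}

impl IoProvider for StaticFileIo {
    // Only input_open_name is overridden; the other methods keep their
    // NotAvailable defaults from the IoProvider trait.
    fn input_open_name(
        &mut self,
        name: &std::ffi::OsStr,
        _status: &mut dyn StatusBackend,
    ) -> OpenResult<InputHandle> {
        if name == self.name {
            OpenResult::Ok(InputHandle::new_read_only(
                name,
                std::io::Cursor::new(self.bytes.clone()),
                InputOrigin::Other,
            ))
        } else {
            OpenResult::NotAvailable
        }
    }
}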
mod.rs
::{Error, ErrorKind, Result}; use crate::status::StatusBackend; pub mod cached_itarbundle; pub mod dirbundle; pub mod filesystem; pub mod format_cache; pub mod memory; pub mod setup; pub mod stack; pub mod stdstreams; pub mod zipbundle; pub trait InputFeatures: Read { fn get_size(&mut self) -> Result<usize>; fn try_seek(&mut self, pos: SeekFrom) -> Result<u64>; } /// What kind of source an input file ultimately came from. We keep track of /// this in order to be able to emit Makefile-style dependencies for input /// files. Right now, we only provide enough options to achieve this goal; we /// could add more. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum InputOrigin { /// This file lives on the filesystem and might change under us. (That is /// it is not a cached bundle file.) Filesystem, /// This file was never used as an input. NotInput, /// This file is none of the above. Other, } /// Input handles are basically Read objects with a few extras. We don't /// require the standard io::Seek because we need to provide a dummy /// implementation for GZip streams, which we wouldn't be allowed to do /// because both the trait and the target struct are outside of our crate. /// /// An important role for the InputHandle struct is computing a cryptographic /// digest of the input file. The driver uses this information in order to /// figure out if the TeX engine needs rerunning. TeX makes our life more /// difficult, though, since it has somewhat funky file access patterns. LaTeX /// file opens work by opening a file and immediately closing it, which tests /// whether the file exists, and then by opening it again for real. Under the /// hood, XeTeX reads a couple of bytes from each file upon open to sniff its /// encoding. So we can't just stream data from `read()` calls into the SHA2 /// computer, since we end up seeking and reading redundant data. /// /// The current system maintains some internal state that, so far, helps us Do /// The Right Thing given all this. If there's a seek on the file, we give up /// on our digest computation. But if there's a seek back to the file /// beginning, we are open to the possibility of restarting the computation. /// But if nothing is ever read from the file, we once again give up on the /// computation. The `ExecutionState` code then has further pieces that track /// access to nonexistent files, which we treat as being equivalent to an /// existing empty file for these purposes. pub struct InputHandle { name: OsString, inner: Box<dyn InputFeatures>, /// Indicates that the file cannot be written to (provided by a read-only IoProvider) and /// therefore it is useless to compute the digest. 
read_only: bool, digest: digest::DigestComputer, origin: InputOrigin, ever_read: bool, did_unhandled_seek: bool, } impl InputHandle { pub fn new<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: false, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn new_read_only<T:'static + InputFeatures>( name: &OsStr, inner: T, origin: InputOrigin, ) -> InputHandle { InputHandle { name: name.to_os_string(), inner: Box::new(inner), read_only: true, digest: Default::default(), origin, ever_read: false, did_unhandled_seek: false, } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } pub fn origin(&self) -> InputOrigin { self.origin } /// Consumes the object and returns the underlying readable handle that /// it references. pub fn into_inner(self) -> Box<dyn InputFeatures> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// read. No digest is returned if there was ever a seek on the input /// stream, since in that case the results will not be reliable. We also /// return None if the stream was never read, which is another common /// TeX access pattern: files are opened, immediately closed, and then /// opened again. Finally, no digest is returned if the file is marked read-only. pub fn into_name_digest(self) -> (OsString, Option<DigestData>) { if self.did_unhandled_seek ||!self.ever_read || self.read_only { (self.name, None) } else { (self.name, Some(DigestData::from(self.digest))) } } pub fn getc(&mut self) -> Result<u8> { let mut byte = [0u8; 1]; if self.read(&mut byte[..1])? == 0 { // EOF return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "EOF in getc").into()); } Ok(byte[0]) } } impl Read for InputHandle { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { self.ever_read = true; let n = self.inner.read(buf)?; if!self.read_only { self.digest.input(&buf[..n]); } Ok(n) } } impl InputFeatures for InputHandle { fn get_size(&mut self) -> Result<usize> { self.inner.get_size() } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { match pos { SeekFrom::Start(0) => { // As described above, there is a common pattern in TeX file // accesses: read a few bytes to sniff, then go back to the // beginning. We should tidy up the I/O to just buffer instead // of seeking, but in the meantime, we can handle this. self.digest = Default::default(); self.ever_read = false; } SeekFrom::Current(0) => { // Noop. This must *not* clear the ungetc buffer for our // current PDF startxref/xref parsing code to work. } _ => { self.did_unhandled_seek = true; } } let offset = self.inner.try_seek(pos)?; Ok(offset) } } pub struct OutputHandle { name: OsString, inner: Box<dyn Write>, digest: digest::DigestComputer, } impl OutputHandle { pub fn ne
: 'static + Write>(name: &OsStr, inner: T) -> OutputHandle { OutputHandle { name: name.to_os_string(), inner: Box::new(inner), digest: digest::create(), } } pub fn name(&self) -> &OsStr { self.name.as_os_str() } /// Consumes the object and returns the underlying writable handle that /// it references. pub fn into_inner(self) -> Box<dyn Write> { self.inner } /// Consumes the object and returns the SHA256 sum of the content that was /// written. pub fn into_name_digest(self) -> (OsString, DigestData) { (self.name, DigestData::from(self.digest)) } } impl Write for OutputHandle { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let n = self.inner.write(buf)?; self.digest.input(&buf[..n]); Ok(n) } fn flush(&mut self) -> io::Result<()> { self.inner.flush() } } // An Io provider is a source of handles. One wrinkle is that it's good to be // able to distinguish between unavailability of a given name and error // accessing it. We take file paths as OsStrs, although since we parse input // files as Unicode it may not be possible to actually express zany // non-Unicode Unix paths inside the engine. #[derive(Debug)] pub enum OpenResult<T> { Ok(T), NotAvailable, Err(Error), } impl<T> OpenResult<T> { pub fn unwrap(self) -> T { match self { OpenResult::Ok(t) => t, _ => panic!("expected an open file"), } } /// Returns true if this result is of the NotAvailable variant. pub fn is_not_available(&self) -> bool { if let OpenResult::NotAvailable = *self { true } else { false } } /// Convert this object into a plain Result, erroring if the item was not available. pub fn must_exist(self) -> Result<T> { match self { OpenResult::Ok(t) => Ok(t), OpenResult::Err(e) => Err(e), OpenResult::NotAvailable => { Err(io::Error::new(io::ErrorKind::NotFound, "not found").into()) } } } } /// A hack to allow casting of Bundles to IoProviders. /// /// The code that sets up the I/O stack is handed a reference to a Bundle /// trait object. For the actual I/O, it needs to convert this to an /// IoProvider trait object. [According to /// StackExchange](https://stackoverflow.com/a/28664881/3760486), the /// following pattern is the least-bad way to achieve the necessary upcasting. pub trait AsIoProviderMut { /// Represent this value as an IoProvider trait object. fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider; } impl<T: IoProvider> AsIoProviderMut for T { fn as_ioprovider_mut(&mut self) -> &mut dyn IoProvider { self } } /// A trait for types that can read or write files needed by the TeX engine. pub trait IoProvider: AsIoProviderMut { fn output_open_name(&mut self, _name: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, _name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open the "primary" input file, which in the context of TeX is the main /// input that it's given. When the build is being done using the /// filesystem and the input is a file on the filesystem, this function /// isn't necessarily that important, but those conditions don't always /// hold. fn input_open_primary(&mut self, _status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { OpenResult::NotAvailable } /// Open a format file with the specified name. 
Format files have a /// specialized entry point because IoProviders may wish to handle them /// specially: namely, to munge the filename to one that includes the /// current version of the Tectonic engine, since the format contents /// depend sensitively on the engine internals. fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { self.input_open_name(name, status) } /// Save a format dump in some way that this provider may be able to /// recover in the future. This awkward interface is needed to write /// formats with their special munged file names. fn write_format( &mut self, _name: &str, _data: &[u8], _status: &mut dyn StatusBackend, ) -> Result<()> { Err(ErrorKind::Msg("this I/O layer cannot save format files".to_owned()).into()) } } impl<P: IoProvider + ?Sized> IoProvider for Box<P> { fn output_open_name(&mut self, name: &OsStr) -> OpenResult<OutputHandle> { (**self).output_open_name(name) } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { (**self).output_open_stdout() } fn input_open_name( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_name(name, status) } fn input_open_primary(&mut self, status: &mut dyn StatusBackend) -> OpenResult<InputHandle> { (**self).input_open_primary(status) } fn input_open_format( &mut self, name: &OsStr, status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { (**self).input_open_format(name, status) } fn write_format( &mut self, name: &str, data: &[u8], status: &mut dyn StatusBackend, ) -> Result<()> { (**self).write_format(name, data, status) } } /// A special IoProvider that can make TeX format files. /// /// A “bundle” is expected to contain a large number of TeX support files — /// for instance, a compilation of a TeXLive distribution. In terms of the /// software architecture, though, what is special about a bundle is that one /// can generate one or more TeX format files from its contents without /// reference to any other I/O resources. pub trait Bundle: IoProvider { /// Get a cryptographic digest summarizing this bundle’s contents. /// /// The digest summarizes the exact contents of every file in the bundle. /// It is computed from the sorted names and SHA256 digests of the /// component files [as implemented in the script /// builder/make-zipfile.py](https://github.com/tectonic-typesetting/tectonic-staging/blob/master/builder/make-zipfile.py#L138) /// in the `tectonic-staging` module. /// /// The default implementation gets the digest from a file named /// `SHA256SUM`, which is expected to contain the digest in hex-encoded /// format. fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { let digest_text = match self.input_open_name(OsStr::new(digest::DIGEST_NAME), status) { OpenResult::Ok(h) => { let mut text = String::new(); h.take(64).read_to_string(&mut text)?; text } OpenResult::NotAvailable => { // Broken or un-cacheable backend. 
return Err(ErrorKind::Msg( "itar-format bundle does not provide needed SHA256SUM file".to_owned(), ) .into()); } OpenResult::Err(e) => { return Err(e); } }; Ok(ctry!(DigestData::from_str(&digest_text); "corrupted SHA256 digest data")) } } impl<B: Bundle +?Sized> Bundle for Box<B> { fn get_digest(&mut self, status: &mut dyn StatusBackend) -> Result<DigestData> { (**self).get_digest(status) } } // Some generically helpful InputFeatures impls impl<R: Read> InputFeatures for GzDecoder<R> { fn get_size(&mut self) -> Result<usize> { Err(ErrorKind::NotSizeable.into()) } fn try_seek(&mut self, _: SeekFrom) -> Result<u64> { Err(ErrorKind::NotSeekable.into()) } } impl InputFeatures for Cursor<Vec<u8>> { fn get_size(&mut self) -> Result<usize> { Ok(self.get_ref().len()) } fn try_seek(&mut self, pos: SeekFrom) -> Result<u64> { Ok(self.seek(pos)?) } } // Reexports pub use self::filesystem::{FilesystemIo, FilesystemPrimaryInputIo}; pub use self::memory::MemoryIo; pub use self::setup::{IoSetup, IoSetupBuilder}; pub use self::stack::IoStack; pub use self::stdstreams::GenuineStdoutIo; // Helpful. pub fn try_open_file<P: AsRef<Path>>(path: P) -> OpenResult<File> { use std::io::ErrorKind::NotFound; match File::open(path) { Ok(f) => OpenResult::Ok(f), Err(e) => { if e.kind() == NotFound { OpenResult::NotAvailable } else { OpenResult::Err(e.into()) } } } } /// Normalize a TeX path in a system independent™ way by stripping any `.`, `..`, /// or extra separators '/' so that it is of the form /// /// ```text /// path/to/my/file.txt ///../../path/to/parent/dir/file.txt /// /absolute/path/to/file.txt /// ``` /// /// Does not strip whitespace. /// /// Returns `None` if the path refers to a parent of the root. fn try_normalize_tex_path(path: &str) -> Option<String> { use std::iter::repeat; if path.is_empty() { return Some("".into()); } let mut r = Vec::new(); let mut parent_level = 0; let mut has_root = false; // TODO: We need to handle a prefix on Windows (i.e. "C:"). for (i, c) in path.split('/').enumerate() { match c { "" if i == 0 => { has_root = true; r.push(""); } "" | "." => {} ".." => { match r.pop() { // about to pop the root Some("") => return None, None => parent_level += 1, _ => {} } } _ => r.push(c), } } let r = repeat("..") .take(parent_level) .chain(r.into_iter()) // No `join` on `Iterator`. .collect::<Vec<_>>() .join("/"); if r.is_empty() { if has_root { Some("/".into()) } else { Some(".".into()) } } else { Some(r) } } /// Normalize a TeX path if possible, otherwise return the original path. /// /// _TeX path_ is a path that obeys simplified semantics: Unix-like syntax (`/` for separators, etc.), /// must be Unicode-able, no symlinks allowed such that `..` can be stripped lexically. /// /// TODO: This function should operate on `&str` someday, but we need to transition the internals /// away from `OsStr/OsString` before that can happen. fn normalize_tex_path(path: &OsStr) -> Cow<OsStr> { if let Some(t) = path .to_str() .and_then(try_normalize_tex_path) .map(OsString::from) { Cow::Owned(t) } else { Cow::Borrowed(path) } } // Helper for testing. FIXME: I want this to be conditionally compiled with // #[cfg(test)] but things break if I do that. 
pub mod testing { use super::*; use std::ffi::{OsStr, OsString}; use std::fs::File; use std::path::{Path, PathBuf}; pub struct SingleInputFileIo { name: OsString, full_path: PathBuf, } impl SingleInputFileIo { pub fn new(path: &Path) -> SingleInputFileIo { let p = path.to_path_buf(); SingleInputFileIo { name: p.file_name().unwrap().to_os_string(), full_path: p, } } } impl IoProvider for SingleInputFileIo { fn output_open_name(&mut self, _: &OsStr) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn output_open_stdout(&mut self) -> OpenResult<OutputHandle> { OpenResult::NotAvailable } fn input_open_name( &mut self, name: &OsStr, _status: &mut dyn StatusBackend, ) -> OpenResult<InputHandle> { if name == self.name { OpenResult::Ok(InputHandle::new( name, File::open(&self.full_path).unwrap(), InputOrigin::Filesystem, )) } else { OpenResult::NotAvailable } } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_try_normalize_tex_path() { // edge cases assert_eq!(try_normalize_tex_path(""), Some("".into())); assert_eq!(try_normalize_tex_path("/"), Some("/".into())); assert_eq!(try_normalize_tex_path("//"), Some("/".into())); assert_eq!(try_normalize_tex_path("."), Some(".".into())); assert_eq!(try_normalize_tex_path("./"), Some(".".into())); assert_eq!(try_normalize_tex_path(".."), Some("..".into())); assert_eq!(try_normalize_tex_path("././/./"), Some(".".into())); assert_eq!(try_normalize_tex_path("/././/."), Some("/".into())); assert_eq!( try_normalize_tex_path("my/path/file.txt"), Some("my/path/file.txt".into()) ); // preserve spaces assert_eq!( try_normalize_tex_path(" my/pa th/file.txt "), Some(" my/pa th/file.txt ".into()) ); assert_eq!( try_normalize_tex_path("/my/path/file.txt"), Some("/my/path/file.txt".into()) ); assert_eq!( try_normalize_tex_path("./my///path/././file.txt"),
w<T
identifier_name
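The lexical path normalization in `try_normalize_tex_path` above is self-contained enough to sketch in isolation. The standalone function below mirrors the same algorithm: split on `/`, drop empty and `.` segments, resolve `..` against already-kept segments, and refuse to climb past the root. The `normalize` name and the sample paths are illustrative only, not part of the Tectonic API.

/// Standalone sketch of the lexical normalization shown above.
fn normalize(path: &str) -> Option<String> {
    if path.is_empty() {
        return Some(String::new());
    }
    let mut kept: Vec<&str> = Vec::new();
    let mut parent_level = 0;
    let mut has_root = false;
    for (i, seg) in path.split('/').enumerate() {
        match seg {
            "" if i == 0 => {
                has_root = true;
                kept.push(""); // leading "" makes join() emit the root '/'
            }
            "" | "." => {}
            ".." => match kept.pop() {
                Some("") => return None, // would escape the root
                None => parent_level += 1, // keep a leading ".." for relative paths
                _ => {}
            },
            _ => kept.push(seg),
        }
    }
    let joined = std::iter::repeat("..")
        .take(parent_level)
        .chain(kept.into_iter())
        .collect::<Vec<_>>()
        .join("/");
    if joined.is_empty() {
        Some(if has_root { "/" } else { "." }.into())
    } else {
        Some(joined)
    }
}

fn main() {
    assert_eq!(normalize("./my///path/././file.txt").as_deref(), Some("my/path/file.txt"));
    assert_eq!(normalize("a/b/../../../c").as_deref(), Some("../c"));
    assert_eq!(normalize("/.."), None); // parent of the root
}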
execution.rs
// Copyright 2018-2021 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::reflect::{
    ContractEnv,
    DispatchError,
};
use core::{
    any::TypeId,
    convert::Infallible,
    mem::ManuallyDrop,
};
use ink_env::{
    Environment,
    ReturnFlags,
};
use ink_primitives::{
    Key,
    KeyPtr,
};
use ink_storage::{
    alloc,
    alloc::ContractPhase,
    traits::{
        pull_spread_root,
        push_spread_root,
        SpreadAllocate,
        SpreadLayout,
    },
};

/// The root key of the ink! smart contract.
///
/// # Note
///
/// - This is the key where storage allocation, pushing and pulling is rooted
///   using the `SpreadLayout` and `SpreadAllocate` traits primarily.
/// - This trait is automatically implemented by the ink! codegen.
/// - The existence of this trait allows customizing the root key in future
///   versions of ink! if needed.
pub trait ContractRootKey {
    const ROOT_KEY: Key;
}

/// Returns `Ok` if the caller did not transfer additional value to the callee.
///
/// # Errors
///
/// If the caller did send some amount of transferred value to the callee.
#[inline]
pub fn deny_payment<E>() -> Result<(), DispatchError>
where
    E: Environment,
{
    let transferred = ink_env::transferred_balance::<E>();
    if transferred != <E as Environment>::Balance::from(0_u32) {
        return Err(DispatchError::PaidUnpayableMessage)
    }
    Ok(())
}

/// Configuration for the execution of an ink! constructor.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteConstructorConfig {
    /// Yields `true` if the dynamic storage allocator has been enabled.
    ///
    /// # Note
    ///
    /// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
    pub dynamic_storage_alloc: bool,
}

/// Executes the given ink! constructor.
///
/// # Note
///
/// The closure is supposed to already contain all the arguments that the real
/// constructor message requires and forwards them.
#[inline]
pub fn execute_constructor<Contract, F, R>(
    config: ExecuteConstructorConfig,
    f: F,
) -> Result<(), DispatchError>
where
    Contract: SpreadLayout + ContractRootKey,
    F: FnOnce() -> R,
    <private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue: scale::Encode,
    private::Seal<R>: ConstructorReturnType<Contract>,
{
    if config.dynamic_storage_alloc {
        alloc::initialize(ContractPhase::Deploy);
    }
    let result = ManuallyDrop::new(private::Seal(f()));
    match result.as_result() {
        Ok(contract) => {
            // Constructor is infallible or is fallible but succeeded.
            //
            // This requires us to sync back the changes of the contract storage.
            let root_key = <Contract as ContractRootKey>::ROOT_KEY;
            push_spread_root::<Contract>(contract, &root_key);
            if config.dynamic_storage_alloc {
                alloc::finalize();
            }
            Ok(())
        }
        Err(_) => {
            // Constructor is fallible and failed.
            //
            // We need to revert the state of the transaction.
            ink_env::return_value::<
                <private::Seal<R> as ConstructorReturnType<Contract>>::ReturnValue,
            >(
                ReturnFlags::default().set_reverted(true),
                result.return_value(),
            )
        }
    }
}

/// Initializes the ink! contract using the given initialization routine.
///
/// # Note
///
/// - This uses the `SpreadAllocate` trait in order to default-initialize the
///   ink! smart contract before calling the initialization routine.
/// - This either returns `Contract` or `Result<Contract, E>` depending
///   on the return type `R` of the initializer closure `F`.
///   If `R` is `()` then `Contract` is returned and if `R` is any type of
///   `Result<(), E>` then `Result<Contract, E>` is returned.
///   Other return types for `F` than the ones listed above are not allowed.
#[inline]
pub fn initialize_contract<Contract, F, R>(
    initializer: F,
) -> <R as InitializerReturnType<Contract>>::Wrapped
where
    Contract: ContractRootKey + SpreadAllocate,
    F: FnOnce(&mut Contract) -> R,
    R: InitializerReturnType<Contract>,
{
    let mut key_ptr = KeyPtr::from(<Contract as ContractRootKey>::ROOT_KEY);
    let mut instance = <Contract as SpreadAllocate>::allocate_spread(&mut key_ptr);
    let result = initializer(&mut instance);
    result.into_wrapped(instance)
}

mod private {
    /// Seals the implementation of `ContractInitializerReturnType`.
    pub trait Sealed {}
    impl Sealed for () {}
    impl<T, E> Sealed for Result<T, E> {}
    /// A thin wrapper type that automatically seals its inner type.
    ///
    /// Since it is private it can only be used from within this crate.
    /// We need this type in order to properly seal the `ConstructorReturnType`
    /// trait from unwanted external trait implementations.
    #[repr(transparent)]
    pub struct Seal<T>(pub T);
    impl<T> Sealed for Seal<T> {}
}

/// Guards against using invalid contract initializer types.
///
/// # Note
///
/// Currently the only allowed types are `()` and `Result<(), E>`
/// where `E` is some unspecified error type.
/// If the contract initializer returns `Result::Err` the utility
/// method that is used to initialize an ink! smart contract will
/// revert the state of the contract instantiation.
pub trait ConstructorReturnType<C>: private::Sealed {
    /// Is `true` if `Self` is `Result<C, E>`.
    const IS_RESULT: bool = false;

    /// The error type of the constructor return type.
    ///
    /// # Note
    ///
    /// For infallible constructors this is `core::convert::Infallible`.
    type Error;

    /// The type of the return value of the constructor.
    ///
    /// # Note
    ///
    /// For infallible constructors this is `()` whereas for fallible
    /// constructors this is the actual return value. Since we only ever
    /// return a value in case of `Result::Err` the `Result::Ok` value
    /// does not matter.
    type ReturnValue;

    /// Converts the return value into a `Result` instance.
    ///
    /// # Note
    ///
    /// For infallible constructor returns this always yields `Ok`.
    fn as_result(&self) -> Result<&C, &Self::Error>;

    /// Returns the actual return value of the constructor.
    ///
    /// # Note
    ///
    /// For infallible constructor returns this always yields `()`
    /// and is basically ignored since this does not get called
    /// if the constructor did not fail.
    fn return_value(&self) -> &Self::ReturnValue;
}

impl<C> ConstructorReturnType<C> for private::Seal<C> {
    type Error = Infallible;
    type ReturnValue = ();

    #[inline]
    fn as_result(&self) -> Result<&C, &Self::Error> {
        Ok(&self.0)
    }

    #[inline]
    fn return_value(&self) -> &Self::ReturnValue {
        &()
    }
}

impl<C, E> ConstructorReturnType<C> for private::Seal<Result<C, E>> {
    const IS_RESULT: bool = true;
    type Error = E;
    type ReturnValue = Result<C, E>;

    #[inline]
    fn as_result(&self) -> Result<&C, &Self::Error> {
        self.0.as_ref()
    }

    #[inline]
    fn return_value(&self) -> &Self::ReturnValue
}

/// Trait used to convert return types of contract initializer routines.
///
/// Only `()` and `Result<(), E>` are allowed contract initializer return types.
/// For `WrapReturnType<C>` where `C` is the contract type the trait converts
/// `()` into `C` and `Result<(), E>` into `Result<C, E>`.
pub trait InitializerReturnType<C>: private::Sealed {
    type Wrapped;

    /// Performs the type conversion of the initialization routine return type.
    fn into_wrapped(self, wrapped: C) -> Self::Wrapped;
}

impl<C> InitializerReturnType<C> for () {
    type Wrapped = C;

    #[inline]
    fn into_wrapped(self, wrapped: C) -> C {
        wrapped
    }
}
impl<C, E> InitializerReturnType<C> for Result<(), E> {
    type Wrapped = Result<C, E>;

    #[inline]
    fn into_wrapped(self, wrapped: C) -> Self::Wrapped {
        self.map(|_| wrapped)
    }
}

/// Configuration for the execution of ink! messages.
#[derive(Debug, Copy, Clone)]
pub struct ExecuteMessageConfig {
    /// Yields `true` if the ink! message accepts payment.
    ///
    /// # Note
    ///
    /// If no ink! message within the same ink! smart contract
    /// is payable then this flag will be `true` since the check
    /// then is moved before the message dispatch as an optimization.
    pub payable: bool,

    /// Yields `true` if the ink! message might mutate contract storage.
    ///
    /// # Note
    ///
    /// This is usually true for `&mut self` ink! messages.
    pub mutates: bool,

    /// Yields `true` if the dynamic storage allocator has been enabled.
    ///
    /// # Note
    ///
    /// Authors can enable it via `#[ink::contract(dynamic_storage_allocator = true)]`.
    pub dynamic_storage_alloc: bool,
}

/// Initiates an ink! message call with the given configuration.
///
/// Returns the contract state pulled from the root storage region upon success.
///
/// # Note
///
/// This workaround, which splits executing an ink! message into initiate
/// and finalize phases, was needed because the `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn initiate_message<Contract>(
    config: ExecuteMessageConfig,
) -> Result<Contract, DispatchError>
where
    Contract: SpreadLayout + ContractEnv,
{
    if !config.payable {
        deny_payment::<<Contract as ContractEnv>::Env>()?;
    }
    if config.dynamic_storage_alloc {
        alloc::initialize(ContractPhase::Call);
    }
    let root_key = Key::from([0x00; 32]);
    let contract = pull_spread_root::<Contract>(&root_key);
    Ok(contract)
}

/// Finalizes an ink! message call with the given configuration.
///
/// This dispatches into fallible and infallible message finalization
/// depending on the given `success` state.
///
/// - If the message call was successful the return value is simply returned
///   and cached storage is pushed back to the contract storage.
/// - If the message call failed the return value result is returned instead
///   and the transaction is signalled to be reverted.
///
/// # Note
///
/// This workaround, which splits executing an ink! message into initiate
/// and finalize phases, was needed because the `is_result_type`
/// and `is_result_err` macros do not work in generic contexts.
#[inline]
pub fn finalize_message<Contract, R>(
    success: bool,
    contract: &Contract,
    config: ExecuteMessageConfig,
    result: &R,
) -> Result<(), DispatchError>
where
    Contract: SpreadLayout,
    R: scale::Encode + 'static,
{
    if success {
        finalize_infallible_message(contract, config, result)
    } else {
        finalize_fallible_message(result)
    }
}

#[inline]
fn finalize_infallible_message<Contract, R>(
    contract: &Contract,
    config: ExecuteMessageConfig,
    result: &R,
) -> Result<(), DispatchError>
where
    Contract: SpreadLayout,
    R: scale::Encode + 'static,
{
    if config.mutates {
        let root_key = Key::from([0x00; 32]);
        push_spread_root::<Contract>(contract, &root_key);
    }
    if config.dynamic_storage_alloc {
        alloc::finalize();
    }
    if TypeId::of::<R>() != TypeId::of::<()>() {
        // Only produce a return value when the return type is not `()`.
        ink_env::return_value::<R>(ReturnFlags::default(), result)
    }
    Ok(())
}

#[inline]
fn finalize_fallible_message<R>(result: &R) -> !
where
    R: scale::Encode + 'static,
{
    // There is no need to push back the intermediate results of the
    // contract since the transaction is going to be reverted.
    ink_env::return_value::<R>(ReturnFlags::default().set_reverted(true), result)
}
{ &self.0 }
identifier_body
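The `InitializerReturnType` conversion in the row above is easiest to see outside of ink!. The sketch below reimplements just that trait under simplifying assumptions: the private `Sealed` supertrait is omitted for brevity, `Default` stands in for `SpreadAllocate`-based allocation, and `Flipper`, `initialize`, and the closures are hypothetical names, not ink! API.

// Minimal sketch of the return-type conversion: `()` wraps to `C`,
// `Result<(), E>` wraps to `Result<C, E>`.
trait InitializerReturnType<C> {
    type Wrapped;
    fn into_wrapped(self, contract: C) -> Self::Wrapped;
}

impl<C> InitializerReturnType<C> for () {
    type Wrapped = C;
    fn into_wrapped(self, contract: C) -> C {
        contract
    }
}

impl<C, E> InitializerReturnType<C> for Result<(), E> {
    type Wrapped = Result<C, E>;
    fn into_wrapped(self, contract: C) -> Result<C, E> {
        self.map(|_| contract)
    }
}

// `Default` stands in for `SpreadAllocate::allocate_spread` in this sketch.
fn initialize<C: Default, F, R>(initializer: F) -> R::Wrapped
where
    F: FnOnce(&mut C) -> R,
    R: InitializerReturnType<C>,
{
    let mut instance = C::default();
    let result = initializer(&mut instance);
    result.into_wrapped(instance)
}

#[derive(Default)]
struct Flipper {
    value: bool,
}

fn main() {
    // Infallible initializer: `R = ()`, so the contract is returned directly.
    let flipper: Flipper = initialize(|c: &mut Flipper| c.value = true);
    assert!(flipper.value);

    // Fallible initializer: `R = Result<(), E>`, so `Result<Flipper, E>` comes back.
    let failed: Result<Flipper, &'static str> =
        initialize(|_c: &mut Flipper| -> Result<(), &'static str> { Err("boom") });
    assert!(failed.is_err());
}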
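One detail worth calling out from `finalize_infallible_message` above is the `TypeId` comparison: the generated code is generic over the message return type `R`, so skipping the output buffer for unit-returning messages needs a type check rather than an overload. A standalone sketch follows, where `return_value` is a hypothetical stand-in for `ink_env::return_value` (the real function hands the SCALE-encoded buffer to the host and never returns; here it just prints).

use std::any::TypeId;

// Hypothetical stand-in for `ink_env::return_value`.
fn return_value<R: std::fmt::Debug>(result: &R) {
    println!("returning {:?}", result);
}

// Mirrors the guard in `finalize_infallible_message`: only a non-`()` return
// type produces an output value.
fn finalize<R: std::fmt::Debug + 'static>(result: &R) {
    if TypeId::of::<R>() != TypeId::of::<()>() {
        return_value(result);
    }
    // For `R == ()` we fall through and return without emitting anything.
}

fn main() {
    finalize(&42u32); // prints "returning 42"
    finalize(&());    // prints nothing: unit returns are elided
}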
exec.rs
// Copyright 2018 Grove Enterprises LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::io::Error;
use std::io::BufReader;
use std::io::prelude::*;
use std::iter::Iterator;
use std::fs::File;
use std::string::String;
use std::convert::*;

extern crate csv;
use super::csv::StringRecord;

use super::api::*;
use super::rel::*;
use super::parser::*;
use super::sqltorel::*;
use super::dataframe::*;
use super::functions::math::*;
use super::functions::geospatial::*;

#[derive(Debug)]
pub enum ExecutionError {
    IoError(Error),
    CsvError(csv::Error),
    ParserError(ParserError),
    Custom(String)
}

impl From<Error> for ExecutionError {
    fn from(e: Error) -> Self {
        ExecutionError::IoError(e)
    }
}

impl From<String> for ExecutionError {
    fn from(e: String) -> Self {
        ExecutionError::Custom(e)
    }
}

impl From<ParserError> for ExecutionError {
    fn from(e: ParserError) -> Self {
        ExecutionError::ParserError(e)
    }
}

/// Represents a csv file with a known schema
#[derive(Debug)]
pub struct CsvRelation {
    file: File,
    schema: Schema
}

pub struct FilterRelation {
    schema: Schema,
    input: Box<SimpleRelation>,
    expr: Expr
}

pub struct ProjectRelation {
    schema: Schema,
    input: Box<SimpleRelation>,
    expr: Vec<Expr>
}

pub struct LimitRelation {
    schema: Schema,
    input: Box<SimpleRelation>,
    limit: usize,
}

impl<'a> CsvRelation {

    pub fn open(file: File, schema: Schema) -> Result<Self, ExecutionError> {
        Ok(CsvRelation { file, schema })
    }

    /// Convert StringRecord into our internal tuple type based on the known schema
    fn create_tuple(&self, r: &StringRecord) -> Result<Row, ExecutionError> {
        assert_eq!(self.schema.columns.len(), r.len());
        let values = self.schema.columns.iter().zip(r.into_iter()).map(|(c, s)| match c.data_type {
            //TODO: remove unwrap use here
            DataType::UnsignedLong => Value::UnsignedLong(s.parse::<u64>().unwrap()),
            DataType::String => Value::String(s.to_string()),
            DataType::Double => Value::Double(s.parse::<f64>().unwrap()),
            _ => panic!("csv unsupported type")
        }).collect();
        Ok(Row::new(values))
    }
}

/// trait for all relations (a relation is essentially just an iterator over tuples with
/// a known schema)
pub trait SimpleRelation {
    /// scan all records in this relation
    fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item = Result<Row, ExecutionError>> + 'a>;
    /// get the schema for this relation
    fn schema<'a>(&'a self) -> &'a Schema;
}

impl SimpleRelation for CsvRelation {

    fn scan<'a>(&'a self, _ctx: &'a ExecutionContext) -> Box<Iterator<Item = Result<Row, ExecutionError>> + 'a> {
        let buf_reader = BufReader::new(&self.file);
        let csv_reader = csv::Reader::from_reader(buf_reader);
        let record_iter = csv_reader.into_records();
        let tuple_iter = record_iter.map(move |r| match r {
            Ok(record) => self.create_tuple(&record),
            Err(e) => Err(ExecutionError::CsvError(e))
        });
        Box::new(tuple_iter)
    }

    fn schema<'a>(&'a self) -> &'a Schema {
        &self.schema
    }
}

impl SimpleRelation for FilterRelation {

    fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item = Result<Row, ExecutionError>> + 'a> {
        Box::new(self.input.scan(ctx).filter(move |t| match t {
            &Ok(ref tuple) => match ctx.evaluate(tuple, &self.schema, &self.expr) {
                Ok(Value::Boolean(b)) => b,
                _ => panic!("Predicate expression evaluated to non-boolean value")
            },
            _ => true // let errors through the filter so they can be handled later
        }))
    }

    fn schema<'a>(&'a self) -> &'a Schema {
        &self.schema
    }
}

impl SimpleRelation for ProjectRelation {

    fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item = Result<Row, ExecutionError>> + 'a> {
        let foo = self.input.scan(ctx).map(move |r| match r {
            Ok(tuple) => {
                let values = self.expr.iter()
                    .map(|e| match e {
                        &Expr::TupleValue(i) => tuple.values[i].clone(),
                        //TODO: relation delegating back to execution context seems wrong way around
                        _ => ctx.evaluate(&tuple, &self.schema, e).unwrap() //TODO: remove unwrap
                        //unimplemented!("Unsupported expression for projection")
                    })
                    .collect();
                Ok(Row::new(values))
            },
            Err(_) => r
        });
        Box::new(foo)
    }

    fn schema<'a>(&'a self) -> &'a Schema {
        &self.schema
    }
}

impl SimpleRelation for LimitRelation {
    fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item = Result<Row, ExecutionError>> + 'a> {
        Box::new(self.input.scan(ctx).take(self.limit))
    }

    fn schema<'a>(&'a self) -> &'a Schema {
        &self.schema
    }
}

/// Execution plans are sent to worker nodes for execution
pub enum ExecutionPlan {
    /// Run a query and return the results to the client
    Interactive { plan: LogicalPlan },
    /// Partition the relation
    Partition { plan: LogicalPlan, partition_count: usize, partition_expr: Expr }
}

#[derive(Debug, Clone)]
pub struct ExecutionContext {
    schemas: HashMap<String, Schema>,
    functions: HashMap<String, FunctionMeta>,
}

impl ExecutionContext {

    pub fn new() -> Self {
        ExecutionContext { schemas: HashMap::new(), functions: HashMap::new() }
    }

    pub fn define_schema(&mut self, name: &str, schema: &Schema) {
        self.schemas.insert(name.to_string(), schema.clone());
    }

    pub fn define_function(&mut self, func: &ScalarFunction) {
        let fm = FunctionMeta {
            name: func.name(),
            args: func.args(),
            return_type: func.return_type()
        };
        self.functions.insert(fm.name.to_lowercase(), fm);
    }

    pub fn sql(&self, sql: &str) -> Result<Box<DataFrame>, ExecutionError> {
        // parse SQL into AST
        let ast = Parser::parse_sql(String::from(sql))?;
        // create a query planner
        let query_planner = SqlToRel::new(self.schemas.clone()); //TODO: pass reference to schemas
        // plan the query (create a logical relational plan)
        let plan = query_planner.sql_to_rel(&ast)?;
        // return the DataFrame
        Ok(Box::new(DF { ctx: Box::new(self.clone()), plan: plan })) //TODO: don't clone context
    }

    /// Open a CSV file
    ///TODO: this is building a relational plan not an execution plan so shouldn't really be here
    pub fn load(&self, filename: &str, schema: &Schema) -> Result<Box<DataFrame>, ExecutionError> {
        let plan = LogicalPlan::CsvFile { filename: filename.to_string(), schema: schema.clone() };
        Ok(Box::new(DF { ctx: Box::new((*self).clone()), plan: Box::new(plan) }))
    }

    pub fn register_table(&mut self, name: String, schema: Schema) {
        self.schemas.insert(name, schema);
    }

    pub fn create_execution_plan(&self, plan: &LogicalPlan) -> Result<Box<SimpleRelation>, ExecutionError> {
        match *plan {
            LogicalPlan::EmptyRelation => {
                panic!()
            },
            LogicalPlan::TableScan { ref table_name, ref schema, ..
            } => {
                // for now, tables are csv files
                let file = File::open(format!("test/data/{}.csv", table_name))?;
                let rel = CsvRelation::open(file, schema.clone())?;
                Ok(Box::new(rel))
            },
            LogicalPlan::CsvFile { ref filename, ref schema } => {
                let file = File::open(filename)?;
                let rel = CsvRelation::open(file, schema.clone())?;
                Ok(Box::new(rel))
            },
            LogicalPlan::Selection { ref expr, ref input, ref schema } => {
                let input_rel = self.create_execution_plan(input)?;
                let rel = FilterRelation {
                    input: input_rel,
                    expr: expr.clone(),
                    schema: schema.clone()
                };
                Ok(Box::new(rel))
            },
            LogicalPlan::Projection { ref expr, ref input, .. } => {
                let input_rel = self.create_execution_plan(&input)?;
                let input_schema = input_rel.schema().clone();
                //TODO: seems to be duplicate of sql_to_rel code
                let project_columns: Vec<Field> = expr.iter().map(|e| {
                    match e {
                        &Expr::TupleValue(i) => input_schema.columns[i].clone(),
                        &Expr::ScalarFunction { ref name, .. } => Field {
                            name: name.clone(),
                            data_type: DataType::Double, //TODO: hard-coded.. no function metadata yet
                            nullable: true
                        },
                        _ => unimplemented!("Unsupported projection expression")
                    }
                }).collect();
                let project_schema = Schema { columns: project_columns };
                let rel = ProjectRelation {
                    input: input_rel,
                    expr: expr.clone(),
                    schema: project_schema,
                };
                Ok(Box::new(rel))
            }
            LogicalPlan::Limit { limit, ref input, ref schema, .. } => {
                let input_rel = self.create_execution_plan(input)?;
                let rel = LimitRelation {
                    input: input_rel,
                    limit: limit,
                    schema: schema.clone()
                };
                Ok(Box::new(rel))
            }
        }
    }

    /// Evaluate a relational expression against a tuple
    pub fn evaluate(&self, tuple: &Row, tt: &Schema, rex: &Expr) -> Result<Value, Box<ExecutionError>> {
        match rex {
            &Expr::BinaryExpr { ref left, ref op, ref right } => {
                let left_value = self.evaluate(tuple, tt, left)?;
                let right_value = self.evaluate(tuple, tt, right)?;
                match op {
                    &Operator::Eq => Ok(Value::Boolean(left_value == right_value)),
                    &Operator::NotEq => Ok(Value::Boolean(left_value != right_value)),
                    &Operator::Lt => Ok(Value::Boolean(left_value < right_value)),
                    &Operator::LtEq => Ok(Value::Boolean(left_value <= right_value)),
                    &Operator::Gt => Ok(Value::Boolean(left_value > right_value)),
                    &Operator::GtEq => Ok(Value::Boolean(left_value >= right_value)),
                }
            },
            &Expr::TupleValue(index) => Ok(tuple.values[index].clone()),
            &Expr::Literal(ref value) => Ok(value.clone()),
            &Expr::ScalarFunction { ref name, ref args } => {
                // evaluate the arguments to the function
                let arg_values: Vec<Value> = args.iter()
                    .map(|a| self.evaluate(tuple, tt, &a))
                    .collect::<Result<Vec<Value>, Box<ExecutionError>>>()?;
                let func = self.load_function_impl(name.as_ref())?;
                match func.execute(arg_values) {
                    Ok(value) => Ok(value),
                    Err(_) => Err(Box::new(ExecutionError::Custom("TBD".to_string()))) //TODO: fix
                }
            }
        }
    }

    /// load a function implementation
    fn load_function_impl(&self, function_name: &str) -> Result<Box<ScalarFunction>, Box<ExecutionError>> {
        //TODO: this is a huge hack since the functions have already been registered with the
        // execution context...
I need to implement this so it dynamically loads the functions match function_name.to_lowercase().as_ref() { "sqrt" => Ok(Box::new(SqrtFunction {})), "st_point" => Ok(Box::new(STPointFunc {})), "st_astext" => Ok(Box::new(STAsText {})), _ => Err(Box::new(ExecutionError::Custom(format!("Unknown function {}", function_name)))) } } pub fn udf(&self, name: &str, args: Vec<Expr>) -> Expr { Expr::ScalarFunction { name: name.to_string(), args: args.clone() } } } pub struct DF { ctx: Box<ExecutionContext>, plan: Box<LogicalPlan> } impl DataFrame for DF { fn select(&self, expr: Vec<Expr>) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Projection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn filter(&self, expr: Expr) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Selection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn write(&self, filename: &str) -> Result<(), DataFrameError> { let execution_plan = self.ctx.create_execution_plan(&self.plan)?; // create output file // println!("Writing csv to {}", filename); let mut file = File::create(filename)?; // implement execution here for now but should be a common method for processing a plan let it = execution_plan.scan(&self.ctx); it.for_each(|t| { match t { Ok(tuple) => { let csv = format!("{}\n", tuple.to_string()); file.write(&csv.into_bytes()).unwrap(); //TODO: remove unwrap }, Err(e) => panic!(format!("Error processing tuple: {:?}", e)) //TODO: error handling } }); Ok(()) } fn col(&self, column_name: &str) -> Result<Expr, DataFrameError> { match self.plan.schema().column(column_name) { Some((i,_)) => Ok(Expr::TupleValue(i)), _ => Err(DataFrameError::InvalidColumn(column_name.to_string())) } } fn schema(&self) -> Schema { self.plan.schema().clone() } fn repartition(&self, _n: u32) -> Result<Box<DataFrame>, DataFrameError> { unimplemented!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_sqrt() { let mut ctx = create_context(); ctx.define_function(&SqrtFunction {}); let df = ctx.sql(&"SELECT id, sqrt(id) FROM people").unwrap(); df.write("_sqrt_out.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_sql_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_Point(lat, lng) FROM uk_cities").unwrap(); df.write("_uk_cities_sql.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_df_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let schema = Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)]); let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap(); // create an expression for invoking a scalar function // let func_expr = Expr::ScalarFunction { // name: "ST_Point".to_string(), // args: vec![df.col("lat").unwrap(), df.col("lng").unwrap()] // }; // invoke custom code as a scalar UDF let func_expr = ctx.udf("ST_Point",vec![ df.col("lat").unwrap(), df.col("lng").unwrap()] ); let df2 = df.select(vec![func_expr]).unwrap(); df2.write("_uk_cities_df.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_chaining_functions() { let mut ctx = create_context();
df.write("_uk_cities_wkt.csv").unwrap(); //TODO: check that generated file has expected contents } fn create_context() -> ExecutionContext { // create execution context let mut ctx = ExecutionContext::new(); // define schemas for test data ctx.define_schema("people", &Schema::new(vec![ Field::new("id", DataType::UnsignedLong, false), Field::new("name", DataType::String, false)])); ctx.define_schema("uk_cities", &Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)])); ctx } }
ctx.define_function(&STPointFunc {}); let df = ctx.sql("SELECT ST_AsText(ST_Point(lat, lng)) FROM uk_cities").unwrap();
random_line_split
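The execution context above resolves UDFs by name and invokes them through the `ScalarFunction` trait: `define_function` reads `name`/`args`/`return_type` to build a `FunctionMeta`, and `evaluate` calls `execute` with already-evaluated argument values. The trait itself lives in `super::api` and is not shown in this file, so the sketch below infers the signatures from those call sites; `DoubleFunction` and the `execute` error type are assumptions, not part of the original code.

// Illustrative UDF, written against the trait as inferred from the call
// sites above; the error type of `execute` is an assumption.
struct DoubleFunction {}

impl ScalarFunction for DoubleFunction {
    fn name(&self) -> String {
        "double".to_string()
    }

    // Declared argument types, consumed by `define_function` for FunctionMeta.
    fn args(&self) -> Vec<Field> {
        vec![Field::new("x", DataType::Double, false)]
    }

    fn return_type(&self) -> DataType {
        DataType::Double
    }

    // Called once per tuple with the already-evaluated argument values.
    fn execute(&self, args: Vec<Value>) -> Result<Value, String> {
        match args.first() {
            Some(&Value::Double(d)) => Ok(Value::Double(d * 2.0)),
            _ => Err("double expects a single Double argument".to_string()),
        }
    }
}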
exec.rs
// Copyright 2018 Grove Enterprises LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashMap; use std::io::Error; use std::io::BufReader; use std::io::prelude::*; use std::iter::Iterator; use std::fs::File; use std::string::String; use std::convert::*; extern crate csv; use super::csv::StringRecord; use super::api::*; use super::rel::*; use super::parser::*; use super::sqltorel::*; use super::dataframe::*; use super::functions::math::*; use super::functions::geospatial::*; #[derive(Debug)] pub enum ExecutionError { IoError(Error), CsvError(csv::Error), ParserError(ParserError), Custom(String) } impl From<Error> for ExecutionError { fn from(e: Error) -> Self { ExecutionError::IoError(e) } } impl From<String> for ExecutionError { fn from(e: String) -> Self { ExecutionError::Custom(e) } } impl From<ParserError> for ExecutionError { fn from(e: ParserError) -> Self { ExecutionError::ParserError(e) } } /// Represents a csv file with a known schema #[derive(Debug)] pub struct CsvRelation { file: File, schema: Schema } pub struct FilterRelation { schema: Schema, input: Box<SimpleRelation>, expr: Expr } pub struct ProjectRelation { schema: Schema, input: Box<SimpleRelation>, expr: Vec<Expr> } pub struct LimitRelation { schema: Schema, input: Box<SimpleRelation>, limit: usize, } impl<'a> CsvRelation { pub fn open(file: File, schema: Schema) -> Result<Self,ExecutionError> { Ok(CsvRelation { file, schema }) } /// Convert StringRecord into our internal tuple type based on the known schema fn create_tuple(&self, r: &StringRecord) -> Result<Row,ExecutionError> { assert_eq!(self.schema.columns.len(), r.len()); let values = self.schema.columns.iter().zip(r.into_iter()).map(|(c,s)| match c.data_type { //TODO: remove unwrap use here DataType::UnsignedLong => Value::UnsignedLong(s.parse::<u64>().unwrap()), DataType::String => Value::String(s.to_string()), DataType::Double => Value::Double(s.parse::<f64>().unwrap()), _ => panic!("csv unsupported type") }).collect(); Ok(Row::new(values)) } } /// trait for all relations (a relation is essentially just an iterator over tuples with /// a known schema) pub trait SimpleRelation { /// scan all records in this relation fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a>; /// get the schema for this relation fn schema<'a>(&'a self) -> &'a Schema; } impl SimpleRelation for CsvRelation { fn scan<'a>(&'a self, _ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a> { let buf_reader = BufReader::new(&self.file); let csv_reader = csv::Reader::from_reader(buf_reader); let record_iter = csv_reader.into_records(); let tuple_iter = record_iter.map(move|r| match r { Ok(record) => self.create_tuple(&record), Err(e) => Err(ExecutionError::CsvError(e)) }); Box::new(tuple_iter) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for FilterRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { 
Box::new(self.input.scan(ctx).filter(move|t| match t { &Ok(ref tuple) => match ctx.evaluate(tuple, &self.schema, &self.expr) { Ok(Value::Boolean(b)) => b, _ => panic!("Predicate expression evaluated to non-boolean value") }, _ => true // let errors through the filter so they can be handled later } )) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for ProjectRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { let foo = self.input.scan(ctx).map(move|r| match r { Ok(tuple) => { let values = self.expr.iter() .map(|e| match e { &Expr::TupleValue(i) => tuple.values[i].clone(), //TODO: relation delegating back to execution context seems wrong way around _ => ctx.evaluate(&tuple,&self.schema, e).unwrap() //TODO: remove unwrap //unimplemented!("Unsupported expression for projection") }) .collect(); Ok(Row::new(values)) }, Err(_) => r }); Box::new(foo) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for LimitRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).take(self.limit)) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } /// Execution plans are sent to worker nodes for execution pub enum ExecutionPlan { /// Run a query and return the results to the client Interactive { plan: LogicalPlan }, /// Partition the relation Partition { plan: LogicalPlan, partition_count: usize, partition_expr: Expr } } #[derive(Debug,Clone)] pub struct ExecutionContext { schemas: HashMap<String, Schema>, functions: HashMap<String, FunctionMeta>, } impl ExecutionContext { pub fn new() -> Self { ExecutionContext { schemas: HashMap::new(), functions: HashMap::new() } } pub fn define_schema(&mut self, name: &str, schema: &Schema) { self.schemas.insert(name.to_string(), schema.clone()); } pub fn define_function(&mut self, func: &ScalarFunction) { let fm = FunctionMeta { name: func.name(), args: func.args(), return_type: func.return_type() }; self.functions.insert(fm.name.to_lowercase(), fm); } pub fn sql(&self, sql: &str) -> Result<Box<DataFrame>, ExecutionError> { // parse SQL into AST let ast = Parser::parse_sql(String::from(sql))?; // create a query planner let query_planner = SqlToRel::new(self.schemas.clone()); //TODO: pass reference to schemas // plan the query (create a logical relational plan) let plan = query_planner.sql_to_rel(&ast)?; // return the DataFrame Ok(Box::new(DF { ctx: Box::new(self.clone()), plan: plan })) //TODO: don't clone context } /// Open a CSV file ///TODO: this is building a relational plan not an execution plan so shouldn't really be here pub fn load(&self, filename: &str, schema: &Schema) -> Result<Box<DataFrame>, ExecutionError> { let plan = LogicalPlan::CsvFile { filename: filename.to_string(), schema: schema.clone() }; Ok(Box::new(DF { ctx: Box::new((*self).clone()), plan: Box::new(plan) })) } pub fn register_table(&mut self, name: String, schema: Schema) { self.schemas.insert(name, schema); } pub fn create_execution_plan(&self, plan: &LogicalPlan) -> Result<Box<SimpleRelation>,ExecutionError> { match *plan { LogicalPlan::EmptyRelation => { panic!() }, LogicalPlan::TableScan { ref table_name, ref schema,.. 
} => { // for now, tables are csv files let file = File::open(format!("test/data/{}.csv", table_name))?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::CsvFile { ref filename, ref schema } => { let file = File::open(filename)?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::Selection { ref expr, ref input, ref schema } => { let input_rel = self.create_execution_plan(input)?; let rel = FilterRelation { input: input_rel, expr: expr.clone(), schema: schema.clone() }; Ok(Box::new(rel)) }, LogicalPlan::Projection { ref expr, ref input,.. } => { let input_rel = self.create_execution_plan(&input)?; let input_schema = input_rel.schema().clone(); //TODO: seems to be duplicate of sql_to_rel code let project_columns: Vec<Field> = expr.iter().map(|e| { match e { &Expr::TupleValue(i) => input_schema.columns[i].clone(), &Expr::ScalarFunction {ref name,.. } => Field { name: name.clone(), data_type: DataType::Double, //TODO: hard-coded.. no function metadata yet nullable: true }, _ => unimplemented!("Unsupported projection expression") } }).collect(); let project_schema = Schema { columns: project_columns }; let rel = ProjectRelation { input: input_rel, expr: expr.clone(), schema: project_schema, }; Ok(Box::new(rel)) } LogicalPlan::Limit { limit, ref input, ref schema,.. } => { let input_rel = self.create_execution_plan(input)?; let rel = LimitRelation { input: input_rel, limit: limit, schema: schema.clone() }; Ok(Box::new(rel)) } } } /// Evaluate a relational expression against a tuple pub fn evaluate(&self, tuple: &Row, tt: &Schema, rex: &Expr) -> Result<Value, Box<ExecutionError>> { match rex { &Expr::BinaryExpr { ref left, ref op, ref right } => { let left_value = self.evaluate(tuple, tt, left)?; let right_value = self.evaluate(tuple, tt, right)?; match op { &Operator::Eq => Ok(Value::Boolean(left_value == right_value)), &Operator::NotEq => Ok(Value::Boolean(left_value!= right_value)), &Operator::Lt => Ok(Value::Boolean(left_value < right_value)), &Operator::LtEq => Ok(Value::Boolean(left_value <= right_value)), &Operator::Gt => Ok(Value::Boolean(left_value > right_value)), &Operator::GtEq => Ok(Value::Boolean(left_value >= right_value)), } }, &Expr::TupleValue(index) => Ok(tuple.values[index].clone()), &Expr::Literal(ref value) => Ok(value.clone()), &Expr::ScalarFunction { ref name, ref args } => { // evaluate the arguments to the function let arg_values : Vec<Value> = args.iter() .map(|a| self.evaluate(tuple, tt, &a)) .collect::<Result<Vec<Value>, Box<ExecutionError>>>()?; let func = self.load_function_impl(name.as_ref())?; match func.execute(arg_values) { Ok(value) => Ok(value), Err(_) => Err(Box::new(ExecutionError::Custom("TBD".to_string()))) //TODO: fix } } } } /// load a function implementation fn load_function_impl(&self, function_name: &str) -> Result<Box<ScalarFunction>,Box<ExecutionError>> { //TODO: this is a huge hack since the functions have already been registered with the // execution context... 
I need to implement this so it dynamically loads the functions match function_name.to_lowercase().as_ref() { "sqrt" => Ok(Box::new(SqrtFunction {})), "st_point" => Ok(Box::new(STPointFunc {})), "st_astext" => Ok(Box::new(STAsText {})), _ => Err(Box::new(ExecutionError::Custom(format!("Unknown function {}", function_name)))) } } pub fn udf(&self, name: &str, args: Vec<Expr>) -> Expr { Expr::ScalarFunction { name: name.to_string(), args: args.clone() } } } pub struct DF { ctx: Box<ExecutionContext>, plan: Box<LogicalPlan> } impl DataFrame for DF { fn
(&self, expr: Vec<Expr>) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Projection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn filter(&self, expr: Expr) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Selection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn write(&self, filename: &str) -> Result<(), DataFrameError> { let execution_plan = self.ctx.create_execution_plan(&self.plan)?; // create output file // println!("Writing csv to {}", filename); let mut file = File::create(filename)?; // implement execution here for now but should be a common method for processing a plan let it = execution_plan.scan(&self.ctx); it.for_each(|t| { match t { Ok(tuple) => { let csv = format!("{}\n", tuple.to_string()); file.write(&csv.into_bytes()).unwrap(); //TODO: remove unwrap }, Err(e) => panic!(format!("Error processing tuple: {:?}", e)) //TODO: error handling } }); Ok(()) } fn col(&self, column_name: &str) -> Result<Expr, DataFrameError> { match self.plan.schema().column(column_name) { Some((i,_)) => Ok(Expr::TupleValue(i)), _ => Err(DataFrameError::InvalidColumn(column_name.to_string())) } } fn schema(&self) -> Schema { self.plan.schema().clone() } fn repartition(&self, _n: u32) -> Result<Box<DataFrame>, DataFrameError> { unimplemented!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_sqrt() { let mut ctx = create_context(); ctx.define_function(&SqrtFunction {}); let df = ctx.sql(&"SELECT id, sqrt(id) FROM people").unwrap(); df.write("_sqrt_out.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_sql_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_Point(lat, lng) FROM uk_cities").unwrap(); df.write("_uk_cities_sql.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_df_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let schema = Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)]); let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap(); // create an expression for invoking a scalar function // let func_expr = Expr::ScalarFunction { // name: "ST_Point".to_string(), // args: vec![df.col("lat").unwrap(), df.col("lng").unwrap()] // }; // invoke custom code as a scalar UDF let func_expr = ctx.udf("ST_Point",vec![ df.col("lat").unwrap(), df.col("lng").unwrap()] ); let df2 = df.select(vec![func_expr]).unwrap(); df2.write("_uk_cities_df.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_chaining_functions() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_AsText(ST_Point(lat, lng)) FROM uk_cities").unwrap(); df.write("_uk_cities_wkt.csv").unwrap(); //TODO: check that generated file has expected contents } fn create_context() -> ExecutionContext { // create execution context let mut ctx = ExecutionContext::new(); // define schemas for test data ctx.define_schema("people", &Schema::new(vec![ Field::new("id", DataType::UnsignedLong, false), Field::new("name", DataType::String, false)])); ctx.define_schema("uk_cities", &Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", 
DataType::Double, false), Field::new("lng", DataType::Double, false)])); ctx } }
select
identifier_name
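For orientation, the `DataFrame` methods above compose in the obvious way. Below is a sketch of a filter-then-project pipeline using only calls shown in this file, under the assumption that `Expr::BinaryExpr` boxes its operands (consistent with how `evaluate` dereferences them) and reusing the `create_context` test helper:

// Hypothetical end-to-end pipeline over the uk_cities test data.
fn example_pipeline() {
    let ctx = create_context();
    let schema = Schema::new(vec![
        Field::new("city", DataType::String, false),
        Field::new("lat", DataType::Double, false),
        Field::new("lng", DataType::Double, false),
    ]);
    let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap();

    // Keep rows north of the equator: lat > 0.0
    let filtered = df
        .filter(Expr::BinaryExpr {
            left: Box::new(df.col("lat").unwrap()),
            op: Operator::Gt,
            right: Box::new(Expr::Literal(Value::Double(0.0))),
        })
        .unwrap();

    // Project just the city name and write the result out as CSV.
    let projected = filtered.select(vec![df.col("city").unwrap()]).unwrap();
    projected.write("_cities_above_equator.csv").unwrap();
}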
exec.rs
// Copyright 2018 Grove Enterprises LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashMap; use std::io::Error; use std::io::BufReader; use std::io::prelude::*; use std::iter::Iterator; use std::fs::File; use std::string::String; use std::convert::*; extern crate csv; use super::csv::StringRecord; use super::api::*; use super::rel::*; use super::parser::*; use super::sqltorel::*; use super::dataframe::*; use super::functions::math::*; use super::functions::geospatial::*; #[derive(Debug)] pub enum ExecutionError { IoError(Error), CsvError(csv::Error), ParserError(ParserError), Custom(String) } impl From<Error> for ExecutionError { fn from(e: Error) -> Self { ExecutionError::IoError(e) } } impl From<String> for ExecutionError { fn from(e: String) -> Self { ExecutionError::Custom(e) } } impl From<ParserError> for ExecutionError { fn from(e: ParserError) -> Self { ExecutionError::ParserError(e) } } /// Represents a csv file with a known schema #[derive(Debug)] pub struct CsvRelation { file: File, schema: Schema } pub struct FilterRelation { schema: Schema, input: Box<SimpleRelation>, expr: Expr } pub struct ProjectRelation { schema: Schema, input: Box<SimpleRelation>, expr: Vec<Expr> } pub struct LimitRelation { schema: Schema, input: Box<SimpleRelation>, limit: usize, } impl<'a> CsvRelation { pub fn open(file: File, schema: Schema) -> Result<Self,ExecutionError> { Ok(CsvRelation { file, schema }) } /// Convert StringRecord into our internal tuple type based on the known schema fn create_tuple(&self, r: &StringRecord) -> Result<Row,ExecutionError> { assert_eq!(self.schema.columns.len(), r.len()); let values = self.schema.columns.iter().zip(r.into_iter()).map(|(c,s)| match c.data_type { //TODO: remove unwrap use here DataType::UnsignedLong => Value::UnsignedLong(s.parse::<u64>().unwrap()), DataType::String => Value::String(s.to_string()), DataType::Double => Value::Double(s.parse::<f64>().unwrap()), _ => panic!("csv unsupported type") }).collect(); Ok(Row::new(values)) } } /// trait for all relations (a relation is essentially just an iterator over tuples with /// a known schema) pub trait SimpleRelation { /// scan all records in this relation fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a>; /// get the schema for this relation fn schema<'a>(&'a self) -> &'a Schema; } impl SimpleRelation for CsvRelation { fn scan<'a>(&'a self, _ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row,ExecutionError>> + 'a> { let buf_reader = BufReader::new(&self.file); let csv_reader = csv::Reader::from_reader(buf_reader); let record_iter = csv_reader.into_records(); let tuple_iter = record_iter.map(move|r| match r { Ok(record) => self.create_tuple(&record), Err(e) => Err(ExecutionError::CsvError(e)) }); Box::new(tuple_iter) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for FilterRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { 
Box::new(self.input.scan(ctx).filter(move|t| match t { &Ok(ref tuple) => match ctx.evaluate(tuple, &self.schema, &self.expr) { Ok(Value::Boolean(b)) => b, _ => panic!("Predicate expression evaluated to non-boolean value") }, _ => true // let errors through the filter so they can be handled later } )) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for ProjectRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { let foo = self.input.scan(ctx).map(move|r| match r { Ok(tuple) => { let values = self.expr.iter() .map(|e| match e { &Expr::TupleValue(i) => tuple.values[i].clone(), //TODO: relation delegating back to execution context seems wrong way around _ => ctx.evaluate(&tuple,&self.schema, e).unwrap() //TODO: remove unwrap //unimplemented!("Unsupported expression for projection") }) .collect(); Ok(Row::new(values)) }, Err(_) => r }); Box::new(foo) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } impl SimpleRelation for LimitRelation { fn scan<'a>(&'a self, ctx: &'a ExecutionContext) -> Box<Iterator<Item=Result<Row, ExecutionError>> + 'a> { Box::new(self.input.scan(ctx).take(self.limit)) } fn schema<'a>(&'a self) -> &'a Schema { &self.schema } } /// Execution plans are sent to worker nodes for execution pub enum ExecutionPlan { /// Run a query and return the results to the client Interactive { plan: LogicalPlan }, /// Partition the relation Partition { plan: LogicalPlan, partition_count: usize, partition_expr: Expr } } #[derive(Debug,Clone)] pub struct ExecutionContext { schemas: HashMap<String, Schema>, functions: HashMap<String, FunctionMeta>, } impl ExecutionContext { pub fn new() -> Self { ExecutionContext { schemas: HashMap::new(), functions: HashMap::new() } } pub fn define_schema(&mut self, name: &str, schema: &Schema) { self.schemas.insert(name.to_string(), schema.clone()); } pub fn define_function(&mut self, func: &ScalarFunction) { let fm = FunctionMeta { name: func.name(), args: func.args(), return_type: func.return_type() }; self.functions.insert(fm.name.to_lowercase(), fm); } pub fn sql(&self, sql: &str) -> Result<Box<DataFrame>, ExecutionError> { // parse SQL into AST let ast = Parser::parse_sql(String::from(sql))?; // create a query planner let query_planner = SqlToRel::new(self.schemas.clone()); //TODO: pass reference to schemas // plan the query (create a logical relational plan) let plan = query_planner.sql_to_rel(&ast)?; // return the DataFrame Ok(Box::new(DF { ctx: Box::new(self.clone()), plan: plan })) //TODO: don't clone context } /// Open a CSV file ///TODO: this is building a relational plan not an execution plan so shouldn't really be here pub fn load(&self, filename: &str, schema: &Schema) -> Result<Box<DataFrame>, ExecutionError> { let plan = LogicalPlan::CsvFile { filename: filename.to_string(), schema: schema.clone() }; Ok(Box::new(DF { ctx: Box::new((*self).clone()), plan: Box::new(plan) })) } pub fn register_table(&mut self, name: String, schema: Schema) { self.schemas.insert(name, schema); } pub fn create_execution_plan(&self, plan: &LogicalPlan) -> Result<Box<SimpleRelation>,ExecutionError> { match *plan { LogicalPlan::EmptyRelation => { panic!() }, LogicalPlan::TableScan { ref table_name, ref schema,.. 
} => { // for now, tables are csv files let file = File::open(format!("test/data/{}.csv", table_name))?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::CsvFile { ref filename, ref schema } => { let file = File::open(filename)?; let rel = CsvRelation::open(file, schema.clone())?; Ok(Box::new(rel)) }, LogicalPlan::Selection { ref expr, ref input, ref schema } => { let input_rel = self.create_execution_plan(input)?; let rel = FilterRelation { input: input_rel, expr: expr.clone(), schema: schema.clone() }; Ok(Box::new(rel)) }, LogicalPlan::Projection { ref expr, ref input,.. } => { let input_rel = self.create_execution_plan(&input)?; let input_schema = input_rel.schema().clone(); //TODO: seems to be duplicate of sql_to_rel code let project_columns: Vec<Field> = expr.iter().map(|e| { match e { &Expr::TupleValue(i) => input_schema.columns[i].clone(), &Expr::ScalarFunction {ref name,.. } => Field { name: name.clone(), data_type: DataType::Double, //TODO: hard-coded.. no function metadata yet nullable: true }, _ => unimplemented!("Unsupported projection expression") } }).collect(); let project_schema = Schema { columns: project_columns }; let rel = ProjectRelation { input: input_rel, expr: expr.clone(), schema: project_schema, }; Ok(Box::new(rel)) } LogicalPlan::Limit { limit, ref input, ref schema,.. } => { let input_rel = self.create_execution_plan(input)?; let rel = LimitRelation { input: input_rel, limit: limit, schema: schema.clone() }; Ok(Box::new(rel)) } } } /// Evaluate a relational expression against a tuple pub fn evaluate(&self, tuple: &Row, tt: &Schema, rex: &Expr) -> Result<Value, Box<ExecutionError>> { match rex { &Expr::BinaryExpr { ref left, ref op, ref right } => { let left_value = self.evaluate(tuple, tt, left)?; let right_value = self.evaluate(tuple, tt, right)?; match op { &Operator::Eq => Ok(Value::Boolean(left_value == right_value)), &Operator::NotEq => Ok(Value::Boolean(left_value!= right_value)), &Operator::Lt => Ok(Value::Boolean(left_value < right_value)), &Operator::LtEq => Ok(Value::Boolean(left_value <= right_value)), &Operator::Gt => Ok(Value::Boolean(left_value > right_value)), &Operator::GtEq => Ok(Value::Boolean(left_value >= right_value)), } }, &Expr::TupleValue(index) => Ok(tuple.values[index].clone()), &Expr::Literal(ref value) => Ok(value.clone()), &Expr::ScalarFunction { ref name, ref args } => { // evaluate the arguments to the function let arg_values : Vec<Value> = args.iter() .map(|a| self.evaluate(tuple, tt, &a)) .collect::<Result<Vec<Value>, Box<ExecutionError>>>()?; let func = self.load_function_impl(name.as_ref())?; match func.execute(arg_values) { Ok(value) => Ok(value), Err(_) => Err(Box::new(ExecutionError::Custom("TBD".to_string()))) //TODO: fix } } } } /// load a function implementation fn load_function_impl(&self, function_name: &str) -> Result<Box<ScalarFunction>,Box<ExecutionError>>
pub fn udf(&self, name: &str, args: Vec<Expr>) -> Expr { Expr::ScalarFunction { name: name.to_string(), args: args.clone() } } } pub struct DF { ctx: Box<ExecutionContext>, plan: Box<LogicalPlan> } impl DataFrame for DF { fn select(&self, expr: Vec<Expr>) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Projection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn filter(&self, expr: Expr) -> Result<Box<DataFrame>, DataFrameError> { let plan = LogicalPlan::Selection { expr: expr, input: self.plan.clone(), schema: self.plan.schema().clone() }; Ok(Box::new(DF { ctx: self.ctx.clone(), plan: Box::new(plan) })) } fn write(&self, filename: &str) -> Result<(), DataFrameError> { let execution_plan = self.ctx.create_execution_plan(&self.plan)?; // create output file // println!("Writing csv to {}", filename); let mut file = File::create(filename)?; // implement execution here for now but should be a common method for processing a plan let it = execution_plan.scan(&self.ctx); it.for_each(|t| { match t { Ok(tuple) => { let csv = format!("{}\n", tuple.to_string()); file.write(&csv.into_bytes()).unwrap(); //TODO: remove unwrap }, Err(e) => panic!(format!("Error processing tuple: {:?}", e)) //TODO: error handling } }); Ok(()) } fn col(&self, column_name: &str) -> Result<Expr, DataFrameError> { match self.plan.schema().column(column_name) { Some((i,_)) => Ok(Expr::TupleValue(i)), _ => Err(DataFrameError::InvalidColumn(column_name.to_string())) } } fn schema(&self) -> Schema { self.plan.schema().clone() } fn repartition(&self, _n: u32) -> Result<Box<DataFrame>, DataFrameError> { unimplemented!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_sqrt() { let mut ctx = create_context(); ctx.define_function(&SqrtFunction {}); let df = ctx.sql(&"SELECT id, sqrt(id) FROM people").unwrap(); df.write("_sqrt_out.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_sql_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_Point(lat, lng) FROM uk_cities").unwrap(); df.write("_uk_cities_sql.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_df_udf_udt() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let schema = Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)]); let df = ctx.load("test/data/uk_cities.csv", &schema).unwrap(); // create an expression for invoking a scalar function // let func_expr = Expr::ScalarFunction { // name: "ST_Point".to_string(), // args: vec![df.col("lat").unwrap(), df.col("lng").unwrap()] // }; // invoke custom code as a scalar UDF let func_expr = ctx.udf("ST_Point",vec![ df.col("lat").unwrap(), df.col("lng").unwrap()] ); let df2 = df.select(vec![func_expr]).unwrap(); df2.write("_uk_cities_df.csv").unwrap(); //TODO: check that generated file has expected contents } #[test] fn test_chaining_functions() { let mut ctx = create_context(); ctx.define_function(&STPointFunc {}); let df = ctx.sql(&"SELECT ST_AsText(ST_Point(lat, lng)) FROM uk_cities").unwrap(); df.write("_uk_cities_wkt.csv").unwrap(); //TODO: check that generated file has expected contents } fn create_context() -> ExecutionContext { // create execution context let mut ctx = ExecutionContext::new(); // define schemas for test data 
ctx.define_schema("people", &Schema::new(vec![ Field::new("id", DataType::UnsignedLong, false), Field::new("name", DataType::String, false)])); ctx.define_schema("uk_cities", &Schema::new(vec![ Field::new("city", DataType::String, false), Field::new("lat", DataType::Double, false), Field::new("lng", DataType::Double, false)])); ctx } }
{ //TODO: this is a huge hack since the functions have already been registered with the // execution context ... I need to implement this so it dynamically loads the functions match function_name.to_lowercase().as_ref() { "sqrt" => Ok(Box::new(SqrtFunction {})), "st_point" => Ok(Box::new(STPointFunc {})), "st_astext" => Ok(Box::new(STAsText {})), _ => Err(Box::new(ExecutionError::Custom(format!("Unknown function {}", function_name)))) } }
identifier_body
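The TODO in `load_function_impl` points at the real fix: the context already registers functions in `define_function`, so name resolution should go through a registry rather than a hard-coded match. A minimal sketch of that direction, keeping the source's boxed-trait-object style; the factory map is hypothetical and not part of the original API:

use std::collections::HashMap;

// Hypothetical factory functions, one per registered implementation.
fn make_sqrt() -> Box<ScalarFunction> { Box::new(SqrtFunction {}) }
fn make_st_point() -> Box<ScalarFunction> { Box::new(STPointFunc {}) }
fn make_st_astext() -> Box<ScalarFunction> { Box::new(STAsText {}) }

// Map a lowercase function name to a factory producing a fresh instance.
fn build_registry() -> HashMap<String, fn() -> Box<ScalarFunction>> {
    let mut registry: HashMap<String, fn() -> Box<ScalarFunction>> = HashMap::new();
    registry.insert("sqrt".to_string(), make_sqrt);
    registry.insert("st_point".to_string(), make_st_point);
    registry.insert("st_astext".to_string(), make_st_astext);
    registry
}

// Same contract as `load_function_impl`, minus the hard-coded match.
fn load_from_registry(
    registry: &HashMap<String, fn() -> Box<ScalarFunction>>,
    name: &str,
) -> Result<Box<ScalarFunction>, Box<ExecutionError>> {
    match registry.get(&name.to_lowercase()) {
        Some(factory) => Ok(factory()),
        None => Err(Box::new(ExecutionError::Custom(format!(
            "Unknown function {}",
            name
        )))),
    }
}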
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn reset(&mut self)
} struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. { let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0, next_y: window_height / 3}; canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", 
].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"), ("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit {.. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. } => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height,.. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name()) { let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height,.. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } } _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
{ }
identifier_body
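The extracted body above is just the trait's default no-op `reset`: stateless strategies such as `RandomPositionStrategy` inherit it, while `LeftToRightStrategy` overrides it to rewind its cursor. As a further illustration (not in the original program), here is a stateless strategy that relies on that default:

// Hypothetical third strategy: always centre the rect in the window.
// Stateless, so the default no-op `reset` is exactly what it needs.
struct CenteredStrategy {}

impl PositionStrategy for CenteredStrategy {
    fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect {
        let x = within_rect.width().saturating_sub(rect.width()) / 2;
        let y = within_rect.height().saturating_sub(rect.height()) / 2;
        rect!(x, y, rect.width(), rect.height())
    }
}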
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn reset(&mut self) { } } struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. 
{ let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0, next_y: window_height / 3}; canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"),
.collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit {.. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. } => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height,.. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name()) { let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height,.. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } } _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string()))
random_line_split
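`random_colour` keeps regenerating until the new hue is at least 40 degrees from the current colour's hue, so consecutive backgrounds and glyphs stay visually distinct. A property-style check of that invariant is sketched below; the 35-degree threshold is a judgment call allowing slack for u8 quantization on the HSL-to-RGB-and-back round trip:

#[cfg(test)]
mod colour_tests {
    use super::*;

    #[test]
    fn generated_colour_is_hue_distant() {
        let base = Color::RGB(200, 40, 40);
        let base_hue = HSL::from_rgb(&[base.r, base.g, base.b]).h;
        for _ in 0..100 {
            let c = random_colour(base);
            let hue = HSL::from_rgb(&[c.r, c.g, c.b]).h;
            // 40.0 is guaranteed before the RGB round trip; allow ~5 degrees
            // of slack for quantization to u8 channels and back.
            assert!((hue - base_hue).abs() >= 35.0);
        }
    }
}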
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn
(&mut self) { } } struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. { let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load test WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0, next_y: window_height / 3}; canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their postitions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", 
"7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"), ("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let mut background_color = random_colour(Color::RGB(255, 255, 255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit {.. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. } => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height,.. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); //rect!(150, 150, width, height); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(&note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name()) { let mut surface = load_image(&filename); let sf = (100f64 / surface.height() as f64); println!("{}", sf ); let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height,.. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } } _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(&texture, None, Some(target.clone())).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
reset
identifier_name
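The sound path in the event loop does four things on every keypress: load the WAV, convert it to the opened device's spec, swap it into the queue, and resume playback. The same steps factored into a helper; a sketch only, since the original inlines this in the match arm:

// Sketch: the WAV playback steps from the event loop as a reusable helper.
fn play_note(audio_queue: &AudioQueue<u8>, note: &str) {
    let wav = load_sound(note);
    let spec = audio_queue.spec();
    // Convert from the file's format/channels/rate to the device's.
    let cvt = AudioCVT::new(
        wav.format, wav.channels, wav.freq,
        spec.format, spec.channels, spec.freq,
    ).expect("Could not convert WAV file");
    let data = cvt.convert(wav.buffer().to_vec());
    // Drop whatever is still queued so the new note starts immediately.
    audio_queue.clear();
    audio_queue.queue(&data);
    audio_queue.resume();
}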
main.rs
extern crate hsl; extern crate rand; extern crate sdl2; use sdl2::audio::{AudioCVT, AudioSpecDesired, AudioSpecWAV, AudioQueue}; use sdl2::event::Event; use sdl2::image::{INIT_PNG, LoadSurface}; use sdl2::gfx::rotozoom::RotozoomSurface; use sdl2::keyboard::Keycode; use sdl2::pixels::Color; use sdl2::rect::Rect; use sdl2::render::{TextureQuery}; use sdl2::surface::Surface; use sdl2::video::FullscreenType; use hsl::HSL; use rand::{thread_rng, Rng}; use std::collections::{HashSet, HashMap}; use std::borrow::Cow; use std::path::{Path, PathBuf}; use std::time::Duration; macro_rules! rect( ($x:expr, $y:expr, $w:expr, $h:expr) => ( Rect::new($x as i32, $y as i32, $w as u32, $h as u32) ) ); trait PositionStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect; fn reset(&mut self) { } } struct RandomPositionStrategy {} impl PositionStrategy for RandomPositionStrategy { // Return a random position that fits rect within rect fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { let rx: f64 = thread_rng().gen(); let ry: f64 = thread_rng().gen(); let posx = rx * (within_rect.width() - 1 * rect.width()) as f64; let posy = ry * (within_rect.height() - 1 * rect.height()) as f64; rect!(posx as f64, posy as f64, rect.width(), rect.height()) } } struct LeftToRightStrategy { next_x: u32, next_y: u32, } impl PositionStrategy for LeftToRightStrategy { fn next_position(&mut self, rect: Rect, within_rect: Rect) -> Rect { if self.next_x > within_rect.right() as u32 { self.next_x = 0; self.next_y = self.next_y + rect.height() as u32; } if self.next_y > within_rect.bottom() as u32 { self.next_y = 0; } let y = self.next_y; let x = self.next_x; self.next_x = x + rect.width(); rect!(x, y, rect.width(), rect.height()) } fn reset(&mut self) { self.next_x = 0; self.next_y = 0; } } fn random_colour(c: Color) -> Color { let not_near_hsl = HSL::from_rgb(&[c.r, c.g, c.b]); let mut generated = not_near_hsl.clone(); while (generated.h - not_near_hsl.h).abs() < 40. 
{ let h: f64 = thread_rng().gen(); generated = HSL { h: h * 360.0, s: 1_f64, l: 0.5_f64, }; } let rgb = generated.to_rgb(); return Color::RGB(rgb.0, rgb.1, rgb.2); } fn load_sound(note: &str) -> AudioSpecWAV { // Load a sound let filename = format!("{}.wav", note); let path: PathBuf = ["./sounds", &filename].iter().collect(); let wav_file: Cow<'static, Path> = Cow::from(path); AudioSpecWAV::load_wav(wav_file.clone()) .expect("Could not load WAV file") } fn load_image(fname: &str) -> Surface { // Load an image let filename = format!("{}.png", fname); let path: PathBuf = ["./images", &filename].iter().collect(); let image_file: Cow<'static, Path> = Cow::from(path); Surface::from_file(image_file.clone()) .expect("Could not load image file") } pub fn main() { let sdl_context = sdl2::init().unwrap(); let video_subsystem = sdl_context.video().unwrap(); let audio_subsystem = sdl_context.audio().unwrap(); let _image_context = sdl2::image::init(INIT_PNG).unwrap(); let (window_width, window_height) = (800, 600); let mut window = video_subsystem .window("Bish Bash Bosh", window_width, window_height) .position_centered() .opengl() .build() .unwrap(); window.set_fullscreen(FullscreenType::Desktop).unwrap(); window.set_grab(true); let (window_width, window_height) = window.size(); let mut canvas = window.into_canvas().build().unwrap(); let texture_creator = canvas.texture_creator(); let mut event_pump = sdl_context.event_pump().unwrap(); let ttf_context = sdl2::ttf::init().unwrap(); // Load a font let mut font = ttf_context.load_font("DejaVuSans-Bold.ttf", 112).unwrap(); font.set_style(sdl2::ttf::STYLE_BOLD); let desired_spec = AudioSpecDesired { freq: Some(44_100), channels: Some(1), // mono samples: None, // default }; let audio_queue: AudioQueue<u8> = audio_subsystem .open_queue(None, &desired_spec) .unwrap(); // let mut position_strategy = RandomPositionStrategy { }; let mut position_strategy = LeftToRightStrategy { next_x: 0, next_y: window_height / 3 }; canvas.set_draw_color(Color::RGB(255, 0, 0)); canvas.clear(); canvas.present(); // Keep track of all displayed characters, and their positions let mut drawables = vec![]; let drawable_keys: HashSet<String> = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", ].iter() .map(|s| s.to_string()) .collect(); let noisy_keys: HashMap<String, String> = [ ("F1", "37a"), ("F2", "38b"), ("F3", "39bb"), ("F4", "40c"), ("F5", "41c"), ("F6", "42d"), ("F7", "43e"), ("F8", "44eb"), ("F9", "45f"), ("F10", "46f"), ("F11", "47g"), ("F12", "48g"), ("A", "alpha-a"), ("B", "alpha-b"), ("C", "alpha-c"), ("D", "alpha-d"), ("E", "alpha-e"), ("F", "alpha-f"), ("G", "alpha-g"), ("H", "alpha-h"), ("I", "alpha-i"), ("J", "alpha-j"), ("K", "alpha-k"), ("L", "alpha-l"), ("M", "alpha-m"), ("N", "alpha-n"), ("O", "alpha-o"), ("P", "alpha-p"), ("Q", "alpha-q"), ("R", "alpha-r"), ("S", "alpha-s"), ("T", "alpha-t"), ("U", "alpha-u"), ("V", "alpha-v"), ("W", "alpha-w"), ("X", "alpha-x"), ("Y", "alpha-y"), ("Z", "alpha-z"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let images: HashMap<String, String> = [ ("T", "T"), ("B", "buzz"), ("C", "chase"), ("D", "dumbo"), ("G", "geo"), ("H", "harrison"), ("I", "igglepiggle"), ("M", "mickey"), ("P", "peppa"), ("S", "simba"), ("U", "upsiedaisy"), ("W", "woody"), ].iter() .map(|(s1, s2)| (s1.to_string(), s2.to_string())) .collect(); let mut background_color = random_colour(Color::RGB(255, 255,
255)); 'running: loop { for event in event_pump.poll_iter() { match event { Event::Quit { .. } | Event::KeyDown { keycode: Some(Keycode::Escape), repeat: true, .. } => break 'running, Event::KeyDown { keycode: Some(Keycode::Return), repeat: false, .. } => { position_strategy.reset(); drawables.clear(); background_color = random_colour(Color::RGB(255, 255, 255)); } Event::KeyDown { keycode: Some(key), repeat: false, .. } => { if drawable_keys.contains(&key.name()) { let colour = random_colour(background_color); let surface = font.render(&key.name()).blended(colour).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); } if let Some(note) = noisy_keys.get(&key.name()) { let wav = load_sound(note); let spec = audio_queue.spec(); let cvt = AudioCVT::new( wav.format, wav.channels, wav.freq, spec.format, spec.channels, spec.freq, ).expect("Could not convert WAV file"); let data = cvt.convert(wav.buffer().to_vec()); audio_queue.clear(); audio_queue.queue(&data); // Start playback audio_queue.resume(); } if let Some(filename) = images.get(&key.name())
} _ => {} } } // Draw the chars canvas.set_draw_color(background_color); canvas.clear(); for &(ref texture, target) in drawables.iter() { canvas.copy(texture, None, Some(target)).unwrap(); } canvas.present(); ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60)); } }
{ let surface = load_image(filename); // scale the sprite so it is roughly 100px tall let sf = 100f64 / surface.height() as f64; let surface = surface.rotozoom(0f64, sf, false).unwrap(); let texture = texture_creator .create_texture_from_surface(&surface) .unwrap(); let TextureQuery { width, height, .. } = texture.query(); let target = position_strategy.next_position( rect!(0, 0, width, height), rect!(0, 0, window_width, window_height), ); drawables.push((texture, target)); }
conditional_block
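A note on the sample above: `random_colour` keeps resampling a hue until its distance from the reference hue is at least 40 degrees, and the candidate starts as a clone of the reference, so the loop always runs at least once. Below is a minimal, dependency-free sketch of that rejection loop; `pick_distant_hue` and the toy generator are hypothetical names introduced here for illustration, not part of the original program. As in the original, the distance is linear rather than circular, so hues that straddle the 0/360 wrap (say 350 and 10) count as far apart even though they look similar on screen.

// A sketch of the hue-rejection idea used by `random_colour` above.
fn pick_distant_hue(reference_h: f64, mut next_unit: impl FnMut() -> f64) -> f64 {
    let mut h = reference_h; // start equal so the loop runs at least once
    while (h - reference_h).abs() < 40.0 {
        h = next_unit() * 360.0; // resample a hue in [0, 360)
    }
    h
}

fn main() {
    // A tiny deterministic stand-in for `thread_rng().gen::<f64>()`.
    let mut seed = 12.0_f64;
    let h = pick_distant_hue(10.0, move || {
        seed = (seed * 9301.0 + 49297.0) % 233280.0;
        seed / 233280.0
    });
    assert!((h - 10.0).abs() >= 40.0);
    println!("picked hue: {h}");
}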
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>; } } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum Mode { Verify, Overwrite, } /// Actually execute all transitioning for `block`. 
pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce!= expected_nonce && tx.nonce > 0 { vec![encode(&tx.from, tx.nonce - 1)] } else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header
} if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce != expected_nonce { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if tx.amount > from_balance { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes());
sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime. native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); 
t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
{ let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root));
identifier_body
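The `validate_transaction` function in the sample above orders out-of-sequence nonces through transaction-pool tags: a transaction ahead of the account's expected nonce requires the tag of its predecessor nonce and provides a tag for its own, so the pool can chain them. Below is a small illustrative reduction of that scheme; the function name is hypothetical and plain tuples stand in for the SCALE-encoded `(AccountId, nonce)` tags used by the runtime.

// Mirror of the requires/provides logic in `validate_transaction` above.
fn tags(from: u8, nonce: u64, expected_nonce: u64) -> (Vec<(u8, u64)>, Vec<(u8, u64)>) {
    let requires = if nonce != expected_nonce && nonce > 0 {
        vec![(from, nonce - 1)] // must wait until the previous nonce is included
    } else {
        vec![] // ready immediately
    };
    let provides = vec![(from, nonce)]; // unblocks the follow-up nonce
    (requires, provides)
}

fn main() {
    // Expected nonce is 5, so a transaction with nonce 7 waits on nonce 6...
    assert_eq!(tags(1, 7, 5), (vec![(1, 6)], vec![(1, 7)]));
    // ...while a transaction at the expected nonce has no dependency.
    assert_eq!(tags(1, 5, 5), (vec![], vec![(1, 5)]));
}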
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>; } } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum Mode { Verify, Overwrite, } /// Actually execute all transitioning for `block`. 
pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce!= expected_nonce && tx.nonce > 0
else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true,.. } if extrinsic_index!= 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer,.. 
} => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce != expected_nonce { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if tx.amount > from_balance { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime.
native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { 
code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
{ vec![encode(&tx.from, tx.nonce - 1)] }
conditional_block
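One detail of `execute_transfer_backend` above worth noting: the sender's nonce is bumped before the balance check, so a transfer that fails with `Payment` still consumes the nonce. Below is a dependency-free sketch of the same checks, with a `HashMap` standing in for runtime storage; all names here are illustrative and the error variants merely mirror the `InvalidTransaction` cases in the original.

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum TransferError { Stale, Payment }

fn transfer(
    nonces: &mut HashMap<&'static str, u64>,
    balances: &mut HashMap<&'static str, u64>,
    from: &'static str,
    to: &'static str,
    amount: u64,
    nonce: u64,
) -> Result<(), TransferError> {
    let expected = *nonces.get(from).unwrap_or(&0);
    if nonce != expected {
        return Err(TransferError::Stale); // wrong nonce
    }
    nonces.insert(from, expected + 1); // nonce bumps even if the payment fails
    let from_balance = *balances.get(from).unwrap_or(&0);
    if amount > from_balance {
        return Err(TransferError::Payment); // insufficient funds
    }
    balances.insert(from, from_balance - amount);
    *balances.entry(to).or_insert(0) += amount;
    Ok(())
}

fn main() {
    let mut nonces = HashMap::new();
    let mut balances = HashMap::from([("alice", 111u64)]);
    assert_eq!(transfer(&mut nonces, &mut balances, "alice", "bob", 69, 0), Ok(()));
    assert_eq!(balances["alice"], 42);
    assert_eq!(balances["bob"], 69);
}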
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>; } } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum
{ Verify, Overwrite, } /// Actually execute all transitioning for `block`. pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce!= expected_nonce && tx.nonce > 0 { vec![encode(&tx.from, tx.nonce - 1)] } else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce != expected_nonce { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if tx.amount > from_balance { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected:
&Hash) { use sp_core::hexdisplay::HexDisplay; if given!= expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given!= expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime. native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); 
drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
Mode
identifier_name
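The `Mode` enum picked out by this sample drives `execute_block_with_state_root_handler`: `polish_block` runs with `Overwrite` to stamp the computed state and extrinsics roots onto the header, while `execute_block` runs with `Verify` to assert the header already carries them. A reduced sketch of that pattern follows; the names are hypothetical and a `u64` stands in for the root hashes.

#[derive(Copy, Clone)]
enum Mode { Verify, Overwrite }

// Either stamp the freshly computed root onto the header (Overwrite)
// or assert that the header already matches it (Verify).
fn reconcile_root(header_root: &mut u64, computed_root: u64, mode: Mode) {
    match mode {
        Mode::Overwrite => *header_root = computed_root,
        Mode::Verify => assert!(
            *header_root == computed_root,
            "Storage root must match that calculated.",
        ),
    }
}

fn main() {
    let mut root = 0u64;
    reconcile_root(&mut root, 42, Mode::Overwrite); // "polish": fill the root in
    reconcile_root(&mut root, 42, Mode::Verify);    // re-execution now verifies
}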
system.rs
// This file is part of Substrate. // Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. use crate::{ AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; use frame_support::{decl_module, decl_storage}; use frame_system::Trait; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; use sp_io::{ hashing::blake2_256, storage::changes_root as storage_changes_root, storage::root as storage_root, trie, }; use sp_runtime::{ generic, traits::Header as _, transaction_validity::{ InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, }, ApplyExtrinsicResult, }; use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin {} } decl_storage! { trait Store for Module<T: Trait> as TestRuntime { ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec<u8>; // The current block number being processed. Set by `execute_block`. Number get(fn number): Option<BlockNumber>; ParentHash get(fn parent_hash): Hash;
} } pub fn balance_of_key(who: AccountId) -> Vec<u8> { who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { // populate environment. <Number>::put(&header.number); <ParentHash>::put(&header.parent_hash); <StorageDigest>::put(header.digest()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); // try to read something that depends on current header digest // so that it'll be included in execution proof if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { let _: Option<u32> = storage::unhashed::get(&v); } } pub fn authorities() -> Vec<AuthorityId> { Authorities::get() } pub fn get_block_number() -> Option<BlockNumber> { Number::get() } pub fn take_block_number() -> Option<BlockNumber> { Number::take() } #[derive(Copy, Clone)] enum Mode { Verify, Overwrite, } /// Actually execute all transitioning for `block`. pub fn polish_block(block: &mut Block) { execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { let header = &mut block.header; initialize_block(header); // execute transactions block.extrinsics.iter().for_each(|e| { let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); }); let new_header = finalize_block(); if let Mode::Overwrite = mode { header.state_root = new_header.state_root; } else { info_expect_equal_hash(&new_header.state_root, &header.state_root); assert!( new_header.state_root == header.state_root, "Storage root must match that calculated.", ); } if let Mode::Overwrite = mode { header.extrinsics_root = new_header.extrinsics_root; } else { info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); assert!( new_header.extrinsics_root == header.extrinsics_root, "Transaction trie root must be valid.", ); } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock<Block> for BlockExecutor { fn execute_block(block: Block) { execute_block(block); } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { if check_signature(&utx).is_err() { return InvalidTransaction::BadProof.into() } let tx = utx.transfer(); let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce < expected_nonce { return InvalidTransaction::Stale.into() } if tx.nonce > expected_nonce + 64 { return InvalidTransaction::Future.into() } let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); let requires = if tx.nonce!= expected_nonce && tx.nonce > 0 { vec![encode(&tx.from, tx.nonce - 1)] } else { vec![] }; let provides = vec![encode(&tx.from, tx.nonce)]; Ok(ValidTransaction { priority: tx.amount, requires, provides, longevity: 64, propagate: true }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result } /// Finalize the block. pub fn finalize_block() -> Header { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); let number = <Number>::take().expect("Number is set by `initialize_block`"); let parent_hash = <ParentHash>::take(); let mut digest = <StorageDigest>::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = <NewAuthorities>::take(); let new_changes_trie_config = <NewChangesTrieConfig>::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); let storage_changes_root = storage_changes_root(&parent_hash.encode()) .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); if let Some(storage_changes_root) = storage_changes_root { digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } if let Some(new_config) = new_changes_trie_config { digest.push(generic::DigestItem::ChangesTrieSignal( generic::ChangesTrieSignal::NewConfiguration(new_config), )); } Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { use sp_runtime::traits::BlindCheckable; utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { Extrinsic::Transfer { exhaust_resources_when_not_first: true,.. } if extrinsic_index!= 0 => Err(InvalidTransaction::ExhaustsResources.into()), Extrinsic::Transfer { ref transfer,.. 
} => execute_transfer_backend(transfer), Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { // check nonce let nonce_key = tx.from.to_keyed_vec(NONCE_OF); let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); if tx.nonce != expected_nonce { return Err(InvalidTransaction::Stale.into()) } // increment nonce in storage storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); // check sender balance let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); // enact transfer if tx.amount > from_balance { return Err(InvalidTransaction::Payment.into()) } let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { NewAuthorities::put(new_authorities.to_vec()); Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { match value { Some(value) => storage::unhashed::put_raw(key, value), None => storage::unhashed::kill(key), } Ok(Ok(())) } fn execute_changes_trie_config_update( new_config: Option<ChangesTrieConfiguration>, ) -> ApplyExtrinsicResult { match new_config.clone() { Some(new_config) => storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), } <NewChangesTrieConfig>::put(new_config); Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; if given != expected { println!( "Hash: given={}, expected={}", HexDisplay::from(given.as_fixed_bytes()), HexDisplay::from(expected.as_fixed_bytes()), ); } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { if given != expected { sp_runtime::print("Hash not equal"); sp_runtime::print(given.as_bytes()); sp_runtime::print(expected.as_bytes()); } } #[cfg(test)] mod tests { use super::*; use crate::{wasm_binary_unwrap, Header, Transfer}; use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; use sp_core::{ map, traits::{CodeExecutor, RuntimeCode}, NeverNativeValue, }; use sp_io::hashing::twox_128; use sp_io::TestExternalities; use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; // Declare an instance of the native executor dispatch for the test runtime.
native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); fn executor() -> NativeExecutor<NativeDispatch> { NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } fn new_test_ext() -> TestExternalities { let authorities = vec![ Sr25519Keyring::Alice.to_raw_public(), Sr25519Keyring::Bob.to_raw_public(), Sr25519Keyring::Charlie.to_raw_public(), ]; TestExternalities::new_with_code( wasm_binary_unwrap(), sp_core::storage::Storage { top: map![ twox_128(b"latest").to_vec() => vec![69u8; 32], twox_128(b"sys:auth").to_vec() => authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], children_default: map![], }, ) } fn block_import_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let h = Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }; let mut b = Block { header: h, extrinsics: vec![] }; new_test_ext().execute_with(|| polish_block(&mut b)); block_executor(b, &mut new_test_ext()); } #[test] fn block_import_works_native() { block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } fn block_import_with_transaction_works<F>(block_executor: F) where F: Fn(Block, &mut TestExternalities), { let mut b1 = Block { header: Header { parent_hash: [69u8; 32].into(), number: 1, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), amount: 69, nonce: 0, } .into_signed_tx()], }; let mut dummy_ext = new_test_ext(); dummy_ext.execute_with(|| polish_block(&mut b1)); let mut b2 = Block { header: Header { parent_hash: b1.header.hash(), number: 2, state_root: Default::default(), extrinsics_root: Default::default(), digest: Default::default(), }, extrinsics: vec![ Transfer { from: AccountKeyring::Bob.into(), to: AccountKeyring::Alice.into(), amount: 27, nonce: 0, } .into_signed_tx(), Transfer { from: AccountKeyring::Alice.into(), to: AccountKeyring::Charlie.into(), amount: 69, nonce: 1, } .into_signed_tx(), ], }; dummy_ext.execute_with(|| polish_block(&mut b2)); drop(dummy_ext); let mut t = new_test_ext(); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); }); block_executor(b1, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); }); block_executor(b2, &mut t); t.execute_with(|| { assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); }); } #[test] fn block_import_with_transaction_works_native() { block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); } #[test] fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); let runtime_code = RuntimeCode { 
code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_binary_unwrap().into()), hash: Vec::new(), heap_pages: None, }; executor() .call::<NeverNativeValue, fn() -> _>( &mut ext, &runtime_code, "Core_execute_block", &b.encode(), false, None, ) .0 .unwrap(); }) } }
NewAuthorities get(fn new_authorities): Option<Vec<AuthorityId>>; NewChangesTrieConfig get(fn new_changes_trie_config): Option<Option<ChangesTrieConfiguration>>; StorageDigest get(fn storage_digest): Option<Digest>; Authorities get(fn authorities) config(): Vec<AuthorityId>;
random_line_split
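The transfer path in `execute_transfer_backend` reduces to two guards: the nonce must equal the stored expectation (otherwise `Stale`), and the amount must not exceed the sender's balance (otherwise `Payment`), with the nonce bumped before the balance is checked. Below is a minimal standalone sketch of that flow over plain `HashMap`s; the `Transfer` shape, key type, and error enum are simplified stand-ins for the runtime types, not the runtime's own API.

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum TransferError {
    Stale,
    Payment,
}

struct Transfer {
    from: u64,
    to: u64,
    amount: u64,
    nonce: u64,
}

fn apply_transfer(
    nonces: &mut HashMap<u64, u64>,
    balances: &mut HashMap<u64, u64>,
    tx: &Transfer,
) -> Result<(), TransferError> {
    // The nonce must match exactly; note it is incremented *before* the
    // balance check, mirroring the ordering in `execute_transfer_backend`.
    let expected = nonces.get(&tx.from).copied().unwrap_or(0);
    if tx.nonce != expected {
        return Err(TransferError::Stale);
    }
    nonces.insert(tx.from, expected + 1);

    // Reject overdrafts, then move the funds.
    let from_balance = balances.get(&tx.from).copied().unwrap_or(0);
    if tx.amount > from_balance {
        return Err(TransferError::Payment);
    }
    *balances.entry(tx.from).or_insert(0) -= tx.amount;
    *balances.entry(tx.to).or_insert(0) += tx.amount;
    Ok(())
}

fn main() {
    let mut nonces = HashMap::new();
    let mut balances = HashMap::from([(1u64, 100u64)]);
    let tx = Transfer { from: 1, to: 2, amount: 69, nonce: 0 };
    assert_eq!(apply_transfer(&mut nonces, &mut balances, &tx), Ok(()));
    assert_eq!(balances[&2], 69);
    // Replaying the same nonce is now stale.
    assert_eq!(apply_transfer(&mut nonces, &mut balances, &tx), Err(TransferError::Stale));
}
```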
delaunay_triangulation.rs
Triangle ids and half-edge ids are related. The half-edges of triangle t are
3*t, 3*t + 1, and 3*t + 2. The triangle of half-edge id e is floor(e/3).

# Example

```rust
use delaunator::triangulate;
use structures::Point2D;

let points = vec![
    Point2D { x: 0., y: 0. },
    Point2D { x: 1., y: 0. },
    Point2D { x: 1., y: 1. },
    Point2D { x: 0., y: 1. },
];

let result = triangulate(&points).expect("No triangulation exists.");
println!("{:?}", result.triangles); // [0, 2, 1, 0, 3, 2]
```
*/

use crate::structures::Point2D;
use std::collections::HashSet;
use std::f64;

/// Represents the area outside of the triangulation.
/// Halfedges on the convex hull (which don't have an adjacent halfedge)
/// will have this value.
pub const EMPTY: usize = usize::max_value();

/// A data structure used to perform Delaunay triangulation on
/// a set of input vector points. Connectivity between points,
/// triangles, and halfedges is as follows:
///
/// - edge → edges: next_halfedge, prev_halfedge, halfedges[]
/// - edge → points: triangles[]
/// - edge → triangle: triangle_of_edge
/// - triangle → edges: edges_of_triangle
/// - triangle → points: points_of_triangle
/// - triangle → triangles: triangles_adjacent_to_triangle
/// - point → incoming edges: edges_around_point
/// - point → outgoing edges: edges_around_point + halfedge[]
/// - point → points: edges_around_point + triangles[]
/// - point → triangles: edges_around_point + triangle_of_edge
pub struct Triangulation {
    /// A vector of point indices where each triple represents a Delaunay triangle.
    /// All triangles are directed counter-clockwise.
    pub triangles: Vec<usize>,

    /// A vector of adjacent halfedge indices that allows traversing the triangulation graph.
    ///
    /// `i`-th half-edge in the array corresponds to vertex `triangles[i]`
    /// the half-edge is coming from. `halfedges[i]` is the index of a twin half-edge
    /// in an adjacent triangle (or `EMPTY` for outer half-edges on the convex hull).
    pub halfedges: Vec<usize>,

    /// A vector of indices that reference points on the convex hull of the triangulation,
    /// counter-clockwise.
    pub hull: Vec<usize>,
}

impl Triangulation {
    /// Constructs a new *Triangulation*.
    fn new(n: usize) -> Self {
        let max_triangles = 2 * n - 5;
        Self {
            triangles: Vec::with_capacity(max_triangles * 3),
            halfedges: Vec::with_capacity(max_triangles * 3),
            hull: Vec::new(),
        }
    }

    /// The number of triangles in the triangulation.
    pub fn len(&self) -> usize {
        self.triangles.len() / 3
    }

    /// Next halfedge in a triangle.
    pub fn next_halfedge(&self, edge: usize) -> usize {
        if edge % 3 == 2 {
            edge - 2
        } else {
            edge + 1
        }
    }

    /// Previous halfedge in a triangle.
    pub fn prev_halfedge(&self, edge: usize) -> usize {
        if edge % 3 == 0 {
            edge + 2
        } else {
            edge - 1
        }
    }

    /// Returns the triangle of an edge.
    pub fn triangle_of_edge(&self, edge: usize) -> usize {
        edge / 3
    }

    /// Returns the edges of a triangle.
    pub fn edges_of_triangle(&self, triangle: usize) -> [usize; 3] {
        [3 * triangle, 3 * triangle + 1, 3 * triangle + 2]
    }

    /// Returns the points of a triangle.
    pub fn points_of_triangle(&self, triangle: usize) -> [usize; 3] {
        // self.edges_of_tri
r. pub fn triangle_center(&self, points: &[Point2D], triangle: usize) -> Point2D { let p = self.points_of_triangle(triangle); points[p[0]].circumcenter(&points[p[1]], &points[p[2]]) } /// Returns the edges around a point connected to halfedge '*start*'. pub fn edges_around_point(&self, start: usize) -> Vec<usize> { let mut result = vec![]; let mut incoming = start; let mut outgoing: usize; // let mut i = 0; loop { if result.contains(&incoming) { break; } result.push(incoming); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { result.push(incoming); break; } // i += 1; // if i > 100 { // // println!("{} {} {}", outgoing, incoming, start); // break; // } } result } pub fn natural_neighbours_from_incoming_edge(&self, start: usize) -> Vec<usize> { let mut result = vec![]; //result.push(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { result.push(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } result } pub fn natural_neighbours_2nd_order(&self, start: usize) -> Vec<usize> { let mut set = HashSet::new(); let mut edges = vec![]; // result.push(self.triangles[self.next_halfedge(start)]); // set.insert(self.triangles[self.next_halfedge(start)]); let mut incoming = start; let mut outgoing: usize; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; edges.push(outgoing); if incoming == EMPTY { break; } else if incoming == start { break; } } for start in edges { incoming = start; loop { set.insert(self.triangles[incoming]); outgoing = self.next_halfedge(incoming); incoming = self.halfedges[outgoing]; if incoming == EMPTY { break; } else if incoming == start { break; } } } set.into_iter().map(|i| i).collect() } /// Returns the indices of the adjacent triangles to a triangle. 
    pub fn triangles_adjacent_to_triangle(&self, triangle: usize) -> Vec<usize> {
        let mut adjacent_triangles: Vec<usize> = vec![];
        let mut opposite: usize;
        for e in self.edges_of_triangle(triangle).iter() {
            opposite = self.halfedges[*e];
            if opposite != EMPTY {
                adjacent_triangles.push(self.triangle_of_edge(opposite));
            }
        }
        adjacent_triangles
    }

    fn add_triangle(
        &mut self,
        i0: usize,
        i1: usize,
        i2: usize,
        a: usize,
        b: usize,
        c: usize,
    ) -> usize {
        let t = self.triangles.len();

        self.triangles.push(i0);
        self.triangles.push(i1);
        self.triangles.push(i2);

        self.halfedges.push(a);
        self.halfedges.push(b);
        self.halfedges.push(c);

        if a != EMPTY {
            self.halfedges[a] = t;
        }
        if b != EMPTY {
            self.halfedges[b] = t + 1;
        }
        if c != EMPTY {
            self.halfedges[c] = t + 2;
        }

        t
    }

    fn legalize(&mut self, a: usize, points: &[Point2D], hull: &mut Hull) -> usize {
        let b = self.halfedges[a];

        // if the pair of triangles doesn't satisfy the Delaunay condition
        // (p1 is inside the circumcircle of [p0, pl, pr]), flip them,
        // then do the same check/flip recursively for the new pair of triangles
        //
        //           pl                    pl
        //          /||\                  /  \
        //       al/ || \bl            al/    \a
        //        /  ||  \              /      \
        //       /  a||b  \    flip    /___ar___\
        //     p0\   ||   /p1   =>   p0\---bl---/p1
        //        \  ||  /              \      /
        //       ar\ || /br             b\    /br
        //          \||/                  \  /
        //           pr                    pr
        //
        let ar = self.prev_halfedge(a);

        if b == EMPTY {
            return ar;
        }

        let al = self.next_halfedge(a);
        let bl = self.prev_halfedge(b);

        let p0 = self.triangles[ar];
        let pr = self.triangles[a];
        let pl = self.triangles[al];
        let p1 = self.triangles[bl];

        let illegal = (&points[p0]).in_circle(&points[pr], &points[pl], &points[p1]);
        if illegal {
            self.triangles[a] = p1;
            self.triangles[b] = p0;

            let hbl = self.halfedges[bl];
            let har = self.halfedges[ar];

            // edge swapped on the other side of the hull (rare); fix the halfedge reference
            if hbl == EMPTY {
                let mut e = hull.start;
                loop {
                    if hull.tri[e] == bl {
                        hull.tri[e] = a;
                        break;
                    }
                    e = hull.next[e];
                    if e == hull.start || e == EMPTY {
                        // notice, I added the || e == EMPTY after
                        // finding a bug. I don't know about this.
                        break;
                    }
                }
            }

            self.halfedges[a] = hbl;
            self.halfedges[b] = har;
            self.halfedges[ar] = bl;

            if hbl != EMPTY {
                self.halfedges[hbl] = a;
            }
            if har != EMPTY {
                self.halfedges[har] = b;
            }
            if bl != EMPTY {
                self.halfedges[bl] = ar;
            }

            let br = self.next_halfedge(b);

            self.legalize(a, points, hull);
            return self.legalize(br, points, hull);
        }
        ar
    }
}

// data structure for tracking the edges of the advancing convex hull
struct Hull {
    prev: Vec<usize>,
    next: Vec<usize>,
    tri: Vec<usize>,
    hash: Vec<usize>,
    start: usize,
    center: Point2D,
}

impl Hull {
    fn new(n: usize, center: Point2D, i0: usize, i1: usize, i2: usize, points: &[Point2D]) -> Self {
        let hash_len = (n as f64).sqrt() as usize;

        let mut hull = Self {
            prev: vec![0; n],            // edge to prev edge
            next: vec![0; n],            // edge to next edge
            tri: vec![0; n],             // edge to adjacent halfedge
            hash: vec![EMPTY; hash_len], // angular edge hash
            start: i0,
            center,
        };

        hull.next[i0] = i1;
        hull.prev[i2] = i1;
        hull.next[i1] = i2;
        hull.prev[i0] = i2;
        hull.next[i2] = i0;
        hull.prev[i1] = i0;

        hull.tri[i0] = 0;
        hull.tri[i1] = 1;
        hull.tri[i2] = 2;

        hull.hash_edge(&points[i0], i0);
        hull.hash_edge(&points[i1], i1);
        hull.hash_edge(&points[i2], i2);

        hull
    }

    fn hash_key(&self, p: &Point2D) -> usize {
        let dx = p.x - self.center.x;
        let dy = p.y - self.center.y;

        let p = dx / (dx.abs() + dy.abs());
        let a = (if dy > 0.0 { 3.0 - p } else { 1.0 + p }) / 4.0; // [0..1]

        let len = self.hash.len();
        (((len as f64) * a).floor() as usize) % len
    }

    fn hash_edge(&mut self, p: &Point2D, i: usize) {
        let key = self.hash_key(p);
        self.hash[key] = i;
    }

    fn find_visible_edge(&self, p: &Point2D, points: &[Point2D]) -> (usize, bool) {
        let mut start: usize = 0;
        let key = self.hash_key(p);
        let len = self.hash.len();

        for j in 0..len {
            start = self.hash[(key + j) % len];
            if start != EMPTY && self.next[start] != EMPTY {
                break;
            }
        }
        start = self.prev[start];

        let mut e = start;
        while !p.orient(&points[e], &points[self.next[e]]) {
            e = self.next[e];
            if e == start {
                return (EMPTY, false);
            }
        }
        (e, e == start)
    }
}

fn calc_bbox_center(points: &[Point2D]) -> Point2D {
    let mut min_x = f64::INFINITY;
    let mut min_y = f64::INFINITY;
    let mut max_x = f64::NEG_INFINITY;
    let mut max_y = f64::NEG_INFINITY;
    for p in points.iter() {
        min_x = min_x.min(p.x);
        min_y = min_y.min(p.y);
        max_x = max_x.max(p.x);
        max_y = max_y.max(p.y);
    }
    Point2D {
        x: (min_x + max_x) / 2.0,
        y: (min_y + max_y) / 2.0,
    }
}

fn find_closest_point(points: &[Point2D], p0: &Point2D) -> Option<usize> {
    let mut min_dist = f64::INFINITY;
    let mut k: usize = 0;
    for (i, p) in points.iter().enumerate() {
        let d = p0.distance_squared(p);
        if d > 0.0 && d < min_dist {
            k = i;
            min_dist = d;
        }
    }
    if min_dist == f64::INFINITY {
        None
    } else {
        Some(k)
    }
}

fn find_seed_triangle(points: &[Point2D]) -> Option<(usize, usize, usize)> {
    // pick a seed point close to the center
    let bbox_center = calc_bbox_center(points);
    let i0 = find_closest_point(points, &bbox_center)?;
    let p0 = &points[i0];

    // find the point closest to the seed
    let i1 = find_closest_point(points, p0)?;
    let p1 = &points[i1];

    // find the third point which forms the smallest circumcircle with the first two
    let mut min_radius = f64::INFINITY;
    let mut i2: usize = 0;
    for (i, p) in points.iter().enumerate() {
        if i == i0 || i == i1 {
            continue;
        }
        let r = p0.circumradius2(p1, p);
        if r < min_radius {
            i2 = i;
            min_radius = r;
        }
    }

    if min_radius == f64::INFINITY {
        None
    } else {
        // swap the order of the seed points for counter-clockwise orientation
        Some(if p0.orient(p1, &points[i2]) {
            (i0, i2, i1)
        } else {
            (i0, i1, i2)
        })
    }
}

/// Triangulate a set of 2D
points.
/// Returns `None` if no triangulation exists for the input (e.g. all points are collinear).
pub fn triangulate(points: &[Point2D]) -> Option<Triangulation> {
    let n = points.len();

    let (i0, i1, i2) = find_seed_triangle(points)?;
    let center = (&points[i0]).circumcenter(&points[i1], &points[i2]);

    let mut triangulation = Triangulation::new(n);
    triangulation.add_triangle(i0, i1, i2, EMPTY, EMPTY, EMPTY);

    // sort the points by distance from the seed triangle circumcenter
    let mut dists: Vec<_> = points
        .iter()
        .enumerate()
        .map(|(i, point)| (i, center.distance_squared(point)))
        .collect();

    dists.sort_unstable_by(|&(_, da), &(_, db)| da.partial_cmp(&db).unwrap());

    let mut hull = Hull::new(n, center, i0, i1, i2, points);

    for (k, &(i, _)) in dists.iter().enumerate() {
        let p = &points[i];

        // skip near-duplicates
        if k > 0 && p.nearly_equals(&points[dists[k - 1].0]) {
            continue;
        }
        // skip seed triangle points
        if i == i0 || i == i1 || i == i2 {
            continue;
        }

        // find a visible edge on the convex hull using edge hash
        let (mut e, walk_back) = hull.find_visible_edge(p, points);
        if e == EMPTY {
            continue; // likely a near-duplicate point; skip it
        }

        // add the first triangle from the point
        let t = triangulation.add_triangle(e, i, hull.next[e], EMPTY, EMPTY, hull.tri[e]);

        // recursively flip triangles from the point until they satisfy the Delaunay condition
        hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull);
        hull.tri[e] = t; // keep track of boundary triangles on the hull

        // walk forward through the hull, adding more triangles and flipping recursively
        let mut n = hull.next[e];
        loop {
            let q = hull.next[n];
            if !p.orient(&points[n], &points[q]) {
                break;
            }
            let t = triangulation.add_triangle(n, i, q, hull.tri[i], EMPTY, hull.tri[n]);
            hull.tri[i] = triangulation.legalize(t + 2, points, &mut hull);
            hull.next[n] = EMPTY; // mark as removed
            n = q;
        }

        // walk backward from the other side, adding more triangles
angle(t) // .into_iter() // .map(|e| self.triangles[*e]) // .collect() let e = self.edges_of_triangle(triangle); [ self.triangles[e[0]], self.triangles[e[1]], self.triangles[e[2]], ] } /// Triangle circumcente
identifier_body
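The id scheme described in the module docs (triangle `t` owns half-edges `3*t`, `3*t + 1`, and `3*t + 2`; half-edge `e` belongs to triangle `floor(e/3)`) is pure index arithmetic, so it can be sanity-checked without building a triangulation. A small sketch with free functions mirroring the methods above:

```rust
fn edges_of_triangle(t: usize) -> [usize; 3] {
    [3 * t, 3 * t + 1, 3 * t + 2]
}

fn triangle_of_edge(e: usize) -> usize {
    e / 3
}

fn next_halfedge(e: usize) -> usize {
    if e % 3 == 2 { e - 2 } else { e + 1 }
}

fn prev_halfedge(e: usize) -> usize {
    if e % 3 == 0 { e + 2 } else { e - 1 }
}

fn main() {
    // Triangle 2 owns half-edges 6, 7, 8, and each maps back to triangle 2.
    assert_eq!(edges_of_triangle(2), [6, 7, 8]);
    assert!(edges_of_triangle(2).iter().all(|&e| triangle_of_edge(e) == 2));

    // next/prev cycle within the owning triangle: 6 -> 7 -> 8 -> 6.
    assert_eq!(next_halfedge(8), 6);
    assert_eq!(prev_halfedge(6), 8);
}
```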
toggle.rs
use bars_duration_ticks;
use conrod_core::{self as conrod, widget};
use env;
use ruler;
use time_calc::{self as time, Ticks};
use track;

pub use env::{Point, PointTrait, Toggle as ToggleValue, Trait as EnvelopeTrait};

/// The envelope type compatible with the `Toggle` automation track.
pub type Envelope = env::bounded::Envelope<ToggleValue>;

/// For viewing and manipulating series of boolean valued points over time.
#[derive(WidgetCommon)]
pub struct Toggle<'a> {
    #[conrod(common_builder)]
    common: widget::CommonBuilder,
    envelope: &'a Envelope,
    bars: &'a [time::TimeSig],
    ppqn: time::Ppqn,
    /// The position of the playhead in ticks along with the change in its position in ticks.
    pub maybe_playhead: Option<(Ticks, Ticks)>,
    style: Style,
}

/// Unique state for the Toggle automation.
pub struct State
{
    ids: Ids,
}

widget_ids! {
    struct Ids {
        circles[],
        rectangles[],
        phantom_line,
    }
}

#[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)]
pub struct Style {
    #[conrod(default = "theme.shape_color")]
    pub color: Option<conrod::Color>,
    #[conrod(default = "4.0")]
    pub point_radius: Option<conrod::Scalar>,
}

/// The various kinds of events returned by an automation track.
#[derive(Copy, Clone, Debug)]
pub enum Event {
    /// Upon playhead movement, represents new boolean value at playhead.
    Interpolate(bool),
    /// Indicates that the toggle value has changed since the last update.
    SwitchTo(bool),
    /// Some event which would mutate the envelope has occurred.
    Mutate(super::Mutate<ToggleValue>),
}

impl<'a> Toggle<'a> {
    /// Construct a new default Automation.
    pub fn new(bars: &'a [time::TimeSig], ppqn: time::Ppqn, envelope: &'a Envelope) -> Self {
        Toggle {
            bars: bars,
            ppqn: ppqn,
            maybe_playhead: None,
            envelope: envelope,
            common: widget::CommonBuilder::default(),
            style: Style::default(),
        }
    }

    builder_methods! {
        pub point_radius { style.point_radius = Some(conrod::Scalar) }
    }
}

impl<'a> track::Widget for Toggle<'a> {
    fn playhead(mut self, playhead: (Ticks, Ticks)) -> Self {
        self.maybe_playhead = Some(playhead);
        self
    }
}

impl<'a> conrod::Colorable for Toggle<'a> {
    builder_method!(color { style.color = Some(conrod::Color) });
}

impl<'a> conrod::Widget for Toggle<'a> {
    type State = State;
    type Style = Style;
    type Event = Vec<Event>;

    fn init_state(&self, id_gen: widget::id::Generator) -> Self::State {
        State {
            ids: Ids::new(id_gen),
        }
    }

    fn style(&self) -> Self::Style {
        self.style.clone()
    }

    fn default_y_dimension(&self, ui: &conrod::Ui) -> conrod::position::Dimension {
        ui.theme
            .widget_style::<Style>()
            .and_then(|default| default.common.maybe_y_dimension)
            .unwrap_or(conrod::position::Dimension::Absolute(
                super::super::DEFAULT_HEIGHT,
            ))
    }

    fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event {
        use super::Elem;
        use conrod_core::utils::{clamp, map_range};
        use conrod_core::{Colorable, Positionable};

        let widget::UpdateArgs {
            id,
            rect,
            state,
            style,
            ui,
            ..
        } = args;
        let Toggle {
            envelope,
            bars,
            ppqn,
            maybe_playhead,
            ..
        } = self;

        let num_points = envelope.points().count();
        let num_rectangles = {
            let mut points = envelope.points();
            points
                .next()
                .map(|first| {
                    let mut prev_toggle = first.value;
                    let mut count = 0;
                    for point in points {
                        if prev_toggle == ToggleValue(true) {
                            count += 1;
                        }
                        prev_toggle = point.value;
                    }
                    count
                })
                .unwrap_or(0)
        };

        // Ensure we have a circle index for each point.
        if state.ids.circles.len() < num_points {
            let id_gen = &mut ui.widget_id_generator();
            state.update(|state| state.ids.circles.resize(num_points, id_gen));
        }
        // Ensure we have a rectangle index for each point.
        if state.ids.rectangles.len() < num_rectangles {
            let id_gen = &mut ui.widget_id_generator();
            state.update(|state| state.ids.rectangles.resize(num_rectangles, id_gen));
        }

        let (w, h) = rect.w_h();
        let half_h = h / 2.0;
        let color = style.color(ui.theme());
        let point_radius = style.point_radius(ui.theme());
        let total_ticks = bars_duration_ticks(bars.iter().cloned(), ppqn);

        // Get the time in ticks from some position over the Toggle automation.
        let ticks_from_x = |x: conrod::Scalar| {
            Ticks(map_range(
                x,
                rect.left(),
                rect.right(),
                0,
                total_ticks.ticks(),
            ))
        };

        // `false` if `y` is closer to the bottom, `true` if `y` is closer to the top.
let value_from_y = |y: conrod::Scalar| { let perc = map_range(y, rect.bottom(), rect.top(), 0.0, 1.0); if perc < 0.5 { ToggleValue(false) } else { ToggleValue(true) } }; // Same as `ticks_from_x` but clamps the ticks to the total_ticks range. let clamped_ticks_from_x = |x: conrod::Scalar| clamp(ticks_from_x(x), Ticks(0), total_ticks); // All that remains is to instantiate the graphics widgets. // // Check whether or not we need to do so by checking whether or not we're visible. if conrod::graph::algo::cropped_area_of_widget(ui.widget_graph(), id).is_none() { return Vec::new(); } // Determine the element range over which the playhead has traversed since the last update. let playhead_delta_range = match maybe_playhead { Some((playhead, delta)) if delta > Ticks(0) => { let start = playhead - delta; let end = playhead; super::maybe_surrounding_elems(total_ticks, envelope, start, end) } _ => None, }; // A function for instantiating a Circle widget for a point. let point_widget = |i: usize, x_offset: conrod::Scalar, value: ToggleValue, point_id: widget::Id, ui: &mut conrod::UiCell, events: &mut Vec<Event>| { for widget_event in ui.widget_input(point_id).events() { use conrod_core::{event, input}; match widget_event { // Check to see if the toggle point is being dragged. event::Widget::Drag(drag) if drag.button == input::MouseButton::Left => { let point_rect = ui.rect_of(point_id).unwrap(); let drag_to_abs_xy = conrod::utils::vec2_add(drag.to, point_rect.xy()); let drag_point = super::DragPoint { idx: i, ticks: clamped_ticks_from_x(drag_to_abs_xy[0]), value: value_from_y(drag_to_abs_xy[1]), }; events.push(Event::Mutate(drag_point.into())); } // Check to see if the toggle point is being removed. event::Widget::Click(click) if click.button == input::MouseButton::Right => { let remove_point = super::RemovePoint { idx: i }; events.push(Event::Mutate(remove_point.into())); } _ => (), } } let y_offset = if value == ToggleValue(false) { -half_h } else { half_h }; let point_elem = Elem::Point(i); let color = super::color_elem_by_playhead(point_elem, playhead_delta_range, color); let color = match ui.widget_input(point_id).mouse() { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; widget::Circle::fill(point_radius) .x_y_relative_to(id, x_offset, y_offset) .graphics_for(id) .parent(id) .color(color) .set(point_id, ui); }; let mut events = Vec::new(); // Instantiate the widgets in a big loop. let mut iter = envelope.points().zip(state.ids.circles.iter()).enumerate(); if let Some((i, (&first, &first_id))) = iter.next() { // The first point widget. let first_offset = ruler::x_offset_from_ticks(first.ticks, total_ticks, w); point_widget(i, first_offset, first.value, first_id, ui, &mut events); let mut prev_offset = first_offset; let mut prev_toggle = first.value; let mut rectangle_ids = state.ids.rectangles.iter(); let mut prev_point_id = first_id; for (i, (&point, &point_id)) in iter { // All following point widgets. let point_x_offset = ruler::x_offset_from_ticks(point.ticks, total_ticks, w); point_widget(i, point_x_offset, point.value, point_id, ui, &mut events); // The rectangle widget. 
if prev_toggle == ToggleValue(true) { let &rectangle_id = rectangle_ids.next().expect("Not enough rectangle ids"); let right = point_x_offset; let left = prev_offset; let width = right - left; let elem = Elem::BetweenPoints(i - 1, i); let color = super::color_elem_by_playhead(elem, playhead_delta_range, color); let color = match ui .widget_input(prev_point_id) .mouse() .or_else(|| ui.widget_input(point_id).mouse()) { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; let x_offset = left + width / 2.0; widget::Rectangle::fill([width, h]) .depth(2.0) // Place behind lines and circles. .x_relative_to(id, x_offset) .graphics_for(id) .color(color.alpha(0.5)) .parent(id) .set(rectangle_id, ui); } prev_offset = point_x_offset; prev_toggle = point.value; prev_point_id = point_id; } } // // A Line widget to accent the current interaction with the widget. // if let Some(mouse) = ui.widget_input(idx).mouse() { // let (x, ticks, value) = match new_interaction { // Highlighted(Elem::Point(p_idx)) | Clicked(Elem::Point(p_idx), _, _) => { // let p = envelope.env.points[p_idx]; // let x = x_from_ticks(p.ticks); // (x, p.ticks, p.value) // }, // Highlighted(_) | Clicked(_, _, _) => { // let x = mouse.xy[0]; // let ticks = ticks_from_x(x); // let value = value_from_y(mouse.xy[1]); // (x, ticks, value) // }, // _ => return, // }; // let color = match new_interaction { // // If whatever we're interacting with is highlighted, we should be too. // Highlighted(Elem::Point(_)) => color.highlighted(), // Highlighted(_) => color.highlighted().alpha(0.5), // // Only draw the clicked point if it is still between the clicked area. // Clicked(Elem::BetweenPoints(a, b), _, _) => // match (envelope.points().nth(a), envelope.points().nth(b)) { // (Some(p_a), Some(p_b)) if p_a.ticks <= ticks && ticks <= p_b.ticks => // color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still before the first point. // Clicked(Elem::BeforeFirstPoint, _, _) => // match envelope.points().nth(0) { // Some(p) if ticks <= p.ticks => color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still after the last point. // Clicked(Elem::AfterLastPoint, _, _) => // match envelope.points().last() { // Some(p) if p.ticks <= ticks => color.clicked().alpha(0.7), // _ => return, // }, // Clicked(Elem::EmptyRect, _, _) => color.clicked().alpha(0.7), // Clicked(Elem::Point(_), _, _) => color.clicked(), // _ => return, // }; // let (y_bottom, y_top) = match value { // ToggleValue(true) => (y + h / 4.0, rect.top()), // ToggleValue(false) => (rect.bottom(), y - h / 4.0), // }; // let start = [x, y_bottom]; // let end = [x, y_top]; // const THICKNESS: Scalar = 2.0; // let line_idx = state.phantom_line_idx.get(&mut ui); // Line::abs(start, end) // .depth(1.0) // Place behind circles but in front of rectangles. // .graphics_for(idx) // .parent(idx) // .color(color) // .thickness(THICKNESS) // .set(line_idx, &mut ui); // }; events } }
State
identifier_name
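The `value_from_y` closure in the row above is just a midpoint test over the track rect. A minimal, self-contained sketch of that mapping, with plain `f64`/`bool` standing in for conrod's `Scalar` and this module's `ToggleValue` (the function name is illustrative, not from the file):

fn value_from_y_sketch(y: f64, bottom: f64, top: f64) -> bool {
    // Normalize y into [0.0, 1.0] across the track rect's height.
    let perc = (y - bottom) / (top - bottom);
    // At or above the vertical midpoint reads as "on", i.e. ToggleValue(true).
    perc >= 0.5
}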
toggle.rs
use bars_duration_ticks; use conrod_core::{self as conrod, widget}; use env; use ruler; use time_calc::{self as time, Ticks}; use track; pub use env::{Point, PointTrait, Toggle as ToggleValue, Trait as EnvelopeTrait}; /// The envelope type compatible with the `Toggle` automation track. pub type Envelope = env::bounded::Envelope<ToggleValue>; /// For viewing and manipulating series of boolean valued points over time. #[derive(WidgetCommon)] pub struct Toggle<'a> { #[conrod(common_builder)] common: widget::CommonBuilder, envelope: &'a Envelope, bars: &'a [time::TimeSig], ppqn: time::Ppqn, /// The position of the playhead in ticks along with the change in its position in ticks. pub maybe_playhead: Option<(Ticks, Ticks)>, style: Style, } /// Unique state for the Toggle automation. pub struct State { ids: Ids, } widget_ids! { struct Ids { circles[], rectangles[], phantom_line, } } #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)] pub struct Style { #[conrod(default = "theme.shape_color")] pub color: Option<conrod::Color>, #[conrod(default = "4.0")] pub point_radius: Option<conrod::Scalar>, } /// The various kinds of events returned by an automation track. #[derive(Copy, Clone, Debug)] pub enum Event { /// Upon playhead movement, represents new boolean value at playhead. Interpolate(bool), /// Indicates that the toggle value has changed since the last update. SwitchTo(bool), /// Some event which would mutate the envelope has occurred. Mutate(super::Mutate<ToggleValue>), } impl<'a> Toggle<'a> { /// Construct a new default Toggle automation. pub fn new(bars: &'a [time::TimeSig], ppqn: time::Ppqn, envelope: &'a Envelope) -> Self
builder_methods! { pub point_radius { style.point_radius = Some(conrod::Scalar) } } } impl<'a> track::Widget for Toggle<'a> { fn playhead(mut self, playhead: (Ticks, Ticks)) -> Self { self.maybe_playhead = Some(playhead); self } } impl<'a> conrod::Colorable for Toggle<'a> { builder_method!(color { style.color = Some(conrod::Color) }); } impl<'a> conrod::Widget for Toggle<'a> { type State = State; type Style = Style; type Event = Vec<Event>; fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { ids: Ids::new(id_gen), } } fn style(&self) -> Self::Style { self.style.clone() } fn default_y_dimension(&self, ui: &conrod::Ui) -> conrod::position::Dimension { ui.theme .widget_style::<Style>() .and_then(|default| default.common.maybe_y_dimension) .unwrap_or(conrod::position::Dimension::Absolute( super::super::DEFAULT_HEIGHT, )) } fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { use super::Elem; use conrod_core::utils::{clamp, map_range}; use conrod_core::{Colorable, Positionable}; let widget::UpdateArgs { id, rect, state, style, ui, .. } = args; let Toggle { envelope, bars, ppqn, maybe_playhead, .. } = self; let num_points = envelope.points().count(); let num_rectangles = { let mut points = envelope.points(); points .next() .map(|first| { let mut prev_toggle = first.value; let mut count = 0; for point in points { if prev_toggle == ToggleValue(true) { count += 1; } prev_toggle = point.value; } count }) .unwrap_or(0) }; // Ensure we have a circle index for each point. if state.ids.circles.len() < num_points { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.circles.resize(num_points, id_gen)); } // Ensure we have a rectangle index for each rectangle we may draw. if state.ids.rectangles.len() < num_rectangles { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.rectangles.resize(num_rectangles, id_gen)); } let (w, h) = rect.w_h(); let half_h = h / 2.0; let color = style.color(ui.theme()); let point_radius = style.point_radius(ui.theme()); let total_ticks = bars_duration_ticks(bars.iter().cloned(), ppqn); // Get the time in ticks from some position over the Toggle automation. let ticks_from_x = |x: conrod::Scalar| { Ticks(map_range( x, rect.left(), rect.right(), 0, total_ticks.ticks(), )) }; // `false` if `y` is closer to the bottom, `true` if y is closer to the top. let value_from_y = |y: conrod::Scalar| { let perc = map_range(y, rect.bottom(), rect.top(), 0.0, 1.0); if perc < 0.5 { ToggleValue(false) } else { ToggleValue(true) } }; // Same as `ticks_from_x` but clamps the ticks to the total_ticks range. let clamped_ticks_from_x = |x: conrod::Scalar| clamp(ticks_from_x(x), Ticks(0), total_ticks); // All that remains is to instantiate the graphics widgets. // // Check whether or not we need to do so by checking whether or not we're visible. if conrod::graph::algo::cropped_area_of_widget(ui.widget_graph(), id).is_none() { return Vec::new(); } // Determine the element range over which the playhead has traversed since the last update. let playhead_delta_range = match maybe_playhead { Some((playhead, delta)) if delta > Ticks(0) => { let start = playhead - delta; let end = playhead; super::maybe_surrounding_elems(total_ticks, envelope, start, end) } _ => None, }; // A function for instantiating a Circle widget for a point.
let point_widget = |i: usize, x_offset: conrod::Scalar, value: ToggleValue, point_id: widget::Id, ui: &mut conrod::UiCell, events: &mut Vec<Event>| { for widget_event in ui.widget_input(point_id).events() { use conrod_core::{event, input}; match widget_event { // Check to see if the toggle point is being dragged. event::Widget::Drag(drag) if drag.button == input::MouseButton::Left => { let point_rect = ui.rect_of(point_id).unwrap(); let drag_to_abs_xy = conrod::utils::vec2_add(drag.to, point_rect.xy()); let drag_point = super::DragPoint { idx: i, ticks: clamped_ticks_from_x(drag_to_abs_xy[0]), value: value_from_y(drag_to_abs_xy[1]), }; events.push(Event::Mutate(drag_point.into())); } // Check to see if the toggle point is being removed. event::Widget::Click(click) if click.button == input::MouseButton::Right => { let remove_point = super::RemovePoint { idx: i }; events.push(Event::Mutate(remove_point.into())); } _ => (), } } let y_offset = if value == ToggleValue(false) { -half_h } else { half_h }; let point_elem = Elem::Point(i); let color = super::color_elem_by_playhead(point_elem, playhead_delta_range, color); let color = match ui.widget_input(point_id).mouse() { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; widget::Circle::fill(point_radius) .x_y_relative_to(id, x_offset, y_offset) .graphics_for(id) .parent(id) .color(color) .set(point_id, ui); }; let mut events = Vec::new(); // Instantiate the widgets in a big loop. let mut iter = envelope.points().zip(state.ids.circles.iter()).enumerate(); if let Some((i, (&first, &first_id))) = iter.next() { // The first point widget. let first_offset = ruler::x_offset_from_ticks(first.ticks, total_ticks, w); point_widget(i, first_offset, first.value, first_id, ui, &mut events); let mut prev_offset = first_offset; let mut prev_toggle = first.value; let mut rectangle_ids = state.ids.rectangles.iter(); let mut prev_point_id = first_id; for (i, (&point, &point_id)) in iter { // All following point widgets. let point_x_offset = ruler::x_offset_from_ticks(point.ticks, total_ticks, w); point_widget(i, point_x_offset, point.value, point_id, ui, &mut events); // The rectangle widget. if prev_toggle == ToggleValue(true) { let &rectangle_id = rectangle_ids.next().expect("Not enough rectangle ids"); let right = point_x_offset; let left = prev_offset; let width = right - left; let elem = Elem::BetweenPoints(i - 1, i); let color = super::color_elem_by_playhead(elem, playhead_delta_range, color); let color = match ui .widget_input(prev_point_id) .mouse() .or_else(|| ui.widget_input(point_id).mouse()) { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; let x_offset = left + width / 2.0; widget::Rectangle::fill([width, h]) .depth(2.0) // Place behind lines and circles. .x_relative_to(id, x_offset) .graphics_for(id) .color(color.alpha(0.5)) .parent(id) .set(rectangle_id, ui); } prev_offset = point_x_offset; prev_toggle = point.value; prev_point_id = point_id; } } // // A Line widget to accent the current interaction with the widget. 
// if let Some(mouse) = ui.widget_input(idx).mouse() { // let (x, ticks, value) = match new_interaction { // Highlighted(Elem::Point(p_idx)) | Clicked(Elem::Point(p_idx), _, _) => { // let p = envelope.env.points[p_idx]; // let x = x_from_ticks(p.ticks); // (x, p.ticks, p.value) // }, // Highlighted(_) | Clicked(_, _, _) => { // let x = mouse.xy[0]; // let ticks = ticks_from_x(x); // let value = value_from_y(mouse.xy[1]); // (x, ticks, value) // }, // _ => return, // }; // let color = match new_interaction { // // If whatever we're interacting with is highlighted, we should be too. // Highlighted(Elem::Point(_)) => color.highlighted(), // Highlighted(_) => color.highlighted().alpha(0.5), // // Only draw the clicked point if it is still between the clicked area. // Clicked(Elem::BetweenPoints(a, b), _, _) => // match (envelope.points().nth(a), envelope.points().nth(b)) { // (Some(p_a), Some(p_b)) if p_a.ticks <= ticks && ticks <= p_b.ticks => // color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still before the first point. // Clicked(Elem::BeforeFirstPoint, _, _) => // match envelope.points().nth(0) { // Some(p) if ticks <= p.ticks => color.clicked().alpha(0.7), // _ => return, // }, // // Only draw the clicked point if it is still after the last point. // Clicked(Elem::AfterLastPoint, _, _) => // match envelope.points().last() { // Some(p) if p.ticks <= ticks => color.clicked().alpha(0.7), // _ => return, // }, // Clicked(Elem::EmptyRect, _, _) => color.clicked().alpha(0.7), // Clicked(Elem::Point(_), _, _) => color.clicked(), // _ => return, // }; // let (y_bottom, y_top) = match value { // ToggleValue(true) => (y + h / 4.0, rect.top()), // ToggleValue(false) => (rect.bottom(), y - h / 4.0), // }; // let start = [x, y_bottom]; // let end = [x, y_top]; // const THICKNESS: Scalar = 2.0; // let line_idx = state.phantom_line_idx.get(&mut ui); // Line::abs(start, end) // .depth(1.0) // Place behind circles but in front of rectangles. // .graphics_for(idx) // .parent(idx) // .color(color) // .thickness(THICKNESS) // .set(line_idx, &mut ui); // }; events } }
{ Toggle { bars: bars, ppqn: ppqn, maybe_playhead: None, envelope: envelope, common: widget::CommonBuilder::default(), style: Style::default(), } }
identifier_body
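The `num_rectangles` block in the row above counts one rectangle per gap that follows a point whose value is `ToggleValue(true)`. A hedged, self-contained restatement of that counting over plain `bool`s (illustrative only, assuming the same pairing semantics):

fn count_on_spans(values: &[bool]) -> usize {
    // One rectangle per consecutive pair whose left point is "on".
    values.windows(2).filter(|pair| pair[0]).count()
}

// e.g. count_on_spans(&[true, false, true, true]) == 2: the first and third
// gaps follow an "on" point, while the second follows an "off" point.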
toggle.rs
use bars_duration_ticks; use conrod_core::{self as conrod, widget}; use env; use ruler; use time_calc::{self as time, Ticks}; use track; pub use env::{Point, PointTrait, Toggle as ToggleValue, Trait as EnvelopeTrait}; /// The envelope type compatible with the `Toggle` automation track. pub type Envelope = env::bounded::Envelope<ToggleValue>; /// For viewing and manipulating series of boolean valued points over time. #[derive(WidgetCommon)] pub struct Toggle<'a> { #[conrod(common_builder)] common: widget::CommonBuilder, envelope: &'a Envelope, bars: &'a [time::TimeSig], ppqn: time::Ppqn, /// The position of the playhead in ticks along with the change in its position in ticks. pub maybe_playhead: Option<(Ticks, Ticks)>, style: Style, } /// Unique state for the Toggle automation. pub struct State { ids: Ids, } widget_ids! { struct Ids { circles[], rectangles[], phantom_line, } } #[derive(Copy, Clone, Debug, Default, PartialEq, WidgetStyle)] pub struct Style { #[conrod(default = "theme.shape_color")] pub color: Option<conrod::Color>, #[conrod(default = "4.0")] pub point_radius: Option<conrod::Scalar>, } /// The various kinds of events returned by an automation track. #[derive(Copy, Clone, Debug)] pub enum Event { /// Upon playhead movement, represents new boolean value at playhead. Interpolate(bool), /// Indicates that the toggle value has changed since the last update. SwitchTo(bool), /// Some event which would mutate the envelope has occurred. Mutate(super::Mutate<ToggleValue>), } impl<'a> Toggle<'a> { /// Construct a new default Toggle automation. pub fn new(bars: &'a [time::TimeSig], ppqn: time::Ppqn, envelope: &'a Envelope) -> Self { Toggle { bars: bars, ppqn: ppqn, maybe_playhead: None, envelope: envelope, common: widget::CommonBuilder::default(), style: Style::default(), } } builder_methods! { pub point_radius { style.point_radius = Some(conrod::Scalar) } } } impl<'a> track::Widget for Toggle<'a> { fn playhead(mut self, playhead: (Ticks, Ticks)) -> Self { self.maybe_playhead = Some(playhead); self } } impl<'a> conrod::Colorable for Toggle<'a> { builder_method!(color { style.color = Some(conrod::Color) }); } impl<'a> conrod::Widget for Toggle<'a> { type State = State; type Style = Style; type Event = Vec<Event>; fn init_state(&self, id_gen: widget::id::Generator) -> Self::State { State { ids: Ids::new(id_gen), } } fn style(&self) -> Self::Style { self.style.clone() } fn default_y_dimension(&self, ui: &conrod::Ui) -> conrod::position::Dimension { ui.theme .widget_style::<Style>() .and_then(|default| default.common.maybe_y_dimension) .unwrap_or(conrod::position::Dimension::Absolute( super::super::DEFAULT_HEIGHT, )) } fn update(self, args: widget::UpdateArgs<Self>) -> Self::Event { use super::Elem; use conrod_core::utils::{clamp, map_range}; use conrod_core::{Colorable, Positionable}; let widget::UpdateArgs { id, rect, state, style, ui, .. } = args; let Toggle { envelope, bars, ppqn, maybe_playhead, .. } = self; let num_points = envelope.points().count(); let num_rectangles = { let mut points = envelope.points(); points .next() .map(|first| { let mut prev_toggle = first.value; let mut count = 0; for point in points { if prev_toggle == ToggleValue(true) { count += 1; } prev_toggle = point.value; } count }) .unwrap_or(0) }; // Ensure we have a circle index for each point. if state.ids.circles.len() < num_points { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.circles.resize(num_points, id_gen)); } // Ensure we have a rectangle index for each rectangle we may draw.
if state.ids.rectangles.len() < num_rectangles { let id_gen = &mut ui.widget_id_generator(); state.update(|state| state.ids.rectangles.resize(num_rectangles, id_gen)); } let (w, h) = rect.w_h(); let half_h = h / 2.0; let color = style.color(ui.theme()); let point_radius = style.point_radius(ui.theme()); let total_ticks = bars_duration_ticks(bars.iter().cloned(), ppqn); // Get the time in ticks from some position over the Toggle automation. let ticks_from_x = |x: conrod::Scalar| { Ticks(map_range( x, rect.left(), rect.right(), 0, total_ticks.ticks(), )) }; // `false` if `y` is closer to the bottom, `true` if y is closer to the top. let value_from_y = |y: conrod::Scalar| { let perc = map_range(y, rect.bottom(), rect.top(), 0.0, 1.0); if perc < 0.5 { ToggleValue(false) } else { ToggleValue(true) } }; // Same as `ticks_from_x` but clamps the ticks to the total_ticks range. let clamped_ticks_from_x = |x: conrod::Scalar| clamp(ticks_from_x(x), Ticks(0), total_ticks); // All that remains is to instantiate the graphics widgets. // // Check whether or not we need to do so by checking whether or not we're visible. if conrod::graph::algo::cropped_area_of_widget(ui.widget_graph(), id).is_none() { return Vec::new(); } // Determine the element range over which the playhead has traversed since the last update. let playhead_delta_range = match maybe_playhead { Some((playhead, delta)) if delta > Ticks(0) => { let start = playhead - delta; let end = playhead; super::maybe_surrounding_elems(total_ticks, envelope, start, end) } _ => None, }; // A function for instantiating a Circle widget for a point. let point_widget = |i: usize, x_offset: conrod::Scalar, value: ToggleValue, point_id: widget::Id, ui: &mut conrod::UiCell, events: &mut Vec<Event>| { for widget_event in ui.widget_input(point_id).events() { use conrod_core::{event, input}; match widget_event { // Check to see if the toggle point is being dragged. event::Widget::Drag(drag) if drag.button == input::MouseButton::Left => { let point_rect = ui.rect_of(point_id).unwrap(); let drag_to_abs_xy = conrod::utils::vec2_add(drag.to, point_rect.xy()); let drag_point = super::DragPoint { idx: i, ticks: clamped_ticks_from_x(drag_to_abs_xy[0]), value: value_from_y(drag_to_abs_xy[1]), }; events.push(Event::Mutate(drag_point.into())); } // Check to see if the toggle point is being removed. event::Widget::Click(click) if click.button == input::MouseButton::Right => { let remove_point = super::RemovePoint { idx: i }; events.push(Event::Mutate(remove_point.into())); } _ => (), } } let y_offset = if value == ToggleValue(false) { -half_h } else { half_h }; let point_elem = Elem::Point(i); let color = super::color_elem_by_playhead(point_elem, playhead_delta_range, color); let color = match ui.widget_input(point_id).mouse() { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; widget::Circle::fill(point_radius) .x_y_relative_to(id, x_offset, y_offset) .graphics_for(id) .parent(id) .color(color) .set(point_id, ui); }; let mut events = Vec::new(); // Instantiate the widgets in a big loop. let mut iter = envelope.points().zip(state.ids.circles.iter()).enumerate(); if let Some((i, (&first, &first_id))) = iter.next() { // The first point widget.
let first_offset = ruler::x_offset_from_ticks(first.ticks, total_ticks, w); point_widget(i, first_offset, first.value, first_id, ui, &mut events); let mut prev_offset = first_offset; let mut prev_toggle = first.value; let mut rectangle_ids = state.ids.rectangles.iter(); let mut prev_point_id = first_id; for (i, (&point, &point_id)) in iter { // All following point widgets. let point_x_offset = ruler::x_offset_from_ticks(point.ticks, total_ticks, w); point_widget(i, point_x_offset, point.value, point_id, ui, &mut events); // The rectangle widget. if prev_toggle == ToggleValue(true) { let &rectangle_id = rectangle_ids.next().expect("Not enough rectangle ids"); let right = point_x_offset; let left = prev_offset; let width = right - left; let elem = Elem::BetweenPoints(i - 1, i); let color = super::color_elem_by_playhead(elem, playhead_delta_range, color); let color = match ui .widget_input(prev_point_id) .mouse() .or_else(|| ui.widget_input(point_id).mouse()) { Some(mouse) => match mouse.buttons.left().is_down() { true => color.clicked(), false => color.highlighted(), }, None => color, }; let x_offset = left + width / 2.0; widget::Rectangle::fill([width, h]) .depth(2.0) // Place behind lines and circles. .x_relative_to(id, x_offset) .graphics_for(id) .color(color.alpha(0.5)) .parent(id) .set(rectangle_id, ui); } prev_offset = point_x_offset; prev_toggle = point.value; prev_point_id = point_id; } } // // A Line widget to accent the current interaction with the widget. // if let Some(mouse) = ui.widget_input(idx).mouse() { // let (x, ticks, value) = match new_interaction { // Highlighted(Elem::Point(p_idx)) | Clicked(Elem::Point(p_idx), _, _) => { // let p = envelope.env.points[p_idx]; // let x = x_from_ticks(p.ticks); // (x, p.ticks, p.value) // }, // Highlighted(_) | Clicked(_, _, _) => { // let x = mouse.xy[0]; // let ticks = ticks_from_x(x); // let value = value_from_y(mouse.xy[1]); // (x, ticks, value) // }, // _ => return, // }; // let color = match new_interaction { // // If whatever we're interacting with is highlighted, we should be too. // Highlighted(Elem::Point(_)) => color.highlighted(), // Highlighted(_) => color.highlighted().alpha(0.5), // // Only draw the clicked point if it is still between the clicked area. // Clicked(Elem::BetweenPoints(a, b), _, _) => // match (envelope.points().nth(a), envelope.points().nth(b)) { // (Some(p_a), Some(p_b)) if p_a.ticks <= ticks && ticks <= p_b.ticks => // color.clicked().alpha(0.7), // _ => return, // },
// _ => return, // }, // // Only draw the clicked point if it is still after the last point. // Clicked(Elem::AfterLastPoint, _, _) => // match envelope.points().last() { // Some(p) if p.ticks <= ticks => color.clicked().alpha(0.7), // _ => return, // }, // Clicked(Elem::EmptyRect, _, _) => color.clicked().alpha(0.7), // Clicked(Elem::Point(_), _, _) => color.clicked(), // _ => return, // }; // let (y_bottom, y_top) = match value { // ToggleValue(true) => (y + h / 4.0, rect.top()), // ToggleValue(false) => (rect.bottom(), y - h / 4.0), // }; // let start = [x, y_bottom]; // let end = [x, y_top]; // const THICKNESS: Scalar = 2.0; // let line_idx = state.phantom_line_idx.get(&mut ui); // Line::abs(start, end) // .depth(1.0) // Place behind circles but in front of rectangles. // .graphics_for(idx) // .parent(idx) // .color(color) // .thickness(THICKNESS) // .set(line_idx, &mut ui); // }; events } }
// // Only draw the clicked point if it is still before the first point. // Clicked(Elem::BeforeFirstPoint, _, _) => // match envelope.points().nth(0) { // Some(p) if ticks <= p.ticks => color.clicked().alpha(0.7),
random_line_split
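`ticks_from_x` and `clamped_ticks_from_x` above map an x position linearly across the track rect onto `[0, total_ticks]`, clamping drags that leave the rect. A simplified sketch with `f64`/`i64` standing in for conrod's `Scalar` and time_calc's `Ticks` (names are illustrative, not from the file):

fn clamped_ticks_from_x_sketch(x: f64, left: f64, right: f64, total_ticks: i64) -> i64 {
    // Linear map of x across the rect onto the tick range...
    let ticks = ((x - left) / (right - left) * total_ticks as f64) as i64;
    // ...then clamp so positions past either edge stay in range.
    ticks.max(0).min(total_ticks)
}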
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. 
fn update_link( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error> { log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) } fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id,.. 
})| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = node_table.update_links(from_node_id, status)
} Ok::<_, Error>(()) }, async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built: Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut
random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
{ log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; }
conditional_block
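Worth noting about `build_routes` above: the `BinaryHeap` holds bare `NodeId`s, so nodes pop in id order rather than by best-known cost; the loop behaves as a converging worklist (a node is re-queued whenever its estimate improves) rather than a strict Dijkstra. A self-contained sketch of that relaxation over `(from, to, cost)` tuples, ignoring link ids and the `2 * FORWARDING_TIME` hop charge for brevity (all names here are illustrative):

use std::collections::{BTreeMap, BinaryHeap};

fn shortest_costs(root: usize, links: &[(usize, usize, u64)]) -> BTreeMap<usize, u64> {
    let mut best: BTreeMap<usize, u64> = BTreeMap::new();
    let mut todo: BinaryHeap<usize> = BinaryHeap::new();
    best.insert(root, 0);
    todo.push(root);
    while let Some(from) = todo.pop() {
        let cost_from = best[&from];
        for &(_, to, cost) in links.iter().filter(|&&(src, _, _)| src == from) {
            let candidate = cost_from + cost;
            let entry = best.entry(to).or_insert(u64::MAX);
            if candidate < *entry {
                *entry = candidate;
                // Re-queue so `to`'s neighbours get relaxed with the better cost.
                todo.push(to);
            }
        }
    }
    best
}

// shortest_costs(1, &[(1, 2, 10), (2, 3, 5), (1, 3, 100)]) yields
// {1: 0, 2: 10, 3: 15}: the two-hop path beats the direct 100-cost link.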
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. 
fn update_link( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error> { log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) } fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id,.. 
})| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = node_table.update_links(from_node_id, status) { log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; }
async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built: Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64];
rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
} Ok::<_, Error>(()) },
random_line_split
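An aside on the test helper `remove_item` above: the index loop can also be written with `Iterator::position`, which (under the assumption of identical semantics: remove only the first match, report whether one was found) reads a little more directly:

fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool {
    match from.iter().position(|x| x == value) {
        Some(i) => {
            from.remove(i); // drop only the first occurrence
            true
        }
        None => false,
    }
}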
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. fn update_link( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error>
fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id,.. 
})| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = node_table.update_links(from_node_id, status) { log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; } } Ok::<_, Error>(()) }, async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built:
Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
{ log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) }
identifier_body
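The last `builds_route_ok` case in `test_build_routes` above also illustrates the cost model in `build_routes`: each hop pays its round-trip time plus `2 * FORWARDING_TIME` (100 ms). A quick arithmetic check of why the route to node 3 leaves on link 1 in that case (values taken from the test; `FWD` is a local stand-in for `FORWARDING_TIME`):

fn main() {
    const FWD: u64 = 100; // stand-in for FORWARDING_TIME, in milliseconds
    let direct = 1000 + 2 * FWD; // A -> C on link 2: 1200 ms
    let via_b = (10 + 2 * FWD) + (88 + 2 * FWD); // A -> B -> C over link 1: 498 ms
    assert!(via_b < direct); // so the expected outcome includes (3, 1)
}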
route_planner.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{ future_help::{Observer, PollMutex}, labels::{NodeId, NodeLinkId}, link::LinkStatus, router::Router, }; use anyhow::{bail, format_err, Error}; use fuchsia_async::Timer; use futures::{future::poll_fn, lock::Mutex, prelude::*, ready}; use std::{ collections::{BTreeMap, BinaryHeap}, sync::{Arc, Weak}, task::{Context, Poll, Waker}, time::Duration, }; /// Assumed forwarding time through a node. /// This is a temporary hack to alleviate some bad route selection. const FORWARDING_TIME: Duration = Duration::from_millis(100); /// Collects all information about a node in one place #[derive(Debug)] struct Node { links: BTreeMap<NodeLinkId, Link>, } /// During pathfinding, collects the shortest path so far to a node #[derive(Debug, Clone, Copy)] struct NodeProgress { round_trip_time: Duration, outgoing_link: NodeLinkId, } /// Describes the state of a link #[derive(Debug, Clone)] pub struct LinkDescription { /// Current round trip time estimate for this link pub round_trip_time: Duration, } /// Collects all information about one link on one node /// Links that are owned by NodeTable should remain owned (mutable references should not be given /// out) #[derive(Debug)] pub struct Link { /// Destination node for this link pub to: NodeId, /// Description of this link pub desc: LinkDescription, } /// Table of all nodes (and links between them) known to an instance struct NodeTable { root_node: NodeId, nodes: BTreeMap<NodeId, Node>, version: u64, wake_on_version_change: Option<Waker>, } impl NodeTable { /// Create a new node table rooted at `root_node` pub fn new(root_node: NodeId) -> NodeTable { NodeTable { root_node, nodes: BTreeMap::new(), version: 0, wake_on_version_change: None } } fn poll_new_version(&mut self, ctx: &mut Context<'_>, last_version: &mut u64) -> Poll<()> { if *last_version == self.version { self.wake_on_version_change = Some(ctx.waker().clone()); Poll::Pending } else { *last_version = self.version; Poll::Ready(()) } } fn get_or_create_node_mut(&mut self, node_id: NodeId) -> &mut Node { self.nodes.entry(node_id).or_insert_with(|| Node { links: BTreeMap::new() }) } /// Update a single link on a node. fn
( &mut self, from: NodeId, to: NodeId, link_id: NodeLinkId, desc: LinkDescription, ) -> Result<(), Error> { log::trace!( "{:?} update_link: from:{:?} to:{:?} link_id:{:?} desc:{:?}", self.root_node, from, to, link_id, desc ); if from == to { bail!("Circular link seen"); } self.get_or_create_node_mut(to); self.get_or_create_node_mut(from).links.insert(link_id, Link { to, desc }); Ok(()) } fn update_links(&mut self, from: NodeId, links: Vec<LinkStatus>) -> Result<(), Error> { self.get_or_create_node_mut(from).links.clear(); for LinkStatus { to, local_id, round_trip_time } in links.into_iter() { self.update_link(from, to, local_id, LinkDescription { round_trip_time })?; } self.version += 1; self.wake_on_version_change.take().map(|w| w.wake()); Ok(()) } /// Build a routing table for our node based on current link data fn build_routes(&self) -> impl Iterator<Item = (NodeId, NodeLinkId)> { let mut todo = BinaryHeap::new(); log::trace!("{:?} BUILD ROUTES: {:?}", self.root_node, self.nodes); let mut progress = BTreeMap::<NodeId, NodeProgress>::new(); for (link_id, link) in self.nodes.get(&self.root_node).unwrap().links.iter() { if link.to == self.root_node { continue; } todo.push(link.to); let new_progress = NodeProgress { round_trip_time: link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: *link_id, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; } }) .or_insert_with(|| new_progress); } log::trace!("BUILD START: progress={:?} todo={:?}", progress, todo); while let Some(from) = todo.pop() { log::trace!("STEP {:?}: progress={:?} todo={:?}", from, progress, todo); let progress_from = progress.get(&from).unwrap().clone(); for (_, link) in self.nodes.get(&from).unwrap().links.iter() { if link.to == self.root_node { continue; } let new_progress = NodeProgress { round_trip_time: progress_from.round_trip_time + link.desc.round_trip_time + 2 * FORWARDING_TIME, outgoing_link: progress_from.outgoing_link, }; progress .entry(link.to) .and_modify(|p| { if p.round_trip_time > new_progress.round_trip_time { *p = new_progress; todo.push(link.to); } }) .or_insert_with(|| { todo.push(link.to); new_progress }); } } log::trace!("DONE: progress={:?} todo={:?}", progress, todo); progress .into_iter() .map(|(node_id, NodeProgress { outgoing_link: link_id, ..
})| (node_id, link_id)) } } #[derive(Debug)] pub(crate) struct RemoteRoutingUpdate { pub(crate) from_node_id: NodeId, pub(crate) status: Vec<LinkStatus>, } pub(crate) type RemoteRoutingUpdateSender = futures::channel::mpsc::Sender<RemoteRoutingUpdate>; pub(crate) type RemoteRoutingUpdateReceiver = futures::channel::mpsc::Receiver<RemoteRoutingUpdate>; pub(crate) fn routing_update_channel() -> (RemoteRoutingUpdateSender, RemoteRoutingUpdateReceiver) { futures::channel::mpsc::channel(1) } pub(crate) async fn run_route_planner( router: &Weak<Router>, mut remote_updates: RemoteRoutingUpdateReceiver, mut local_updates: Observer<Vec<LinkStatus>>, ) -> Result<(), Error> { let get_router = move || Weak::upgrade(router).ok_or_else(|| format_err!("router gone")); let node_table = Arc::new(Mutex::new(NodeTable::new(get_router()?.node_id()))); let remote_node_table = node_table.clone(); let local_node_table = node_table.clone(); let update_node_table = node_table; let _: ((), (), ()) = futures::future::try_join3( async move { while let Some(RemoteRoutingUpdate { from_node_id, status }) = remote_updates.next().await { let mut node_table = remote_node_table.lock().await; if from_node_id == node_table.root_node { log::warn!("Attempt to update own node id links as remote"); continue; } if let Err(e) = node_table.update_links(from_node_id, status) { log::warn!("Update remote links from {:?} failed: {:?}", from_node_id, e); continue; } } Ok::<_, Error>(()) }, async move { while let Some(status) = local_updates.next().await { let mut node_table = local_node_table.lock().await; let root_node = node_table.root_node; if let Err(e) = node_table.update_links(root_node, status) { log::warn!("Update local links failed: {:?}", e); continue; } } Ok(()) }, async move { let mut pm = PollMutex::new(&*update_node_table); let mut current_version = 0; let mut poll_version = move |ctx: &mut Context<'_>| { let mut node_table = ready!(pm.poll(ctx)); ready!(node_table.poll_new_version(ctx, &mut current_version)); Poll::Ready(node_table) }; loop { let node_table = poll_fn(&mut poll_version).await; get_router()?.update_routes(node_table.build_routes(), "new_routes").await?; drop(node_table); Timer::new(Duration::from_millis(100)).await; } }, ) .await?; Ok(()) } #[cfg(test)] mod test { use super::*; use arbitrary::{Arbitrary, Unstructured}; use rand::Rng; use std::collections::HashMap; use std::time::Instant; fn remove_item<T: Eq>(value: &T, from: &mut Vec<T>) -> bool { let len = from.len(); for i in 0..len { if from[i] == *value { from.remove(i); return true; } } return false; } fn construct_node_table_from_links(links: &[(u64, u64, u64, u64)]) -> NodeTable { let mut node_table = NodeTable::new(1.into()); for (from, to, link_id, rtt) in links { node_table .update_link( (*from).into(), (*to).into(), (*link_id).into(), LinkDescription { round_trip_time: Duration::from_millis(*rtt) }, ) .unwrap(); } node_table } fn is_outcome(mut got: Vec<(NodeId, NodeLinkId)>, outcome: &[(u64, u64)]) -> bool { let mut result = true; for (node_id, link_id) in outcome { if !remove_item(&((*node_id).into(), (*link_id).into()), &mut got) { log::trace!("Expected outcome not found: {}#{}", node_id, link_id); result = false; } } for (node_id, link_id) in got { log::trace!("Unexpected outcome: {}#{}", node_id.0, link_id.0); result = false; } result } fn builds_route_ok(links: &[(u64, u64, u64, u64)], outcome: &[(u64, u64)]) -> bool { log::trace!("TEST: {:?} --> {:?}", links, outcome); let node_table = construct_node_table_from_links(links); let built: 
Vec<(NodeId, NodeLinkId)> = node_table.build_routes().collect(); let r = is_outcome(built.clone(), outcome); if !r { log::trace!("NODE_TABLE: {:?}", node_table.nodes); log::trace!("BUILT: {:?}", built); } r } #[test] fn test_build_routes() { crate::test_util::init(); assert!(builds_route_ok(&[(1, 2, 1, 10), (2, 1, 123, 5)], &[(2, 1)])); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 10), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 2)] )); assert!(builds_route_ok( &[ (1, 2, 1, 10), (2, 1, 123, 5), (1, 3, 2, 1000), (3, 1, 133, 1), (2, 3, 7, 88), (3, 2, 334, 23) ], &[(2, 1), (3, 1)] )); } #[derive(Arbitrary, Debug, Clone, Copy)] struct DoesntFormLoops { a_to_b: u64, b_to_a: u64, a_to_c: u64, c_to_a: u64, } fn verify_no_loops(config: DoesntFormLoops) { // With node configuration: // B(2) - A(1) - C(3) // Verify that routes from A to B do not point at C // and that routes from A to C do not point at B println!("{:?}", config); let built: HashMap<NodeId, NodeLinkId> = construct_node_table_from_links(&[ (1, 2, 100, config.a_to_b), (2, 1, 200, config.b_to_a), (1, 3, 300, config.a_to_c), (3, 1, 400, config.c_to_a), ]) .build_routes() .collect(); assert_eq!(built.get(&2.into()), Some(&100.into())); assert_eq!(built.get(&3.into()), Some(&300.into())); } #[test] fn no_loops() { crate::test_util::init(); let start = Instant::now(); while Instant::now() - start < Duration::from_secs(1) { let mut random_junk = [0u8; 64]; rand::thread_rng().fill(&mut random_junk); verify_no_loops(Arbitrary::arbitrary(&mut Unstructured::new(&random_junk)).unwrap()); } } }
update_link
identifier_name
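One detail worth flagging in the build_routes code above: the BinaryHeap holds bare NodeIds, so it orders by node id rather than by accumulated round-trip time, and the loop compensates by re-pushing any node whose estimate improves, which makes it closer to a worklist relaxation than to textbook Dijkstra. For contrast, here is a sketch of the cost-ordered variant; the u64 ids and the (neighbor, cost) adjacency map are assumptions for illustration, not the crate's types.

use std::cmp::Reverse;
use std::collections::{BTreeMap, BinaryHeap};

// Classic Dijkstra: the heap is ordered by accumulated cost via `Reverse`,
// so each node is settled the first time it pops with an up-to-date distance.
fn dijkstra(adj: &BTreeMap<u64, Vec<(u64, u64)>>, root: u64) -> BTreeMap<u64, u64> {
    let mut dist: BTreeMap<u64, u64> = BTreeMap::new();
    let mut heap = BinaryHeap::new();
    dist.insert(root, 0);
    heap.push(Reverse((0u64, root)));
    while let Some(Reverse((d, node))) = heap.pop() {
        if d > *dist.get(&node).unwrap_or(&u64::MAX) {
            continue; // stale heap entry: a shorter path was already recorded
        }
        for &(next, cost) in adj.get(&node).into_iter().flatten() {
            let nd = d + cost;
            if nd < *dist.get(&next).unwrap_or(&u64::MAX) {
                dist.insert(next, nd);
                heap.push(Reverse((nd, next)));
            }
        }
    }
    dist
}

fn main() {
    let mut adj = BTreeMap::new();
    adj.insert(1, vec![(2, 10), (3, 1000)]);
    adj.insert(2, vec![(3, 88)]);
    let dist = dijkstra(&adj, 1);
    assert_eq!(dist[&3], 98); // 1 -> 2 -> 3 beats the direct 1 -> 3 link
}

The example mirrors the shape of the third test case in the suffix, where an expensive direct link loses to a cheaper two-hop path.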
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js function imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct MetaData { fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data point drawing // x,y: the offset from the upper left corner // label_ratio: a fraction which represents the position of the current label in the total // label range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // The number of points in each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two-dimensional points // output label is a sparse integer in [0, num_classes) const PLANE_DIMENSION: u32 = 2; network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, that's magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot the classified background to the canvas // span_least: the least span of the area that should be drawn to the canvas (because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; 
let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordinate grid let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // for parameter meanings, see draw_prediction above #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data positions are limited to: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // only draw points that fall inside the canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0)
}); } #[cfg(test)] mod kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) { *POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } } #[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is multi-threaded by default, these cases are kept in one test to avoid data races // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
{ // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } }
conditional_block
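The conditional_block middle above is the clipping step of draw_points: scale the world coordinate by pixels-per-span, re-center on the canvas, and only call the host's draw_point when the pixel actually lands inside the canvas. A standalone sketch of that mapping follows; the function name and numbers are made up for illustration.

// Map a world-space point to a canvas pixel, or None when it is clipped.
fn world_to_pixel(
    x: f32,
    y: f32,
    pixel_per_span: f32,
    width: u32,
    height: u32,
) -> Option<(u32, u32)> {
    let px = (x * pixel_per_span) as i64 + width as i64 / 2;
    let py = (y * pixel_per_span) as i64 + height as i64 / 2;
    // Same bounds test as the block above, stated positively.
    if px >= 0 && px < width as i64 && py >= 0 && py < height as i64 {
        Some((px as u32, py as u32))
    } else {
        None
    }
}

fn main() {
    assert_eq!(world_to_pixel(0.0, 0.0, 50.0, 100, 100), Some((50, 50)));
    assert_eq!(world_to_pixel(2.0, 0.0, 50.0, 100, 100), None); // off the right edge
}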
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js function imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct MetaData { fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data point drawing // x,y: the offset from the upper left corner // label_ratio: a fraction which represents the position of the current label in the total // label range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // The number of points in each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two-dimensional points // output label is a sparse integer in [0, num_classes) const PLANE_DIMENSION: u32 = 2; network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, that's magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot the classified background to the canvas // span_least: the least span of the area that should be drawn to the canvas (because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; 
let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordinate grid let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // for parameter meanings, see draw_prediction above #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data positions are limited to: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // only draw points that fall inside the canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0) { // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } } }); } #[cfg(test)] mod kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) {
const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } } #[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is multi-threaded by default, these cases are kept in one test to avoid data races // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
*POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions
random_line_split
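The random_line_split middle above cuts through a test double: in the test module, a #[no_mangle] extern "C" draw_point stands in for the JS import and simply counts calls, which is what lets test_draw_points assert on POINT_DRAW_TIMES. Reduced to its essentials, and assuming the once_cell dependency the row itself uses, the counting-stub pattern looks like this:

use once_cell::sync::Lazy;
use std::sync::Mutex;

// A global counter guarded by a Mutex stands in for the host function.
static CALLS: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0));

fn draw_point_stub(_x: u32, _y: u32, _label_ratio: f32) {
    *CALLS.lock().unwrap() += 1;
}

fn main() {
    for _ in 0..3 {
        draw_point_stub(0, 0, 0.5);
    }
    assert_eq!(*CALLS.lock().unwrap(), 3); // the stub saw every call
}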
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js function imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct MetaData { fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data point drawing // x,y: the offset from the upper left corner // label_ratio: a fraction which represents the position of the current label in the total // label range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // The number of points in each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two-dimensional points // output label is a sparse integer in [0, num_classes) const PLANE_DIMENSION: u32 = 2; network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, that's magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot the classified background to the canvas // span_least: the least span of the area that should be drawn to the canvas (because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; 
let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordinate grid let mut grid: Array3<f32> = Array::zeros((height, width, 2)); for y in 0..height { for x in 0..width { let coord = array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // for parameter meanings, see draw_prediction above #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data positions are limited to: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // only draw points that fall inside the canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0) { // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } } }); } #[cfg(test)] mod kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) { *POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all()
#[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is multi-threaded by default, these cases are kept in one test to avoid data races // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
{ init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } }
identifier_body
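The identifier_body middle above is a loss smoke test: train for 50 steps and assert the loss never rises above 1.1x the starting loss. The same shape of test on a toy one-parameter gradient descent, purely for illustration:

// Minimize f(w) = (w - 3)^2 by gradient descent and apply the same
// 1.1x-of-initial-loss tolerance the test above uses.
fn loss(w: f32) -> f32 {
    (w - 3.0) * (w - 3.0)
}

fn grad(w: f32) -> f32 {
    2.0 * (w - 3.0)
}

fn main() {
    let mut w = 0.0_f32;
    let rate = 0.1_f32;
    let loss_before = loss(w);
    for _ in 0..50 {
        w -= rate * grad(w);
        assert!(loss(w) < loss_before * 1.1);
    }
    assert!(loss(w) < 1e-3); // on this convex toy problem it also converges
}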
lib.rs
mod data; mod nn; use std::mem; use std::slice; //use std::os::raw::{/*c_double, c_int, */c_void}; // for js function imports use once_cell::sync::Lazy; use std::sync::Mutex; // for lazy_static // for global variables use ndarray::prelude::*; use ndarray::{array, Array, Array1, Array3, Axis, Zip}; use data::Data; use nn::Network; #[derive(Default)] struct
{ fc_size: u32, num_classes: u32, descent_rate: f32, regular_rate: f32, } #[derive(Default)] struct CriticalSection(MetaData, Data, Network); // Imported js functions extern "C" { // for debug fn log_u64(num: u32); // for data point drawing // x,y: the offset from the upper left corner // label_ratio: a fraction which represents the position of the current label in the total // label range fn draw_point(x: u32, y: u32, label_ratio: f32); } static DATA: Lazy<Mutex<CriticalSection>> = Lazy::new(|| Mutex::default()); #[no_mangle] // This function returns the offset of the allocated buffer in wasm memory pub fn alloc(size: u32) -> *mut u8 { let mut buffer: Vec<u8> = Vec::with_capacity(size as usize); let buffer_ptr = buffer.as_mut_ptr(); mem::forget(buffer); buffer_ptr } #[no_mangle] pub fn free(buffer_ptr: *mut u8, size: u32) { let _ = unsafe { Vec::from_raw_parts(buffer_ptr, 0, size as usize) }; } #[no_mangle] pub fn init( data_radius: f32, data_spin_span: f32, data_num: u32, num_classes: u32, data_gen_rand_max: f32, network_gen_rand_max: f32, fc_size: u32, descent_rate: f32, regular_rate: f32, ) { // Thanks rust compiler :-/ let ref mut tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, network) = tmp; metadata.fc_size = fc_size; metadata.num_classes = num_classes; metadata.descent_rate = descent_rate; metadata.regular_rate = regular_rate; // The number of points in each data class is the same data.init( num_classes, data_num / num_classes, data_radius, data_spin_span, data_gen_rand_max, ); // Input of this network is two-dimensional points // output label is a sparse integer in [0, num_classes) const PLANE_DIMENSION: u32 = 2; network.init(PLANE_DIMENSION, fc_size, num_classes, network_gen_rand_max); } #[no_mangle] pub fn train() -> f32 { let ref mut tmp = *DATA.lock().unwrap(); // Jesus, that's magic let CriticalSection(ref metadata, ref data, ref mut network) = *tmp; let regular_rate = metadata.regular_rate; let descent_rate = metadata.descent_rate; let (fc_layer, softmax) = network.forward_propagation(&data.points); let (dw1, db1, dw2, db2) = network.back_propagation( &data.points, &fc_layer, &softmax, &data.labels, regular_rate, ); let loss = network.loss(&softmax, &data.labels, regular_rate); network.descent(&dw1, &db1, &dw2, &db2, descent_rate); let (data_loss, regular_loss) = loss; data_loss + regular_loss } // Plot the classified background to the canvas // span_least: the least span of the area that should be drawn to the canvas (because usually the canvas is not square) #[no_mangle] pub fn draw_prediction(canvas: *mut u8, width: u32, height: u32, span_least: f32) { // assert!(span_least > 0f32); let width = width as usize; let height = height as usize; // `data` will be used to draw data points let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, _, network) = tmp; let num_classes = metadata.num_classes as usize; let r: Array1<f32> = Array::linspace(0f32, 200f32, num_classes); let g: Array1<f32> = Array::linspace(0f32, 240f32, num_classes); let b: Array1<f32> = Array::linspace(0f32, 255f32, num_classes); let span_per_pixel = span_least / width.min(height) as f32; let span_height = height as f32 * span_per_pixel; let span_width = width as f32 * span_per_pixel; let width_max = span_width / 2f32; let width_min = -span_width / 2f32; let height_max = span_height / 2f32; let height_min = -span_height / 2f32; let x_axis: Array1<f32> = Array::linspace(width_min, width_max, width); let y_axis: Array1<f32> = Array::linspace(height_min, height_max, height); // coordinate grid let mut grid: Array3<f32> = Array::zeros((height, width, 
2)); for y in 0..height { for x in 0..width { let coord = array![x_axis[[x]], y_axis[[y]]]; let mut slice = grid.slice_mut(s![y, x, ..]); slice.assign(&coord); } } let xys = grid.into_shape((height * width, 2)).unwrap(); let (_, softmax) = network.forward_propagation(&xys); let mut labels: Array1<usize> = Array::zeros(height * width); for (y, row) in softmax.axis_iter(Axis(0)).enumerate() { let mut maxx = 0 as usize; let mut max = row[[0]]; for (x, col) in row.iter().enumerate() { if *col > max { maxx = x; max = *col; } } labels[[y]] = maxx; } let grid_label = labels.into_shape((height, width)).unwrap(); let canvas_size = width * height * 4; let canvas: &mut [u8] = unsafe { slice::from_raw_parts_mut(canvas, canvas_size) }; for y in 0..height { for x in 0..width { // assume rgba canvas[4 * (y * width + x) + 0] = r[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 1] = g[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 2] = b[[grid_label[[y, x]]]] as u8; canvas[4 * (y * width + x) + 3] = 0xFF as u8; } } } // for parameter meanings, see draw_prediction above #[no_mangle] pub fn draw_points(width: u32, height: u32, span_least: f32) { let ref tmp = *DATA.lock().unwrap(); let CriticalSection(metadata, data, _) = tmp; let num_classes = metadata.num_classes as f32; let pixel_per_span = width.min(height) as f32 / span_least; let labels = &data.labels; let points = &data.points; let points_x = points.index_axis(Axis(1), 0); let points_y = points.index_axis(Axis(1), 1); Zip::from(labels) .and(points_x) .and(points_y) .apply(|&label, &x, &y| { // Assume data positions are limited to: // [-data_radius - data_rand_max, data_radius + data_rand_max] let x = (x * pixel_per_span) as i64 + width as i64 / 2; let y = (y * pixel_per_span) as i64 + height as i64 / 2; // only draw points that fall inside the canvas if !(x >= width as i64 || x < 0 || y >= height as i64 || y < 0) { // floor let x = x as u32; let y = y as u32; let label_ratio = label as f32 / num_classes; unsafe { draw_point(x, y, label_ratio); } } }); } #[cfg(test)] mod kernel_test { use super::*; static POINT_DRAW_TIMES: Lazy<Mutex<u32>> = Lazy::new(|| Mutex::new(0)); // Override the extern functions #[no_mangle] extern "C" fn draw_point(_: u32, _: u32, _: f32) { *POINT_DRAW_TIMES.lock().unwrap() += 1; } use std::f32::consts::PI; // for math functions const DATA_GEN_RADIUS: f32 = 1f32; const SPIN_SPAN: f32 = PI; const NUM_CLASSES: u32 = 3; const DATA_NUM: u32 = 300; const FC_SIZE: u32 = 100; const REGULAR_RATE: f32 = 0.001f32; const DESCENT_RATE: f32 = 1f32; const DATA_GEN_RAND_MAX: f32 = 0.25f32; const NETWORK_GEN_RAND_MAX: f32 = 0.1f32; #[test] fn test_all() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let loss_before: f32 = train(); for _ in 0..50 { let loss = train(); assert!(loss < loss_before * 1.1f32); } } #[test] fn test_buffer_allocation() { let buffer = alloc(114514); free(buffer, 114514); } #[test] fn test_draw_prediction() { init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); let width = 100; let height = 100; let buffer = alloc(width * height * 4); draw_prediction(buffer, width, height, 2f32); free(buffer, width * height * 4); } #[test] fn test_draw_points() { // Because cargo test is multi-threaded by default, these cases are kept in one test to avoid data races // span_least * 1.1 for padding init( DATA_GEN_RADIUS, SPIN_SPAN, DATA_NUM, NUM_CLASSES, DATA_GEN_RAND_MAX, 
NETWORK_GEN_RAND_MAX, FC_SIZE, DESCENT_RATE, REGULAR_RATE, ); // test small resolution drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test tall screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(1, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test flat screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 1, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test square screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(100, 100, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); // test huge screen drawing *POINT_DRAW_TIMES.lock().unwrap() = 0; draw_points(10000000, 1000000, DATA_GEN_RADIUS * 2f32 * 1.1f32); assert_eq!(DATA_NUM, *POINT_DRAW_TIMES.lock().unwrap()); } }
MetaData
identifier_name
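The MetaData name recovered above feeds the file's global-state idiom: exported wasm functions receive no context pointer from the host, so all state lives in one static Lazy<Mutex<...>> that every entry point locks. Below is a reduced sketch of the idiom; Config and its fields are stand-ins for the row's CriticalSection, and the once_cell dependency is assumed, as in the row itself.

use once_cell::sync::Lazy;
use std::sync::Mutex;

#[derive(Default, Debug)]
struct Config {
    fc_size: u32,
    descent_rate: f32,
}

// One lazily initialized, lock-guarded global holds everything the
// exported functions need.
static STATE: Lazy<Mutex<Config>> = Lazy::new(Mutex::default);

fn init(fc_size: u32, descent_rate: f32) {
    let mut state = STATE.lock().unwrap();
    state.fc_size = fc_size;
    state.descent_rate = descent_rate;
}

fn main() {
    init(100, 1.0);
    println!("{:?}", *STATE.lock().unwrap()); // Config { fc_size: 100, descent_rate: 1.0 }
}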
cache.rs
did = *last; let path = match self.paths.get(&did) { // The current stack does not necessarily correlate // with where the type was defined. On the other // hand, `paths` always has the right // information if present. Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) => Some(&*self.stack), None => None }; ((Some(*last), path), true) } } _ => ((None, Some(&*self.stack)), false) }; match parent { (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { debug_assert!(!item.is_stripped()); // A crate has a module at its root, containing all items, // which should not be indexed. The crate-item itself is // inserted later on when serializing the search-index. if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { ty: item.type_(), name: s.to_string(), path: path.join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent, parent_idx: None, search_type: get_index_search_type(&item), }); } } (Some(parent), None) if is_inherent_impl_item => { // We have a parent, but we don't know where they're // defined yet. Wait for later to index this item. self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. let pushed = match item.name { Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true } _ => false, }; match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | clean::ConstantItem(..) | clean::StaticItem(..) | clean::UnionItem(..) | clean::ForeignTypeItem | clean::MacroItem(..) | clean::ProcMacroItem(..) if !self.stripped_mod => { // Re-exported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a re-exported item doesn't show up in the // `public_items` map, so we can skip inserting into the // paths map if there was already an entry present and we're // not a public item. if !self.paths.contains_key(&item.def_id) || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } self.add_aliases(&item); } // Link variants to their parent enum because pages aren't emitted // for each variant. clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); } clean::PrimitiveItem(..) => { self.add_aliases(&item); self.paths.insert(item.def_id, (self.stack.clone(), item.type_())); } _ => {} } // Maintain the parent stack let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { clean::TraitItem(..) | clean::EnumItem(..) | clean::ForeignTypeItem | clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath { did, .. } => { self.parent_stack.push(did); true } ref t => { let prim_did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); match prim_did { Some(did) => { self.parent_stack.push(did); true } None => false, } } } } _ => false }; // Once we've recursively found all the generics, hoard off all the // implementations elsewhere. 
let ret = self.fold_item_recur(item).and_then(|item| { if let clean::Item { inner: clean::ImplItem(_), .. } = item { // Figure out the id of this impl. This may map to a // primitive rather than always to a struct/enum. // Note: matching twice to restrict the lifetime of the `i` borrow. let mut dids = FxHashSet::default(); if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { match i.for_ { clean::ResolvedPath { did, .. } | clean::BorrowedRef { type_: box clean::ResolvedPath { did, .. }, .. } => { dids.insert(did); } ref t => { let did = t.primitive_type().and_then(|t| { self.primitive_locations.get(&t).cloned() }); if let Some(did) = did { dids.insert(did); } } } if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) { for bound in generics { if let Some(did) = bound.def_id() { dids.insert(did); } } } } else { unreachable!() }; let impl_item = Impl { impl_item: item, }; if impl_item.trait_did().map_or(true, |d| self.traits.contains_key(&d)) { for did in dids { self.impls.entry(did).or_insert(vec![]).push(impl_item.clone()); } } else { let trait_did = impl_item.trait_did().unwrap(); self.orphan_trait_impls.push((trait_did, dids, impl_item)); } None } else { Some(item) } }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } self.stripped_mod = orig_stripped_mod; self.parent_is_trait_impl = orig_parent_is_trait_impl; ret } } impl Cache { fn add_aliases(&mut self, item: &clean::Item) { if item.def_id.index == CRATE_DEF_INDEX { return } if let Some(ref item_name) = item.name { let path = self.paths.get(&item.def_id) .map(|p| p.0[..p.0.len() - 1].join("::")) .unwrap_or("std".to_owned()); for alias in item.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::alias)) .filter_map(|a| a.value_str() .map(|s| s.to_string().replace("\"", ""))) .filter(|v| !v.is_empty()) .collect::<FxHashSet<_>>() .into_iter() { self.aliases.entry(alias) .or_insert(Vec::with_capacity(1)) .push(IndexItem { ty: item.type_(), name: item_name.to_string(), path: path.clone(), desc: shorten(plain_summary_line(item.doc_value())), parent: None, parent_idx: None, search_type: get_index_search_type(&item), }); } } } } /// Attempts to find where an external crate is located, given that we're /// rendering in to the specified source destination. fn extern_location(e: &clean::ExternalCrate, extern_url: Option<&str>, dst: &Path) -> ExternalLocation { use ExternalLocation::*; // See if there's documentation generated into the local directory let local_location = dst.join(&e.name); if local_location.is_dir() { return Local; } if let Some(url) = extern_url { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/'); } return Remote(url); } // Failing that, see if there's an attribute specifying where to find this // external crate e.attrs.lists(sym::doc) .filter(|a| a.check_name(sym::html_root_url)) .filter_map(|a| a.value_str()) .map(|url| { let mut url = url.to_string(); if !url.ends_with("/") { url.push('/') } Remote(url) }).next().unwrap_or(Unknown) // Well, at least we tried. } /// Builds the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { let mut nodeid_to_pathid = FxHashMap::default(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = Vec::<Json>::new(); let Cache { ref mut search_index, ref orphan_impl_items, ref paths, .. } = *cache; // Attach all orphan items to the type's definition if the type // has since been learned. 
for &(did, ref item) in orphan_impl_items { if let Some(&(ref fqp, _)) = paths.get(&did) { search_index.push(IndexItem { ty: item.type_(), name: item.name.clone().unwrap(), path: fqp[..fqp.len() - 1].join("::"), desc: shorten(plain_summary_line(item.doc_value())), parent: Some(did), parent_idx: None, search_type: get_index_search_type(&item), }); } } // Reduce `NodeId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. let mut lastpath = String::new(); let mut lastpathid = 0usize; for item in search_index { item.parent_idx = item.parent.map(|nodeid| { if nodeid_to_pathid.contains_key(&nodeid) { *nodeid_to_pathid.get(&nodeid).unwrap() } else { let pathid = lastpathid; nodeid_to_pathid.insert(nodeid, pathid); lastpathid += 1; let &(ref fqp, short) = paths.get(&nodeid).unwrap(); crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json()); pathid } }); // Omit the parent path if it is same to that of the prior item. if lastpath == item.path { item.path.clear(); } else { lastpath = item.path.clone(); } crate_items.push(item.to_json()); } let crate_doc = krate.module.as_ref().map(|module| { shorten(plain_summary_line(module.doc_value())) }).unwrap_or(String::new()); let mut crate_data = BTreeMap::new(); crate_data.insert("doc".to_owned(), Json::String(crate_doc)); crate_data.insert("i".to_owned(), Json::Array(crate_items)); crate_data.insert("p".to_owned(), Json::Array(crate_paths)); // Collect the index into a string format!("searchIndex[{}] = {};", as_json(&krate.name), Json::Object(crate_data)) } fn get_index_search_type(item: &clean::Item) -> Option<IndexItemFunctionType> { let (all_types, ret_types) = match item.inner { clean::FunctionItem(ref f) => (&f.all_types, &f.ret_types), clean::MethodItem(ref m) => (&m.all_types, &m.ret_types), clean::TyMethodItem(ref m) => (&m.all_types, &m.ret_types), _ => return None, }; let inputs = all_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect(); let output = ret_types.iter().map(|arg| { get_index_type(&arg) }).filter(|a| a.name.is_some()).collect::<Vec<_>>(); let output = if output.is_empty() { None } else { Some(output) }; Some(IndexItemFunctionType { inputs, output }) } fn get_index_type(clean_type: &clean::Type) -> Type { let t = Type { name: get_index_type_name(clean_type, true).map(|s| s.to_ascii_lowercase()), generics: get_generics(clean_type), }; t } fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<String>
{ match *clean_type { clean::ResolvedPath { ref path, .. } => { let segments = &path.segments; let path_segment = segments.into_iter().last().unwrap_or_else(|| panic!( "get_index_type_name(clean_type: {:?}, accept_generic: {:?}) had length zero path", clean_type, accept_generic )); Some(path_segment.name.clone()) } clean::Generic(ref s) if accept_generic => Some(s.clone()), clean::Primitive(ref p) => Some(format!("{:?}", p)), clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic), // FIXME: add all from clean::Type. _ => None } }
identifier_body
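The get_index_type_name body above maps rustdoc's clean::Type to a search-index name: take the last path segment, accept bare generics only when asked, and recurse through references. A simplified stand-in with a toy enum (not rustdoc's actual types) captures the shape:

// Toy approximation of the dispatch in get_index_type_name.
enum Ty {
    Path(Vec<String>), // e.g. ["std", "vec", "Vec"]
    Generic(String),   // e.g. "T"
    Borrow(Box<Ty>),   // e.g. &T
}

fn index_name(ty: &Ty, accept_generic: bool) -> Option<String> {
    match ty {
        Ty::Path(segments) => segments.last().cloned(),
        Ty::Generic(s) if accept_generic => Some(s.clone()),
        Ty::Borrow(inner) => index_name(inner, accept_generic),
        _ => None, // e.g. a generic when accept_generic is false
    }
}

fn main() {
    let v = Ty::Borrow(Box::new(Ty::Path(vec![
        "std".into(),
        "vec".into(),
        "Vec".into(),
    ])));
    assert_eq!(index_name(&v, true), Some("Vec".to_string()));
}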