Schema (per-column statistics from the dataset preview):

column     type          stats
---------  ------------  -------------------
file_name  large_string  lengths 4 to 69
prefix     large_string  lengths 0 to 26.7k
suffix     large_string  lengths 0 to 24.8k
middle     large_string  lengths 0 to 2.12k
fim_type   large_string  4 classes
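Each row below is one fill-in-the-middle (FIM) sample: a source file (file_name) is split into a prefix, a masked middle, and a suffix, and fim_type records how the split point was chosen. Concatenating the three fields should reproduce the original file. A minimal sketch of that invariant, assuming the fields are plain contiguous splits; the toy strings are illustrative, not rows from this preview:

/// Reassembles one FIM row; prefix + middle + suffix must equal the original file.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut out = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    out.push_str(prefix);
    out.push_str(middle);
    out.push_str(suffix);
    out
}

fn main() {
    // Toy row: an identifier_body-style split of a one-line function.
    let file = reassemble("fn add(a: i32, b: i32) -> i32 ", "{ a + b }", "\n");
    assert_eq!(file, "fn add(a: i32, b: i32) -> i32 { a + b }\n");
}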
main.rs
use futures_util::future::Either; use futures_util::stream::StreamExt; use std::collections::{BTreeMap, HashMap}; use std::io::{self}; use structopt::StructOpt; use termion::raw::IntoRawMode; use tokio::prelude::*; use tui::backend::Backend; use tui::backend::TermionBackend; use tui::layout::{Constraint, Direction, Layout}; use tui::style::{Color, Modifier, Style}; use tui::widgets::{Block, Borders, Paragraph, Text, Widget}; use tui::Terminal; const DRAW_EVERY: std::time::Duration = std::time::Duration::from_millis(200); const WINDOW: std::time::Duration = std::time::Duration::from_secs(10); #[derive(Debug, StructOpt)] /// A live profile visualizer. /// /// Pipe the output of the appropriate `bpftrace` command into this program, and enjoy. /// Happy profiling! struct Opt { /// Treat input as a replay of a trace and emulate time accordingly. #[structopt(long)] replay: bool, } #[derive(Debug, Default)] struct Thread { window: BTreeMap<usize, String>, } fn main() -> Result<(), io::Error> { let opt = Opt::from_args(); if termion::is_tty(&io::stdin().lock()) { eprintln!("Don't type input to this program, that's silly."); return Ok(()); } let stdout = io::stdout().into_raw_mode()?; let backend = TermionBackend::new(stdout); let mut terminal = Terminal::new(backend)?; let mut tids = BTreeMap::new(); let mut inframe = None; let mut stack = String::new(); terminal.hide_cursor()?; terminal.clear()?; terminal.draw(|mut f| { let chunks = Layout::default() .direction(Direction::Vertical) .margin(2) .constraints([Constraint::Percentage(100)].as_ref()) .split(f.size()); Block::default() .borders(Borders::ALL) .title("Common thread fan-out points") .title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)) .render(&mut f, chunks[0]); })?; // a _super_ hacky way for us to get input from the TTY let tty = termion::get_tty()?; let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); std::thread::spawn(move || { use termion::input::TermRead; for key in tty.keys() { if let Err(_) = tx.send(key) { return; } } }); let mut rt = tokio::runtime::Runtime::new()?; rt.block_on(async move { let stdin = tokio::io::BufReader::new(tokio::io::stdin()); let lines = stdin.lines().map(Either::Left); let rx = rx.map(Either::Right); let mut input = futures_util::stream::select(lines, rx); let mut lastprint = 0; let mut lasttime = 0; while let Some(got) = input.next().await { match got { Either::Left(line) => { let line = line.unwrap(); if line.starts_with("Error") || line.starts_with("Attaching") { } else if!line.starts_with(' ') || line.is_empty() { if let Some((time, tid)) = inframe { // new frame starts, so finish the old one // skip empty stack frames if!stack.is_empty() { let nxt_stack = String::with_capacity(stack.capacity()); let mut stack = std::mem::replace(&mut stack, nxt_stack); // remove trailing ; let stackn = stack.len(); stack.truncate(stackn - 1); tids.entry(tid) .or_insert_with(Thread::default) .window .insert(time, stack); if opt.replay && lasttime!= 0 && time - lasttime > 1_000_000 { tokio::time::delay_for(std::time::Duration::from_nanos( (time - lasttime) as u64, )) .await; } lasttime = time; if std::time::Duration::from_nanos((time - lastprint) as u64) > DRAW_EVERY { draw(&mut terminal, &mut tids)?; lastprint = time; } } inframe = None; } if!line.is_empty() { // read time + tid let mut fields = line.split_whitespace(); let time = fields .next() .expect("no time given for frame") .parse::<usize>() .expect("invalid time"); let tid = fields .next() .expect("no tid given for frame") 
.parse::<usize>() .expect("invalid tid"); inframe = Some((time, tid)); } } else { assert!(inframe.is_some()); stack.push_str(line.trim()); stack.push(';'); } } Either::Right(key) => { let key = key?; if let termion::event::Key::Char('q') = key { break; } } } } terminal.clear()?; Ok(()) }) } fn draw<B: Backend>( terminal: &mut Terminal<B>, threads: &mut BTreeMap<usize, Thread>, ) -> Result<(), io::Error> { // keep our window relatively short let mut latest = 0; for thread in threads.values() { if let Some(&last) = thread.window.keys().next_back() { latest = std::cmp::max(latest, last); } } if latest > WINDOW.as_nanos() as usize { for thread in threads.values_mut() { // trim to last 5 seconds thread.window = thread .window .split_off(&(latest - WINDOW.as_nanos() as usize)); } } // now only reading let threads = &*threads; let mut lines = Vec::new(); let mut hits = HashMap::new(); let mut maxes = BTreeMap::new(); for (_, thread) in threads { // add up across the window let mut max: Option<(&str, usize)> = None; for (&time, stack) in &thread.window { latest = std::cmp::max(latest, time); let mut at = stack.len(); while let Some(stack_start) = stack[..at].rfind(';') { at = stack_start; let stack = &stack[at + 1..]; let count = hits.entry(stack).or_insert(0); *count += 1; if let Some((_, max_count)) = max { if *count >= max_count { max = Some((stack, *count)); } } else { max = Some((stack, *count)); } } } if let Some((stack, count)) = max { let e = maxes.entry(stack).or_insert((0, 0)); e.0 += 1; e.1 += count; } hits.clear(); } if maxes.is_empty() { return Ok(()); } let max = *maxes.values().map(|(_, count)| count).max().unwrap() as f64; // sort by where most threads are let mut maxes: Vec<_> = maxes.into_iter().collect(); maxes.sort_by_key(|(_, (nthreads, _))| *nthreads); for (stack, (nthreads, count)) in maxes.iter().rev() { let count = *count; let nthreads = *nthreads; if stack.find(';').is_none() { // this thread just shares the root frame continue; } if count == 1 { // this thread only has one sample ever, let's reduce noise... continue; } let red = (128.0 * count as f64 / max) as u8; let color = Color::Rgb(255, 128 - red, 128 - red); if nthreads == 1 { lines.push(Text::styled( format!("A thread fanned out from here {} times\n", count), Style::default().modifier(Modifier::BOLD).fg(color), )); } else { lines.push(Text::styled( format!( "{} threads fanned out from here {} times\n", nthreads, count ), Style::default().modifier(Modifier::BOLD).fg(color), )); } for (i, frame) in stack.split(';').enumerate() { // https://github.com/alexcrichton/rustc-demangle/issues/34 let offset = &frame[frame.rfind('+').unwrap_or_else(|| frame.len())..]; let frame = rustc_demangle::demangle(&frame[..frame.rfind('+').unwrap_or_else(|| frame.len())]); if i == 0 { lines.push(Text::styled( format!(" {}{}\n", frame, offset), Style::default(), ));
lines.push(Text::styled( format!(" {}{}\n", frame, offset), Style::default().modifier(Modifier::DIM), )); } } lines.push(Text::raw("\n")); } terminal.draw(|mut f| { let chunks = Layout::default() .direction(Direction::Vertical) .margin(2) .constraints([Constraint::Percentage(100)].as_ref()) .split(f.size()); Paragraph::new(lines.iter()) .block( Block::default() .borders(Borders::ALL) .title("Common thread fan-out points") .title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)), ) .render(&mut f, chunks[0]); })?; Ok(()) }
} else {
random_line_split
main.rs
use futures_util::future::Either; use futures_util::stream::StreamExt; use std::collections::{BTreeMap, HashMap}; use std::io::{self}; use structopt::StructOpt; use termion::raw::IntoRawMode; use tokio::prelude::*; use tui::backend::Backend; use tui::backend::TermionBackend; use tui::layout::{Constraint, Direction, Layout}; use tui::style::{Color, Modifier, Style}; use tui::widgets::{Block, Borders, Paragraph, Text, Widget}; use tui::Terminal; const DRAW_EVERY: std::time::Duration = std::time::Duration::from_millis(200); const WINDOW: std::time::Duration = std::time::Duration::from_secs(10); #[derive(Debug, StructOpt)] /// A live profile visualizer. /// /// Pipe the output of the appropriate `bpftrace` command into this program, and enjoy. /// Happy profiling! struct Opt { /// Treat input as a replay of a trace and emulate time accordingly. #[structopt(long)] replay: bool, } #[derive(Debug, Default)] struct Thread { window: BTreeMap<usize, String>, } fn main() -> Result<(), io::Error> { let opt = Opt::from_args(); if termion::is_tty(&io::stdin().lock()) { eprintln!("Don't type input to this program, that's silly."); return Ok(()); } let stdout = io::stdout().into_raw_mode()?; let backend = TermionBackend::new(stdout); let mut terminal = Terminal::new(backend)?; let mut tids = BTreeMap::new(); let mut inframe = None; let mut stack = String::new(); terminal.hide_cursor()?; terminal.clear()?; terminal.draw(|mut f| { let chunks = Layout::default() .direction(Direction::Vertical) .margin(2) .constraints([Constraint::Percentage(100)].as_ref()) .split(f.size()); Block::default() .borders(Borders::ALL) .title("Common thread fan-out points") .title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)) .render(&mut f, chunks[0]); })?; // a _super_ hacky way for us to get input from the TTY let tty = termion::get_tty()?; let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); std::thread::spawn(move || { use termion::input::TermRead; for key in tty.keys() { if let Err(_) = tx.send(key) { return; } } }); let mut rt = tokio::runtime::Runtime::new()?; rt.block_on(async move { let stdin = tokio::io::BufReader::new(tokio::io::stdin()); let lines = stdin.lines().map(Either::Left); let rx = rx.map(Either::Right); let mut input = futures_util::stream::select(lines, rx); let mut lastprint = 0; let mut lasttime = 0; while let Some(got) = input.next().await { match got { Either::Left(line) => { let line = line.unwrap(); if line.starts_with("Error") || line.starts_with("Attaching") { } else if!line.starts_with(' ') || line.is_empty() { if let Some((time, tid)) = inframe { // new frame starts, so finish the old one // skip empty stack frames if!stack.is_empty() { let nxt_stack = String::with_capacity(stack.capacity()); let mut stack = std::mem::replace(&mut stack, nxt_stack); // remove trailing ; let stackn = stack.len(); stack.truncate(stackn - 1); tids.entry(tid) .or_insert_with(Thread::default) .window .insert(time, stack); if opt.replay && lasttime!= 0 && time - lasttime > 1_000_000 { tokio::time::delay_for(std::time::Duration::from_nanos( (time - lasttime) as u64, )) .await; } lasttime = time; if std::time::Duration::from_nanos((time - lastprint) as u64) > DRAW_EVERY { draw(&mut terminal, &mut tids)?; lastprint = time; } } inframe = None; } if!line.is_empty() { // read time + tid let mut fields = line.split_whitespace(); let time = fields .next() .expect("no time given for frame") .parse::<usize>() .expect("invalid time"); let tid = fields .next() .expect("no tid given for frame") 
.parse::<usize>() .expect("invalid tid"); inframe = Some((time, tid)); } } else
} Either::Right(key) => { let key = key?; if let termion::event::Key::Char('q') = key { break; } } } } terminal.clear()?; Ok(()) }) } fn draw<B: Backend>( terminal: &mut Terminal<B>, threads: &mut BTreeMap<usize, Thread>, ) -> Result<(), io::Error> { // keep our window relatively short let mut latest = 0; for thread in threads.values() { if let Some(&last) = thread.window.keys().next_back() { latest = std::cmp::max(latest, last); } } if latest > WINDOW.as_nanos() as usize { for thread in threads.values_mut() { // trim to last 5 seconds thread.window = thread .window .split_off(&(latest - WINDOW.as_nanos() as usize)); } } // now only reading let threads = &*threads; let mut lines = Vec::new(); let mut hits = HashMap::new(); let mut maxes = BTreeMap::new(); for (_, thread) in threads { // add up across the window let mut max: Option<(&str, usize)> = None; for (&time, stack) in &thread.window { latest = std::cmp::max(latest, time); let mut at = stack.len(); while let Some(stack_start) = stack[..at].rfind(';') { at = stack_start; let stack = &stack[at + 1..]; let count = hits.entry(stack).or_insert(0); *count += 1; if let Some((_, max_count)) = max { if *count >= max_count { max = Some((stack, *count)); } } else { max = Some((stack, *count)); } } } if let Some((stack, count)) = max { let e = maxes.entry(stack).or_insert((0, 0)); e.0 += 1; e.1 += count; } hits.clear(); } if maxes.is_empty() { return Ok(()); } let max = *maxes.values().map(|(_, count)| count).max().unwrap() as f64; // sort by where most threads are let mut maxes: Vec<_> = maxes.into_iter().collect(); maxes.sort_by_key(|(_, (nthreads, _))| *nthreads); for (stack, (nthreads, count)) in maxes.iter().rev() { let count = *count; let nthreads = *nthreads; if stack.find(';').is_none() { // this thread just shares the root frame continue; } if count == 1 { // this thread only has one sample ever, let's reduce noise... continue; } let red = (128.0 * count as f64 / max) as u8; let color = Color::Rgb(255, 128 - red, 128 - red); if nthreads == 1 { lines.push(Text::styled( format!("A thread fanned out from here {} times\n", count), Style::default().modifier(Modifier::BOLD).fg(color), )); } else { lines.push(Text::styled( format!( "{} threads fanned out from here {} times\n", nthreads, count ), Style::default().modifier(Modifier::BOLD).fg(color), )); } for (i, frame) in stack.split(';').enumerate() { // https://github.com/alexcrichton/rustc-demangle/issues/34 let offset = &frame[frame.rfind('+').unwrap_or_else(|| frame.len())..]; let frame = rustc_demangle::demangle(&frame[..frame.rfind('+').unwrap_or_else(|| frame.len())]); if i == 0 { lines.push(Text::styled( format!(" {}{}\n", frame, offset), Style::default(), )); } else { lines.push(Text::styled( format!(" {}{}\n", frame, offset), Style::default().modifier(Modifier::DIM), )); } } lines.push(Text::raw("\n")); } terminal.draw(|mut f| { let chunks = Layout::default() .direction(Direction::Vertical) .margin(2) .constraints([Constraint::Percentage(100)].as_ref()) .split(f.size()); Paragraph::new(lines.iter()) .block( Block::default() .borders(Borders::ALL) .title("Common thread fan-out points") .title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)), ) .render(&mut f, chunks[0]); })?; Ok(()) }
{
    assert!(inframe.is_some());
    stack.push_str(line.trim());
    stack.push(';');
}
conditional_block
main.rs
use futures_util::future::Either; use futures_util::stream::StreamExt; use std::collections::{BTreeMap, HashMap}; use std::io::{self}; use structopt::StructOpt; use termion::raw::IntoRawMode; use tokio::prelude::*; use tui::backend::Backend; use tui::backend::TermionBackend; use tui::layout::{Constraint, Direction, Layout}; use tui::style::{Color, Modifier, Style}; use tui::widgets::{Block, Borders, Paragraph, Text, Widget}; use tui::Terminal; const DRAW_EVERY: std::time::Duration = std::time::Duration::from_millis(200); const WINDOW: std::time::Duration = std::time::Duration::from_secs(10); #[derive(Debug, StructOpt)] /// A live profile visualizer. /// /// Pipe the output of the appropriate `bpftrace` command into this program, and enjoy. /// Happy profiling! struct Opt { /// Treat input as a replay of a trace and emulate time accordingly. #[structopt(long)] replay: bool, } #[derive(Debug, Default)] struct Thread { window: BTreeMap<usize, String>, } fn main() -> Result<(), io::Error>
.direction(Direction::Vertical) .margin(2) .constraints([Constraint::Percentage(100)].as_ref()) .split(f.size()); Block::default() .borders(Borders::ALL) .title("Common thread fan-out points") .title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)) .render(&mut f, chunks[0]); })?; // a _super_ hacky way for us to get input from the TTY let tty = termion::get_tty()?; let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); std::thread::spawn(move || { use termion::input::TermRead; for key in tty.keys() { if let Err(_) = tx.send(key) { return; } } }); let mut rt = tokio::runtime::Runtime::new()?; rt.block_on(async move { let stdin = tokio::io::BufReader::new(tokio::io::stdin()); let lines = stdin.lines().map(Either::Left); let rx = rx.map(Either::Right); let mut input = futures_util::stream::select(lines, rx); let mut lastprint = 0; let mut lasttime = 0; while let Some(got) = input.next().await { match got { Either::Left(line) => { let line = line.unwrap(); if line.starts_with("Error") || line.starts_with("Attaching") { } else if!line.starts_with(' ') || line.is_empty() { if let Some((time, tid)) = inframe { // new frame starts, so finish the old one // skip empty stack frames if!stack.is_empty() { let nxt_stack = String::with_capacity(stack.capacity()); let mut stack = std::mem::replace(&mut stack, nxt_stack); // remove trailing ; let stackn = stack.len(); stack.truncate(stackn - 1); tids.entry(tid) .or_insert_with(Thread::default) .window .insert(time, stack); if opt.replay && lasttime!= 0 && time - lasttime > 1_000_000 { tokio::time::delay_for(std::time::Duration::from_nanos( (time - lasttime) as u64, )) .await; } lasttime = time; if std::time::Duration::from_nanos((time - lastprint) as u64) > DRAW_EVERY { draw(&mut terminal, &mut tids)?; lastprint = time; } } inframe = None; } if!line.is_empty() { // read time + tid let mut fields = line.split_whitespace(); let time = fields .next() .expect("no time given for frame") .parse::<usize>() .expect("invalid time"); let tid = fields .next() .expect("no tid given for frame") .parse::<usize>() .expect("invalid tid"); inframe = Some((time, tid)); } } else { assert!(inframe.is_some()); stack.push_str(line.trim()); stack.push(';'); } } Either::Right(key) => { let key = key?; if let termion::event::Key::Char('q') = key { break; } } } } terminal.clear()?; Ok(()) }) } fn draw<B: Backend>( terminal: &mut Terminal<B>, threads: &mut BTreeMap<usize, Thread>, ) -> Result<(), io::Error> { // keep our window relatively short let mut latest = 0; for thread in threads.values() { if let Some(&last) = thread.window.keys().next_back() { latest = std::cmp::max(latest, last); } } if latest > WINDOW.as_nanos() as usize { for thread in threads.values_mut() { // trim to last 5 seconds thread.window = thread .window .split_off(&(latest - WINDOW.as_nanos() as usize)); } } // now only reading let threads = &*threads; let mut lines = Vec::new(); let mut hits = HashMap::new(); let mut maxes = BTreeMap::new(); for (_, thread) in threads { // add up across the window let mut max: Option<(&str, usize)> = None; for (&time, stack) in &thread.window { latest = std::cmp::max(latest, time); let mut at = stack.len(); while let Some(stack_start) = stack[..at].rfind(';') { at = stack_start; let stack = &stack[at + 1..]; let count = hits.entry(stack).or_insert(0); *count += 1; if let Some((_, max_count)) = max { if *count >= max_count { max = Some((stack, *count)); } } else { max = Some((stack, *count)); } } } if let Some((stack, count)) = max { let e = 
maxes.entry(stack).or_insert((0, 0)); e.0 += 1; e.1 += count; } hits.clear(); } if maxes.is_empty() { return Ok(()); } let max = *maxes.values().map(|(_, count)| count).max().unwrap() as f64; // sort by where most threads are let mut maxes: Vec<_> = maxes.into_iter().collect(); maxes.sort_by_key(|(_, (nthreads, _))| *nthreads); for (stack, (nthreads, count)) in maxes.iter().rev() { let count = *count; let nthreads = *nthreads; if stack.find(';').is_none() { // this thread just shares the root frame continue; } if count == 1 { // this thread only has one sample ever, let's reduce noise... continue; } let red = (128.0 * count as f64 / max) as u8; let color = Color::Rgb(255, 128 - red, 128 - red); if nthreads == 1 { lines.push(Text::styled( format!("A thread fanned out from here {} times\n", count), Style::default().modifier(Modifier::BOLD).fg(color), )); } else { lines.push(Text::styled( format!( "{} threads fanned out from here {} times\n", nthreads, count ), Style::default().modifier(Modifier::BOLD).fg(color), )); } for (i, frame) in stack.split(';').enumerate() { // https://github.com/alexcrichton/rustc-demangle/issues/34 let offset = &frame[frame.rfind('+').unwrap_or_else(|| frame.len())..]; let frame = rustc_demangle::demangle(&frame[..frame.rfind('+').unwrap_or_else(|| frame.len())]); if i == 0 { lines.push(Text::styled( format!(" {}{}\n", frame, offset), Style::default(), )); } else { lines.push(Text::styled( format!(" {}{}\n", frame, offset), Style::default().modifier(Modifier::DIM), )); } } lines.push(Text::raw("\n")); } terminal.draw(|mut f| { let chunks = Layout::default() .direction(Direction::Vertical) .margin(2) .constraints([Constraint::Percentage(100)].as_ref()) .split(f.size()); Paragraph::new(lines.iter()) .block( Block::default() .borders(Borders::ALL) .title("Common thread fan-out points") .title_style(Style::default().fg(Color::Magenta).modifier(Modifier::BOLD)), ) .render(&mut f, chunks[0]); })?; Ok(()) }
{
    let opt = Opt::from_args();
    if termion::is_tty(&io::stdin().lock()) {
        eprintln!("Don't type input to this program, that's silly.");
        return Ok(());
    }
    let stdout = io::stdout().into_raw_mode()?;
    let backend = TermionBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    let mut tids = BTreeMap::new();
    let mut inframe = None;
    let mut stack = String::new();
    terminal.hide_cursor()?;
    terminal.clear()?;
    terminal.draw(|mut f| {
        let chunks = Layout::default()
identifier_body
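With this row, all three fim_type classes visible in the preview have appeared (the schema above lists four): random_line_split masks a span beginning at an arbitrary line boundary (the `} else {` middle), conditional_block masks the body of a branch (the assert!/push_str block), and identifier_body masks the body of a named item (here the body of fn main). A toy illustration of where each split type would cut, on a hypothetical function that is not part of the dataset:

fn clamp_non_negative(x: i32) -> i32 { // identifier_body: middle = the entire { ... } body
    if x < 0 {                         // conditional_block: middle = `{ return 0; }`
        return 0;
    }
    x                                  // random_line_split: middle may begin at any line, e.g. here
}

fn main() {
    assert_eq!(clamp_non_negative(-3), 0);
    assert_eq!(clamp_non_negative(7), 7);
}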
sql_utils.rs
//! Module for SQL Utility functions use diesel::prelude::*; use std::{ borrow::Cow, fs::File, io::BufReader, path::Path, }; use crate::error::IOErrorToError; use super::archive::import::{ detect_archive_type, import_ytdlr_json_archive, ArchiveType, ImportProgress, }; /// All migrations from "libytdlr/migrations" embedded into the binary pub const MIGRATIONS: diesel_migrations::EmbeddedMigrations = diesel_migrations::embed_migrations!(); /// Open a SQLite Connection for `sqlite_path` and apply sqlite migrations /// does not migrate archive formats, use [migrate_and_connect] instead pub fn sqlite_connect<P: AsRef<Path>>(sqlite_path: P) -> Result<SqliteConnection, crate::Error> { // having to convert the path to "str" because diesel (and underlying sqlite library) only accept strings return match sqlite_path.as_ref().to_str() { Some(path) => { let mut connection = SqliteConnection::establish(path)?; apply_sqlite_migrations(&mut connection)?; return Ok(connection); }, None => Err(crate::Error::other(format!("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy): \"{}\"", sqlite_path.as_ref().to_string_lossy()))), }; } /// Apply all (up) migrations to a SQLite Database #[inline] fn apply_sqlite_migrations(connection: &mut SqliteConnection) -> Result<(), crate::Error> { let applied = diesel_migrations::MigrationHarness::run_pending_migrations(connection, MIGRATIONS) .map_err(|err| return crate::Error::other(format!("Applying SQL Migrations Errored! Error:\n{err}")))?; debug!("Applied Migrations: {:?}", applied); return Ok(()); } /// Check if the input path is a sql database, if not migrate to sql and return new path and open connection /// Parameter `pgcb` will be used when migration will be applied /// /// This function is intended to be used over [`sqlite_connect`] in all non-test cases pub fn migrate_and_connect<S: FnMut(ImportProgress)>( archive_path: &Path, pgcb: S, ) -> Result<(Cow<Path>, SqliteConnection), crate::Error> { // early return in case the file does not actually exist if!archive_path.exists() { return Ok((archive_path.into(), sqlite_connect(archive_path)?)); } let migrate_to_path = { let mut tmp = archive_path.to_path_buf(); tmp.set_extension("db"); tmp }; // check if the "migrate-to" path already exists, and use that directly instead or error of already existing if migrate_to_path.exists() { if!migrate_to_path.is_file() { return Err(crate::Error::not_a_file( "Migrate-To Path exists but is not a file!", migrate_to_path, )); } let mut sqlite_path_reader = BufReader::new(File::open(&migrate_to_path).attach_path_err(&migrate_to_path)?); return Ok( match detect_archive_type(&mut sqlite_path_reader)? { ArchiveType::Unknown => return Err(crate::Error::other(format!("Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::JSON => return Err(crate::Error::other(format!("Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::SQLite => { // this has to be done before, because the following ".into" call will move the value let connection = sqlite_connect(&migrate_to_path)?; (migrate_to_path.into(), connection) }, }, ); } let mut input_archive_reader = BufReader::new(File::open(archive_path).attach_path_err(archive_path)?); return Ok(match detect_archive_type(&mut input_archive_reader)? 
{ ArchiveType::Unknown => { return Err(crate::Error::other( "Unknown Archive type to migrate, maybe try importing", )) }, ArchiveType::JSON => { debug!("Applying Migration from JSON to SQLite"); // handle case where the input path matches the changed path if migrate_to_path == archive_path { return Err(crate::Error::other( "Migration cannot be done: Input path matches output path (setting extension to \".db\")", )); } let mut connection = sqlite_connect(&migrate_to_path)?; import_ytdlr_json_archive(&mut input_archive_reader, &mut connection, pgcb)?; debug!("Migration from JSON to SQLite done"); (migrate_to_path.into(), connection) }, ArchiveType::SQLite => (archive_path.into(), sqlite_connect(archive_path)?), }); } #[cfg(test)] mod test { use super::*; use tempfile::{ Builder as TempBuilder, TempDir, }; fn create_connection() -> (SqliteConnection, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqlite-") .tempdir() .expect("Expected a temp dir to be created"); // chrono is used to create a different database for each thread let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); // remove if already exists to have a clean test if path.exists() { std::fs::remove_file(&path).expect("Expected the file to be removed"); } return ( crate::main::sql_utils::sqlite_connect(&path).expect("Expected SQLite to successfully start"), testdir, ); } mod connect { use super::*; use std::{ ffi::OsString, os::unix::prelude::OsStringExt, }; #[test] fn test_connect() { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteConnect-") .tempdir() .expect("Expected a temp dir to be created"); let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); let connection = sqlite_connect(path); assert!(connection.is_ok()); } // it seems like non-utf8 paths are a pain to create os-independently, so it is just linux where the following works #[cfg(target_os = "linux")] #[test] fn test_connect_notutf8() { let path = OsString::from_vec(vec![255]); let err = sqlite_connect(path); assert!(err.is_err()); // Not using "unwrap_err", because of https://github.com/diesel-rs/diesel/discussions/3124 let err = match err { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; // the following is only a "contains", because of the abitrary path that could be after it assert!(err.to_string().contains("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy):")); } } mod apply_sqlite_migrations { use super::*; #[test] fn test_all_migrations_applied() { let (mut connection, _tempdir) = create_connection(); let res = diesel_migrations::MigrationHarness::has_pending_migration(&mut connection, MIGRATIONS); assert!(res.is_ok()); let res = res.unwrap(); assert!(!res); } } mod migrate_and_connect { use std::{ ffi::OsStr, io::{ BufWriter, Write, }, ops::Deref, path::PathBuf, sync::RwLock, }; use super::*; fn gen_archive_path<P: AsRef<OsStr>>(extension: P) -> (PathBuf, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteMigrate-") .tempdir() .expect("Expected a temp dir to be created"); let mut path = testdir.as_ref().join(format!("{}-gen_archive", uuid::Uuid::new_v4())); path.set_extension(extension); println!("generated: {}", path.to_string_lossy()); // clear generated path clear_path(&path); { let mut migrate_to_path = path.clone(); 
migrate_to_path.set_extension("db"); // clear migrate_to_path clear_path(migrate_to_path); } return (path, testdir); } fn clear_path<P: AsRef<Path>>(path: P) { let path = path.as_ref(); if path.exists() { std::fs::remove_file(path).expect("Expected file to be removed"); } } fn create_dir_all_parent<P: AsRef<Path>>(path: P) { let path = path.as_ref(); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); } fn write_file_with_content<S: AsRef<str>, P: AsRef<OsStr>>(input: S, extension: P) -> (PathBuf, TempDir) { let (path, tempdir) = gen_archive_path(extension); create_dir_all_parent(&path); let mut file = BufWriter::new(std::fs::File::create(&path).expect("Expected file to be created")); file.write_all(input.as_ref().as_bytes()) .expect("Expected successfull file write"); return (path, tempdir); } /// Test utility function for easy callbacks fn callback_counter(c: &RwLock<Vec<ImportProgress>>) -> impl FnMut(ImportProgress) + '_
#[test] fn test_input_unknown_archive() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "unknown_ytdl"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert!(res .to_string() .contains("Unknown Archive type to migrate, maybe try importing")); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_sqlite_archive() { let (path, _tempdir) = gen_archive_path("db_sqlite"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_json_archive() { let string0 = r#" { "version": "0.1.0", "videos": [ { "id": "____________", "provider": "youtube", "dlFinished": true, "editAsked": true, "fileName": "someFile1.mp3" }, { "id": "------------", "provider": "youtube", "dlFinished": false, "editAsked": true, "fileName": "someFile2.mp3" }, { "id": "aaaaaaaaaaaa", "provider": "youtube", "dlFinished": true, "editAsked": false, "fileName": "someFile3.mp3" }, { "id": "0000000000", "provider": "soundcloud", "dlFinished": true, "editAsked": true, "fileName": "someFile4.mp3" } ] } "#; let (path, _tempdir) = write_file_with_content(string0, "json_json"); let expected_path = { let mut tmp = path.clone(); tmp.set_extension("db"); tmp }; clear_path(&expected_path); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&expected_path, res.0.as_ref()); assert_eq!( &vec![ ImportProgress::Starting, ImportProgress::SizeHint(4), // Size Hint of 4, because of a intermediate array length // index start at 0, thanks to json array index ImportProgress::Increase(1, 0), ImportProgress::Increase(1, 1), ImportProgress::Increase(1, 2), ImportProgress::Increase(1, 3), ImportProgress::Finished(4) ], pgcounter.read().expect("failed to read").deref() ); } #[test] fn test_to_existing_json() { let string0 = r#" { } "#; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!("Other: Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! 
Path: \"{}\"", path.to_string_lossy()) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_unknown() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!( "Other: Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", path.to_string_lossy() ) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_sqlite() { let (path, _tempdir) = gen_archive_path("db"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } } }
{
    return |imp| c.write().expect("write failed").push(imp);
}
identifier_body
sql_utils.rs
//! Module for SQL Utility functions use diesel::prelude::*; use std::{ borrow::Cow, fs::File, io::BufReader, path::Path, }; use crate::error::IOErrorToError; use super::archive::import::{ detect_archive_type, import_ytdlr_json_archive, ArchiveType, ImportProgress, }; /// All migrations from "libytdlr/migrations" embedded into the binary pub const MIGRATIONS: diesel_migrations::EmbeddedMigrations = diesel_migrations::embed_migrations!(); /// Open a SQLite Connection for `sqlite_path` and apply sqlite migrations /// does not migrate archive formats, use [migrate_and_connect] instead pub fn sqlite_connect<P: AsRef<Path>>(sqlite_path: P) -> Result<SqliteConnection, crate::Error> { // having to convert the path to "str" because diesel (and underlying sqlite library) only accept strings return match sqlite_path.as_ref().to_str() { Some(path) => { let mut connection = SqliteConnection::establish(path)?; apply_sqlite_migrations(&mut connection)?; return Ok(connection); }, None => Err(crate::Error::other(format!("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy): \"{}\"", sqlite_path.as_ref().to_string_lossy()))), }; } /// Apply all (up) migrations to a SQLite Database #[inline] fn apply_sqlite_migrations(connection: &mut SqliteConnection) -> Result<(), crate::Error> { let applied = diesel_migrations::MigrationHarness::run_pending_migrations(connection, MIGRATIONS) .map_err(|err| return crate::Error::other(format!("Applying SQL Migrations Errored! Error:\n{err}")))?; debug!("Applied Migrations: {:?}", applied); return Ok(()); } /// Check if the input path is a sql database, if not migrate to sql and return new path and open connection /// Parameter `pgcb` will be used when migration will be applied /// /// This function is intended to be used over [`sqlite_connect`] in all non-test cases pub fn migrate_and_connect<S: FnMut(ImportProgress)>( archive_path: &Path, pgcb: S, ) -> Result<(Cow<Path>, SqliteConnection), crate::Error> { // early return in case the file does not actually exist if!archive_path.exists() { return Ok((archive_path.into(), sqlite_connect(archive_path)?)); } let migrate_to_path = { let mut tmp = archive_path.to_path_buf(); tmp.set_extension("db"); tmp }; // check if the "migrate-to" path already exists, and use that directly instead or error of already existing if migrate_to_path.exists() { if!migrate_to_path.is_file() { return Err(crate::Error::not_a_file( "Migrate-To Path exists but is not a file!", migrate_to_path, )); } let mut sqlite_path_reader = BufReader::new(File::open(&migrate_to_path).attach_path_err(&migrate_to_path)?); return Ok( match detect_archive_type(&mut sqlite_path_reader)? { ArchiveType::Unknown => return Err(crate::Error::other(format!("Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::JSON => return Err(crate::Error::other(format!("Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::SQLite => { // this has to be done before, because the following ".into" call will move the value let connection = sqlite_connect(&migrate_to_path)?; (migrate_to_path.into(), connection) }, }, ); } let mut input_archive_reader = BufReader::new(File::open(archive_path).attach_path_err(archive_path)?); return Ok(match detect_archive_type(&mut input_archive_reader)? 
{ ArchiveType::Unknown => { return Err(crate::Error::other( "Unknown Archive type to migrate, maybe try importing", )) }, ArchiveType::JSON => { debug!("Applying Migration from JSON to SQLite"); // handle case where the input path matches the changed path if migrate_to_path == archive_path { return Err(crate::Error::other( "Migration cannot be done: Input path matches output path (setting extension to \".db\")", )); } let mut connection = sqlite_connect(&migrate_to_path)?; import_ytdlr_json_archive(&mut input_archive_reader, &mut connection, pgcb)?; debug!("Migration from JSON to SQLite done"); (migrate_to_path.into(), connection) }, ArchiveType::SQLite => (archive_path.into(), sqlite_connect(archive_path)?), }); } #[cfg(test)] mod test { use super::*; use tempfile::{ Builder as TempBuilder, TempDir, }; fn create_connection() -> (SqliteConnection, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqlite-") .tempdir() .expect("Expected a temp dir to be created"); // chrono is used to create a different database for each thread let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); // remove if already exists to have a clean test if path.exists() { std::fs::remove_file(&path).expect("Expected the file to be removed"); } return ( crate::main::sql_utils::sqlite_connect(&path).expect("Expected SQLite to successfully start"), testdir, ); } mod connect { use super::*; use std::{ ffi::OsString, os::unix::prelude::OsStringExt, }; #[test] fn test_connect() { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteConnect-") .tempdir() .expect("Expected a temp dir to be created"); let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); let connection = sqlite_connect(path); assert!(connection.is_ok()); } // it seems like non-utf8 paths are a pain to create os-independently, so it is just linux where the following works #[cfg(target_os = "linux")] #[test] fn test_connect_notutf8() { let path = OsString::from_vec(vec![255]); let err = sqlite_connect(path); assert!(err.is_err()); // Not using "unwrap_err", because of https://github.com/diesel-rs/diesel/discussions/3124 let err = match err { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; // the following is only a "contains", because of the abitrary path that could be after it assert!(err.to_string().contains("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy):")); } } mod apply_sqlite_migrations { use super::*; #[test] fn test_all_migrations_applied() { let (mut connection, _tempdir) = create_connection(); let res = diesel_migrations::MigrationHarness::has_pending_migration(&mut connection, MIGRATIONS); assert!(res.is_ok()); let res = res.unwrap(); assert!(!res); } } mod migrate_and_connect { use std::{ ffi::OsStr, io::{ BufWriter, Write, }, ops::Deref, path::PathBuf, sync::RwLock, }; use super::*; fn gen_archive_path<P: AsRef<OsStr>>(extension: P) -> (PathBuf, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteMigrate-") .tempdir() .expect("Expected a temp dir to be created"); let mut path = testdir.as_ref().join(format!("{}-gen_archive", uuid::Uuid::new_v4())); path.set_extension(extension); println!("generated: {}", path.to_string_lossy()); // clear generated path clear_path(&path); { let mut migrate_to_path = path.clone(); 
migrate_to_path.set_extension("db"); // clear migrate_to_path clear_path(migrate_to_path); } return (path, testdir); } fn clear_path<P: AsRef<Path>>(path: P) { let path = path.as_ref(); if path.exists() { std::fs::remove_file(path).expect("Expected file to be removed"); } } fn create_dir_all_parent<P: AsRef<Path>>(path: P) { let path = path.as_ref(); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); } fn write_file_with_content<S: AsRef<str>, P: AsRef<OsStr>>(input: S, extension: P) -> (PathBuf, TempDir) { let (path, tempdir) = gen_archive_path(extension); create_dir_all_parent(&path); let mut file = BufWriter::new(std::fs::File::create(&path).expect("Expected file to be created")); file.write_all(input.as_ref().as_bytes()) .expect("Expected successfull file write"); return (path, tempdir); } /// Test utility function for easy callbacks fn callback_counter(c: &RwLock<Vec<ImportProgress>>) -> impl FnMut(ImportProgress) + '_ { return |imp| c.write().expect("write failed").push(imp); } #[test] fn test_input_unknown_archive() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "unknown_ytdl"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter));
assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert!(res .to_string() .contains("Unknown Archive type to migrate, maybe try importing")); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_sqlite_archive() { let (path, _tempdir) = gen_archive_path("db_sqlite"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_json_archive() { let string0 = r#" { "version": "0.1.0", "videos": [ { "id": "____________", "provider": "youtube", "dlFinished": true, "editAsked": true, "fileName": "someFile1.mp3" }, { "id": "------------", "provider": "youtube", "dlFinished": false, "editAsked": true, "fileName": "someFile2.mp3" }, { "id": "aaaaaaaaaaaa", "provider": "youtube", "dlFinished": true, "editAsked": false, "fileName": "someFile3.mp3" }, { "id": "0000000000", "provider": "soundcloud", "dlFinished": true, "editAsked": true, "fileName": "someFile4.mp3" } ] } "#; let (path, _tempdir) = write_file_with_content(string0, "json_json"); let expected_path = { let mut tmp = path.clone(); tmp.set_extension("db"); tmp }; clear_path(&expected_path); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&expected_path, res.0.as_ref()); assert_eq!( &vec![ ImportProgress::Starting, ImportProgress::SizeHint(4), // Size Hint of 4, because of a intermediate array length // index start at 0, thanks to json array index ImportProgress::Increase(1, 0), ImportProgress::Increase(1, 1), ImportProgress::Increase(1, 2), ImportProgress::Increase(1, 3), ImportProgress::Finished(4) ], pgcounter.read().expect("failed to read").deref() ); } #[test] fn test_to_existing_json() { let string0 = r#" { } "#; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!("Other: Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", path.to_string_lossy()) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_unknown() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!( "Other: Migrate-To Path already exists, but is of unknown type! 
Path: \"{}\"", path.to_string_lossy() ) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_sqlite() { let (path, _tempdir) = gen_archive_path("db"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } } }
random_line_split
sql_utils.rs
//! Module for SQL Utility functions use diesel::prelude::*; use std::{ borrow::Cow, fs::File, io::BufReader, path::Path, }; use crate::error::IOErrorToError; use super::archive::import::{ detect_archive_type, import_ytdlr_json_archive, ArchiveType, ImportProgress, }; /// All migrations from "libytdlr/migrations" embedded into the binary pub const MIGRATIONS: diesel_migrations::EmbeddedMigrations = diesel_migrations::embed_migrations!(); /// Open a SQLite Connection for `sqlite_path` and apply sqlite migrations /// does not migrate archive formats, use [migrate_and_connect] instead pub fn sqlite_connect<P: AsRef<Path>>(sqlite_path: P) -> Result<SqliteConnection, crate::Error> { // having to convert the path to "str" because diesel (and underlying sqlite library) only accept strings return match sqlite_path.as_ref().to_str() { Some(path) => { let mut connection = SqliteConnection::establish(path)?; apply_sqlite_migrations(&mut connection)?; return Ok(connection); }, None => Err(crate::Error::other(format!("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy): \"{}\"", sqlite_path.as_ref().to_string_lossy()))), }; } /// Apply all (up) migrations to a SQLite Database #[inline] fn apply_sqlite_migrations(connection: &mut SqliteConnection) -> Result<(), crate::Error> { let applied = diesel_migrations::MigrationHarness::run_pending_migrations(connection, MIGRATIONS) .map_err(|err| return crate::Error::other(format!("Applying SQL Migrations Errored! Error:\n{err}")))?; debug!("Applied Migrations: {:?}", applied); return Ok(()); } /// Check if the input path is a sql database, if not migrate to sql and return new path and open connection /// Parameter `pgcb` will be used when migration will be applied /// /// This function is intended to be used over [`sqlite_connect`] in all non-test cases pub fn migrate_and_connect<S: FnMut(ImportProgress)>( archive_path: &Path, pgcb: S, ) -> Result<(Cow<Path>, SqliteConnection), crate::Error> { // early return in case the file does not actually exist if!archive_path.exists() { return Ok((archive_path.into(), sqlite_connect(archive_path)?)); } let migrate_to_path = { let mut tmp = archive_path.to_path_buf(); tmp.set_extension("db"); tmp }; // check if the "migrate-to" path already exists, and use that directly instead or error of already existing if migrate_to_path.exists() { if!migrate_to_path.is_file() { return Err(crate::Error::not_a_file( "Migrate-To Path exists but is not a file!", migrate_to_path, )); } let mut sqlite_path_reader = BufReader::new(File::open(&migrate_to_path).attach_path_err(&migrate_to_path)?); return Ok( match detect_archive_type(&mut sqlite_path_reader)? { ArchiveType::Unknown => return Err(crate::Error::other(format!("Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::JSON => return Err(crate::Error::other(format!("Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::SQLite => { // this has to be done before, because the following ".into" call will move the value let connection = sqlite_connect(&migrate_to_path)?; (migrate_to_path.into(), connection) }, }, ); } let mut input_archive_reader = BufReader::new(File::open(archive_path).attach_path_err(archive_path)?); return Ok(match detect_archive_type(&mut input_archive_reader)? { ArchiveType::Unknown =>
, ArchiveType::JSON => { debug!("Applying Migration from JSON to SQLite"); // handle case where the input path matches the changed path if migrate_to_path == archive_path { return Err(crate::Error::other( "Migration cannot be done: Input path matches output path (setting extension to \".db\")", )); } let mut connection = sqlite_connect(&migrate_to_path)?; import_ytdlr_json_archive(&mut input_archive_reader, &mut connection, pgcb)?; debug!("Migration from JSON to SQLite done"); (migrate_to_path.into(), connection) }, ArchiveType::SQLite => (archive_path.into(), sqlite_connect(archive_path)?), }); } #[cfg(test)] mod test { use super::*; use tempfile::{ Builder as TempBuilder, TempDir, }; fn create_connection() -> (SqliteConnection, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqlite-") .tempdir() .expect("Expected a temp dir to be created"); // chrono is used to create a different database for each thread let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); // remove if already exists to have a clean test if path.exists() { std::fs::remove_file(&path).expect("Expected the file to be removed"); } return ( crate::main::sql_utils::sqlite_connect(&path).expect("Expected SQLite to successfully start"), testdir, ); } mod connect { use super::*; use std::{ ffi::OsString, os::unix::prelude::OsStringExt, }; #[test] fn test_connect() { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteConnect-") .tempdir() .expect("Expected a temp dir to be created"); let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); let connection = sqlite_connect(path); assert!(connection.is_ok()); } // it seems like non-utf8 paths are a pain to create os-independently, so it is just linux where the following works #[cfg(target_os = "linux")] #[test] fn test_connect_notutf8() { let path = OsString::from_vec(vec![255]); let err = sqlite_connect(path); assert!(err.is_err()); // Not using "unwrap_err", because of https://github.com/diesel-rs/diesel/discussions/3124 let err = match err { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; // the following is only a "contains", because of the abitrary path that could be after it assert!(err.to_string().contains("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy):")); } } mod apply_sqlite_migrations { use super::*; #[test] fn test_all_migrations_applied() { let (mut connection, _tempdir) = create_connection(); let res = diesel_migrations::MigrationHarness::has_pending_migration(&mut connection, MIGRATIONS); assert!(res.is_ok()); let res = res.unwrap(); assert!(!res); } } mod migrate_and_connect { use std::{ ffi::OsStr, io::{ BufWriter, Write, }, ops::Deref, path::PathBuf, sync::RwLock, }; use super::*; fn gen_archive_path<P: AsRef<OsStr>>(extension: P) -> (PathBuf, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteMigrate-") .tempdir() .expect("Expected a temp dir to be created"); let mut path = testdir.as_ref().join(format!("{}-gen_archive", uuid::Uuid::new_v4())); path.set_extension(extension); println!("generated: {}", path.to_string_lossy()); // clear generated path clear_path(&path); { let mut migrate_to_path = path.clone(); migrate_to_path.set_extension("db"); // clear migrate_to_path clear_path(migrate_to_path); } return (path, testdir); } fn clear_path<P: 
AsRef<Path>>(path: P) { let path = path.as_ref(); if path.exists() { std::fs::remove_file(path).expect("Expected file to be removed"); } } fn create_dir_all_parent<P: AsRef<Path>>(path: P) { let path = path.as_ref(); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); } fn write_file_with_content<S: AsRef<str>, P: AsRef<OsStr>>(input: S, extension: P) -> (PathBuf, TempDir) { let (path, tempdir) = gen_archive_path(extension); create_dir_all_parent(&path); let mut file = BufWriter::new(std::fs::File::create(&path).expect("Expected file to be created")); file.write_all(input.as_ref().as_bytes()) .expect("Expected successfull file write"); return (path, tempdir); } /// Test utility function for easy callbacks fn callback_counter(c: &RwLock<Vec<ImportProgress>>) -> impl FnMut(ImportProgress) + '_ { return |imp| c.write().expect("write failed").push(imp); } #[test] fn test_input_unknown_archive() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "unknown_ytdl"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert!(res .to_string() .contains("Unknown Archive type to migrate, maybe try importing")); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_sqlite_archive() { let (path, _tempdir) = gen_archive_path("db_sqlite"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_json_archive() { let string0 = r#" { "version": "0.1.0", "videos": [ { "id": "____________", "provider": "youtube", "dlFinished": true, "editAsked": true, "fileName": "someFile1.mp3" }, { "id": "------------", "provider": "youtube", "dlFinished": false, "editAsked": true, "fileName": "someFile2.mp3" }, { "id": "aaaaaaaaaaaa", "provider": "youtube", "dlFinished": true, "editAsked": false, "fileName": "someFile3.mp3" }, { "id": "0000000000", "provider": "soundcloud", "dlFinished": true, "editAsked": true, "fileName": "someFile4.mp3" } ] } "#; let (path, _tempdir) = write_file_with_content(string0, "json_json"); let expected_path = { let mut tmp = path.clone(); tmp.set_extension("db"); tmp }; clear_path(&expected_path); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&expected_path, res.0.as_ref()); assert_eq!( &vec![ ImportProgress::Starting, ImportProgress::SizeHint(4), // Size Hint of 4, because of a intermediate array length // index start at 0, thanks to json array index ImportProgress::Increase(1, 0), ImportProgress::Increase(1, 1), ImportProgress::Increase(1, 2), ImportProgress::Increase(1, 3), ImportProgress::Finished(4) ], pgcounter.read().expect("failed to read").deref() ); } #[test] fn test_to_existing_json() { let string0 = r#" { } "#; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = 
RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!("Other: Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", path.to_string_lossy()) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_unknown() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!( "Other: Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", path.to_string_lossy() ) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_sqlite() { let (path, _tempdir) = gen_archive_path("db"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } } }
{ return Err(crate::Error::other( "Unknown Archive type to migrate, maybe try importing", )) }
conditional_block
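The `conditional_block` middle above fills in one arm of a detect-then-dispatch match over archive types. A minimal, self-contained sketch of that pattern (hypothetical detection logic; the real `detect_archive_type` lives in `super::archive::import` and is not shown in this record): classify the archive by peeking at its first bytes, then branch on the result.

use std::io::{BufRead, Cursor};

#[derive(Debug, PartialEq)]
enum ArchiveType {
    Unknown,
    JSON,
    SQLite,
}

// Classify by the first significant bytes: SQLite database files begin
// with the magic string "SQLite format 3"; a JSON archive's first
// non-whitespace byte is '{'; everything else is Unknown.
fn detect_archive_type<R: BufRead>(reader: &mut R) -> std::io::Result<ArchiveType> {
    let buf = reader.fill_buf()?;
    if buf.starts_with(b"SQLite format 3") {
        return Ok(ArchiveType::SQLite);
    }
    match buf.iter().copied().find(|b| !b.is_ascii_whitespace()) {
        Some(b'{') => Ok(ArchiveType::JSON),
        _ => Ok(ArchiveType::Unknown),
    }
}

fn main() -> std::io::Result<()> {
    let mut json = Cursor::new(&b"{ \"version\": \"0.1.0\" }"[..]);
    assert_eq!(detect_archive_type(&mut json)?, ArchiveType::JSON);
    let mut unknown = Cursor::new(&b"youtube ____________"[..]);
    assert_eq!(detect_archive_type(&mut unknown)?, ArchiveType::Unknown);
    Ok(())
}

The Unknown arm then maps to the "maybe try importing" error seen in the middle, mirroring how `migrate_and_connect` reacts to an unclassifiable input file.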
sql_utils.rs
//! Module for SQL Utility functions use diesel::prelude::*; use std::{ borrow::Cow, fs::File, io::BufReader, path::Path, }; use crate::error::IOErrorToError; use super::archive::import::{ detect_archive_type, import_ytdlr_json_archive, ArchiveType, ImportProgress, }; /// All migrations from "libytdlr/migrations" embedded into the binary pub const MIGRATIONS: diesel_migrations::EmbeddedMigrations = diesel_migrations::embed_migrations!(); /// Open a SQLite Connection for `sqlite_path` and apply sqlite migrations /// Does not migrate archive formats, use [migrate_and_connect] instead pub fn sqlite_connect<P: AsRef<Path>>(sqlite_path: P) -> Result<SqliteConnection, crate::Error> { // having to convert the path to "str" because diesel (and the underlying sqlite library) only accept strings return match sqlite_path.as_ref().to_str() { Some(path) => { let mut connection = SqliteConnection::establish(path)?; apply_sqlite_migrations(&mut connection)?; return Ok(connection); }, None => Err(crate::Error::other(format!("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy): \"{}\"", sqlite_path.as_ref().to_string_lossy()))), }; } /// Apply all (up) migrations to a SQLite Database #[inline] fn apply_sqlite_migrations(connection: &mut SqliteConnection) -> Result<(), crate::Error> { let applied = diesel_migrations::MigrationHarness::run_pending_migrations(connection, MIGRATIONS) .map_err(|err| return crate::Error::other(format!("Applying SQL Migrations Errored! Error:\n{err}")))?; debug!("Applied Migrations: {:?}", applied); return Ok(()); } /// Check if the input path is a SQL database; if not, migrate it to SQL and return the new path and an open connection /// Parameter `pgcb` will be called while the migration is applied /// /// This function is intended to be used over [`sqlite_connect`] in all non-test cases pub fn migrate_and_connect<S: FnMut(ImportProgress)>( archive_path: &Path, pgcb: S, ) -> Result<(Cow<Path>, SqliteConnection), crate::Error> { // early return in case the file does not actually exist if !archive_path.exists() { return Ok((archive_path.into(), sqlite_connect(archive_path)?)); } let migrate_to_path = { let mut tmp = archive_path.to_path_buf(); tmp.set_extension("db"); tmp }; // check if the "migrate-to" path already exists, and either use it directly or error depending on its type if migrate_to_path.exists() { if !migrate_to_path.is_file() { return Err(crate::Error::not_a_file( "Migrate-To Path exists but is not a file!", migrate_to_path, )); } let mut sqlite_path_reader = BufReader::new(File::open(&migrate_to_path).attach_path_err(&migrate_to_path)?); return Ok( match detect_archive_type(&mut sqlite_path_reader)? { ArchiveType::Unknown => return Err(crate::Error::other(format!("Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::JSON => return Err(crate::Error::other(format!("Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration! Path: \"{}\"", migrate_to_path.to_string_lossy()))), ArchiveType::SQLite => { // this has to be done before, because the following ".into" call will move the value let connection = sqlite_connect(&migrate_to_path)?; (migrate_to_path.into(), connection) }, }, ); } let mut input_archive_reader = BufReader::new(File::open(archive_path).attach_path_err(archive_path)?); return Ok(match detect_archive_type(&mut input_archive_reader)?
{ ArchiveType::Unknown => { return Err(crate::Error::other( "Unknown Archive type to migrate, maybe try importing", )) }, ArchiveType::JSON => { debug!("Applying Migration from JSON to SQLite"); // handle case where the input path matches the changed path if migrate_to_path == archive_path { return Err(crate::Error::other( "Migration cannot be done: Input path matches output path (setting extension to \".db\")", )); } let mut connection = sqlite_connect(&migrate_to_path)?; import_ytdlr_json_archive(&mut input_archive_reader, &mut connection, pgcb)?; debug!("Migration from JSON to SQLite done"); (migrate_to_path.into(), connection) }, ArchiveType::SQLite => (archive_path.into(), sqlite_connect(archive_path)?), }); } #[cfg(test)] mod test { use super::*; use tempfile::{ Builder as TempBuilder, TempDir, }; fn create_connection() -> (SqliteConnection, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqlite-") .tempdir() .expect("Expected a temp dir to be created"); // chrono is used to create a different database for each thread let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); // remove if already exists to have a clean test if path.exists() { std::fs::remove_file(&path).expect("Expected the file to be removed"); } return ( crate::main::sql_utils::sqlite_connect(&path).expect("Expected SQLite to successfully start"), testdir, ); } mod connect { use super::*; use std::{ ffi::OsString, os::unix::prelude::OsStringExt, }; #[test] fn test_connect() { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteConnect-") .tempdir() .expect("Expected a temp dir to be created"); let path = testdir.as_ref().join(format!("{}-sqlite.db", chrono::Utc::now())); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); let connection = sqlite_connect(path); assert!(connection.is_ok()); } // non-UTF-8 paths are a pain to create OS-independently, so the following only works on Linux #[cfg(target_os = "linux")] #[test] fn test_connect_notutf8() { let path = OsString::from_vec(vec![255]); let err = sqlite_connect(path); assert!(err.is_err()); // Not using "unwrap_err", because of https://github.com/diesel-rs/diesel/discussions/3124 let err = match err { Ok(_) => panic!("Expected an Error value"), Err(err) => err, }; // the following is only a "contains", because of the arbitrary path that could be after it assert!(err.to_string().contains("SQLite only accepts UTF-8 Paths, and given path failed to be converted to a string without being lossy, Path (converted lossy):")); } } mod apply_sqlite_migrations { use super::*; #[test] fn test_all_migrations_applied() { let (mut connection, _tempdir) = create_connection(); let res = diesel_migrations::MigrationHarness::has_pending_migration(&mut connection, MIGRATIONS); assert!(res.is_ok()); let res = res.unwrap(); assert!(!res); } } mod migrate_and_connect { use std::{ ffi::OsStr, io::{ BufWriter, Write, }, ops::Deref, path::PathBuf, sync::RwLock, }; use super::*; fn gen_archive_path<P: AsRef<OsStr>>(extension: P) -> (PathBuf, TempDir) { let testdir = TempBuilder::new() .prefix("ytdl-test-sqliteMigrate-") .tempdir() .expect("Expected a temp dir to be created"); let mut path = testdir.as_ref().join(format!("{}-gen_archive", uuid::Uuid::new_v4())); path.set_extension(extension); println!("generated: {}", path.to_string_lossy()); // clear generated path clear_path(&path); { let mut migrate_to_path = path.clone();
migrate_to_path.set_extension("db"); // clear migrate_to_path clear_path(migrate_to_path); } return (path, testdir); } fn clear_path<P: AsRef<Path>>(path: P) { let path = path.as_ref(); if path.exists() { std::fs::remove_file(path).expect("Expected file to be removed"); } } fn create_dir_all_parent<P: AsRef<Path>>(path: P) { let path = path.as_ref(); std::fs::create_dir_all(path.parent().expect("Expected the file to have a parent")) .expect("expected the directory to be created"); } fn write_file_with_content<S: AsRef<str>, P: AsRef<OsStr>>(input: S, extension: P) -> (PathBuf, TempDir) { let (path, tempdir) = gen_archive_path(extension); create_dir_all_parent(&path); let mut file = BufWriter::new(std::fs::File::create(&path).expect("Expected file to be created")); file.write_all(input.as_ref().as_bytes()) .expect("Expected successful file write"); return (path, tempdir); } /// Test utility function for easy callbacks fn
(c: &RwLock<Vec<ImportProgress>>) -> impl FnMut(ImportProgress) + '_ { return |imp| c.write().expect("write failed").push(imp); } #[test] fn test_input_unknown_archive() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "unknown_ytdl"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected an Error value"), Err(err) => err, }; assert!(res .to_string() .contains("Unknown Archive type to migrate, maybe try importing")); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_sqlite_archive() { let (path, _tempdir) = gen_archive_path("db_sqlite"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_input_json_archive() { let string0 = r#" { "version": "0.1.0", "videos": [ { "id": "____________", "provider": "youtube", "dlFinished": true, "editAsked": true, "fileName": "someFile1.mp3" }, { "id": "------------", "provider": "youtube", "dlFinished": false, "editAsked": true, "fileName": "someFile2.mp3" }, { "id": "aaaaaaaaaaaa", "provider": "youtube", "dlFinished": true, "editAsked": false, "fileName": "someFile3.mp3" }, { "id": "0000000000", "provider": "soundcloud", "dlFinished": true, "editAsked": true, "fileName": "someFile4.mp3" } ] } "#; let (path, _tempdir) = write_file_with_content(string0, "json_json"); let expected_path = { let mut tmp = path.clone(); tmp.set_extension("db"); tmp }; clear_path(&expected_path); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&expected_path, res.0.as_ref()); assert_eq!( &vec![ ImportProgress::Starting, ImportProgress::SizeHint(4), // Size Hint of 4, because of the intermediate array's length // indices start at 0, thanks to JSON array indexing ImportProgress::Increase(1, 0), ImportProgress::Increase(1, 1), ImportProgress::Increase(1, 2), ImportProgress::Increase(1, 3), ImportProgress::Finished(4) ], pgcounter.read().expect("failed to read").deref() ); } #[test] fn test_to_existing_json() { let string0 = r#" { } "#; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected an Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!("Other: Migrate-To Path already exists and is a JSON archive, please rename it and retry the migration!
Path: \"{}\"", path.to_string_lossy()) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_unknown() { let string0 = " youtube ____________ youtube ------------ youtube aaaaaaaaaaaa soundcloud 0000000000 "; let (path, _tempdir) = write_file_with_content(string0, "db"); let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_err()); let res = match res { Ok(_) => panic!("Expected a Error value"), Err(err) => err, }; assert_eq!( res.to_string(), format!( "Other: Migrate-To Path already exists, but is of unknown type! Path: \"{}\"", path.to_string_lossy() ) ); assert_eq!(0, pgcounter.read().expect("read failed").len()); } #[test] fn test_to_existing_sqlite() { let (path, _tempdir) = gen_archive_path("db"); create_dir_all_parent(&path); { // create database file assert!(sqlite_connect(&path).is_ok()); } let pgcounter = RwLock::new(Vec::<ImportProgress>::new()); let res = migrate_and_connect(&path, callback_counter(&pgcounter)); assert!(res.is_ok()); let res = res.unwrap(); assert_eq!(&path, res.0.as_ref()); assert_eq!(0, pgcounter.read().expect("read failed").len()); } } }
callback_counter
identifier_name
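The `identifier_name` middle above is the test helper `callback_counter`. As a standalone illustration (hedged: `ImportProgress` is reduced here to the variants the tests exercise, and `move` is added so the sketch compiles on any Rust edition), the helper returns a `FnMut` closure that appends every progress event to a shared `RwLock<Vec<_>>`, which the tests then assert on:

use std::sync::RwLock;

// Reduced, hypothetical mirror of the ImportProgress events used in the tests.
#[derive(Debug, Clone, PartialEq)]
enum ImportProgress {
    Starting,
    SizeHint(usize),
    Increase(usize, usize),
    Finished(usize),
}

// Returns a closure borrowing `c` for '_; every call records one event.
// (`move` captures the &RwLock by value, which is Copy, so this is cheap.)
fn callback_counter(c: &RwLock<Vec<ImportProgress>>) -> impl FnMut(ImportProgress) + '_ {
    move |imp| c.write().expect("write failed").push(imp)
}

fn main() {
    let events = RwLock::new(Vec::new());
    {
        let mut cb = callback_counter(&events);
        cb(ImportProgress::Starting);
        cb(ImportProgress::SizeHint(4));
        cb(ImportProgress::Increase(1, 0));
        cb(ImportProgress::Finished(4));
    }
    assert_eq!(events.read().expect("read failed").len(), 4);
}

This is why the tests can pass `callback_counter(&pgcounter)` by value into `migrate_and_connect` and afterwards inspect `pgcounter` directly: only a borrow of the vector goes into the migration.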
scheduler.rs
scheduler.stop_waiting(process) } } #[derive(Copy, Clone)] struct StackPointer(*mut u64); #[export_name = "__lumen_builtin_spawn"] pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term
#[export_name = "__lumen_builtin_yield"] pub unsafe extern "C" fn process_yield() -> bool { let s = <Scheduler as rt_core::Scheduler>::current(); // NOTE: We always set root=false here because the root // process never invokes this function s.process_yield(/* root= */ false) } #[naked] #[inline(never)] #[cfg(all(unix, target_arch = "x86_64"))] pub unsafe extern "C" fn process_return_continuation() { let f: fn() -> () = process_return; asm!(" callq *$0 " : : "r"(f) : : "volatile", "alignstack" ); } #[inline(never)] fn process_return() { let s = <Scheduler as rt_core::Scheduler>::current(); do_process_return(&s); } #[export_name = "__lumen_builtin_malloc"] pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 { use core::convert::TryInto; use liblumen_alloc::erts::term::closure::ClosureLayout; use liblumen_alloc::erts::term::prelude::*; use liblumen_core::alloc::Layout; use liblumen_term::TermKind; let kind_result: Result<TermKind, _> = kind.try_into(); match kind_result { Ok(TermKind::Closure) => { let s = <Scheduler as rt_core::Scheduler>::current(); let cl = ClosureLayout::for_env_len(arity); let result = s.current.alloc_nofrag_layout(cl.layout().clone()); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Tuple) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Tuple::layout_for_len(arity); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Cons) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Layout::new::<Cons>(); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(tk) => { unimplemented!("unhandled use of malloc for {:?}", tk); } Err(_) => { panic!("invalid term kind: {}", kind); } } ptr::null_mut() } /// Called when the current process has finished executing, and has /// returned all the way to its entry function. This marks the process /// as exiting (if it wasn't already), and then yields to the scheduler fn do_process_return(scheduler: &Scheduler) -> bool { use liblumen_alloc::erts::term::prelude::*; if scheduler.current.pid()!= scheduler.root.pid() { scheduler .current .exit(atom!("normal"), anyhow!("Out of code").into()); // NOTE: We always set root=false here, even though this can // be called from the root process, since returning from the // root process exits the scheduler loop anyway, so no stack // swapping can occur scheduler.process_yield(/* root= */ false) } else { true } } pub struct Scheduler { id: id::ID, hierarchy: RwLock<Hierarchy>, // References are always 64-bits even on 32-bit platforms reference_count: AtomicU64, run_queues: RwLock<run_queue::Queues>, // Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler // `u64`. 
unique_integer: AtomicU64, root: Arc<Process>, init: ThreadLocalCell<Arc<Process>>, current: ThreadLocalCell<Arc<Process>>, } // This guarantee holds as long as `init` and `current` are only // ever accessed by the scheduler when scheduling unsafe impl Sync for Scheduler {} impl rt_core::Scheduler for Scheduler { #[inline] fn current() -> Arc<Self> { SCHEDULER.with(|s| s.clone()) } fn id(&self) -> id::ID { self.id } fn hierarchy(&self) -> &RwLock<Hierarchy> { &self.hierarchy } /// Gets the next available reference number fn next_reference_number(&self) -> ReferenceNumber { self.reference_count.fetch_add(1, Ordering::SeqCst) } } impl Scheduler { /// Creates a new scheduler with the default configuration fn new() -> anyhow::Result<Scheduler> { let id = id::next(); // The root process is how the scheduler gets time for itself, // and is also how we know when to shutdown the scheduler due // to termination of all its processes let root = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("root"), function: Atom::from_str("init"), arity: 0, }), ptr::null_mut(), 0, )); let run_queues = Default::default(); Scheduler::spawn_root(root.clone(), id, &run_queues)?; // Placeholder let init = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("undef"), function: Atom::from_str("undef"), arity: 0, }), ptr::null_mut(), 0, )); // The scheduler starts with the root process running let current = ThreadLocalCell::new(root.clone()); Ok(Self { id, run_queues, root, init: ThreadLocalCell::new(init), current, hierarchy: Default::default(), reference_count: AtomicU64::new(0), unique_integer: AtomicU64::new(0), }) } // Spawns the init process, should be called immediately after // scheduler creation pub fn init(&self) -> anyhow::Result<()> { // The init process is the actual "root" Erlang process, it acts // as the entry point for the program from Erlang's perspective, // and is responsible for starting/stopping the system in Erlang. 
// // If this process exits, the scheduler terminates let (init_heap, init_heap_size) = process::alloc::default_heap()?; let init = Arc::new(Process::new_with_stack( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("init"), function: Atom::from_str("start"), arity: 0, }), init_heap, init_heap_size, )?); let clone = init.clone(); unsafe { self.init.set(init); } Scheduler::spawn_internal(clone, self.id, &self.run_queues); Ok(()) } /// Gets the scheduler registered to this thread /// /// If no scheduler has been created for this thread, one is created fn registered() -> Arc<Self> { let mut schedulers = SCHEDULERS.lock(); let s = Arc::new(Self::new().unwrap()); if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) { panic!("Scheduler already registered with ID ({:?})", s.id); } s } /// Gets a scheduler by its ID pub fn from_id(id: &id::ID) -> Option<Arc<Self>> { Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade())) } /// Returns the current thread's scheduler if it matches the given ID fn current_from_id(id: &id::ID) -> Option<Arc<Self>> { SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None }) } /// Gets the next available unique integer pub fn next_unique_integer(&self) -> u64 { self.unique_integer.fetch_add(1, Ordering::SeqCst) } /// Returns the length of the current scheduler's run queue pub fn run_queues_len(&self) -> usize { self.run_queues.read().len() } /// Returns the length of a specific run queue in the current scheduler #[cfg(test)] pub fn run_queue_len(&self, priority: Priority) -> usize { self.run_queues.read().run_queue_len(priority) } /// Returns true if the given process is in the current scheduler's run queue #[cfg(test)] pub fn is_run_queued(&self, value: &Arc<Process>) -> bool { self.run_queues.read().contains(value) } pub fn stop_waiting(&self, process: &Process) { self.run_queues.write().stop_waiting(process); } // TODO: Request application master termination for controlled shutdown // This request will always come from the thread which spawned the application // master, i.e. the "main" scheduler thread // // Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something // went wrong during shutdown, and it was not able to complete normally pub fn shutdown(&self) -> anyhow::Result<()> { // For now just Ok(()), but this needs to be addressed when proper // system startup/shutdown is in place CURRENT_PROCESS.with(|cp| cp.replace(None)); Ok(()) } } impl Debug for Scheduler { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Scheduler") .field("id", &self.id) // The hierarchy slots take a lot of space, so don't print them by default .field("reference_count", &self.reference_count) .field("run_queues", &self.run_queues) .finish() } } impl Drop for Scheduler { fn drop(&mut self) { let mut locked_scheduler_by_id = SCHEDULERS.lock(); locked_scheduler_by_id .remove(&self.id) .expect("Scheduler not registered"); } } impl PartialEq for Scheduler { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// What to run pub enum Run { /// Run the process now Now(Arc<Process>), /// There was a process in the queue, but it needs to be delayed because it is `Priority::Low` /// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process.
/// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606 Delayed, /// There are no processes in the run queue, do other work None, } impl Scheduler { /// > 1. Update reduction counters /// > 2. Check timers /// > 3. If needed check balance /// > 4. If needed migrate processes and ports /// > 5. Do auxiliary scheduler work /// > 6. If needed check I/O and update time /// > 7. While needed pick a port task to execute /// > 8. Pick a process to execute /// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop) /// /// Returns `true` if a process was run. Returns `false` if no process could be run and the /// scheduler should sleep or work steal. #[must_use] pub fn run_once(&self) -> bool { // We always set root=true here, since calling this function is always done // from the scheduler loop, and only ever from the root context self.process_yield(/* root= */ true) } /// This function performs two roles, albeit virtually identical: /// /// First, this function is called by the scheduler to resume execution /// of a process pulled from the run queue. It does so using its "root" /// process as its context. /// /// Second, this function is called by a process when it chooses to /// yield back to the scheduler. In this case, the scheduler "root" /// process is swapped in, so the scheduler has a chance to do its /// auxiliary tasks, after which the scheduler will call it again to /// swap in a new process. fn process_yield(&self, is_root: bool) -> bool { info!("entering core scheduler loop"); self.hierarchy.write().timeout(); loop { let next = { let mut rq = self.run_queues.write(); rq.dequeue() }; match next { Run::Now(process) => { info!("found process to schedule"); // Don't allow exiting processes to run again. // // Without this check, a process.exit() from outside the process during WAITING // will return to the Frame that called `process.wait()` if !process.is_exiting() { info!("swapping into process (is_root = {})", is_root); unsafe { self.swap_process(process, is_root); } } else { info!("process is exiting"); process.reduce() } info!("exiting scheduler loop"); // When reached, either the process scheduled is the root process, // or the process is exiting and we called `.reduce()`; either way we're // returning to the main scheduler loop to check for signals, etc. break true; } Run::Delayed => { info!("found process, but it is delayed"); continue; } Run::None if is_root => { info!("no processes remaining to schedule, exiting loop"); // If no processes are available, then the scheduler should steal, // but if it can't/doesn't, then it must terminate, as there is // nothing we can swap to. When we break here, we're returning // to the core scheduler loop, which _must_ terminate, if it does // not, we'll just end up right back here again. // // TODO: stealing break false; } Run::None => unreachable!(), } } } /// This function takes care of coordinating the scheduling of a new /// process/descheduling of the current process. /// /// - Updating process status /// - Updating reduction count based on accumulated reductions during execution /// - Resetting reduction counter for next process /// - Handling exiting processes (logging/propagating) /// /// Once that is complete, it swaps to the new process stack via `swap_stack`, /// at which point execution resumes where the newly scheduled process left /// off previously, or in its init function.
unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) { // Mark the new process as Running let new_ctx = &new.registers as *const _; { let mut new_status = new.status.write(); *new_status = Status::Running; } // Replace the previous process with the new as the currently scheduled process let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone()))); let prev = self.current.replace(new.clone()); // Increment reduction count if not the root process if !is_root { let prev_reductions = reset_reduction_counter(); prev.total_reductions .fetch_add(prev_reductions as u64, Ordering::Relaxed); } // Change the previous process status to Runnable { let mut prev_status = prev.status.write(); if Status::Running == *prev_status { *prev_status = Status::Runnable } } // Save the previous process registers for the stack swap let prev_ctx = &prev.registers as *const _ as *mut _; // Then try to schedule it for the future // If the process is exiting, then handle the exit, otherwise // proceed to the stack swap if let Some(exiting) = self.run_queues.write().requeue(prev) { if let Status::Exiting(ref ex) = *exiting.status.read() { crate::process::log_exit(&exiting, ex); crate::process::propagate_exit(&exiting, ex); } else { unreachable!() } } // Execute the swap // // When swapping to the root process, we return here, which // will unwind back to the main scheduler loop in `lib.rs`. // // When swapping to a newly spawned process, we return "into" // its init function, or put another way, we jump to its // function prologue. In this situation, all of the saved registers // except %rsp and %rbp will be zeroed. %rsp is set during the call // to `spawn`, but %rbp is set to the current %rbp value to ensure // that stack traces link the new stack to the frame in which execution // started // // When swapping to a previously spawned process, we return here, // since the process called `process_yield`. From here we unwind back // to the call to `process_yield` and resume execution from the point // where it was called. swap_stack(prev_ctx, new_ctx); } /// Schedules the given process for execution pub fn schedule(&mut self, process: Arc<Process>) { debug_assert_ne!( Some(self.id), process.scheduler_id(), "process is already scheduled here!" ); process.schedule_with(self.id); let mut rq = self.run_queues.write(); rq.enqueue(process); } /// Spawns a new process using the given init function as its entry #[inline] pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> { Self::spawn_internal(process, self.id, &self.run_queues); Ok(()) } // Root process uses the original thread stack, no initialization required.
// // It also starts "running", so we don't put it on the run queue fn spawn_root( process: Arc<Process>, id: id::ID, _run_queues: &RwLock<run_queue::Queues>, ) -> anyhow::Result<()> { process.schedule_with(id); *process.status.write() = Status::Running; Ok(()) } fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) { process.schedule_with(id); let mfa = &process.initial_module_function_arity; let init_fn_result = apply::find_symbol(&mfa); if init_fn_result.is_none() { panic!( "invalid mfa provided for process ({}), no such symbol found", &mfa ); } let init_fn = init_fn_result.unwrap(); #[inline(always)] unsafe fn push(sp: &mut StackPointer, value: u64) { sp.0 = sp.0.offset(-1); ptr::write(sp.0, value); } // Write the return function and init function to the end of the stack, // when execution resumes, the pointer before the stack pointer will be // used as the return address - the first time that will be the init function. // // When execution returns from the init function, then it will return via // `process_return`, which will return to the scheduler and indicate that // the process exited. The nature of the exit is indicated by error state // in the process itself unsafe { let mut sp = StackPointer(process.stack.top as *mut u64); // Function that will be called when returning from init_fn push(&mut sp, process_return_continuation as u64);
{ unimplemented!() }
identifier_body
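The prefix above ends inside `spawn_internal`, which seeds a freshly spawned process stack so that "returning" into it first calls the init function and ultimately `process_return_continuation`. A runnable toy version of that `push` helper (a plain `Vec<u64>` stands in for the process stack; the pushed addresses are placeholder constants): the stack pointer moves down one `u64` slot per push, exactly as in the code above.

use std::ptr;

#[derive(Copy, Clone)]
struct StackPointer(*mut u64);

// Decrement the stack pointer by one u64 slot, then write `value` there,
// mirroring the `push` helper inside `spawn_internal`.
unsafe fn push(sp: &mut StackPointer, value: u64) {
    sp.0 = sp.0.offset(-1);
    ptr::write(sp.0, value);
}

fn main() {
    // Hypothetical 8-slot buffer standing in for a process stack;
    // real stacks grow downward from their highest address ("top").
    let mut stack = vec![0u64; 8];
    let top = unsafe { stack.as_mut_ptr().add(stack.len()) };
    let mut sp = StackPointer(top);
    unsafe {
        push(&mut sp, 0xdead_beef); // e.g. the return continuation
        push(&mut sp, 0x1234_5678); // e.g. the init function address
    }
    assert_eq!(stack[7], 0xdead_beef); // pushed first, sits highest
    assert_eq!(stack[6], 0x1234_5678); // pushed second, sits below it
}

Laying the continuation down before the init function is what makes the first "return" into the new stack land in the init function, and the return from init land in `process_return_continuation`.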
scheduler.rs
scheduler.stop_waiting(process) } } #[derive(Copy, Clone)] struct StackPointer(*mut u64); #[export_name = "__lumen_builtin_spawn"] pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term { unimplemented!() } #[export_name = "__lumen_builtin_yield"] pub unsafe extern "C" fn process_yield() -> bool { let s = <Scheduler as rt_core::Scheduler>::current(); // NOTE: We always set root=false here because the root // process never invokes this function s.process_yield(/* root= */ false) } #[naked] #[inline(never)] #[cfg(all(unix, target_arch = "x86_64"))] pub unsafe extern "C" fn process_return_continuation() { let f: fn() -> () = process_return; asm!(" callq *$0 " : : "r"(f) : : "volatile", "alignstack" ); } #[inline(never)] fn process_return() { let s = <Scheduler as rt_core::Scheduler>::current(); do_process_return(&s); } #[export_name = "__lumen_builtin_malloc"] pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 { use core::convert::TryInto; use liblumen_alloc::erts::term::closure::ClosureLayout; use liblumen_alloc::erts::term::prelude::*; use liblumen_core::alloc::Layout; use liblumen_term::TermKind; let kind_result: Result<TermKind, _> = kind.try_into(); match kind_result { Ok(TermKind::Closure) => { let s = <Scheduler as rt_core::Scheduler>::current(); let cl = ClosureLayout::for_env_len(arity); let result = s.current.alloc_nofrag_layout(cl.layout().clone()); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Tuple) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Tuple::layout_for_len(arity); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Cons) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Layout::new::<Cons>(); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(tk) => { unimplemented!("unhandled use of malloc for {:?}", tk); } Err(_) => { panic!("invalid term kind: {}", kind); } } ptr::null_mut() } /// Called when the current process has finished executing, and has /// returned all the way to its entry function. This marks the process /// as exiting (if it wasn't already), and then yields to the scheduler fn do_process_return(scheduler: &Scheduler) -> bool { use liblumen_alloc::erts::term::prelude::*; if scheduler.current.pid() != scheduler.root.pid() { scheduler .current .exit(atom!("normal"), anyhow!("Out of code").into()); // NOTE: We always set root=false here, even though this can // be called from the root process, since returning from the // root process exits the scheduler loop anyway, so no stack // swapping can occur scheduler.process_yield(/* root= */ false) } else { true } } pub struct Scheduler { id: id::ID, hierarchy: RwLock<Hierarchy>, // References are always 64-bits even on 32-bit platforms reference_count: AtomicU64, run_queues: RwLock<run_queue::Queues>, // Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler // `u64`.
unique_integer: AtomicU64, root: Arc<Process>, init: ThreadLocalCell<Arc<Process>>, current: ThreadLocalCell<Arc<Process>>, } // This guarantee holds as long as `init` and `current` are only // ever accessed by the scheduler when scheduling unsafe impl Sync for Scheduler {} impl rt_core::Scheduler for Scheduler { #[inline] fn current() -> Arc<Self> { SCHEDULER.with(|s| s.clone()) } fn id(&self) -> id::ID { self.id } fn hierarchy(&self) -> &RwLock<Hierarchy> { &self.hierarchy } /// Gets the next available reference number fn next_reference_number(&self) -> ReferenceNumber { self.reference_count.fetch_add(1, Ordering::SeqCst) } } impl Scheduler { /// Creates a new scheduler with the default configuration fn new() -> anyhow::Result<Scheduler> { let id = id::next(); // The root process is how the scheduler gets time for itself, // and is also how we know when to shutdown the scheduler due // to termination of all its processes let root = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("root"), function: Atom::from_str("init"), arity: 0, }), ptr::null_mut(), 0, )); let run_queues = Default::default(); Scheduler::spawn_root(root.clone(), id, &run_queues)?; // Placeholder let init = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("undef"), function: Atom::from_str("undef"), arity: 0, }), ptr::null_mut(), 0, )); // The scheduler starts with the root process running let current = ThreadLocalCell::new(root.clone()); Ok(Self { id, run_queues, root, init: ThreadLocalCell::new(init), current, hierarchy: Default::default(), reference_count: AtomicU64::new(0), unique_integer: AtomicU64::new(0), }) } // Spawns the init process, should be called immediately after // scheduler creation pub fn init(&self) -> anyhow::Result<()> { // The init process is the actual "root" Erlang process, it acts // as the entry point for the program from Erlang's perspective, // and is responsible for starting/stopping the system in Erlang. 
// // If this process exits, the scheduler terminates let (init_heap, init_heap_size) = process::alloc::default_heap()?; let init = Arc::new(Process::new_with_stack( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("init"), function: Atom::from_str("start"), arity: 0, }), init_heap, init_heap_size, )?); let clone = init.clone(); unsafe { self.init.set(init); } Scheduler::spawn_internal(clone, self.id, &self.run_queues); Ok(()) } /// Gets the scheduler registered to this thread /// /// If no scheduler has been created for this thread, one is created fn registered() -> Arc<Self> { let mut schedulers = SCHEDULERS.lock(); let s = Arc::new(Self::new().unwrap()); if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) { panic!("Scheduler already registered with ID ({:?})", s.id); } s } /// Gets a scheduler by its ID pub fn from_id(id: &id::ID) -> Option<Arc<Self>> { Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade())) } /// Returns the current thread's scheduler if it matches the given ID fn current_from_id(id: &id::ID) -> Option<Arc<Self>> { SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None }) } /// Gets the next available unique integer pub fn next_unique_integer(&self) -> u64 { self.unique_integer.fetch_add(1, Ordering::SeqCst) } /// Returns the length of the current scheduler's run queue pub fn run_queues_len(&self) -> usize { self.run_queues.read().len() } /// Returns the length of a specific run queue in the current scheduler #[cfg(test)] pub fn
(&self, priority: Priority) -> usize { self.run_queues.read().run_queue_len(priority) } /// Returns true if the given process is in the current scheduler's run queue #[cfg(test)] pub fn is_run_queued(&self, value: &Arc<Process>) -> bool { self.run_queues.read().contains(value) } pub fn stop_waiting(&self, process: &Process) { self.run_queues.write().stop_waiting(process); } // TODO: Request application master termination for controlled shutdown // This request will always come from the thread which spawned the application // master, i.e. the "main" scheduler thread // // Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something // went wrong during shutdown, and it was not able to complete normally pub fn shutdown(&self) -> anyhow::Result<()> { // For now just Ok(()), but this needs to be addressed when proper // system startup/shutdown is in place CURRENT_PROCESS.with(|cp| cp.replace(None)); Ok(()) } } impl Debug for Scheduler { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Scheduler") .field("id", &self.id) // The hierarchy slots take a lot of space, so don't print them by default .field("reference_count", &self.reference_count) .field("run_queues", &self.run_queues) .finish() } } impl Drop for Scheduler { fn drop(&mut self) { let mut locked_scheduler_by_id = SCHEDULERS.lock(); locked_scheduler_by_id .remove(&self.id) .expect("Scheduler not registered"); } } impl PartialEq for Scheduler { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// What to run pub enum Run { /// Run the process now Now(Arc<Process>), /// There was a process in the queue, but it needs to be delayed because it is `Priority::Low` /// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process. /// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606 Delayed, /// There are no processes in the run queue, do other work None, } impl Scheduler { /// > 1. Update reduction counters /// > 2. Check timers /// > 3. If needed check balance /// > 4. If needed migrate processes and ports /// > 5. Do auxiliary scheduler work /// > 6. If needed check I/O and update time /// > 7. While needed pick a port task to execute /// > 8. Pick a process to execute /// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop) /// /// Returns `true` if a process was run. Returns `false` if no process could be run and the /// scheduler should sleep or work steal. #[must_use] pub fn run_once(&self) -> bool { // We always set root=true here, since calling this function is always done // from the scheduler loop, and only ever from the root context self.process_yield(/* root= */ true) } /// This function performs two roles, albeit virtually identical: /// /// First, this function is called by the scheduler to resume execution /// of a process pulled from the run queue. It does so using its "root" /// process as its context. /// /// Second, this function is called by a process when it chooses to /// yield back to the scheduler. In this case, the scheduler "root" /// process is swapped in, so the scheduler has a chance to do its /// auxiliary tasks, after which the scheduler will call it again to /// swap in a new process.
fn process_yield(&self, is_root: bool) -> bool { info!("entering core scheduler loop"); self.hierarchy.write().timeout(); loop { let next = { let mut rq = self.run_queues.write(); rq.dequeue() }; match next { Run::Now(process) => { info!("found process to schedule"); // Don't allow exiting processes to run again. // // Without this check, a process.exit() from outside the process during WAITING // will return to the Frame that called `process.wait()` if !process.is_exiting() { info!("swapping into process (is_root = {})", is_root); unsafe { self.swap_process(process, is_root); } } else { info!("process is exiting"); process.reduce() } info!("exiting scheduler loop"); // When reached, either the process scheduled is the root process, // or the process is exiting and we called `.reduce()`; either way we're // returning to the main scheduler loop to check for signals, etc. break true; } Run::Delayed => { info!("found process, but it is delayed"); continue; } Run::None if is_root => { info!("no processes remaining to schedule, exiting loop"); // If no processes are available, then the scheduler should steal, // but if it can't/doesn't, then it must terminate, as there is // nothing we can swap to. When we break here, we're returning // to the core scheduler loop, which _must_ terminate, if it does // not, we'll just end up right back here again. // // TODO: stealing break false; } Run::None => unreachable!(), } } } /// This function takes care of coordinating the scheduling of a new /// process/descheduling of the current process. /// /// - Updating process status /// - Updating reduction count based on accumulated reductions during execution /// - Resetting reduction counter for next process /// - Handling exiting processes (logging/propagating) /// /// Once that is complete, it swaps to the new process stack via `swap_stack`, /// at which point execution resumes where the newly scheduled process left /// off previously, or in its init function. unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) { // Mark the new process as Running let new_ctx = &new.registers as *const _; { let mut new_status = new.status.write(); *new_status = Status::Running; } // Replace the previous process with the new as the currently scheduled process let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone()))); let prev = self.current.replace(new.clone()); // Increment reduction count if not the root process if !is_root { let prev_reductions = reset_reduction_counter(); prev.total_reductions .fetch_add(prev_reductions as u64, Ordering::Relaxed); } // Change the previous process status to Runnable { let mut prev_status = prev.status.write(); if Status::Running == *prev_status { *prev_status = Status::Runnable } } // Save the previous process registers for the stack swap let prev_ctx = &prev.registers as *const _ as *mut _; // Then try to schedule it for the future // If the process is exiting, then handle the exit, otherwise // proceed to the stack swap if let Some(exiting) = self.run_queues.write().requeue(prev) { if let Status::Exiting(ref ex) = *exiting.status.read() { crate::process::log_exit(&exiting, ex); crate::process::propagate_exit(&exiting, ex); } else { unreachable!() } } // Execute the swap // // When swapping to the root process, we return here, which // will unwind back to the main scheduler loop in `lib.rs`. // // When swapping to a newly spawned process, we return "into" // its init function, or put another way, we jump to its // function prologue.
In this situation, all of the saved registers // except %rsp and %rbp will be zeroed. %rsp is set during the call // to `spawn`, but %rbp is set to the current %rbp value to ensure // that stack traces link the new stack to the frame in which execution // started // // When swapping to a previously spawned process, we return here, // since the process called `process_yield`. From here we unwind back // to the call to `process_yield` and resume execution from the point // where it was called. swap_stack(prev_ctx, new_ctx); } /// Schedules the given process for execution pub fn schedule(&mut self, process: Arc<Process>) { debug_assert_ne!( Some(self.id), process.scheduler_id(), "process is already scheduled here!" ); process.schedule_with(self.id); let mut rq = self.run_queues.write(); rq.enqueue(process); } /// Spawns a new process using the given init function as its entry #[inline] pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> { Self::spawn_internal(process, self.id, &self.run_queues); Ok(()) } // Root process uses the original thread stack, no initialization required. // // It also starts "running", so we don't put it on the run queue fn spawn_root( process: Arc<Process>, id: id::ID, _run_queues: &RwLock<run_queue::Queues>, ) -> anyhow::Result<()> { process.schedule_with(id); *process.status.write() = Status::Running; Ok(()) } fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) { process.schedule_with(id); let mfa = &process.initial_module_function_arity; let init_fn_result = apply::find_symbol(&mfa); if init_fn_result.is_none() { panic!( "invalid mfa provided for process ({}), no such symbol found", &mfa ); } let init_fn = init_fn_result.unwrap(); #[inline(always)] unsafe fn push(sp: &mut StackPointer, value: u64) { sp.0 = sp.0.offset(-1); ptr::write(sp.0, value); } // Write the return function and init function to the end of the stack, // when execution resumes, the pointer before the stack pointer will be // used as the return address - the first time that will be the init function. // // When execution returns from the init function, then it will return via // `process_return`, which will return to the scheduler and indicate that // the process exited. The nature of the exit is indicated by error state // in the process itself unsafe { let mut sp = StackPointer(process.stack.top as *mut u64); // Function that will be called when returning from init_fn push(&mut sp, process_return_continuation as u64);
run_queue_len
identifier_name
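The record above centers on the scheduler's dequeue loop: `Run::Delayed` means "ask the queue again", `Run::None` ends the loop, and `Run::Now` swaps a process in. A toy model of just that control flow (hypothetical, self-contained types; the real `run_queue::Queues` additionally tracks priorities, reductions, and the root/exiting special cases):

// Simplified stand-in for the scheduler's Run enum; u32 plays the role
// of a process handle.
enum Run {
    Now(u32), // a process is ready: run it
    Delayed,  // a low-priority process was postponed: poll again
    None,     // run queue is empty: stop
}

// One scheduling attempt: loop past Delayed entries, return the first
// runnable process, or None when the queue is exhausted.
fn run_once(queue: &mut Vec<Run>) -> Option<u32> {
    loop {
        let next = if queue.is_empty() { Run::None } else { queue.remove(0) };
        match next {
            Run::Now(pid) => return Some(pid),
            Run::Delayed => continue,
            Run::None => return None,
        }
    }
}

fn main() {
    let mut q = vec![Run::Delayed, Run::Now(42)];
    assert_eq!(run_once(&mut q), Some(42));
    assert_eq!(run_once(&mut q), None);
}

The `continue` on `Delayed` is the same shape as the real loop's retry, which is why a `Delayed` result never escapes `process_yield`.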
scheduler.rs
scheduler.stop_waiting(process) } } #[derive(Copy, Clone)] struct StackPointer(*mut u64); #[export_name = "__lumen_builtin_spawn"] pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term { unimplemented!() } #[export_name = "__lumen_builtin_yield"] pub unsafe extern "C" fn process_yield() -> bool { let s = <Scheduler as rt_core::Scheduler>::current(); // NOTE: We always set root=false here because the root // process never invokes this function s.process_yield(/* root= */ false) } #[naked] #[inline(never)] #[cfg(all(unix, target_arch = "x86_64"))] pub unsafe extern "C" fn process_return_continuation() { let f: fn() -> () = process_return; asm!(" callq *$0 " : : "r"(f) : : "volatile", "alignstack" ); } #[inline(never)] fn process_return() { let s = <Scheduler as rt_core::Scheduler>::current(); do_process_return(&s); } #[export_name = "__lumen_builtin_malloc"] pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 { use core::convert::TryInto; use liblumen_alloc::erts::term::closure::ClosureLayout; use liblumen_alloc::erts::term::prelude::*; use liblumen_core::alloc::Layout; use liblumen_term::TermKind; let kind_result: Result<TermKind, _> = kind.try_into(); match kind_result { Ok(TermKind::Closure) => { let s = <Scheduler as rt_core::Scheduler>::current(); let cl = ClosureLayout::for_env_len(arity); let result = s.current.alloc_nofrag_layout(cl.layout().clone()); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Tuple) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Tuple::layout_for_len(arity); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Cons) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Layout::new::<Cons>(); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(tk) =>
Err(_) => { panic!("invalid term kind: {}", kind); } } ptr::null_mut() } /// Called when the current process has finished executing, and has /// returned all the way to its entry function. This marks the process /// as exiting (if it wasn't already), and then yields to the scheduler fn do_process_return(scheduler: &Scheduler) -> bool { use liblumen_alloc::erts::term::prelude::*; if scheduler.current.pid() != scheduler.root.pid() { scheduler .current .exit(atom!("normal"), anyhow!("Out of code").into()); // NOTE: We always set root=false here, even though this can // be called from the root process, since returning from the // root process exits the scheduler loop anyway, so no stack // swapping can occur scheduler.process_yield(/* root= */ false) } else { true } } pub struct Scheduler { id: id::ID, hierarchy: RwLock<Hierarchy>, // References are always 64-bits even on 32-bit platforms reference_count: AtomicU64, run_queues: RwLock<run_queue::Queues>, // Non-monotonic unique integers are scoped to the scheduler ID and then use this per-scheduler // `u64`. unique_integer: AtomicU64, root: Arc<Process>, init: ThreadLocalCell<Arc<Process>>, current: ThreadLocalCell<Arc<Process>>, } // This guarantee holds as long as `init` and `current` are only // ever accessed by the scheduler when scheduling unsafe impl Sync for Scheduler {} impl rt_core::Scheduler for Scheduler { #[inline] fn current() -> Arc<Self> { SCHEDULER.with(|s| s.clone()) } fn id(&self) -> id::ID { self.id } fn hierarchy(&self) -> &RwLock<Hierarchy> { &self.hierarchy } /// Gets the next available reference number fn next_reference_number(&self) -> ReferenceNumber { self.reference_count.fetch_add(1, Ordering::SeqCst) } } impl Scheduler { /// Creates a new scheduler with the default configuration fn new() -> anyhow::Result<Scheduler> { let id = id::next(); // The root process is how the scheduler gets time for itself, // and is also how we know when to shutdown the scheduler due // to termination of all its processes let root = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("root"), function: Atom::from_str("init"), arity: 0, }), ptr::null_mut(), 0, )); let run_queues = Default::default(); Scheduler::spawn_root(root.clone(), id, &run_queues)?; // Placeholder let init = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("undef"), function: Atom::from_str("undef"), arity: 0, }), ptr::null_mut(), 0, )); // The scheduler starts with the root process running let current = ThreadLocalCell::new(root.clone()); Ok(Self { id, run_queues, root, init: ThreadLocalCell::new(init), current, hierarchy: Default::default(), reference_count: AtomicU64::new(0), unique_integer: AtomicU64::new(0), }) } // Spawns the init process, should be called immediately after // scheduler creation pub fn init(&self) -> anyhow::Result<()> { // The init process is the actual "root" Erlang process, it acts // as the entry point for the program from Erlang's perspective, // and is responsible for starting/stopping the system in Erlang.
// // If this process exits, the scheduler terminates let (init_heap, init_heap_size) = process::alloc::default_heap()?; let init = Arc::new(Process::new_with_stack( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("init"), function: Atom::from_str("start"), arity: 0, }), init_heap, init_heap_size, )?); let clone = init.clone(); unsafe { self.init.set(init); } Scheduler::spawn_internal(clone, self.id, &self.run_queues); Ok(()) } /// Gets the scheduler registered to this thread /// /// If no scheduler has been created for this thread, one is created fn registered() -> Arc<Self> { let mut schedulers = SCHEDULERS.lock(); let s = Arc::new(Self::new().unwrap()); if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) { panic!("Scheduler already registered with ID ({:?})", s.id); } s } /// Gets a scheduler by its ID pub fn from_id(id: &id::ID) -> Option<Arc<Self>> { Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade())) } /// Returns the current thread's scheduler if it matches the given ID fn current_from_id(id: &id::ID) -> Option<Arc<Self>> { SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None }) } /// Gets the next available unique integer pub fn next_unique_integer(&self) -> u64 { self.unique_integer.fetch_add(1, Ordering::SeqCst) } /// Returns the length of the current scheduler's run queue pub fn run_queues_len(&self) -> usize { self.run_queues.read().len() } /// Returns the length of a specific run queue in the current scheduler #[cfg(test)] pub fn run_queue_len(&self, priority: Priority) -> usize { self.run_queues.read().run_queue_len(priority) } /// Returns true if the given process is in the current scheduler's run queue #[cfg(test)] pub fn is_run_queued(&self, value: &Arc<Process>) -> bool { self.run_queues.read().contains(value) } pub fn stop_waiting(&self, process: &Process) { self.run_queues.write().stop_waiting(process); } // TODO: Request application master termination for controlled shutdown // This request will always come from the thread which spawned the application // master, i.e. the "main" scheduler thread // // Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something // went wrong during shutdown, and it was not able to complete normally pub fn shutdown(&self) -> anyhow::Result<()> { // For now just Ok(()), but this needs to be addressed when proper // system startup/shutdown is in place CURRENT_PROCESS.with(|cp| cp.replace(None)); Ok(()) } } impl Debug for Scheduler { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Scheduler") .field("id", &self.id) // The hierarchy slots take a lot of space, so don't print them by default .field("reference_count", &self.reference_count) .field("run_queues", &self.run_queues) .finish() } } impl Drop for Scheduler { fn drop(&mut self) { let mut locked_scheduler_by_id = SCHEDULERS.lock(); locked_scheduler_by_id .remove(&self.id) .expect("Scheduler not registered"); } } impl PartialEq for Scheduler { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// What to run pub enum Run { /// Run the process now Now(Arc<Process>), /// There was a process in the queue, but it needs to be delayed because it is `Priority::Low` /// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process.
/// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606 Delayed, /// There are no processes in the run queue, do other work None, } impl Scheduler { /// > 1. Update reduction counters /// > 2. Check timers /// > 3. If needed check balance /// > 4. If needed migrate processes and ports /// > 5. Do auxiliary scheduler work /// > 6. If needed check I/O and update time /// > 7. While needed pick a port task to execute /// > 8. Pick a process to execute /// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop) /// /// Returns `true` if a process was run. Returns `false` if no process could be run and the /// scheduler should sleep or work steal. #[must_use] pub fn run_once(&self) -> bool { // We always set root=true here, since calling this function is always done // from the scheduler loop, and only ever from the root context self.process_yield(/* root= */ true) } /// This function performs two roles, albeit virtually identical: /// /// First, this function is called by the scheduler to resume execution /// of a process pulled from the run queue. It does so using its "root" /// process as its context. /// /// Second, this function is called by a process when it chooses to /// yield back to the scheduler. In this case, the scheduler "root" /// process is swapped in, so the scheduler has a chance to do its /// auxiliary tasks, after which the scheduler will call it again to /// swap in a new process. fn process_yield(&self, is_root: bool) -> bool { info!("entering core scheduler loop"); self.hierarchy.write().timeout(); loop { let next = { let mut rq = self.run_queues.write(); rq.dequeue() }; match next { Run::Now(process) => { info!("found process to schedule"); // Don't allow exiting processes to run again. // // Without this check, a process.exit() from outside the process during WAITING // will return to the Frame that called `process.wait()` if !process.is_exiting() { info!("swapping into process (is_root = {})", is_root); unsafe { self.swap_process(process, is_root); } } else { info!("process is exiting"); process.reduce() } info!("exiting scheduler loop"); // When reached, either the process scheduled is the root process, // or the process is exiting and we called `.reduce()`; either way we're // returning to the main scheduler loop to check for signals, etc. break true; } Run::Delayed => { info!("found process, but it is delayed"); continue; } Run::None if is_root => { info!("no processes remaining to schedule, exiting loop"); // If no processes are available, then the scheduler should steal, // but if it can't/doesn't, then it must terminate, as there is // nothing we can swap to. When we break here, we're returning // to the core scheduler loop, which _must_ terminate, if it does // not, we'll just end up right back here again. // // TODO: stealing break false; } Run::None => unreachable!(), } } } /// This function takes care of coordinating the scheduling of a new /// process/descheduling of the current process. /// /// - Updating process status /// - Updating reduction count based on accumulated reductions during execution /// - Resetting reduction counter for next process /// - Handling exiting processes (logging/propagating) /// /// Once that is complete, it swaps to the new process stack via `swap_stack`, /// at which point execution resumes where the newly scheduled process left /// off previously, or in its init function.
unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) { // Mark the new process as Running let new_ctx = &new.registers as *const _; { let mut new_status = new.status.write(); *new_status = Status::Running; } // Replace the previous process with the new as the currently scheduled process let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone()))); let prev = self.current.replace(new.clone()); // Increment reduction count if not the root process if !is_root { let prev_reductions = reset_reduction_counter(); prev.total_reductions .fetch_add(prev_reductions as u64, Ordering::Relaxed); } // Change the previous process status to Runnable { let mut prev_status = prev.status.write(); if Status::Running == *prev_status { *prev_status = Status::Runnable } } // Save the previous process registers for the stack swap let prev_ctx = &prev.registers as *const _ as *mut _; // Then try to schedule it for the future // If the process is exiting, then handle the exit, otherwise // proceed to the stack swap if let Some(exiting) = self.run_queues.write().requeue(prev) { if let Status::Exiting(ref ex) = *exiting.status.read() { crate::process::log_exit(&exiting, ex); crate::process::propagate_exit(&exiting, ex); } else { unreachable!() } } // Execute the swap // // When swapping to the root process, we return here, which // will unwind back to the main scheduler loop in `lib.rs`. // // When swapping to a newly spawned process, we return "into" // its init function, or put another way, we jump to its // function prologue. In this situation, all of the saved registers // except %rsp and %rbp will be zeroed. %rsp is set during the call // to `spawn`, but %rbp is set to the current %rbp value to ensure // that stack traces link the new stack to the frame in which execution // started // // When swapping to a previously spawned process, we return here, // since the process called `process_yield`. From here we unwind back // to the call to `process_yield` and resume execution from the point // where it was called. swap_stack(prev_ctx, new_ctx); } /// Schedules the given process for execution pub fn schedule(&mut self, process: Arc<Process>) { debug_assert_ne!( Some(self.id), process.scheduler_id(), "process is already scheduled here!" ); process.schedule_with(self.id); let mut rq = self.run_queues.write(); rq.enqueue(process); } /// Spawns a new process using the given init function as its entry #[inline] pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> { Self::spawn_internal(process, self.id, &self.run_queues); Ok(()) } // Root process uses the original thread stack, no initialization required.
// // It also starts "running", so we don't put it on the run queue fn spawn_root( process: Arc<Process>, id: id::ID, _run_queues: &RwLock<run_queue::Queues>, ) -> anyhow::Result<()> { process.schedule_with(id); *process.status.write() = Status::Running; Ok(()) } fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) { process.schedule_with(id); let mfa = &process.initial_module_function_arity; let init_fn_result = apply::find_symbol(&mfa); if init_fn_result.is_none() { panic!( "invalid mfa provided for process ({}), no such symbol found", &mfa ); } let init_fn = init_fn_result.unwrap(); #[inline(always)] unsafe fn push(sp: &mut StackPointer, value: u64) { sp.0 = sp.0.offset(-1); ptr::write(sp.0, value); } // Write the return function and init function to the end of the stack, // when execution resumes, the pointer before the stack pointer will be // used as the return address - the first time that will be the init function. // // When execution returns from the init function, then it will return via // `process_return`, which will return to the scheduler and indicate that // the process exited. The nature of the exit is indicated by error state // in the process itself unsafe { let mut sp = StackPointer(process.stack.top as *mut u64); // Function that will be called when returning from init_fn push(&mut sp, process_return_continuation as u64);
{ unimplemented!("unhandled use of malloc for {:?}", tk); }
conditional_block
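The `run_once` contract documented in the scheduler.rs excerpt above (true means a process ran, false means the queue was empty) suggests a simple driver loop. A minimal sketch under stated assumptions: the `shutdown` flag and the thread body are illustrative, not part of the source; only `run_once` and the `rt_core::Scheduler::current()` accessor come from the excerpt.

// Hypothetical scheduler thread driving `run_once`, per its documented contract.
fn scheduler_main(shutdown: &std::sync::atomic::AtomicBool) {
    let scheduler = <Scheduler as rt_core::Scheduler>::current();
    while !shutdown.load(std::sync::atomic::Ordering::Acquire) {
        // `run_once` returns true if a process ran; false means the run
        // queue was empty and this thread should back off (or steal work).
        if !scheduler.run_once() {
            std::thread::yield_now();
        }
    }
}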
scheduler.rs
scheduler.stop_waiting(process) } } #[derive(Copy, Clone)] struct StackPointer(*mut u64); #[export_name = "__lumen_builtin_spawn"] pub extern "C" fn builtin_spawn(to: Term, msg: Term) -> Term { unimplemented!() } #[export_name = "__lumen_builtin_yield"] pub unsafe extern "C" fn process_yield() -> bool { let s = <Scheduler as rt_core::Scheduler>::current(); // NOTE: We always set root=false here because the root // process never invokes this function s.process_yield(/* root= */ false) } #[naked] #[inline(never)] #[cfg(all(unix, target_arch = "x86_64"))] pub unsafe extern "C" fn process_return_continuation() { let f: fn() -> () = process_return; asm!(" callq *$0 " : : "r"(f) : : "volatile", "alignstack" ); } #[inline(never)] fn process_return() { let s = <Scheduler as rt_core::Scheduler>::current(); do_process_return(&s); } #[export_name = "__lumen_builtin_malloc"] pub unsafe extern "C" fn builtin_malloc(kind: u32, arity: usize) -> *mut u8 { use core::convert::TryInto; use liblumen_alloc::erts::term::closure::ClosureLayout; use liblumen_alloc::erts::term::prelude::*; use liblumen_core::alloc::Layout; use liblumen_term::TermKind; let kind_result: Result<TermKind, _> = kind.try_into(); match kind_result { Ok(TermKind::Closure) => { let s = <Scheduler as rt_core::Scheduler>::current(); let cl = ClosureLayout::for_env_len(arity); let result = s.current.alloc_nofrag_layout(cl.layout().clone()); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Tuple) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Tuple::layout_for_len(arity); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(TermKind::Cons) => { let s = <Scheduler as rt_core::Scheduler>::current(); let layout = Layout::new::<Cons>(); let result = s.current.alloc_nofrag_layout(layout); if let Ok(nn) = result { return nn.as_ptr() as *mut u8; } } Ok(tk) => { unimplemented!("unhandled use of malloc for {:?}", tk); } Err(_) => { panic!("invalid term kind: {}", kind); } } ptr::null_mut() } /// Called when the current process has finished executing, and has /// returned all the way to its entry function. This marks the process /// as exiting (if it wasn't already), and then yields to the scheduler fn do_process_return(scheduler: &Scheduler) -> bool { use liblumen_alloc::erts::term::prelude::*; if scheduler.current.pid() != scheduler.root.pid() { scheduler .current .exit(atom!("normal"), anyhow!("Out of code").into()); // NOTE: We always set root=false here, even though this can // be called from the root process, since returning from the // root process exits the scheduler loop anyway, so no stack // swapping can occur scheduler.process_yield(/* root= */ false) } else { true } } pub struct Scheduler { id: id::ID, hierarchy: RwLock<Hierarchy>, // References are always 64 bits even on 32-bit platforms reference_count: AtomicU64, run_queues: RwLock<run_queue::Queues>, // Non-monotonic unique integers are scoped to the scheduler ID and then drawn from this per-scheduler // `u64`.
unique_integer: AtomicU64, root: Arc<Process>, init: ThreadLocalCell<Arc<Process>>, current: ThreadLocalCell<Arc<Process>>, } // This guarantee holds as long as `init` and `current` are only // ever accessed by the scheduler when scheduling unsafe impl Sync for Scheduler {} impl rt_core::Scheduler for Scheduler { #[inline] fn current() -> Arc<Self> { SCHEDULER.with(|s| s.clone()) } fn id(&self) -> id::ID { self.id } fn hierarchy(&self) -> &RwLock<Hierarchy> { &self.hierarchy } /// Gets the next available reference number fn next_reference_number(&self) -> ReferenceNumber { self.reference_count.fetch_add(1, Ordering::SeqCst) } } impl Scheduler { /// Creates a new scheduler with the default configuration fn new() -> anyhow::Result<Scheduler> { let id = id::next(); // The root process is how the scheduler gets time for itself, // and is also how we know when to shutdown the scheduler due // to termination of all its processes let root = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("root"), function: Atom::from_str("init"), arity: 0, }), ptr::null_mut(), 0, )); let run_queues = Default::default(); Scheduler::spawn_root(root.clone(), id, &run_queues)?; // Placeholder let init = Arc::new(Process::new( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("undef"), function: Atom::from_str("undef"), arity: 0, }), ptr::null_mut(), 0, )); // The scheduler starts with the root process running let current = ThreadLocalCell::new(root.clone()); Ok(Self { id, run_queues, root, init: ThreadLocalCell::new(init), current, hierarchy: Default::default(), reference_count: AtomicU64::new(0), unique_integer: AtomicU64::new(0), }) } // Spawns the init process, should be called immediately after // scheduler creation pub fn init(&self) -> anyhow::Result<()> { // The init process is the actual "root" Erlang process, it acts // as the entry point for the program from Erlang's perspective, // and is responsible for starting/stopping the system in Erlang. 
// // If this process exits, the scheduler terminates let (init_heap, init_heap_size) = process::alloc::default_heap()?; let init = Arc::new(Process::new_with_stack( Priority::Normal, None, Arc::new(ModuleFunctionArity { module: Atom::from_str("init"), function: Atom::from_str("start"), arity: 0, }), init_heap, init_heap_size, )?); let clone = init.clone(); unsafe { self.init.set(init); } Scheduler::spawn_internal(clone, self.id, &self.run_queues); Ok(()) } /// Gets the scheduler registered to this thread /// /// If no scheduler has been created for this thread, one is created fn registered() -> Arc<Self> { let mut schedulers = SCHEDULERS.lock(); let s = Arc::new(Self::new().unwrap()); if let Some(_) = schedulers.insert(s.id, Arc::downgrade(&s)) { panic!("Scheduler already registered with ID ({:?})", s.id); } s } /// Gets a scheduler by its ID pub fn from_id(id: &id::ID) -> Option<Arc<Self>> { Self::current_from_id(id).or_else(|| SCHEDULERS.lock().get(id).and_then(|s| s.upgrade())) } /// Returns the current thread's scheduler if it matches the given ID fn current_from_id(id: &id::ID) -> Option<Arc<Self>> { SCHEDULER.with(|s| if &s.id == id { Some(s.clone()) } else { None }) } /// Gets the next available unique integer pub fn next_unique_integer(&self) -> u64 { self.unique_integer.fetch_add(1, Ordering::SeqCst) } /// Returns the length of the current scheduler's run queue pub fn run_queues_len(&self) -> usize { self.run_queues.read().len() } /// Returns the length of a specific run queue in the current scheduler #[cfg(test)] pub fn run_queue_len(&self, priority: Priority) -> usize { self.run_queues.read().run_queue_len(priority) } /// Returns true if the given process is in the current scheduler's run queue #[cfg(test)] pub fn is_run_queued(&self, value: &Arc<Process>) -> bool { self.run_queues.read().contains(value) } pub fn stop_waiting(&self, process: &Process) { self.run_queues.write().stop_waiting(process); } // TODO: Request application master termination for controlled shutdown // This request will always come from the thread which spawned the application // master, i.e. the "main" scheduler thread // // Returns `Ok(())` if shutdown was successful, `Err(anyhow::Error)` if something // went wrong during shutdown, and it was not able to complete normally pub fn shutdown(&self) -> anyhow::Result<()> { // For now just Ok(()), but this needs to be addressed when proper // system startup/shutdown is in place CURRENT_PROCESS.with(|cp| cp.replace(None)); Ok(()) } } impl Debug for Scheduler { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Scheduler") .field("id", &self.id) // The hierarchy slots take a lot of space, so don't print them by default .field("reference_count", &self.reference_count) .field("run_queues", &self.run_queues) .finish() } } impl Drop for Scheduler { fn drop(&mut self) { let mut locked_scheduler_by_id = SCHEDULERS.lock(); locked_scheduler_by_id .remove(&self.id) .expect("Scheduler not registered"); } } impl PartialEq for Scheduler { fn eq(&self, other: &Self) -> bool { self.id == other.id
pub enum Run { /// Run the process now Now(Arc<Process>), /// There was a process in the queue, but it needs to be delayed because it is `Priority::Low` /// and hadn't been delayed enough yet. Ask the `RunQueue` again for another process. /// -- https://github.com/erlang/otp/blob/fe2b1323a3866ed0a9712e9d12e1f8f84793ec47/erts/emulator/beam/erl_process.c#L9601-L9606 Delayed, /// There are no processes in the run queue, do other work None, } impl Scheduler { /// > 1. Update reduction counters /// > 2. Check timers /// > 3. If needed check balance /// > 4. If needed migrate processes and ports /// > 5. Do auxiliary scheduler work /// > 6. If needed check I/O and update time /// > 7. While needed pick a port task to execute /// > 8. Pick a process to execute /// > -- [The Scheduler Loop](https://blog.stenmans.org/theBeamBook/#_the_scheduler_loop) /// /// Returns `true` if a process was run. Returns `false` if no process could be run and the /// scheduler should sleep or work steal. #[must_use] pub fn run_once(&self) -> bool { // We always set root=true here, since calling this function is always done // from the scheduler loop, and only ever from the root context self.process_yield(/* root= */ true) } /// This function performs two roles, which are virtually identical: /// /// First, this function is called by the scheduler to resume execution /// of a process pulled from the run queue. It does so using its "root" /// process as its context. /// /// Second, this function is called by a process when it chooses to /// yield back to the scheduler. In this case, the scheduler "root" /// process is swapped in, so the scheduler has a chance to do its /// auxiliary tasks, after which the scheduler will call it again to /// swap in a new process. fn process_yield(&self, is_root: bool) -> bool { info!("entering core scheduler loop"); self.hierarchy.write().timeout(); loop { let next = { let mut rq = self.run_queues.write(); rq.dequeue() }; match next { Run::Now(process) => { info!("found process to schedule"); // Don't allow exiting processes to run again. // // Without this check, a process.exit() from outside the process during WAITING // will return to the Frame that called `process.wait()` if !process.is_exiting() { info!("swapping into process (is_root = {})", is_root); unsafe { self.swap_process(process, is_root); } } else { info!("process is exiting"); process.reduce() } info!("exiting scheduler loop"); // When reached, either the process scheduled is the root process, // or the process is exiting and we called .reduce(); either way we're // returning to the main scheduler loop to check for signals, etc. break true; } Run::Delayed => { info!("found process, but it is delayed"); continue; } Run::None if is_root => { info!("no processes remaining to schedule, exiting loop"); // If no processes are available, then the scheduler should steal, // but if it can't/doesn't, then it must terminate, as there is // nothing we can swap to. When we break here, we're returning // to the core scheduler loop, which _must_ terminate; if it does // not, we'll just end up right back here again. // // TODO: stealing break false; } Run::None => unreachable!(), } } } /// This function takes care of coordinating the scheduling of a new /// process/descheduling of the current process.
/// /// - Updating process status /// - Updating reduction count based on accumulated reductions during execution /// - Resetting reduction counter for next process /// - Handling exiting processes (logging/propagating) /// /// Once that is complete, it swaps to the new process stack via `swap_stack`, /// at which point execution resumes where the newly scheduled process left /// off previously, or in its init function. unsafe fn swap_process(&self, new: Arc<Process>, is_root: bool) { // Mark the new process as Running let new_ctx = &new.registers as *const _; { let mut new_status = new.status.write(); *new_status = Status::Running; } // Replace the previous process with the new one as the currently scheduled process let _ = CURRENT_PROCESS.with(|cp| cp.replace(Some(new.clone()))); let prev = self.current.replace(new.clone()); // Increment reduction count if not the root process if !is_root { let prev_reductions = reset_reduction_counter(); prev.total_reductions .fetch_add(prev_reductions as u64, Ordering::Relaxed); } // Change the previous process status to Runnable { let mut prev_status = prev.status.write(); if Status::Running == *prev_status { *prev_status = Status::Runnable } } // Save the previous process registers for the stack swap let prev_ctx = &prev.registers as *const _ as *mut _; // Then try to schedule it for the future // If the process is exiting, then handle the exit, otherwise // proceed to the stack swap if let Some(exiting) = self.run_queues.write().requeue(prev) { if let Status::Exiting(ref ex) = *exiting.status.read() { crate::process::log_exit(&exiting, ex); crate::process::propagate_exit(&exiting, ex); } else { unreachable!() } } // Execute the swap // // When swapping to the root process, we return here, which // will unwind back to the main scheduler loop in `lib.rs`. // // When swapping to a newly spawned process, we return "into" // its init function, or put another way, we jump to its // function prologue. In this situation, all of the saved registers // except %rsp and %rbp will be zeroed. %rsp is set during the call // to `spawn`, but %rbp is set to the current %rbp value to ensure // that stack traces link the new stack to the frame in which execution // started // // When swapping to a previously spawned process, we return here, // since the process called `process_yield`. From here we unwind back // to the call to `process_yield` and resume execution from the point // where it was called. swap_stack(prev_ctx, new_ctx); } /// Schedules the given process for execution pub fn schedule(&mut self, process: Arc<Process>) { debug_assert_ne!( Some(self.id), process.scheduler_id(), "process is already scheduled here!" ); process.schedule_with(self.id); let mut rq = self.run_queues.write(); rq.enqueue(process); } /// Spawns a new process using the given init function as its entry #[inline] pub fn spawn(&mut self, process: Arc<Process>) -> anyhow::Result<()> { Self::spawn_internal(process, self.id, &self.run_queues); Ok(()) } // Root process uses the original thread stack, no initialization required.
// // It also starts "running", so we don't put it on the run queue fn spawn_root( process: Arc<Process>, id: id::ID, _run_queues: &RwLock<run_queue::Queues>, ) -> anyhow::Result<()> { process.schedule_with(id); *process.status.write() = Status::Running; Ok(()) } fn spawn_internal(process: Arc<Process>, id: id::ID, run_queues: &RwLock<run_queue::Queues>) { process.schedule_with(id); let mfa = &process.initial_module_function_arity; let init_fn_result = apply::find_symbol(&mfa); if init_fn_result.is_none() { panic!( "invalid mfa provided for process ({}), no such symbol found", &mfa ); } let init_fn = init_fn_result.unwrap(); #[inline(always)] unsafe fn push(sp: &mut StackPointer, value: u64) { sp.0 = sp.0.offset(-1); ptr::write(sp.0, value); } // Write the return function and init function to the end of the stack, // when execution resumes, the pointer before the stack pointer will be // used as the return address - the first time that will be the init function. // // When execution returns from the init function, then it will return via // `process_return`, which will return to the scheduler and indicate that // the process exited. The nature of the exit is indicated by error state // in the process itself unsafe { let mut sp = StackPointer(process.stack.top as *mut u64); // Function that will be called when returning from init_fn push(&mut sp, process_return_continuation as u64);
} } /// What to run
random_line_split
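The stack seeding in `spawn_internal` above works because each `push` moves the stack pointer down one word before writing, so the value pushed last is the first "return address" consumed by the stack swap. A self-contained toy sketch of that layout, assuming nothing from the source beyond the push idiom itself; the buffer and placeholder values are illustrative:

// Mirror of the local `push` helper: decrement, then write.
unsafe fn push(sp: &mut *mut u64, value: u64) {
    *sp = sp.offset(-1);
    std::ptr::write(*sp, value);
}

fn main() {
    let mut stack = vec![0u64; 8];
    unsafe {
        // "top" points one past the last usable slot, as with a real stack.
        let mut sp = stack.as_mut_ptr().add(stack.len());
        push(&mut sp, 0x1111); // stand-in for process_return_continuation: pushed first, popped last
        push(&mut sp, 0x2222); // stand-in for init_fn: popped first as the return address
    }
    assert_eq!(&stack[6..], &[0x2222, 0x1111]);
}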
svh_visitor.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME (#14132): Even this SVH computation still has implementation // artifacts: namely, the order of item declaration will affect the // hash computation, but for many kinds of items the order of // declaration should be irrelevant to the ABI. pub use self::SawExprComponent::*; pub use self::SawStmtComponent::*; use self::SawAbiComponent::*; use syntax::ast::{self, Name, NodeId}; use syntax::parse::token; use syntax_pos::Span; use rustc::hir; use rustc::hir::*; use rustc::hir::def::{Def, PathResolution}; use rustc::hir::def_id::DefId; use rustc::hir::intravisit as visit; use rustc::hir::intravisit::{Visitor, FnKind}; use rustc::ty::TyCtxt; use std::hash::{Hash, SipHasher}; use super::def_path_hash::DefPathHashes; pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> { pub tcx: TyCtxt<'hash, 'tcx, 'tcx>, pub st: &'a mut SipHasher, // collect a deterministic hash of def-ids that we have seen def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, } impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { pub fn new(st: &'a mut SipHasher, tcx: TyCtxt<'hash, 'tcx, 'tcx>, def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>) -> Self { StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes } } fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 { self.def_path_hashes.hash(def_id) } } // To off-load the bulk of the hash-computation onto #[derive(Hash)], // we define a set of enums corresponding to the content that our // crate visitor will encounter as it traverses the ast. // // The important invariant is that all of the Saw*Component enums // do not carry any Spans, Names, or Idents. // // Not carrying any Names/Idents is the important fix for the problem // noted in PR #13948: using the ident.name as the basis for a // hash leads to unstable SVH, because ident.name is just an index // into the intern table (i.e. essentially a random address), not // computed from the name content. // // With the below enums, the SVH computation is not sensitive to // artifacts of how rustc was invoked nor of how the source code // was laid out. (Or at least it is *less* sensitive.) // This enum represents the different potential bits of code the // visitor could encounter that could affect the ABI for the crate, // and assigns each a distinct tag to feed into the hash computation. #[derive(Hash)] enum SawAbiComponent<'a> { // FIXME (#14132): should we include (some function of) // ident.ctxt as well? SawIdent(token::InternedString), SawStructDef(token::InternedString), SawLifetime(token::InternedString), SawLifetimeDef(token::InternedString), SawMod, SawForeignItem, SawItem,
SawGenerics, SawFn, SawTraitItem, SawImplItem, SawStructField, SawVariant, SawPath, SawBlock, SawPat, SawLocal, SawArm, SawExpr(SawExprComponent<'a>), SawStmt(SawStmtComponent), } /// SawExprComponent carries all of the information that we want /// to include in the hash that *won't* be covered by the /// subsequent recursive traversal of the expression's /// substructure by the visitor. /// /// We know every Expr_ variant is covered by a variant because /// `fn saw_expr` maps each to some case below. Ensuring that /// each variant carries an appropriate payload has to be verified /// by hand. /// /// (However, getting that *exactly* right is not so important /// because the SVH is just a developer convenience; there is no /// guarantee of collision-freedom, hash collisions are just /// (hopefully) unlikely.) #[derive(Hash)] pub enum SawExprComponent<'a> { SawExprLoop(Option<token::InternedString>), SawExprField(token::InternedString), SawExprTupField(usize), SawExprBreak(Option<token::InternedString>), SawExprAgain(Option<token::InternedString>), SawExprBox, SawExprVec, SawExprCall, SawExprMethodCall, SawExprTup, SawExprBinary(hir::BinOp_), SawExprUnary(hir::UnOp), SawExprLit(ast::LitKind), SawExprCast, SawExprType, SawExprIf, SawExprWhile, SawExprMatch, SawExprClosure, SawExprBlock, SawExprAssign, SawExprAssignOp(hir::BinOp_), SawExprIndex, SawExprPath(Option<usize>), SawExprAddrOf(hir::Mutability), SawExprRet, SawExprInlineAsm(&'a hir::InlineAsm), SawExprStruct, SawExprRepeat, } fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { match *node { ExprBox(..) => SawExprBox, ExprVec(..) => SawExprVec, ExprCall(..) => SawExprCall, ExprMethodCall(..) => SawExprMethodCall, ExprTup(..) => SawExprTup, ExprBinary(op, _, _) => SawExprBinary(op.node), ExprUnary(op, _) => SawExprUnary(op), ExprLit(ref lit) => SawExprLit(lit.node.clone()), ExprCast(..) => SawExprCast, ExprType(..) => SawExprType, ExprIf(..) => SawExprIf, ExprWhile(..) => SawExprWhile, ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())), ExprMatch(..) => SawExprMatch, ExprClosure(..) => SawExprClosure, ExprBlock(..) => SawExprBlock, ExprAssign(..) => SawExprAssign, ExprAssignOp(op, _, _) => SawExprAssignOp(op.node), ExprField(_, name) => SawExprField(name.node.as_str()), ExprTupField(_, id) => SawExprTupField(id.node), ExprIndex(..) => SawExprIndex, ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)), ExprAddrOf(m, _) => SawExprAddrOf(m), ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())), ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())), ExprRet(..) => SawExprRet, ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a), ExprStruct(..) => SawExprStruct, ExprRepeat(..) => SawExprRepeat, } } /// SawStmtComponent is analogous to SawExprComponent, but for statements. #[derive(Hash)] pub enum SawStmtComponent { SawStmtExpr, SawStmtSemi, } impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn visit_nested_item(&mut self, _: ItemId) { // Each item is hashed independently; ignore nested items. 
} fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name, g: &'tcx Generics, _: NodeId, _: Span) { debug!("visit_variant_data: st={:?}", self.st); SawStructDef(name.as_str()).hash(self.st); visit::walk_generics(self, g); visit::walk_struct_def(self, s) } fn visit_variant(&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) { debug!("visit_variant: st={:?}", self.st); SawVariant.hash(self.st); // walk_variant does not call walk_generics, so do it here. visit::walk_generics(self, g); visit::walk_variant(self, v, g, item_id) } // All of the remaining methods just record (in the hash // SipHasher) that the visitor saw that particular variant // (with its payload), and continue walking as the default // visitor would. // // Some of the implementations have some notes as to how one // might try to make their SVH computation less discerning // (e.g. by incorporating reachability analysis). But // currently all of their implementations are uniform and // uninteresting. // // (If you edit a method such that it deviates from the // pattern, please move that method up above this comment.) fn visit_name(&mut self, _: Span, name: Name) { debug!("visit_name: st={:?}", self.st); SawIdent(name.as_str()).hash(self.st); } fn visit_lifetime(&mut self, l: &'tcx Lifetime) { debug!("visit_lifetime: st={:?}", self.st); SawLifetime(l.name.as_str()).hash(self.st); } fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) { debug!("visit_lifetime_def: st={:?}", self.st); SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st); } // We do recursively walk the bodies of functions/methods // (rather than omitting their bodies from the hash) since // monomorphization and cross-crate inlining generally imply // that a change to a crate body will require downstream // crates to be recompiled. fn visit_expr(&mut self, ex: &'tcx Expr) { debug!("visit_expr: st={:?}", self.st); SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex) } fn visit_stmt(&mut self, s: &'tcx Stmt) { debug!("visit_stmt: st={:?}", self.st); // We don't want to modify the hash for decls, because // they might be item decls (if they are local decls, // we'll hash that fact in visit_local); but we do want to // remember if this was a StmtExpr or StmtSemi (the latter // had an explicit semicolon; this affects the typing // rules). match s.node { StmtDecl(..) => (), StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st), StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st), } visit::walk_stmt(self, s) } fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) { debug!("visit_foreign_item: st={:?}", self.st); // FIXME (#14132) ideally we would incorporate privacy (or // perhaps reachability) somewhere here, so foreign items // that do not leak into downstream crates would not be // part of the ABI. SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i) } fn visit_item(&mut self, i: &'tcx Item) { debug!("visit_item: {:?} st={:?}", i, self.st); // FIXME (#14132) ideally would incorporate reachability // analysis somewhere here, so items that never leak into // downstream crates (e.g. via monomorphisation or // inlining) would not be part of the ABI.
SawItem.hash(self.st); visit::walk_item(self, i) } fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) { debug!("visit_mod: st={:?}", self.st); SawMod.hash(self.st); visit::walk_mod(self, m, n) } fn visit_ty(&mut self, t: &'tcx Ty) { debug!("visit_ty: st={:?}", self.st); SawTy.hash(self.st); visit::walk_ty(self, t) } fn visit_generics(&mut self, g: &'tcx Generics) { debug!("visit_generics: st={:?}", self.st); SawGenerics.hash(self.st); visit::walk_generics(self, g) } fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl, b: &'tcx Block, s: Span, n: NodeId) { debug!("visit_fn: st={:?}", self.st); SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n) } fn visit_trait_item(&mut self, ti: &'tcx TraitItem) { debug!("visit_trait_item: st={:?}", self.st); SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &'tcx ImplItem) { debug!("visit_impl_item: st={:?}", self.st); SawImplItem.hash(self.st); visit::walk_impl_item(self, ii) } fn visit_struct_field(&mut self, s: &'tcx StructField) { debug!("visit_struct_field: st={:?}", self.st); SawStructField.hash(self.st); visit::walk_struct_field(self, s) } fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) { debug!("visit_path: st={:?}", self.st); SawPath.hash(self.st); visit::walk_path(self, path) } fn visit_block(&mut self, b: &'tcx Block) { debug!("visit_block: st={:?}", self.st); SawBlock.hash(self.st); visit::walk_block(self, b) } fn visit_pat(&mut self, p: &'tcx Pat) { debug!("visit_pat: st={:?}", self.st); SawPat.hash(self.st); visit::walk_pat(self, p) } fn visit_local(&mut self, l: &'tcx Local) { debug!("visit_local: st={:?}", self.st); SawLocal.hash(self.st); visit::walk_local(self, l) } fn visit_arm(&mut self, a: &'tcx Arm) { debug!("visit_arm: st={:?}", self.st); SawArm.hash(self.st); visit::walk_arm(self, a) } fn visit_id(&mut self, id: NodeId) { debug!("visit_id: id={} st={:?}", id, self.st); self.hash_resolve(id); } } #[derive(Hash)] pub enum DefHash { SawDefId, SawLabel, SawPrimTy, SawSelfTy, SawErr, } impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn hash_resolve(&mut self, id: ast::NodeId) { // Because whether or not a given id has an entry is dependent // solely on expr variant etc, we don't need to hash whether // or not an entry was present (we are already hashing what // variant it is above when we visit the HIR). if let Some(def) = self.tcx.def_map.borrow().get(&id) { debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st); self.hash_partial_def(def); } if let Some(traits) = self.tcx.trait_map.get(&id) { debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st); traits.len().hash(self.st); // The ordering of the candidates is not fixed. So we hash // the def-ids and then sort them and hash the collection. let mut candidates: Vec<_> = traits.iter() .map(|&TraitCandidate { def_id, import_id: _ }| { self.compute_def_id_hash(def_id) }) .collect(); candidates.sort(); candidates.hash(self.st); } } fn hash_def_id(&mut self, def_id: DefId) { self.compute_def_id_hash(def_id).hash(self.st); } fn hash_partial_def(&mut self, def: &PathResolution) { self.hash_def(def.base_def); def.depth.hash(self.st); } fn hash_def(&mut self, def: Def) { match def { // Crucial point: for all of these variants, the variant + // add'l data that is added is always the same if the // def-id is the same, so it suffices to hash the def-id Def::Fn(..) | Def::Mod(..) | Def::ForeignMod(..) | Def::Static(..) | Def::Variant(..) | Def::Enum(..) | Def::TyAlias(..) 
| Def::AssociatedTy(..) | Def::TyParam(..) | Def::Struct(..) | Def::Trait(..) | Def::Method(..) | Def::Const(..) | Def::AssociatedConst(..) | Def::Local(..) | Def::Upvar(..) => { DefHash::SawDefId.hash(self.st); self.hash_def_id(def.def_id()); } Def::Label(..) => { DefHash::SawLabel.hash(self.st); // we don't encode the `id` because it always refers to something // within this item, so if it changed, there would have to be other // changes too } Def::PrimTy(ref prim_ty) => { DefHash::SawPrimTy.hash(self.st); prim_ty.hash(self.st); } Def::SelfTy(..) => { DefHash::SawSelfTy.hash(self.st); // the meaning of Self is always the same within a // given context, so we don't need to hash the other // fields } Def::Err => { DefHash::SawErr.hash(self.st); } } } }
SawTy,
random_line_split
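The comments in the svh_visitor.rs excerpt above describe the core trick: hash payload-free tag enums (via #[derive(Hash)]) rather than idents or spans, so equal structure hashes equally regardless of source layout. A toy sketch of that principle using the same `std::hash::SipHasher` API the file uses; the enum and names here are hypothetical, not rustc's:

use std::hash::{Hash, Hasher, SipHasher};

// Tags carry no Spans/Names/Idents, only structurally relevant payloads.
#[derive(Hash)]
enum SawComponent {
    SawMod,
    SawFn,
    SawTupField(usize),
}

fn main() {
    let (mut h1, mut h2) = (SipHasher::new(), SipHasher::new());
    SawComponent::SawTupField(0).hash(&mut h1);
    SawComponent::SawTupField(0).hash(&mut h2);
    // Identical structure yields identical hashes, independent of layout.
    assert_eq!(h1.finish(), h2.finish());
}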
svh_visitor.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME (#14132): Even this SVH computation still has implementation // artifacts: namely, the order of item declaration will affect the // hash computation, but for many kinds of items the order of // declaration should be irrelevant to the ABI. pub use self::SawExprComponent::*; pub use self::SawStmtComponent::*; use self::SawAbiComponent::*; use syntax::ast::{self, Name, NodeId}; use syntax::parse::token; use syntax_pos::Span; use rustc::hir; use rustc::hir::*; use rustc::hir::def::{Def, PathResolution}; use rustc::hir::def_id::DefId; use rustc::hir::intravisit as visit; use rustc::hir::intravisit::{Visitor, FnKind}; use rustc::ty::TyCtxt; use std::hash::{Hash, SipHasher}; use super::def_path_hash::DefPathHashes; pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> { pub tcx: TyCtxt<'hash, 'tcx, 'tcx>, pub st: &'a mut SipHasher, // collect a deterministic hash of def-ids that we have seen def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, } impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { pub fn new(st: &'a mut SipHasher, tcx: TyCtxt<'hash, 'tcx, 'tcx>, def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>) -> Self { StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes } } fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 { self.def_path_hashes.hash(def_id) } } // To off-load the bulk of the hash-computation onto #[derive(Hash)], // we define a set of enums corresponding to the content that our // crate visitor will encounter as it traverses the ast. // // The important invariant is that all of the Saw*Component enums // do not carry any Spans, Names, or Idents. // // Not carrying any Names/Idents is the important fix for the problem // noted in PR #13948: using the ident.name as the basis for a // hash leads to unstable SVH, because ident.name is just an index // into the intern table (i.e. essentially a random address), not // computed from the name content. // // With the below enums, the SVH computation is not sensitive to // artifacts of how rustc was invoked nor of how the source code // was laid out. (Or at least it is *less* sensitive.) // This enum represents the different potential bits of code the // visitor could encounter that could affect the ABI for the crate, // and assigns each a distinct tag to feed into the hash computation. #[derive(Hash)] enum SawAbiComponent<'a> { // FIXME (#14132): should we include (some function of) // ident.ctxt as well? SawIdent(token::InternedString), SawStructDef(token::InternedString), SawLifetime(token::InternedString), SawLifetimeDef(token::InternedString), SawMod, SawForeignItem, SawItem, SawTy, SawGenerics, SawFn, SawTraitItem, SawImplItem, SawStructField, SawVariant, SawPath, SawBlock, SawPat, SawLocal, SawArm, SawExpr(SawExprComponent<'a>), SawStmt(SawStmtComponent), } /// SawExprComponent carries all of the information that we want /// to include in the hash that *won't* be covered by the /// subsequent recursive traversal of the expression's /// substructure by the visitor.
/// /// We know every Expr_ variant is covered by a variant because /// `fn saw_expr` maps each to some case below. Ensuring that /// each variant carries an appropriate payload has to be verified /// by hand. /// /// (However, getting that *exactly* right is not so important /// because the SVH is just a developer convenience; there is no /// guarantee of collision-freedom, hash collisions are just /// (hopefully) unlikely.) #[derive(Hash)] pub enum SawExprComponent<'a> { SawExprLoop(Option<token::InternedString>), SawExprField(token::InternedString), SawExprTupField(usize), SawExprBreak(Option<token::InternedString>), SawExprAgain(Option<token::InternedString>), SawExprBox, SawExprVec, SawExprCall, SawExprMethodCall, SawExprTup, SawExprBinary(hir::BinOp_), SawExprUnary(hir::UnOp), SawExprLit(ast::LitKind), SawExprCast, SawExprType, SawExprIf, SawExprWhile, SawExprMatch, SawExprClosure, SawExprBlock, SawExprAssign, SawExprAssignOp(hir::BinOp_), SawExprIndex, SawExprPath(Option<usize>), SawExprAddrOf(hir::Mutability), SawExprRet, SawExprInlineAsm(&'a hir::InlineAsm), SawExprStruct, SawExprRepeat, } fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { match *node { ExprBox(..) => SawExprBox, ExprVec(..) => SawExprVec, ExprCall(..) => SawExprCall, ExprMethodCall(..) => SawExprMethodCall, ExprTup(..) => SawExprTup, ExprBinary(op, _, _) => SawExprBinary(op.node), ExprUnary(op, _) => SawExprUnary(op), ExprLit(ref lit) => SawExprLit(lit.node.clone()), ExprCast(..) => SawExprCast, ExprType(..) => SawExprType, ExprIf(..) => SawExprIf, ExprWhile(..) => SawExprWhile, ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())), ExprMatch(..) => SawExprMatch, ExprClosure(..) => SawExprClosure, ExprBlock(..) => SawExprBlock, ExprAssign(..) => SawExprAssign, ExprAssignOp(op, _, _) => SawExprAssignOp(op.node), ExprField(_, name) => SawExprField(name.node.as_str()), ExprTupField(_, id) => SawExprTupField(id.node), ExprIndex(..) => SawExprIndex, ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)), ExprAddrOf(m, _) => SawExprAddrOf(m), ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())), ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())), ExprRet(..) => SawExprRet, ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a), ExprStruct(..) => SawExprStruct, ExprRepeat(..) => SawExprRepeat, } } /// SawStmtComponent is analogous to SawExprComponent, but for statements. #[derive(Hash)] pub enum SawStmtComponent { SawStmtExpr, SawStmtSemi, } impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn visit_nested_item(&mut self, _: ItemId) { // Each item is hashed independently; ignore nested items. } fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name, g: &'tcx Generics, _: NodeId, _: Span) { debug!("visit_variant_data: st={:?}", self.st); SawStructDef(name.as_str()).hash(self.st); visit::walk_generics(self, g); visit::walk_struct_def(self, s) } fn visit_variant(&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) { debug!("visit_variant: st={:?}", self.st); SawVariant.hash(self.st); // walk_variant does not call walk_generics, so do it here. visit::walk_generics(self, g); visit::walk_variant(self, v, g, item_id) } // All of the remaining methods just record (in the hash // SipHasher) that the visitor saw that particular variant // (with its payload), and continue walking as the default // visitor would. 
// // Some of the implementations have some notes as to how one // might try to make their SVH computation less discerning // (e.g. by incorporating reachability analysis). But // currently all of their implementations are uniform and // uninteresting. // // (If you edit a method such that it deviates from the // pattern, please move that method up above this comment.) fn visit_name(&mut self, _: Span, name: Name) { debug!("visit_name: st={:?}", self.st); SawIdent(name.as_str()).hash(self.st); } fn visit_lifetime(&mut self, l: &'tcx Lifetime) { debug!("visit_lifetime: st={:?}", self.st); SawLifetime(l.name.as_str()).hash(self.st); } fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) { debug!("visit_lifetime_def: st={:?}", self.st); SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st); } // We do recursively walk the bodies of functions/methods // (rather than omitting their bodies from the hash) since // monomorphization and cross-crate inlining generally imply // that a change to a crate body will require downstream // crates to be recompiled. fn visit_expr(&mut self, ex: &'tcx Expr) { debug!("visit_expr: st={:?}", self.st); SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex) } fn visit_stmt(&mut self, s: &'tcx Stmt) { debug!("visit_stmt: st={:?}", self.st); // We don't want to modify the hash for decls, because // they might be item decls (if they are local decls, // we'll hash that fact in visit_local); but we do want to // remember if this was a StmtExpr or StmtSemi (the latter // had an explicit semicolon; this affects the typing // rules). match s.node { StmtDecl(..) => (), StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st), StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st), } visit::walk_stmt(self, s) } fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) { debug!("visit_foreign_item: st={:?}", self.st); // FIXME (#14132) ideally we would incorporate privacy (or // perhaps reachability) somewhere here, so foreign items // that do not leak into downstream crates would not be // part of the ABI. SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i) } fn visit_item(&mut self, i: &'tcx Item) { debug!("visit_item: {:?} st={:?}", i, self.st); // FIXME (#14132) ideally would incorporate reachability // analysis somewhere here, so items that never leak into // downstream crates (e.g. via monomorphisation or // inlining) would not be part of the ABI. SawItem.hash(self.st); visit::walk_item(self, i) } fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) { debug!("visit_mod: st={:?}", self.st); SawMod.hash(self.st); visit::walk_mod(self, m, n) } fn visit_ty(&mut self, t: &'tcx Ty) { debug!("visit_ty: st={:?}", self.st); SawTy.hash(self.st); visit::walk_ty(self, t) } fn visit_generics(&mut self, g: &'tcx Generics) { debug!("visit_generics: st={:?}", self.st); SawGenerics.hash(self.st); visit::walk_generics(self, g) } fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl, b: &'tcx Block, s: Span, n: NodeId) { debug!("visit_fn: st={:?}", self.st); SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n) } fn visit_trait_item(&mut self, ti: &'tcx TraitItem) { debug!("visit_trait_item: st={:?}", self.st); SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &'tcx ImplItem) { debug!("visit_impl_item: st={:?}", self.st); SawImplItem.hash(self.st); visit::walk_impl_item(self, ii) } fn visit_struct_field(&mut self, s: &'tcx StructField)
fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) { debug!("visit_path: st={:?}", self.st); SawPath.hash(self.st); visit::walk_path(self, path) } fn visit_block(&mut self, b: &'tcx Block) { debug!("visit_block: st={:?}", self.st); SawBlock.hash(self.st); visit::walk_block(self, b) } fn visit_pat(&mut self, p: &'tcx Pat) { debug!("visit_pat: st={:?}", self.st); SawPat.hash(self.st); visit::walk_pat(self, p) } fn visit_local(&mut self, l: &'tcx Local) { debug!("visit_local: st={:?}", self.st); SawLocal.hash(self.st); visit::walk_local(self, l) } fn visit_arm(&mut self, a: &'tcx Arm) { debug!("visit_arm: st={:?}", self.st); SawArm.hash(self.st); visit::walk_arm(self, a) } fn visit_id(&mut self, id: NodeId) { debug!("visit_id: id={} st={:?}", id, self.st); self.hash_resolve(id); } } #[derive(Hash)] pub enum DefHash { SawDefId, SawLabel, SawPrimTy, SawSelfTy, SawErr, } impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn hash_resolve(&mut self, id: ast::NodeId) { // Because whether or not a given id has an entry is dependent // solely on expr variant etc, we don't need to hash whether // or not an entry was present (we are already hashing what // variant it is above when we visit the HIR). if let Some(def) = self.tcx.def_map.borrow().get(&id) { debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st); self.hash_partial_def(def); } if let Some(traits) = self.tcx.trait_map.get(&id) { debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st); traits.len().hash(self.st); // The ordering of the candidates is not fixed. So we hash // the def-ids and then sort them and hash the collection. let mut candidates: Vec<_> = traits.iter() .map(|&TraitCandidate { def_id, import_id: _ }| { self.compute_def_id_hash(def_id) }) .collect(); candidates.sort(); candidates.hash(self.st); } } fn hash_def_id(&mut self, def_id: DefId) { self.compute_def_id_hash(def_id).hash(self.st); } fn hash_partial_def(&mut self, def: &PathResolution) { self.hash_def(def.base_def); def.depth.hash(self.st); } fn hash_def(&mut self, def: Def) { match def { // Crucial point: for all of these variants, the variant + // add'l data that is added is always the same if the // def-id is the same, so it suffices to hash the def-id Def::Fn(..) | Def::Mod(..) | Def::ForeignMod(..) | Def::Static(..) | Def::Variant(..) | Def::Enum(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | Def::TyParam(..) | Def::Struct(..) | Def::Trait(..) | Def::Method(..) | Def::Const(..) | Def::AssociatedConst(..) | Def::Local(..) | Def::Upvar(..) => { DefHash::SawDefId.hash(self.st); self.hash_def_id(def.def_id()); } Def::Label(..) => { DefHash::SawLabel.hash(self.st); // we don't encode the `id` because it always refers to something // within this item, so if it changed, there would have to be other // changes too } Def::PrimTy(ref prim_ty) => { DefHash::SawPrimTy.hash(self.st); prim_ty.hash(self.st); } Def::SelfTy(..) => { DefHash::SawSelfTy.hash(self.st); // the meaning of Self is always the same within a // given context, so we don't need to hash the other // fields } Def::Err => { DefHash::SawErr.hash(self.st); } } } }
{ debug!("visit_struct_field: st={:?}", self.st); SawStructField.hash(self.st); visit::walk_struct_field(self, s) }
identifier_body
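`hash_resolve` above normalizes the unordered trait-candidate list by mapping each def-id to a stable `u64`, sorting those keys, and hashing the sorted collection, so iteration order cannot leak into the final hash. A minimal standalone sketch of that order-normalization idea; the function names are illustrative, and strings stand in for def-ids:

use std::hash::{Hash, Hasher, SipHasher};

// Reduce one item to a stable u64 key (stand-in for compute_def_id_hash).
fn stable_key(s: &str) -> u64 {
    let mut h = SipHasher::new();
    s.hash(&mut h);
    h.finish()
}

fn hash_unordered(items: &[&str]) -> u64 {
    // Map to stable keys, sort, then hash the sorted collection.
    let mut keys: Vec<u64> = items.iter().map(|s| stable_key(s)).collect();
    keys.sort();
    let mut h = SipHasher::new();
    keys.hash(&mut h);
    h.finish()
}

fn main() {
    // Same set, different order, same hash.
    assert_eq!(hash_unordered(&["a", "b"]), hash_unordered(&["b", "a"]));
}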
svh_visitor.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME (#14132): Even this SVH computation still has implementation // artifacts: namely, the order of item declaration will affect the // hash computation, but for many kinds of items the order of // declaration should be irrelevant to the ABI. pub use self::SawExprComponent::*; pub use self::SawStmtComponent::*; use self::SawAbiComponent::*; use syntax::ast::{self, Name, NodeId}; use syntax::parse::token; use syntax_pos::Span; use rustc::hir; use rustc::hir::*; use rustc::hir::def::{Def, PathResolution}; use rustc::hir::def_id::DefId; use rustc::hir::intravisit as visit; use rustc::hir::intravisit::{Visitor, FnKind}; use rustc::ty::TyCtxt; use std::hash::{Hash, SipHasher}; use super::def_path_hash::DefPathHashes; pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> { pub tcx: TyCtxt<'hash, 'tcx, 'tcx>, pub st: &'a mut SipHasher, // collect a deterministic hash of def-ids that we have seen def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, } impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { pub fn new(st: &'a mut SipHasher, tcx: TyCtxt<'hash, 'tcx, 'tcx>, def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>) -> Self { StrictVersionHashVisitor { st: st, tcx: tcx, def_path_hashes: def_path_hashes } } fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 { self.def_path_hashes.hash(def_id) } } // To off-load the bulk of the hash-computation onto #[derive(Hash)], // we define a set of enums corresponding to the content that our // crate visitor will encounter as it traverses the ast. // // The important invariant is that all of the Saw*Component enums // do not carry any Spans, Names, or Idents. // // Not carrying any Names/Idents is the important fix for the problem // noted in PR #13948: using the ident.name as the basis for a // hash leads to unstable SVH, because ident.name is just an index // into the intern table (i.e. essentially a random address), not // computed from the name content. // // With the below enums, the SVH computation is not sensitive to // artifacts of how rustc was invoked nor of how the source code // was laid out. (Or at least it is *less* sensitive.) // This enum represents the different potential bits of code the // visitor could encounter that could affect the ABI for the crate, // and assigns each a distinct tag to feed into the hash computation. #[derive(Hash)] enum SawAbiComponent<'a> { // FIXME (#14132): should we include (some function of) // ident.ctxt as well? SawIdent(token::InternedString), SawStructDef(token::InternedString), SawLifetime(token::InternedString), SawLifetimeDef(token::InternedString), SawMod, SawForeignItem, SawItem, SawTy, SawGenerics, SawFn, SawTraitItem, SawImplItem, SawStructField, SawVariant, SawPath, SawBlock, SawPat, SawLocal, SawArm, SawExpr(SawExprComponent<'a>), SawStmt(SawStmtComponent), } /// SawExprComponent carries all of the information that we want /// to include in the hash that *won't* be covered by the /// subsequent recursive traversal of the expression's /// substructure by the visitor.
/// /// We know every Expr_ variant is covered by a variant because /// `fn saw_expr` maps each to some case below. Ensuring that /// each variant carries an appropriate payload has to be verified /// by hand. /// /// (However, getting that *exactly* right is not so important /// because the SVH is just a developer convenience; there is no /// guarantee of collision-freedom, hash collisions are just /// (hopefully) unlikely.) #[derive(Hash)] pub enum SawExprComponent<'a> { SawExprLoop(Option<token::InternedString>), SawExprField(token::InternedString), SawExprTupField(usize), SawExprBreak(Option<token::InternedString>), SawExprAgain(Option<token::InternedString>), SawExprBox, SawExprVec, SawExprCall, SawExprMethodCall, SawExprTup, SawExprBinary(hir::BinOp_), SawExprUnary(hir::UnOp), SawExprLit(ast::LitKind), SawExprCast, SawExprType, SawExprIf, SawExprWhile, SawExprMatch, SawExprClosure, SawExprBlock, SawExprAssign, SawExprAssignOp(hir::BinOp_), SawExprIndex, SawExprPath(Option<usize>), SawExprAddrOf(hir::Mutability), SawExprRet, SawExprInlineAsm(&'a hir::InlineAsm), SawExprStruct, SawExprRepeat, } fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { match *node { ExprBox(..) => SawExprBox, ExprVec(..) => SawExprVec, ExprCall(..) => SawExprCall, ExprMethodCall(..) => SawExprMethodCall, ExprTup(..) => SawExprTup, ExprBinary(op, _, _) => SawExprBinary(op.node), ExprUnary(op, _) => SawExprUnary(op), ExprLit(ref lit) => SawExprLit(lit.node.clone()), ExprCast(..) => SawExprCast, ExprType(..) => SawExprType, ExprIf(..) => SawExprIf, ExprWhile(..) => SawExprWhile, ExprLoop(_, id) => SawExprLoop(id.map(|id| id.node.as_str())), ExprMatch(..) => SawExprMatch, ExprClosure(..) => SawExprClosure, ExprBlock(..) => SawExprBlock, ExprAssign(..) => SawExprAssign, ExprAssignOp(op, _, _) => SawExprAssignOp(op.node), ExprField(_, name) => SawExprField(name.node.as_str()), ExprTupField(_, id) => SawExprTupField(id.node), ExprIndex(..) => SawExprIndex, ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)), ExprAddrOf(m, _) => SawExprAddrOf(m), ExprBreak(id) => SawExprBreak(id.map(|id| id.node.as_str())), ExprAgain(id) => SawExprAgain(id.map(|id| id.node.as_str())), ExprRet(..) => SawExprRet, ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a), ExprStruct(..) => SawExprStruct, ExprRepeat(..) => SawExprRepeat, } } /// SawStmtComponent is analogous to SawExprComponent, but for statements. #[derive(Hash)] pub enum SawStmtComponent { SawStmtExpr, SawStmtSemi, } impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn visit_nested_item(&mut self, _: ItemId) { // Each item is hashed independently; ignore nested items. } fn visit_variant_data(&mut self, s: &'tcx VariantData, name: Name, g: &'tcx Generics, _: NodeId, _: Span) { debug!("visit_variant_data: st={:?}", self.st); SawStructDef(name.as_str()).hash(self.st); visit::walk_generics(self, g); visit::walk_struct_def(self, s) } fn
(&mut self, v: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) { debug!("visit_variant: st={:?}", self.st); SawVariant.hash(self.st); // walk_variant does not call walk_generics, so do it here. visit::walk_generics(self, g); visit::walk_variant(self, v, g, item_id) } // All of the remaining methods just record (in the hash // SipHasher) that the visitor saw that particular variant // (with its payload), and continue walking as the default // visitor would. // // Some of the implementations have some notes as to how one // might try to make their SVH computation less discerning // (e.g. by incorporating reachability analysis). But // currently all of their implementations are uniform and // uninteresting. // // (If you edit a method such that it deviates from the // pattern, please move that method up above this comment.) fn visit_name(&mut self, _: Span, name: Name) { debug!("visit_name: st={:?}", self.st); SawIdent(name.as_str()).hash(self.st); } fn visit_lifetime(&mut self, l: &'tcx Lifetime) { debug!("visit_lifetime: st={:?}", self.st); SawLifetime(l.name.as_str()).hash(self.st); } fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) { debug!("visit_lifetime_def: st={:?}", self.st); SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st); } // We do recursively walk the bodies of functions/methods // (rather than omitting their bodies from the hash) since // monomorphization and cross-crate inlining generally imply // that a change to a crate body will require downstream // crates to be recompiled. fn visit_expr(&mut self, ex: &'tcx Expr) { debug!("visit_expr: st={:?}", self.st); SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex) } fn visit_stmt(&mut self, s: &'tcx Stmt) { debug!("visit_stmt: st={:?}", self.st); // We don't want to modify the hash for decls, because // they might be item decls (if they are local decls, // we'll hash that fact in visit_local); but we do want to // remember if this was a StmtExpr or StmtSemi (the latter // had an explicit semicolon; this affects the typing // rules). match s.node { StmtDecl(..) => (), StmtExpr(..) => SawStmt(SawStmtExpr).hash(self.st), StmtSemi(..) => SawStmt(SawStmtSemi).hash(self.st), } visit::walk_stmt(self, s) } fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) { debug!("visit_foreign_item: st={:?}", self.st); // FIXME (#14132) ideally we would incorporate privacy (or // perhaps reachability) somewhere here, so foreign items // that do not leak into downstream crates would not be // part of the ABI. SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i) } fn visit_item(&mut self, i: &'tcx Item) { debug!("visit_item: {:?} st={:?}", i, self.st); // FIXME (#14132) ideally would incorporate reachability // analysis somewhere here, so items that never leak into // downstream crates (e.g. via monomorphisation or // inlining) would not be part of the ABI.
SawItem.hash(self.st); visit::walk_item(self, i) } fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) { debug!("visit_mod: st={:?}", self.st); SawMod.hash(self.st); visit::walk_mod(self, m, n) } fn visit_ty(&mut self, t: &'tcx Ty) { debug!("visit_ty: st={:?}", self.st); SawTy.hash(self.st); visit::walk_ty(self, t) } fn visit_generics(&mut self, g: &'tcx Generics) { debug!("visit_generics: st={:?}", self.st); SawGenerics.hash(self.st); visit::walk_generics(self, g) } fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx FnDecl, b: &'tcx Block, s: Span, n: NodeId) { debug!("visit_fn: st={:?}", self.st); SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s, n) } fn visit_trait_item(&mut self, ti: &'tcx TraitItem) { debug!("visit_trait_item: st={:?}", self.st); SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &'tcx ImplItem) { debug!("visit_impl_item: st={:?}", self.st); SawImplItem.hash(self.st); visit::walk_impl_item(self, ii) } fn visit_struct_field(&mut self, s: &'tcx StructField) { debug!("visit_struct_field: st={:?}", self.st); SawStructField.hash(self.st); visit::walk_struct_field(self, s) } fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) { debug!("visit_path: st={:?}", self.st); SawPath.hash(self.st); visit::walk_path(self, path) } fn visit_block(&mut self, b: &'tcx Block) { debug!("visit_block: st={:?}", self.st); SawBlock.hash(self.st); visit::walk_block(self, b) } fn visit_pat(&mut self, p: &'tcx Pat) { debug!("visit_pat: st={:?}", self.st); SawPat.hash(self.st); visit::walk_pat(self, p) } fn visit_local(&mut self, l: &'tcx Local) { debug!("visit_local: st={:?}", self.st); SawLocal.hash(self.st); visit::walk_local(self, l) } fn visit_arm(&mut self, a: &'tcx Arm) { debug!("visit_arm: st={:?}", self.st); SawArm.hash(self.st); visit::walk_arm(self, a) } fn visit_id(&mut self, id: NodeId) { debug!("visit_id: id={} st={:?}", id, self.st); self.hash_resolve(id); } } #[derive(Hash)] pub enum DefHash { SawDefId, SawLabel, SawPrimTy, SawSelfTy, SawErr, } impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { fn hash_resolve(&mut self, id: ast::NodeId) { // Because whether or not a given id has an entry is dependent // solely on expr variant etc, we don't need to hash whether // or not an entry was present (we are already hashing what // variant it is above when we visit the HIR). if let Some(def) = self.tcx.def_map.borrow().get(&id) { debug!("hash_resolve: id={:?} def={:?} st={:?}", id, def, self.st); self.hash_partial_def(def); } if let Some(traits) = self.tcx.trait_map.get(&id) { debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st); traits.len().hash(self.st); // The ordering of the candidates is not fixed. So we hash // the def-ids and then sort them and hash the collection. let mut candidates: Vec<_> = traits.iter() .map(|&TraitCandidate { def_id, import_id: _ }| { self.compute_def_id_hash(def_id) }) .collect(); candidates.sort(); candidates.hash(self.st); } } fn hash_def_id(&mut self, def_id: DefId) { self.compute_def_id_hash(def_id).hash(self.st); } fn hash_partial_def(&mut self, def: &PathResolution) { self.hash_def(def.base_def); def.depth.hash(self.st); } fn hash_def(&mut self, def: Def) { match def { // Crucial point: for all of these variants, the variant + // add'l data that is added is always the same if the // def-id is the same, so it suffices to hash the def-id Def::Fn(..) | Def::Mod(..) | Def::ForeignMod(..) | Def::Static(..) | Def::Variant(..) | Def::Enum(..) | Def::TyAlias(..) 
| Def::AssociatedTy(..) | Def::TyParam(..) | Def::Struct(..) | Def::Trait(..) | Def::Method(..) | Def::Const(..) | Def::AssociatedConst(..) | Def::Local(..) | Def::Upvar(..) => { DefHash::SawDefId.hash(self.st); self.hash_def_id(def.def_id()); } Def::Label(..) => { DefHash::SawLabel.hash(self.st); // we don't encode the `id` because it always refers to something // within this item, so if it changed, there would have to be other // changes too } Def::PrimTy(ref prim_ty) => { DefHash::SawPrimTy.hash(self.st); prim_ty.hash(self.st); } Def::SelfTy(..) => { DefHash::SawSelfTy.hash(self.st); // the meaning of Self is always the same within a // given context, so we don't need to hash the other // fields } Def::Err => { DefHash::SawErr.hash(self.st); } } } }
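The sorting step in hash_resolve above is what makes the hash deterministic: trait candidates come out of the map in no fixed order, so each candidate's def-id is hashed on its own, the per-element hashes are sorted, and only then is the collection fed into the running hasher. A minimal sketch of the same order-independent hashing technique, using std's DefaultHasher rather than rustc's internal hasher state (the function name and element type are illustrative, not from the source):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Hash an unordered collection so that permutations produce the same digest:
/// hash each element independently, sort the per-element digests, then feed
/// the sorted vector into the running state.
fn hash_unordered<T: Hash>(items: &[T], st: &mut DefaultHasher) {
    let mut digests: Vec<u64> = items
        .iter()
        .map(|item| {
            let mut h = DefaultHasher::new();
            item.hash(&mut h);
            h.finish()
        })
        .collect();
    digests.sort();
    // Hash the length first, mirroring `traits.len().hash(self.st)` above.
    digests.len().hash(st);
    digests.hash(st);
}

fn main() {
    let mut a = DefaultHasher::new();
    let mut b = DefaultHasher::new();
    hash_unordered(&["Clone", "Debug", "Hash"], &mut a);
    hash_unordered(&["Hash", "Clone", "Debug"], &mut b);
    assert_eq!(a.finish(), b.finish()); // same digest regardless of order
}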
visit_variant
identifier_name
config.rs
//! Configuration for the iroh CLI. use std::{ collections::HashMap, env, fmt, path::{Path, PathBuf}, str::FromStr, }; use anyhow::{anyhow, bail, Result}; use config::{Environment, File, Value}; use iroh_net::{ defaults::{default_eu_derp_region, default_na_derp_region}, derp::{DerpMap, DerpRegion}, }; use serde::{Deserialize, Serialize}; use tracing::debug; /// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory pub const CONFIG_FILE_NAME: &str = "iroh.config.toml"; /// ENV_PREFIX should be used alongside the config field name to set a config field using /// environment variables. /// For example, `IROH_PATH=/path/to/config` would set the value of the `Config.path` field pub const ENV_PREFIX: &str = "IROH"; /// Paths to files or directories within the [`iroh_data_root`] used by Iroh. #[derive(Debug, Clone, Eq, PartialEq)] pub enum IrohPaths { /// Path to the node's private key for the [`iroh_net::PeerId`]. Keypair, /// Path to the node's [flat-file store](iroh::baomap::flat) for complete blobs. BaoFlatStoreComplete, /// Path to the node's [flat-file store](iroh::baomap::flat) for partial blobs. BaoFlatStorePartial, } impl From<&IrohPaths> for &'static str { fn from(value: &IrohPaths) -> Self { match value { IrohPaths::Keypair => "keypair", IrohPaths::BaoFlatStoreComplete => "blobs.v0", IrohPaths::BaoFlatStorePartial => "blobs-partial.v0", } } } impl FromStr for IrohPaths { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self> { Ok(match s { "keypair" => Self::Keypair, "blobs.v0" => Self::BaoFlatStoreComplete, "blobs-partial.v0" => Self::BaoFlatStorePartial, _ => bail!("unknown file or directory"), }) } } impl fmt::Display for IrohPaths { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s: &str = self.into(); write!(f, "{s}") } } impl AsRef<Path> for IrohPaths { fn as_ref(&self) -> &Path { let s: &str = self.into(); Path::new(s) } } impl IrohPaths { /// Get the path for this [`IrohPaths`] by joining the name to the `IROH_DATA_DIR` environment variable. pub fn with_env(self) -> Result<PathBuf> { let mut root = iroh_data_root()?; if !root.is_absolute() { root = std::env::current_dir()?.join(root); } Ok(self.with_root(root)) } /// Get the path for this [`IrohPaths`] by joining the name to a root directory. pub fn with_root(self, root: impl AsRef<Path>) -> PathBuf { let path = root.as_ref().join(self); path } } /// The configuration for the iroh CLI. #[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)] #[serde(default)] pub struct Config { /// The regions for DERP to use. pub derp_regions: Vec<DerpRegion>, } impl Default for Config { fn
() -> Self { Self { // TODO(ramfox): this should probably just be a derp map derp_regions: [default_na_derp_region(), default_eu_derp_region()].into(), } } } impl Config { /// Make a config using a default, files, environment variables, and commandline flags. /// /// Later items in the *file_paths* slice will have a higher priority than earlier ones. /// /// Environment variables are expected to start with the *env_prefix*. Nested fields can be /// accessed using `.`, if your environment allows env vars with `.` /// /// Note: For the metrics configuration env vars, it is recommended to use the metrics /// specific prefix `IROH_METRICS` to set a field in the metrics config. You can use the /// above dot notation to set a metrics field, eg, `IROH_CONFIG_METRICS.SERVICE_NAME`, but /// only if your environment allows it pub fn load<S, V>( file_paths: &[Option<&Path>], env_prefix: &str, flag_overrides: HashMap<S, V>, ) -> Result<Config> where S: AsRef<str>, V: Into<Value>, { let mut builder = config::Config::builder(); // layer on config options from files for path in file_paths.iter().flatten() { if path.exists() { let p = path.to_str().ok_or_else(|| anyhow::anyhow!("empty path"))?; builder = builder.add_source(File::with_name(p)); } } // next, add any environment variables builder = builder.add_source( Environment::with_prefix(env_prefix) .separator("__") .try_parsing(true), ); // finally, override any values for (flag, val) in flag_overrides.into_iter() { builder = builder.set_override(flag, val)?; } let cfg = builder.build()?; debug!("make_config:\n{:#?}\n", cfg); let cfg = cfg.try_deserialize()?; Ok(cfg) } /// Constructs a `DerpMap` based on the current configuration. pub fn derp_map(&self) -> Option<DerpMap> { if self.derp_regions.is_empty() { return None; } let dm: DerpMap = self.derp_regions.iter().cloned().into(); Some(dm) } } /// Name of directory that wraps all iroh files in a given application directory const IROH_DIR: &str = "iroh"; /// Returns the path to the user's iroh config directory. /// /// If the `IROH_CONFIG_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | ------------------------------------- | -------------------------------- | /// | Linux | `$XDG_CONFIG_HOME` or `$HOME`/.config/iroh | /home/alice/.config/iroh | /// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh | /// | Windows | `{FOLDERID_RoamingAppData}`/iroh | C:\Users\Alice\AppData\Roaming\iroh | pub fn iroh_config_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_CONFIG_DIR") { return Ok(PathBuf::from(val)); } let cfg = dirs_next::config_dir() .ok_or_else(|| anyhow!("operating environment provides no directory for configuration"))?; Ok(cfg.join(IROH_DIR)) } /// Path that leads to a file in the iroh config directory. pub fn iroh_config_path(file_name: impl AsRef<Path>) -> Result<PathBuf> { let path = iroh_config_root()?.join(file_name); Ok(path) } /// Returns the path to the user's iroh data directory. /// /// If the `IROH_DATA_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. 
/// /// | Platform | Value | Example | /// | -------- | --------------------------------------------- | ---------------------------------------- | /// | Linux | `$XDG_DATA_HOME`/iroh or `$HOME`/.local/share/iroh | /home/alice/.local/share/iroh | /// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh | /// | Windows | `{FOLDERID_RoamingAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh | pub fn iroh_data_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_DATA_DIR") { return Ok(PathBuf::from(val)); } let path = dirs_next::data_dir().ok_or_else(|| { anyhow!("operating environment provides no directory for application data") })?; Ok(path.join(IROH_DIR)) } /// Path that leads to a file in the iroh data directory. #[allow(dead_code)] pub fn iroh_data_path(file_name: &Path) -> Result<PathBuf> { let path = iroh_data_root()?.join(file_name); Ok(path) } /// Returns the path to the user's iroh cache directory. /// /// If the `IROH_CACHE_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | --------------------------------------------- | ---------------------------------------- | /// | Linux | `$XDG_CACHE_HOME`/iroh or `$HOME`/.cache/iroh | /home/.cache/iroh | /// | macOS | `$HOME`/Library/Caches/iroh | /Users/Alice/Library/Caches/iroh | /// | Windows | `{FOLDERID_LocalAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh | #[allow(dead_code)] pub fn iroh_cache_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_CACHE_DIR") { return Ok(PathBuf::from(val)); } let path = dirs_next::cache_dir().ok_or_else(|| { anyhow!("operating environment provides no directory for application data") })?; Ok(path.join(IROH_DIR)) } /// Path that leads to a file in the iroh cache directory. #[allow(dead_code)] pub fn iroh_cache_path(file_name: &Path) -> Result<PathBuf> { let path = iroh_cache_root()?.join(file_name); Ok(path) } #[cfg(test)] mod tests { use super::*; #[test] fn test_default_settings() { let config = Config::load::<String, String>(&[][..], "__FOO", Default::default()).unwrap(); assert_eq!(config.derp_regions.len(), 2); } #[test] fn test_iroh_paths_parse_roundtrip() { let kinds = [ IrohPaths::BaoFlatStoreComplete, IrohPaths::BaoFlatStorePartial, IrohPaths::Keypair, ]; for iroh_path in &kinds { let root = PathBuf::from("/tmp"); let path = root.join(iroh_path); let fname = path.file_name().unwrap().to_str().unwrap(); let parsed = IrohPaths::from_str(fname).unwrap(); assert_eq!(*iroh_path, parsed); } } }
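Since flag overrides are applied after environment variables, which are applied after files, and later files beat earlier ones, the effective precedence of Config::load above is: flags > environment > later files > earlier files > Default. A hedged sketch of calling it from a CLI; the /etc path is hypothetical, while the signature and constants come from the code above:

use std::collections::HashMap;
use std::path::Path;

/// Sketch: resolve a system-wide file plus the per-user config file, then load.
fn load_cli_config() -> anyhow::Result<Config> {
    let system = Path::new("/etc/iroh/iroh.config.toml"); // hypothetical path
    let user = iroh_config_path(CONFIG_FILE_NAME)?;

    // No command-line overrides in this sketch; a real CLI would insert its
    // parsed flags here, and they would win over files and env vars.
    let flags: HashMap<String, String> = HashMap::new();

    // The user file is listed last, so its values beat the system file;
    // `IROH_*` environment variables beat both.
    Config::load(&[Some(system), Some(user.as_path())], ENV_PREFIX, flags)
}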
default
identifier_name
config.rs
//! Configuration for the iroh CLI. use std::{ collections::HashMap, env, fmt, path::{Path, PathBuf}, str::FromStr, }; use anyhow::{anyhow, bail, Result}; use config::{Environment, File, Value}; use iroh_net::{ defaults::{default_eu_derp_region, default_na_derp_region}, derp::{DerpMap, DerpRegion}, }; use serde::{Deserialize, Serialize}; use tracing::debug; /// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory pub const CONFIG_FILE_NAME: &str = "iroh.config.toml"; /// ENV_PREFIX should be used along side the config field name to set a config field using /// environment variables /// For example, `IROH_PATH=/path/to/config` would set the value of the `Config.path` field pub const ENV_PREFIX: &str = "IROH"; /// Paths to files or directory within the [`iroh_data_root`] used by Iroh. #[derive(Debug, Clone, Eq, PartialEq)] pub enum IrohPaths { /// Path to the node's private key for the [`iroh_net::PeerId`]. Keypair, /// Path to the node's [flat-file store](iroh::baomap::flat) for complete blobs. BaoFlatStoreComplete, /// Path to the node's [flat-file store](iroh::baomap::flat) for partial blobs. BaoFlatStorePartial, } impl From<&IrohPaths> for &'static str { fn from(value: &IrohPaths) -> Self { match value { IrohPaths::Keypair => "keypair", IrohPaths::BaoFlatStoreComplete => "blobs.v0", IrohPaths::BaoFlatStorePartial => "blobs-partial.v0", } } } impl FromStr for IrohPaths { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self> { Ok(match s { "keypair" => Self::Keypair, "blobs.v0" => Self::BaoFlatStoreComplete, "blobs-partial.v0" => Self::BaoFlatStorePartial, _ => bail!("unknown file or directory"), }) } } impl fmt::Display for IrohPaths { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s: &str = self.into(); write!(f, "{s}") } } impl AsRef<Path> for IrohPaths { fn as_ref(&self) -> &Path { let s: &str = self.into(); Path::new(s) } } impl IrohPaths { /// Get the path for this [`IrohPath`] by joining the name to `IROH_DATA_DIR` environment variable. pub fn with_env(self) -> Result<PathBuf>
/// Get the path for this [`IrohPath`] by joining the name to a root directory. pub fn with_root(self, root: impl AsRef<Path>) -> PathBuf { let path = root.as_ref().join(self); path } } /// The configuration for the iroh cli. #[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)] #[serde(default)] pub struct Config { /// The regions for DERP to use. pub derp_regions: Vec<DerpRegion>, } impl Default for Config { fn default() -> Self { Self { // TODO(ramfox): this should probably just be a derp map derp_regions: [default_na_derp_region(), default_eu_derp_region()].into(), } } } impl Config { /// Make a config using a default, files, environment variables, and commandline flags. /// /// Later items in the *file_paths* slice will have a higher priority than earlier ones. /// /// Environment variables are expected to start with the *env_prefix*. Nested fields can be /// accessed using `.`, if your environment allows env vars with `.` /// /// Note: For the metrics configuration env vars, it is recommended to use the metrics /// specific prefix `IROH_METRICS` to set a field in the metrics config. You can use the /// above dot notation to set a metrics field, eg, `IROH_CONFIG_METRICS.SERVICE_NAME`, but /// only if your environment allows it pub fn load<S, V>( file_paths: &[Option<&Path>], env_prefix: &str, flag_overrides: HashMap<S, V>, ) -> Result<Config> where S: AsRef<str>, V: Into<Value>, { let mut builder = config::Config::builder(); // layer on config options from files for path in file_paths.iter().flatten() { if path.exists() { let p = path.to_str().ok_or_else(|| anyhow::anyhow!("empty path"))?; builder = builder.add_source(File::with_name(p)); } } // next, add any environment variables builder = builder.add_source( Environment::with_prefix(env_prefix) .separator("__") .try_parsing(true), ); // finally, override any values for (flag, val) in flag_overrides.into_iter() { builder = builder.set_override(flag, val)?; } let cfg = builder.build()?; debug!("make_config:\n{:#?}\n", cfg); let cfg = cfg.try_deserialize()?; Ok(cfg) } /// Constructs a `DerpMap` based on the current configuration. pub fn derp_map(&self) -> Option<DerpMap> { if self.derp_regions.is_empty() { return None; } let dm: DerpMap = self.derp_regions.iter().cloned().into(); Some(dm) } } /// Name of directory that wraps all iroh files in a given application directory const IROH_DIR: &str = "iroh"; /// Returns the path to the user's iroh config directory. /// /// If the `IROH_CONFIG_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | ------------------------------------- | -------------------------------- | /// | Linux | `$XDG_CONFIG_HOME` or `$HOME`/.config/iroh | /home/alice/.config/iroh | /// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh | /// | Windows | `{FOLDERID_RoamingAppData}`/iroh | C:\Users\Alice\AppData\Roaming\iroh | pub fn iroh_config_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_CONFIG_DIR") { return Ok(PathBuf::from(val)); } let cfg = dirs_next::config_dir() .ok_or_else(|| anyhow!("operating environment provides no directory for configuration"))?; Ok(cfg.join(IROH_DIR)) } /// Path that leads to a file in the iroh config directory. 
pub fn iroh_config_path(file_name: impl AsRef<Path>) -> Result<PathBuf> { let path = iroh_config_root()?.join(file_name); Ok(path) } /// Returns the path to the user's iroh data directory. /// /// If the `IROH_DATA_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | --------------------------------------------- | ---------------------------------------- | /// | Linux | `$XDG_DATA_HOME`/iroh or `$HOME`/.local/share/iroh | /home/alice/.local/share/iroh | /// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh | /// | Windows | `{FOLDERID_RoamingAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh | pub fn iroh_data_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_DATA_DIR") { return Ok(PathBuf::from(val)); } let path = dirs_next::data_dir().ok_or_else(|| { anyhow!("operating environment provides no directory for application data") })?; Ok(path.join(IROH_DIR)) } /// Path that leads to a file in the iroh data directory. #[allow(dead_code)] pub fn iroh_data_path(file_name: &Path) -> Result<PathBuf> { let path = iroh_data_root()?.join(file_name); Ok(path) } /// Returns the path to the user's iroh cache directory. /// /// If the `IROH_CACHE_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | --------------------------------------------- | ---------------------------------------- | /// | Linux | `$XDG_CACHE_HOME`/iroh or `$HOME`/.cache/iroh | /home/.cache/iroh | /// | macOS | `$HOME`/Library/Caches/iroh | /Users/Alice/Library/Caches/iroh | /// | Windows | `{FOLDERID_LocalAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh | #[allow(dead_code)] pub fn iroh_cache_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_CACHE_DIR") { return Ok(PathBuf::from(val)); } let path = dirs_next::cache_dir().ok_or_else(|| { anyhow!("operating environment provides no directory for application data") })?; Ok(path.join(IROH_DIR)) } /// Path that leads to a file in the iroh cache directory. #[allow(dead_code)] pub fn iroh_cache_path(file_name: &Path) -> Result<PathBuf> { let path = iroh_cache_root()?.join(file_name); Ok(path) } #[cfg(test)] mod tests { use super::*; #[test] fn test_default_settings() { let config = Config::load::<String, String>(&[][..], "__FOO", Default::default()).unwrap(); assert_eq!(config.derp_regions.len(), 2); } #[test] fn test_iroh_paths_parse_roundtrip() { let kinds = [ IrohPaths::BaoFlatStoreComplete, IrohPaths::BaoFlatStorePartial, IrohPaths::Keypair, ]; for iroh_path in &kinds { let root = PathBuf::from("/tmp"); let path = root.join(iroh_path); let fname = path.file_name().unwrap().to_str().unwrap(); let parsed = IrohPaths::from_str(fname).unwrap(); assert_eq!(*iroh_path, parsed); } } }
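with_root and with_env above are thin wrappers over Path::join via the AsRef<Path> impl; a short sketch of what they produce (the /var/lib root is made up for illustration):

use std::path::PathBuf;

fn keypair_locations() -> anyhow::Result<()> {
    // Joining against an explicit root yields "/var/lib/iroh/keypair".
    let explicit: PathBuf = IrohPaths::Keypair.with_root("/var/lib/iroh");
    assert_eq!(explicit, PathBuf::from("/var/lib/iroh/keypair"));

    // with_env consults IROH_DATA_DIR (or the per-OS default) and makes
    // relative roots absolute against the current directory first.
    let from_env: PathBuf = IrohPaths::Keypair.with_env()?;
    assert!(from_env.is_absolute());
    Ok(())
}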
{ let mut root = iroh_data_root()?; if !root.is_absolute() { root = std::env::current_dir()?.join(root); } Ok(self.with_root(root)) }
identifier_body
config.rs
//! Configuration for the iroh CLI. use std::{ collections::HashMap, env, fmt, path::{Path, PathBuf}, str::FromStr, }; use anyhow::{anyhow, bail, Result}; use config::{Environment, File, Value}; use iroh_net::{ defaults::{default_eu_derp_region, default_na_derp_region}, derp::{DerpMap, DerpRegion}, }; use serde::{Deserialize, Serialize}; use tracing::debug; /// CONFIG_FILE_NAME is the name of the optional config file located in the iroh home directory pub const CONFIG_FILE_NAME: &str = "iroh.config.toml"; /// ENV_PREFIX should be used along side the config field name to set a config field using /// environment variables /// For example, `IROH_PATH=/path/to/config` would set the value of the `Config.path` field pub const ENV_PREFIX: &str = "IROH"; /// Paths to files or directory within the [`iroh_data_root`] used by Iroh. #[derive(Debug, Clone, Eq, PartialEq)] pub enum IrohPaths { /// Path to the node's private key for the [`iroh_net::PeerId`]. Keypair, /// Path to the node's [flat-file store](iroh::baomap::flat) for complete blobs. BaoFlatStoreComplete, /// Path to the node's [flat-file store](iroh::baomap::flat) for partial blobs. BaoFlatStorePartial, } impl From<&IrohPaths> for &'static str { fn from(value: &IrohPaths) -> Self { match value {
} } impl FromStr for IrohPaths { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self> { Ok(match s { "keypair" => Self::Keypair, "blobs.v0" => Self::BaoFlatStoreComplete, "blobs-partial.v0" => Self::BaoFlatStorePartial, _ => bail!("unknown file or directory"), }) } } impl fmt::Display for IrohPaths { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s: &str = self.into(); write!(f, "{s}") } } impl AsRef<Path> for IrohPaths { fn as_ref(&self) -> &Path { let s: &str = self.into(); Path::new(s) } } impl IrohPaths { /// Get the path for this [`IrohPath`] by joining the name to `IROH_DATA_DIR` environment variable. pub fn with_env(self) -> Result<PathBuf> { let mut root = iroh_data_root()?; if!root.is_absolute() { root = std::env::current_dir()?.join(root); } Ok(self.with_root(root)) } /// Get the path for this [`IrohPath`] by joining the name to a root directory. pub fn with_root(self, root: impl AsRef<Path>) -> PathBuf { let path = root.as_ref().join(self); path } } /// The configuration for the iroh cli. #[derive(PartialEq, Eq, Debug, Deserialize, Serialize, Clone)] #[serde(default)] pub struct Config { /// The regions for DERP to use. pub derp_regions: Vec<DerpRegion>, } impl Default for Config { fn default() -> Self { Self { // TODO(ramfox): this should probably just be a derp map derp_regions: [default_na_derp_region(), default_eu_derp_region()].into(), } } } impl Config { /// Make a config using a default, files, environment variables, and commandline flags. /// /// Later items in the *file_paths* slice will have a higher priority than earlier ones. /// /// Environment variables are expected to start with the *env_prefix*. Nested fields can be /// accessed using `.`, if your environment allows env vars with `.` /// /// Note: For the metrics configuration env vars, it is recommended to use the metrics /// specific prefix `IROH_METRICS` to set a field in the metrics config. You can use the /// above dot notation to set a metrics field, eg, `IROH_CONFIG_METRICS.SERVICE_NAME`, but /// only if your environment allows it pub fn load<S, V>( file_paths: &[Option<&Path>], env_prefix: &str, flag_overrides: HashMap<S, V>, ) -> Result<Config> where S: AsRef<str>, V: Into<Value>, { let mut builder = config::Config::builder(); // layer on config options from files for path in file_paths.iter().flatten() { if path.exists() { let p = path.to_str().ok_or_else(|| anyhow::anyhow!("empty path"))?; builder = builder.add_source(File::with_name(p)); } } // next, add any environment variables builder = builder.add_source( Environment::with_prefix(env_prefix) .separator("__") .try_parsing(true), ); // finally, override any values for (flag, val) in flag_overrides.into_iter() { builder = builder.set_override(flag, val)?; } let cfg = builder.build()?; debug!("make_config:\n{:#?}\n", cfg); let cfg = cfg.try_deserialize()?; Ok(cfg) } /// Constructs a `DerpMap` based on the current configuration. pub fn derp_map(&self) -> Option<DerpMap> { if self.derp_regions.is_empty() { return None; } let dm: DerpMap = self.derp_regions.iter().cloned().into(); Some(dm) } } /// Name of directory that wraps all iroh files in a given application directory const IROH_DIR: &str = "iroh"; /// Returns the path to the user's iroh config directory. /// /// If the `IROH_CONFIG_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. 
/// /// | Platform | Value | Example | /// | -------- | ------------------------------------- | -------------------------------- | /// | Linux | `$XDG_CONFIG_HOME` or `$HOME`/.config/iroh | /home/alice/.config/iroh | /// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh | /// | Windows | `{FOLDERID_RoamingAppData}`/iroh | C:\Users\Alice\AppData\Roaming\iroh | pub fn iroh_config_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_CONFIG_DIR") { return Ok(PathBuf::from(val)); } let cfg = dirs_next::config_dir() .ok_or_else(|| anyhow!("operating environment provides no directory for configuration"))?; Ok(cfg.join(IROH_DIR)) } /// Path that leads to a file in the iroh config directory. pub fn iroh_config_path(file_name: impl AsRef<Path>) -> Result<PathBuf> { let path = iroh_config_root()?.join(file_name); Ok(path) } /// Returns the path to the user's iroh data directory. /// /// If the `IROH_DATA_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | --------------------------------------------- | ---------------------------------------- | /// | Linux | `$XDG_DATA_HOME`/iroh or `$HOME`/.local/share/iroh | /home/alice/.local/share/iroh | /// | macOS | `$HOME`/Library/Application Support/iroh | /Users/Alice/Library/Application Support/iroh | /// | Windows | `{FOLDERID_RoamingAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh | pub fn iroh_data_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_DATA_DIR") { return Ok(PathBuf::from(val)); } let path = dirs_next::data_dir().ok_or_else(|| { anyhow!("operating environment provides no directory for application data") })?; Ok(path.join(IROH_DIR)) } /// Path that leads to a file in the iroh data directory. #[allow(dead_code)] pub fn iroh_data_path(file_name: &Path) -> Result<PathBuf> { let path = iroh_data_root()?.join(file_name); Ok(path) } /// Returns the path to the user's iroh cache directory. /// /// If the `IROH_CACHE_DIR` environment variable is set it will be used unconditionally. /// Otherwise the returned value depends on the operating system according to the following /// table. /// /// | Platform | Value | Example | /// | -------- | --------------------------------------------- | ---------------------------------------- | /// | Linux | `$XDG_CACHE_HOME`/iroh or `$HOME`/.cache/iroh | /home/.cache/iroh | /// | macOS | `$HOME`/Library/Caches/iroh | /Users/Alice/Library/Caches/iroh | /// | Windows | `{FOLDERID_LocalAppData}/iroh` | C:\Users\Alice\AppData\Roaming\iroh | #[allow(dead_code)] pub fn iroh_cache_root() -> Result<PathBuf> { if let Some(val) = env::var_os("IROH_CACHE_DIR") { return Ok(PathBuf::from(val)); } let path = dirs_next::cache_dir().ok_or_else(|| { anyhow!("operating environment provides no directory for application data") })?; Ok(path.join(IROH_DIR)) } /// Path that leads to a file in the iroh cache directory. 
#[allow(dead_code)] pub fn iroh_cache_path(file_name: &Path) -> Result<PathBuf> { let path = iroh_cache_root()?.join(file_name); Ok(path) } #[cfg(test)] mod tests { use super::*; #[test] fn test_default_settings() { let config = Config::load::<String, String>(&[][..], "__FOO", Default::default()).unwrap(); assert_eq!(config.derp_regions.len(), 2); } #[test] fn test_iroh_paths_parse_roundtrip() { let kinds = [ IrohPaths::BaoFlatStoreComplete, IrohPaths::BaoFlatStorePartial, IrohPaths::Keypair, ]; for iroh_path in &kinds { let root = PathBuf::from("/tmp"); let path = root.join(iroh_path); let fname = path.file_name().unwrap().to_str().unwrap(); let parsed = IrohPaths::from_str(fname).unwrap(); assert_eq!(*iroh_path, parsed); } } }
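All three *_root helpers follow the same pattern: the environment variable wins unconditionally, otherwise the dirs_next default is joined with IROH_DIR. A sketch of exercising the data-dir override in a test; note that mutating the environment is process-global, so this should not run in parallel with other env-sensitive tests:

use std::env;
use std::path::PathBuf;

#[test]
fn data_root_honours_env_override() {
    // With the variable set, the platform default is ignored entirely.
    env::set_var("IROH_DATA_DIR", "/tmp/iroh-test");
    assert_eq!(iroh_data_root().unwrap(), PathBuf::from("/tmp/iroh-test"));

    // Without it, the fallback is dirs_next::data_dir() joined with "iroh".
    env::remove_var("IROH_DATA_DIR");
    assert!(iroh_data_root().unwrap().ends_with("iroh"));
}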
IrohPaths::Keypair => "keypair", IrohPaths::BaoFlatStoreComplete => "blobs.v0", IrohPaths::BaoFlatStorePartial => "blobs-partial.v0", }
random_line_split
utils.rs
// Copyright (c) 2011 Jan Kokemüller // Copyright (c) 2020 Sebastian Dröge <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use dasp_frame::Frame; /// Convert linear energy to logarithmic loudness. pub fn energy_to_loudness(energy: f64) -> f64 { // The non-test version is faster and more accurate but gives // slightly different results than the C version and fails the // tests because of that. #[cfg(test)] { 10.0 * (f64::ln(energy) / std::f64::consts::LN_10) - 0.691 } #[cfg(not(test))] { 10.0 * f64::log10(energy) - 0.691 } } /// Trait for abstracting over interleaved and planar samples. pub trait Samples<'a, S: Sample + 'a>: Sized { /// Call the given closure for each sample of the given channel. // FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable // and because of that we wouldn't get nice optimizations fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a S)); /// Call the given closure for each sample of the given channel. // FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable // and because of that we wouldn't get nice optimizations fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, func: impl FnMut(&'a S, U), ); fn foreach_frame<F: Frame<Sample = S>>(&self, func: impl FnMut(F)); /// Number of frames. fn frames(&self) -> usize; /// Number of channels. fn channels(&self) -> usize; /// Split into two at the given sample. fn split_at(self, sample: usize) -> (Self, Self); } /// Struct representing interleaved samples. pub struct Interleaved<'a, S> { /// Interleaved sample data. data: &'a [S], /// Number of channels. channels: usize, } impl<'a, S> Interleaved<'a, S> { /// Create a new wrapper around the interleaved channels and do a sanity check. pub fn new(data: &'a [S], channels: usize) -> Result<Self, crate::Error> { if channels == 0 { return Err(crate::Error::NoMem); } if data.len() % channels!= 0 { return Err(crate::Error::NoMem); } Ok(Interleaved { data, channels }) } } impl<'a, S: Sample> Samples<'a, S> for Interleaved<'a, S> { #[inline] fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) { assert!(channel < self.channels); for v in self.data.chunks_exact(self.channels) { func(&v[channel]) } } #[inline] fn fo
>( &self, channel: usize, iter: impl Iterator<Item = U>, mut func: impl FnMut(&'a S, U), ) { assert!(channel < self.channels); for (v, u) in Iterator::zip(self.data.chunks_exact(self.channels), iter) { func(&v[channel], u) } } #[inline] fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) { assert_eq!(F::CHANNELS, self.channels); for f in self.data.chunks_exact(self.channels) { func(F::from_samples(&mut f.iter().copied()).unwrap()); } } #[inline] fn frames(&self) -> usize { self.data.len() / self.channels } #[inline] fn channels(&self) -> usize { self.channels } #[inline] fn split_at(self, sample: usize) -> (Self, Self) { assert!(sample * self.channels <= self.data.len()); let (fst, snd) = self.data.split_at(sample * self.channels); ( Interleaved { data: fst, channels: self.channels, }, Interleaved { data: snd, channels: self.channels, }, ) } } /// Struct representing interleaved samples. pub struct Planar<'a, S> { data: &'a [&'a [S]], start: usize, end: usize, } impl<'a, S> Planar<'a, S> { /// Create a new wrapper around the planar channels and do a sanity check. pub fn new(data: &'a [&'a [S]]) -> Result<Self, crate::Error> { if data.is_empty() { return Err(crate::Error::NoMem); } if data.iter().any(|d| data[0].len()!= d.len()) { return Err(crate::Error::NoMem); } Ok(Planar { data, start: 0, end: data[0].len(), }) } } impl<'a, S: Sample> Samples<'a, S> for Planar<'a, S> { #[inline] fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) { assert!(channel < self.data.len()); for v in &self.data[channel][self.start..self.end] { func(v) } } #[inline] fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, mut func: impl FnMut(&'a S, U), ) { assert!(channel < self.data.len()); for (v, u) in Iterator::zip(self.data[channel][self.start..self.end].iter(), iter) { func(v, u) } } #[inline] fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) { let channels = self.data.len(); assert_eq!(F::CHANNELS, channels); for f in self.start..self.end { func(F::from_fn(|c| self.data[c][f])); } } #[inline] fn frames(&self) -> usize { self.end - self.start } #[inline] fn channels(&self) -> usize { self.data.len() } #[inline] fn split_at(self, sample: usize) -> (Self, Self) { assert!(self.start + sample <= self.end); ( Planar { data: self.data, start: self.start, end: self.start + sample, }, Planar { data: self.data, start: self.start + sample, end: self.end, }, ) } } pub trait Sample: dasp_sample::Sample + dasp_sample::Duplex<f32> + dasp_sample::Duplex<f64> { const MAX_AMPLITUDE: f64; fn as_f64_raw(self) -> f64; } impl Sample for f32 { const MAX_AMPLITUDE: f64 = 1.0; #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } impl Sample for f64 { const MAX_AMPLITUDE: f64 = 1.0; #[inline(always)] fn as_f64_raw(self) -> f64 { self } } impl Sample for i16 { const MAX_AMPLITUDE: f64 = -(Self::MIN as f64); #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } impl Sample for i32 { const MAX_AMPLITUDE: f64 = -(Self::MIN as f64); #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } /// An extension-trait to accumulate samples into a frame pub trait FrameAccumulator: Frame { fn scale_add(&mut self, other: &Self, coeff: f32); fn retain_max_samples(&mut self, other: &Self); } impl<F: Frame, S> FrameAccumulator for F where S: SampleAccumulator + std::fmt::Debug, F: IndexMut<Target = S>, { #[inline(always)] fn scale_add(&mut self, other: &Self, coeff: f32) { for i in 0..Self::CHANNELS { 
self.index_mut(i).scale_add(*other.index(i), coeff); } } fn retain_max_samples(&mut self, other: &Self) { for i in 0..Self::CHANNELS { let this = self.index_mut(i); let other = other.index(i); if *other > *this { *this = *other; } } } } // Required since std::ops::IndexMut seem to not be implemented for arrays // making FrameAcc hard to implement for auto-vectorization // IndexMut seems to be coming to stdlib, when https://github.com/rust-lang/rust/pull/74989 // implemented, this trait can be removed pub trait IndexMut { type Target; fn index_mut(&mut self, i: usize) -> &mut Self::Target; fn index(&self, i: usize) -> &Self::Target; } macro_rules! index_mut_impl { ( $channels:expr ) => { impl<T: SampleAccumulator> IndexMut for [T; $channels] { type Target = T; #[inline(always)] fn index_mut(&mut self, i: usize) -> &mut Self::Target { &mut self[i] } #[inline(always)] fn index(&self, i: usize) -> &Self::Target { &self[i] } } }; } index_mut_impl!(1); index_mut_impl!(2); index_mut_impl!(4); index_mut_impl!(6); index_mut_impl!(8); pub trait SampleAccumulator: Sample { fn scale_add(&mut self, other: Self, coeff: f32); } impl SampleAccumulator for f32 { #[inline(always)] fn scale_add(&mut self, other: Self, coeff: f32) { #[cfg(feature = "precision-true-peak")] { *self = other.mul_add(coeff, *self); } #[cfg(not(feature = "precision-true-peak"))] { *self += other * coeff } } } #[cfg(test)] pub mod tests { use dasp_sample::{FromSample, Sample}; #[derive(Clone, Debug)] pub struct Signal<S: FromSample<f32>> { pub data: Vec<S>, pub channels: u32, pub rate: u32, } impl<S: Sample + FromSample<f32> + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<S> { fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self { use rand::Rng; let channels = g.gen_range(1, 16); let rate = g.gen_range(16_000, 224_000); let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize; let max = g.gen_range(0.0, 1.0); let freqs = [ g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), ]; let volumes = [ g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), ]; let volume_scale = 1.0 / volumes.iter().sum::<f32>(); let mut accumulators = [0.0; 4]; let steps = [ 2.0 * std::f32::consts::PI * freqs[0] / rate as f32, 2.0 * std::f32::consts::PI * freqs[1] / rate as f32, 2.0 * std::f32::consts::PI * freqs[2] / rate as f32, 2.0 * std::f32::consts::PI * freqs[3] / rate as f32, ]; let mut data = vec![S::from_sample(0.0f32); num_frames * channels as usize]; for frame in data.chunks_exact_mut(channels as usize) { let val = max * (f32::sin(accumulators[0]) * volumes[0] + f32::sin(accumulators[1]) * volumes[1] + f32::sin(accumulators[2]) * volumes[2] + f32::sin(accumulators[3]) * volumes[3]) / volume_scale; for sample in frame.iter_mut() { *sample = S::from_sample(val); } for (acc, step) in accumulators.iter_mut().zip(steps.iter()) { *acc += step; } } Signal { data, channels, rate, } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { SignalShrinker::boxed(self.clone()) } } struct SignalShrinker<A: FromSample<f32>> { seed: Signal<A>, /// How many elements to take size: usize, /// Whether we tried with one channel already tried_one_channel: bool, } impl<A: FromSample<f32> + quickcheck::Arbitrary> SignalShrinker<A> { fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> { let channels = seed.channels; Box::new(SignalShrinker { seed, size: 0, tried_one_channel: channels == 1, }) } } impl<A> Iterator for SignalShrinker<A> where A: 
FromSample<f32> + quickcheck::Arbitrary, { type Item = Signal<A>; fn next(&mut self) -> Option<Signal<A>> { if self.size < self.seed.data.len() { // Generate a smaller vector by removing size elements let xs1 = if self.tried_one_channel { Vec::from(&self.seed.data[..self.size]) } else { self.seed .data .iter() .cloned() .step_by(self.seed.channels as usize) .take(self.size) .collect() }; if self.size == 0 { self.size = if self.tried_one_channel { self.seed.channels as usize } else { 1 }; } else { self.size *= 2; } Some(Signal { data: xs1, channels: if self.tried_one_channel { self.seed.channels } else { 1 }, rate: self.seed.rate, }) } else if !self.tried_one_channel { self.tried_one_channel = true; self.size = 0; self.next() } else { None } } } }
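energy_to_loudness above is the BS.1770 mapping L = 10 · log10(E) − 0.691, so unit energy lands at −0.691 and each factor of 10 in energy moves the result by exactly 10 LU. A spot-check sketch:

#[test]
fn loudness_spot_checks() {
    // 10 * log10(1.0) - 0.691 = -0.691
    assert!((energy_to_loudness(1.0) + 0.691).abs() < 1e-9);
    // A tenth of the energy is exactly 10 LU quieter: -10.691
    assert!((energy_to_loudness(0.1) + 10.691).abs() < 1e-9);
}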
reach_sample_zipped<U
identifier_name
utils.rs
// Copyright (c) 2011 Jan Kokemüller // Copyright (c) 2020 Sebastian Dröge <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use dasp_frame::Frame; /// Convert linear energy to logarithmic loudness. pub fn energy_to_loudness(energy: f64) -> f64 { // The non-test version is faster and more accurate but gives // slightly different results than the C version and fails the // tests because of that. #[cfg(test)] { 10.0 * (f64::ln(energy) / std::f64::consts::LN_10) - 0.691 } #[cfg(not(test))] { 10.0 * f64::log10(energy) - 0.691 } } /// Trait for abstracting over interleaved and planar samples. pub trait Samples<'a, S: Sample + 'a>: Sized { /// Call the given closure for each sample of the given channel. // FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable // and because of that we wouldn't get nice optimizations fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a S)); /// Call the given closure for each sample of the given channel. // FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable // and because of that we wouldn't get nice optimizations fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, func: impl FnMut(&'a S, U), ); fn foreach_frame<F: Frame<Sample = S>>(&self, func: impl FnMut(F)); /// Number of frames. fn frames(&self) -> usize; /// Number of channels. fn channels(&self) -> usize; /// Split into two at the given sample. fn split_at(self, sample: usize) -> (Self, Self); } /// Struct representing interleaved samples. pub struct Interleaved<'a, S> { /// Interleaved sample data. data: &'a [S], /// Number of channels. channels: usize, } impl<'a, S> Interleaved<'a, S> { /// Create a new wrapper around the interleaved channels and do a sanity check. 
pub fn new(data: &'a [S], channels: usize) -> Result<Self, crate::Error> { if channels == 0 { return Err(crate::Error::NoMem); } if data.len() % channels != 0 { return Err(crate::Error::NoMem); } Ok(Interleaved { data, channels }) } } impl<'a, S: Sample> Samples<'a, S> for Interleaved<'a, S> { #[inline] fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) { assert!(channel < self.channels); for v in self.data.chunks_exact(self.channels) { func(&v[channel]) } } #[inline] fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, mut func: impl FnMut(&'a S, U), ) { assert!(channel < self.channels); for (v, u) in Iterator::zip(self.data.chunks_exact(self.channels), iter) { func(&v[channel], u) } } #[inline] fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) { assert_eq!(F::CHANNELS, self.channels); for f in self.data.chunks_exact(self.channels) { func(F::from_samples(&mut f.iter().copied()).unwrap()); } } #[inline] fn frames(&self) -> usize { self.data.len() / self.channels } #[inline] fn channels(&self) -> usize { self.channels } #[inline] fn split_at(self, sample: usize) -> (Self, Self) { assert!(sample * self.channels <= self.data.len()); let (fst, snd) = self.data.split_at(sample * self.channels); ( Interleaved { data: fst, channels: self.channels, }, Interleaved { data: snd, channels: self.channels, }, ) } } /// Struct representing planar samples. pub struct Planar<'a, S> { data: &'a [&'a [S]], start: usize, end: usize, } impl<'a, S> Planar<'a, S> { /// Create a new wrapper around the planar channels and do a sanity check. pub fn new(data: &'a [&'a [S]]) -> Result<Self, crate::Error> { if data.is_empty() { return Err(crate::Error::NoMem); } if data.iter().any(|d| data[0].len() != d.len()) { return Err(crate::Error::NoMem); } Ok(Planar { data, start: 0, end: data[0].len(), }) } } impl<'a, S: Sample> Samples<'a, S> for Planar<'a, S> { #[inline] fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) { assert!(channel < self.data.len()); for v in &self.data[channel][self.start..self.end] { func(v) } } #[inline] fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, mut func: impl FnMut(&'a S, U), ) { assert!(channel < self.data.len()); for (v, u) in Iterator::zip(self.data[channel][self.start..self.end].iter(), iter) { func(v, u) } } #[inline] fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) { let channels = self.data.len(); assert_eq!(F::CHANNELS, channels); for f in self.start..self.end { func(F::from_fn(|c| self.data[c][f])); } } #[inline] fn frames(&self) -> usize { self.end - self.start } #[inline] fn channels(&self) -> usize { self.data.len() } #[inline] fn split_at(self, sample: usize) -> (Self, Self) { assert!(self.start + sample <= self.end); ( Planar { data: self.data, start: self.start, end: self.start + sample, }, Planar { data: self.data, start: self.start + sample, end: self.end, }, ) } } pub trait Sample: dasp_sample::Sample + dasp_sample::Duplex<f32> + dasp_sample::Duplex<f64> { const MAX_AMPLITUDE: f64; fn as_f64_raw(self) -> f64; } impl Sample for f32 { const MAX_AMPLITUDE: f64 = 1.0; #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } impl Sample for f64 { const MAX_AMPLITUDE: f64 = 1.0; #[inline(always)] fn as_f64_raw(self) -> f64 { self
const MAX_AMPLITUDE: f64 = -(Self::MIN as f64); #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } impl Sample for i32 { const MAX_AMPLITUDE: f64 = -(Self::MIN as f64); #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } /// An extension-trait to accumulate samples into a frame pub trait FrameAccumulator: Frame { fn scale_add(&mut self, other: &Self, coeff: f32); fn retain_max_samples(&mut self, other: &Self); } impl<F: Frame, S> FrameAccumulator for F where S: SampleAccumulator + std::fmt::Debug, F: IndexMut<Target = S>, { #[inline(always)] fn scale_add(&mut self, other: &Self, coeff: f32) { for i in 0..Self::CHANNELS { self.index_mut(i).scale_add(*other.index(i), coeff); } } fn retain_max_samples(&mut self, other: &Self) { for i in 0..Self::CHANNELS { let this = self.index_mut(i); let other = other.index(i); if *other > *this { *this = *other; } } } } // Required since std::ops::IndexMut seem to not be implemented for arrays // making FrameAcc hard to implement for auto-vectorization // IndexMut seems to be coming to stdlib, when https://github.com/rust-lang/rust/pull/74989 // implemented, this trait can be removed pub trait IndexMut { type Target; fn index_mut(&mut self, i: usize) -> &mut Self::Target; fn index(&self, i: usize) -> &Self::Target; } macro_rules! index_mut_impl { ( $channels:expr ) => { impl<T: SampleAccumulator> IndexMut for [T; $channels] { type Target = T; #[inline(always)] fn index_mut(&mut self, i: usize) -> &mut Self::Target { &mut self[i] } #[inline(always)] fn index(&self, i: usize) -> &Self::Target { &self[i] } } }; } index_mut_impl!(1); index_mut_impl!(2); index_mut_impl!(4); index_mut_impl!(6); index_mut_impl!(8); pub trait SampleAccumulator: Sample { fn scale_add(&mut self, other: Self, coeff: f32); } impl SampleAccumulator for f32 { #[inline(always)] fn scale_add(&mut self, other: Self, coeff: f32) { #[cfg(feature = "precision-true-peak")] { *self = other.mul_add(coeff, *self); } #[cfg(not(feature = "precision-true-peak"))] { *self += other * coeff } } } #[cfg(test)] pub mod tests { use dasp_sample::{FromSample, Sample}; #[derive(Clone, Debug)] pub struct Signal<S: FromSample<f32>> { pub data: Vec<S>, pub channels: u32, pub rate: u32, } impl<S: Sample + FromSample<f32> + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<S> { fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self { use rand::Rng; let channels = g.gen_range(1, 16); let rate = g.gen_range(16_000, 224_000); let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize; let max = g.gen_range(0.0, 1.0); let freqs = [ g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), ]; let volumes = [ g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), ]; let volume_scale = 1.0 / volumes.iter().sum::<f32>(); let mut accumulators = [0.0; 4]; let steps = [ 2.0 * std::f32::consts::PI * freqs[0] / rate as f32, 2.0 * std::f32::consts::PI * freqs[1] / rate as f32, 2.0 * std::f32::consts::PI * freqs[2] / rate as f32, 2.0 * std::f32::consts::PI * freqs[3] / rate as f32, ]; let mut data = vec![S::from_sample(0.0f32); num_frames * channels as usize]; for frame in data.chunks_exact_mut(channels as usize) { let val = max * (f32::sin(accumulators[0]) * volumes[0] + f32::sin(accumulators[1]) * volumes[1] + f32::sin(accumulators[2]) * volumes[2] + f32::sin(accumulators[3]) * volumes[3]) / volume_scale; for sample in frame.iter_mut() { *sample = S::from_sample(val); } for (acc, step) in 
accumulators.iter_mut().zip(steps.iter()) { *acc += step; } } Signal { data, channels, rate, } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { SignalShrinker::boxed(self.clone()) } } struct SignalShrinker<A: FromSample<f32>> { seed: Signal<A>, /// How many elements to take size: usize, /// Whether we tried with one channel already tried_one_channel: bool, } impl<A: FromSample<f32> + quickcheck::Arbitrary> SignalShrinker<A> { fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> { let channels = seed.channels; Box::new(SignalShrinker { seed, size: 0, tried_one_channel: channels == 1, }) } } impl<A> Iterator for SignalShrinker<A> where A: FromSample<f32> + quickcheck::Arbitrary, { type Item = Signal<A>; fn next(&mut self) -> Option<Signal<A>> { if self.size < self.seed.data.len() { // Generate a smaller vector by removing size elements let xs1 = if self.tried_one_channel { Vec::from(&self.seed.data[..self.size]) } else { self.seed .data .iter() .cloned() .step_by(self.seed.channels as usize) .take(self.size) .collect() }; if self.size == 0 { self.size = if self.tried_one_channel { self.seed.channels as usize } else { 1 }; } else { self.size *= 2; } Some(Signal { data: xs1, channels: if self.tried_one_channel { self.seed.channels } else { 1 }, rate: self.seed.rate, }) } else if !self.tried_one_channel { self.tried_one_channel = true; self.size = 0; self.next() } else { None } } } }
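The two Samples impls differ only in memory layout, so walking a channel must visit identical values from either wrapper. A small equivalence sketch over a stereo buffer, using the constructors above; .unwrap() stands in for real error handling:

fn interleaved_planar_agree() {
    // Stereo, three frames: interleaved as L0 R0 L1 R1 L2 R2...
    let data = [0i16, 10, 1, 11, 2, 12];
    let inter = Interleaved::new(&data, 2).unwrap();

    // ...and the same six samples as two planar channels.
    let (left, right) = ([0i16, 1, 2], [10i16, 11, 12]);
    let chans: [&[i16]; 2] = [&left, &right];
    let planar = Planar::new(&chans).unwrap();

    assert_eq!(inter.frames(), 3);
    assert_eq!(planar.frames(), 3);

    // Channel 1 (the right channel) yields 10, 11, 12 from both layouts.
    let (mut a, mut b) = (Vec::new(), Vec::new());
    inter.foreach_sample(1, |s| a.push(*s));
    planar.foreach_sample(1, |s| b.push(*s));
    assert_eq!(a, b);
}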
} } impl Sample for i16 {
random_line_split
utils.rs
// Copyright (c) 2011 Jan Kokemüller // Copyright (c) 2020 Sebastian Dröge <[email protected]> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use dasp_frame::Frame; /// Convert linear energy to logarithmic loudness. pub fn energy_to_loudness(energy: f64) -> f64 { // The non-test version is faster and more accurate but gives // slightly different results than the C version and fails the // tests because of that. #[cfg(test)] { 10.0 * (f64::ln(energy) / std::f64::consts::LN_10) - 0.691 } #[cfg(not(test))] { 10.0 * f64::log10(energy) - 0.691 } } /// Trait for abstracting over interleaved and planar samples. pub trait Samples<'a, S: Sample + 'a>: Sized { /// Call the given closure for each sample of the given channel. // FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable // and because of that we wouldn't get nice optimizations fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a S)); /// Call the given closure for each sample of the given channel. // FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable // and because of that we wouldn't get nice optimizations fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, func: impl FnMut(&'a S, U), ); fn foreach_frame<F: Frame<Sample = S>>(&self, func: impl FnMut(F)); /// Number of frames. fn frames(&self) -> usize; /// Number of channels. fn channels(&self) -> usize; /// Split into two at the given sample. fn split_at(self, sample: usize) -> (Self, Self); } /// Struct representing interleaved samples. pub struct Interleaved<'a, S> { /// Interleaved sample data. data: &'a [S], /// Number of channels. channels: usize, } impl<'a, S> Interleaved<'a, S> { /// Create a new wrapper around the interleaved channels and do a sanity check. 
pub fn new(data: &'a [S], channels: usize) -> Result<Self, crate::Error> { if channels == 0 { return Err(crate::Error::NoMem); } if data.len() % channels!= 0 { return Err(crate::Error::NoMem); } Ok(Interleaved { data, channels }) } } impl<'a, S: Sample> Samples<'a, S> for Interleaved<'a, S> { #[inline] fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) { assert!(channel < self.channels); for v in self.data.chunks_exact(self.channels) { func(&v[channel]) } } #[inline] fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, mut func: impl FnMut(&'a S, U), ) { assert!(channel < self.channels); for (v, u) in Iterator::zip(self.data.chunks_exact(self.channels), iter) { func(&v[channel], u) } } #[inline] fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) { assert_eq!(F::CHANNELS, self.channels); for f in self.data.chunks_exact(self.channels) { func(F::from_samples(&mut f.iter().copied()).unwrap()); } } #[inline] fn frames(&self) -> usize { self.data.len() / self.channels } #[inline] fn channels(&self) -> usize { self.channels } #[inline] fn split_at(self, sample: usize) -> (Self, Self) { assert!(sample * self.channels <= self.data.len()); let (fst, snd) = self.data.split_at(sample * self.channels); ( Interleaved { data: fst, channels: self.channels, }, Interleaved { data: snd, channels: self.channels, }, ) } } /// Struct representing interleaved samples. pub struct Planar<'a, S> { data: &'a [&'a [S]], start: usize, end: usize, } impl<'a, S> Planar<'a, S> { /// Create a new wrapper around the planar channels and do a sanity check. pub fn new(data: &'a [&'a [S]]) -> Result<Self, crate::Error> { if data.is_empty() { return Err(crate::Error::NoMem); } if data.iter().any(|d| data[0].len()!= d.len()) { return Err(crate::Error::NoMem); } Ok(Planar { data, start: 0, end: data[0].len(), }) } } impl<'a, S: Sample> Samples<'a, S> for Planar<'a, S> { #[inline] fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a S)) { assert!(channel < self.data.len()); for v in &self.data[channel][self.start..self.end] { func(v) } } #[inline] fn foreach_sample_zipped<U>( &self, channel: usize, iter: impl Iterator<Item = U>, mut func: impl FnMut(&'a S, U), ) { assert!(channel < self.data.len()); for (v, u) in Iterator::zip(self.data[channel][self.start..self.end].iter(), iter) { func(v, u) } } #[inline] fn foreach_frame<F: Frame<Sample = S>>(&self, mut func: impl FnMut(F)) { let channels = self.data.len(); assert_eq!(F::CHANNELS, channels); for f in self.start..self.end { func(F::from_fn(|c| self.data[c][f])); } } #[inline] fn frames(&self) -> usize { self.end - self.start } #[inline] fn channels(&self) -> usize { self.data.len() } #[inline] fn split_at(self, sample: usize) -> (Self, Self) { assert!(self.start + sample <= self.end); ( Planar { data: self.data, start: self.start, end: self.start + sample, }, Planar { data: self.data, start: self.start + sample, end: self.end, }, ) } } pub trait Sample: dasp_sample::Sample + dasp_sample::Duplex<f32> + dasp_sample::Duplex<f64> { const MAX_AMPLITUDE: f64; fn as_f64_raw(self) -> f64; } impl Sample for f32 { const MAX_AMPLITUDE: f64 = 1.0; #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } impl Sample for f64 { const MAX_AMPLITUDE: f64 = 1.0; #[inline(always)] fn as_f64_raw(self) -> f64 {
impl Sample for i16 { const MAX_AMPLITUDE: f64 = -(Self::MIN as f64); #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } impl Sample for i32 { const MAX_AMPLITUDE: f64 = -(Self::MIN as f64); #[inline(always)] fn as_f64_raw(self) -> f64 { self as f64 } } /// An extension-trait to accumulate samples into a frame pub trait FrameAccumulator: Frame { fn scale_add(&mut self, other: &Self, coeff: f32); fn retain_max_samples(&mut self, other: &Self); } impl<F: Frame, S> FrameAccumulator for F where S: SampleAccumulator + std::fmt::Debug, F: IndexMut<Target = S>, { #[inline(always)] fn scale_add(&mut self, other: &Self, coeff: f32) { for i in 0..Self::CHANNELS { self.index_mut(i).scale_add(*other.index(i), coeff); } } fn retain_max_samples(&mut self, other: &Self) { for i in 0..Self::CHANNELS { let this = self.index_mut(i); let other = other.index(i); if *other > *this { *this = *other; } } } } // Required since std::ops::IndexMut seem to not be implemented for arrays // making FrameAcc hard to implement for auto-vectorization // IndexMut seems to be coming to stdlib, when https://github.com/rust-lang/rust/pull/74989 // implemented, this trait can be removed pub trait IndexMut { type Target; fn index_mut(&mut self, i: usize) -> &mut Self::Target; fn index(&self, i: usize) -> &Self::Target; } macro_rules! index_mut_impl { ( $channels:expr ) => { impl<T: SampleAccumulator> IndexMut for [T; $channels] { type Target = T; #[inline(always)] fn index_mut(&mut self, i: usize) -> &mut Self::Target { &mut self[i] } #[inline(always)] fn index(&self, i: usize) -> &Self::Target { &self[i] } } }; } index_mut_impl!(1); index_mut_impl!(2); index_mut_impl!(4); index_mut_impl!(6); index_mut_impl!(8); pub trait SampleAccumulator: Sample { fn scale_add(&mut self, other: Self, coeff: f32); } impl SampleAccumulator for f32 { #[inline(always)] fn scale_add(&mut self, other: Self, coeff: f32) { #[cfg(feature = "precision-true-peak")] { *self = other.mul_add(coeff, *self); } #[cfg(not(feature = "precision-true-peak"))] { *self += other * coeff } } } #[cfg(test)] pub mod tests { use dasp_sample::{FromSample, Sample}; #[derive(Clone, Debug)] pub struct Signal<S: FromSample<f32>> { pub data: Vec<S>, pub channels: u32, pub rate: u32, } impl<S: Sample + FromSample<f32> + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<S> { fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self { use rand::Rng; let channels = g.gen_range(1, 16); let rate = g.gen_range(16_000, 224_000); let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize; let max = g.gen_range(0.0, 1.0); let freqs = [ g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), g.gen_range(20.0, 16_000.0), ]; let volumes = [ g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), g.gen_range(0.0, 1.0), ]; let volume_scale = 1.0 / volumes.iter().sum::<f32>(); let mut accumulators = [0.0; 4]; let steps = [ 2.0 * std::f32::consts::PI * freqs[0] / rate as f32, 2.0 * std::f32::consts::PI * freqs[1] / rate as f32, 2.0 * std::f32::consts::PI * freqs[2] / rate as f32, 2.0 * std::f32::consts::PI * freqs[3] / rate as f32, ]; let mut data = vec![S::from_sample(0.0f32); num_frames * channels as usize]; for frame in data.chunks_exact_mut(channels as usize) { let val = max * (f32::sin(accumulators[0]) * volumes[0] + f32::sin(accumulators[1]) * volumes[1] + f32::sin(accumulators[2]) * volumes[2] + f32::sin(accumulators[3]) * volumes[3]) / volume_scale; for sample in frame.iter_mut() { *sample = S::from_sample(val); } for (acc, 
step) in accumulators.iter_mut().zip(steps.iter()) { *acc += step; } } Signal { data, channels, rate, } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { SignalShrinker::boxed(self.clone()) } } struct SignalShrinker<A: FromSample<f32>> { seed: Signal<A>, /// How many elements to take size: usize, /// Whether we tried with one channel already tried_one_channel: bool, } impl<A: FromSample<f32> + quickcheck::Arbitrary> SignalShrinker<A> { fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> { let channels = seed.channels; Box::new(SignalShrinker { seed, size: 0, tried_one_channel: channels == 1, }) } } impl<A> Iterator for SignalShrinker<A> where A: FromSample<f32> + quickcheck::Arbitrary, { type Item = Signal<A>; fn next(&mut self) -> Option<Signal<A>> { if self.size < self.seed.data.len() { // Generate a smaller vector by removing size elements let xs1 = if self.tried_one_channel { Vec::from(&self.seed.data[..self.size]) } else { self.seed .data .iter() .cloned() .step_by(self.seed.channels as usize) .take(self.size) .collect() }; if self.size == 0 { self.size = if self.tried_one_channel { self.seed.channels as usize } else { 1 }; } else { self.size *= 2; } Some(Signal { data: xs1, channels: if self.tried_one_channel { self.seed.channels } else { 1 }, rate: self.seed.rate, }) } else if !self.tried_one_channel { self.tried_one_channel = true; self.size = 0; self.next() } else { None } } } }
self } }
identifier_body
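A note on the row above: `scale_add` switches between a fused `f32::mul_add` and a plain multiply-add depending on the `precision-true-peak` feature, and `retain_max_samples` keeps a per-channel running maximum. A minimal, self-contained sketch of the same accumulate-and-retain-peaks pattern; the free functions and test values below are illustrative stand-ins, not part of the crate:

// Sketch of scale_add / retain_max_samples for a stereo frame; scale_add
// uses f32::mul_add like the `precision-true-peak` code path above.
fn scale_add(acc: &mut [f32; 2], other: &[f32; 2], coeff: f32) {
    for i in 0..2 {
        // One fused multiply-add per channel: acc[i] += other[i] * coeff
        acc[i] = other[i].mul_add(coeff, acc[i]);
    }
}

fn retain_max_samples(acc: &mut [f32; 2], other: &[f32; 2]) {
    for i in 0..2 {
        if other[i] > acc[i] {
            acc[i] = other[i];
        }
    }
}

fn main() {
    let mut acc = [0.25f32, -0.5];
    scale_add(&mut acc, &[0.5, 0.5], 2.0);
    assert_eq!(acc, [1.25, 0.5]);

    let mut peaks = [0.1f32, 0.9];
    retain_max_samples(&mut peaks, &[0.7, 0.2]);
    assert_eq!(peaks, [0.7, 0.9]);
}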
mul_fixed.rs
Advice, Column, ConstraintSystem, Constraints, Error, Expression, Fixed, Selector, VirtualCells, }, poly::Rotation, }; use lazy_static::lazy_static; use pasta_curves::{arithmetic::CurveAffine, pallas}; pub mod base_field_elem; pub mod full_width; pub mod short; lazy_static! { static ref TWO_SCALAR: pallas::Scalar = pallas::Scalar::from(2); // H = 2^3 (3-bit window) static ref H_SCALAR: pallas::Scalar = pallas::Scalar::from(H as u64); static ref H_BASE: pallas::Base = pallas::Base::from(H as u64); } #[derive(Clone, Debug, Eq, PartialEq)] pub struct Config<FixedPoints: super::FixedPoints<pallas::Affine>> { running_sum_config: RunningSumConfig<pallas::Base, FIXED_BASE_WINDOW_SIZE>, // The fixed Lagrange interpolation coefficients for `x_p`. lagrange_coeffs: [Column<Fixed>; H], // The fixed `z` for each window such that `y + z = u^2`. fixed_z: Column<Fixed>, // Decomposition of an `n-1`-bit scalar into `k`-bit windows: // a = a_0 + 2^k(a_1) + 2^{2k}(a_2) +... + 2^{(n-1)k}(a_{n-1}) window: Column<Advice>, // y-coordinate of accumulator (only used in the final row). u: Column<Advice>, // Configuration for `add` add_config: add::Config, // Configuration for `add_incomplete` add_incomplete_config: add_incomplete::Config, _marker: PhantomData<FixedPoints>, } impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> { #[allow(clippy::too_many_arguments)] pub(super) fn configure( meta: &mut ConstraintSystem<pallas::Base>, lagrange_coeffs: [Column<Fixed>; H], window: Column<Advice>, u: Column<Advice>, add_config: add::Config, add_incomplete_config: add_incomplete::Config, ) -> Self { meta.enable_equality(window); meta.enable_equality(u); let q_running_sum = meta.selector(); let running_sum_config = RunningSumConfig::configure(meta, q_running_sum, window); let config = Self { running_sum_config, lagrange_coeffs, fixed_z: meta.fixed_column(), window, u, add_config, add_incomplete_config, _marker: PhantomData, }; // Check relationships between `add_config` and `add_incomplete_config`. assert_eq!( config.add_config.x_p, config.add_incomplete_config.x_p, "add and add_incomplete are used internally in mul_fixed." ); assert_eq!( config.add_config.y_p, config.add_incomplete_config.y_p, "add and add_incomplete are used internally in mul_fixed." ); for advice in [config.window, config.u].iter() { assert_ne!( *advice, config.add_config.x_qr, "Do not overlap with output columns of add." ); assert_ne!( *advice, config.add_config.y_qr, "Do not overlap with output columns of add." );
config.running_sum_coords_gate(meta); config } /// Check that each window in the running sum decomposition uses the correct y_p /// and interpolated x_p. /// /// This gate is used both in the mul_fixed::base_field_elem and mul_fixed::short /// helpers, which decompose the scalar using a running sum. /// /// This gate is not used in the mul_fixed::full_width helper, since the full-width /// scalar is witnessed directly as three-bit windows instead of being decomposed /// via a running sum. fn running_sum_coords_gate(&self, meta: &mut ConstraintSystem<pallas::Base>) { meta.create_gate("Running sum coordinates check", |meta| { let q_mul_fixed_running_sum = meta.query_selector(self.running_sum_config.q_range_check()); let z_cur = meta.query_advice(self.window, Rotation::cur()); let z_next = meta.query_advice(self.window, Rotation::next()); // z_{i+1} = (z_i - a_i) / 2^3 // => a_i = z_i - z_{i+1} * 2^3 let word = z_cur - z_next * pallas::Base::from(H as u64); Constraints::with_selector(q_mul_fixed_running_sum, self.coords_check(meta, word)) }); } /// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-coordinates). #[allow(clippy::op_ref)] fn coords_check( &self, meta: &mut VirtualCells<'_, pallas::Base>, window: Expression<pallas::Base>, ) -> Vec<(&'static str, Expression<pallas::Base>)> { let y_p = meta.query_advice(self.add_config.y_p, Rotation::cur()); let x_p = meta.query_advice(self.add_config.x_p, Rotation::cur()); let z = meta.query_fixed(self.fixed_z); let u = meta.query_advice(self.u, Rotation::cur()); let window_pow: Vec<Expression<pallas::Base>> = (0..H) .map(|pow| { (0..pow).fold(Expression::Constant(pallas::Base::one()), |acc, _| { acc * window.clone() }) }) .collect(); let interpolated_x = window_pow.iter().zip(self.lagrange_coeffs.iter()).fold( Expression::Constant(pallas::Base::zero()), |acc, (window_pow, coeff)| acc + (window_pow.clone() * meta.query_fixed(*coeff)), ); // Check interpolation of x-coordinate let x_check = interpolated_x - x_p.clone(); // Check that `y + z = u^2`, where `z` is fixed and `u`, `y` are witnessed let y_check = u.square() - y_p.clone() - z; // Check that (x, y) is on the curve let on_curve = y_p.square() - x_p.clone().square() * x_p - Expression::Constant(pallas::Affine::b()); vec![ ("check x", x_check), ("check y", y_check), ("on-curve", on_curve), ] } #[allow(clippy::type_complexity)] fn assign_region_inner<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, scalar: &ScalarFixed, base: &F, coords_check_toggle: Selector, ) -> Result<(NonIdentityEccPoint, NonIdentityEccPoint), Error> { // Assign fixed columns for given fixed base self.assign_fixed_constants::<F, NUM_WINDOWS>(region, offset, base, coords_check_toggle)?; // Initialize accumulator let acc = self.initialize_accumulator::<F, NUM_WINDOWS>(region, offset, base, scalar)?; // Process all windows excluding least and most significant windows let acc = self.add_incomplete::<F, NUM_WINDOWS>(region, offset, acc, base, scalar)?; // Process most significant window let mul_b = self.process_msb::<F, NUM_WINDOWS>(region, offset, base, scalar)?; Ok((acc, mul_b)) } /// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-load-base). 
fn assign_fixed_constants<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, coords_check_toggle: Selector, ) -> Result<(), Error> { let mut constants = None; let build_constants = || { let lagrange_coeffs = base.lagrange_coeffs(); assert_eq!(lagrange_coeffs.len(), NUM_WINDOWS); let z = base.z(); assert_eq!(z.len(), NUM_WINDOWS); (lagrange_coeffs, z) }; // Assign fixed columns for given fixed base for window in 0..NUM_WINDOWS { coords_check_toggle.enable(region, window + offset)?; // Assign x-coordinate Lagrange interpolation coefficients for k in 0..H { region.assign_fixed( || { format!( "Lagrange interpolation coeff for window: {:?}, k: {:?}", window, k ) }, self.lagrange_coeffs[k], window + offset, || { if constants.as_ref().is_none() { constants = Some(build_constants()); } let lagrange_coeffs = &constants.as_ref().unwrap().0; Value::known(lagrange_coeffs[window][k]) }, )?; } // Assign z-values for each window region.assign_fixed( || format!("z-value for window: {:?}", window), self.fixed_z, window + offset, || { let z = &constants.as_ref().unwrap().1; Value::known(pallas::Base::from(z[window])) }, )?; } Ok(()) } /// Assigns the values used to process a window. fn process_window<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, w: usize, k_usize: Value<usize>, window_scalar: Value<pallas::Scalar>, base: &F, ) -> Result<NonIdentityEccPoint, Error> { let base_value = base.generator(); let base_u = base.u(); assert_eq!(base_u.len(), NUM_WINDOWS); // Compute [window_scalar]B let mul_b = { let mul_b = window_scalar.map(|scalar| base_value * scalar); let mul_b = mul_b.map(|mul_b| mul_b.to_affine().coordinates().unwrap()); let x = mul_b.map(|mul_b| { let x = *mul_b.x(); assert!(x != pallas::Base::zero()); x.into() }); let x = region.assign_advice( || format!("mul_b_x, window {}", w), self.add_config.x_p, offset + w, || x, )?; let y = mul_b.map(|mul_b| { let y = *mul_b.y(); assert!(y != pallas::Base::zero()); y.into() }); let y = region.assign_advice( || format!("mul_b_y, window {}", w), self.add_config.y_p, offset + w, || y, )?; NonIdentityEccPoint::from_coordinates_unchecked(x, y) }; // Assign u = (y_p + z_w).sqrt() let u_val = k_usize.map(|k| pallas::Base::from_repr(base_u[w][k]).unwrap()); region.assign_advice(|| "u", self.u, offset + w, || u_val)?; Ok(mul_b) } fn initialize_accumulator<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { // Recall that the message at each window `w` is represented as // `m_w = [(k_w + 2) ⋅ 8^w]B`. // When `w = 0`, we have `m_0 = [(k_0 + 2)]B`. let w = 0; let k0 = scalar.windows_field()[0]; let k0_usize = scalar.windows_usize()[0]; self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k0, k0_usize, base) } fn add_incomplete<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, mut acc: NonIdentityEccPoint, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { let scalar_windows_field = scalar.windows_field(); let scalar_windows_usize = scalar.windows_usize(); assert_eq!(scalar_windows_field.len(), NUM_WINDOWS); for (w, (k, k_usize)) in scalar_windows_field .into_iter() .zip(scalar_windows_usize) .enumerate() // The MSB is processed separately.
.take(NUM_WINDOWS - 1) // Skip k_0 (already processed). .skip(1) { // Compute [(k_w + 2) ⋅ 8^w]B // // This assigns the coordinates of the returned point into the input cells for // the incomplete addition gate, which will then copy them into themselves. let mul_b = self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k, k_usize, base)?; // Add to the accumulator. // // After the first loop, the accumulator will already be in the input cells // for the incomplete addition gate, and will be copied into themselves. acc = self .add_incomplete_config .assign_region(&mul_b, &acc, offset + w, region)?; } Ok(acc) } /// Assigns the values used to process a window that does not contain the MSB. fn process_lower_bits<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, w: usize, k: Value<pallas::Scalar>, k_usize: Value<usize>, base: &F, ) -> Result<NonIdentityEccPoint, Error> { // `scalar = [(k_w + 2) ⋅ 8^w] let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0])); self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base) } /// Assigns the values used to process the window containing the MSB. fn process_msb<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { let k_usize = scalar.windows_usize()[NUM_WINDOWS - 1]; // offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1} let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| { acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0]) }); // `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`. let scalar = scalar.windows_field()[scalar.windows_field().len() - 1] .map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc); self.process_window::<_, NUM_WINDOWS>( region, offset, NUM_WINDOWS - 1, k_usize, scalar, base, ) } } enum ScalarFixed { FullWidth(EccScalarFixed), Short(EccScalarFixedShort), BaseFieldElem(EccBaseFieldElemFixed), } impl From<&EccScalarFixed> for ScalarFixed { fn from(scalar_fixed: &EccScalarFixed) -> Self { Self::FullWidth(scalar_fixed.clone()) } } impl From<&EccScalarFixedShort> for ScalarFixed { fn from(scalar_fixed: &EccScalarFixedShort) -> Self { Self::Short(scalar_fixed.clone()) } } impl From<&EccBaseFieldElemFixed> for ScalarFixed { fn from(base_field_elem: &EccBaseFieldElemFixed) -> Self { Self::BaseFieldElem(base_field_elem.clone()) } } impl ScalarFixed { /// The scalar decomposition was done in the base field. For computation /// outside the circuit, we now convert them back into the scalar field. /// /// This function does not require that the base field fits inside the scalar field, /// because the window size fits into either field. fn windows_field(&self) -> Vec<Value<pallas::Scalar>> { let running_sum_to_windows = |zs: Vec<AssignedCell<pallas::Base, pallas::Base>>| { (0..(zs.len() - 1)) .map(|idx| { let z_cur = zs[idx].value(); let z_next = zs[idx + 1].value(); let word = z_cur - z_next * Value::known(*H_BASE); // This assumes that the endianness of the encodings of pallas::Base // and pallas::Scalar are the same. They happen to be, but we need to // be careful if this is generalised. 
word.map(|word| pallas::Scalar::from_repr(word.to_repr()).unwrap()) }) .collect::<Vec<_>>() }; match self { Self::BaseFieldElem(scalar) => running_sum_to_windows(scalar.running_sum.to_vec()), Self::Short(scalar) => running_sum_to_windows( scalar .running_sum .as_ref() .expect("EccScalarFixedShort has been constrained") .to_vec(), ), Self::FullWidth(scalar) => scalar .windows .as_ref() .expect("EccScalarFixed has been witnessed") .iter() .map(|bits| { // This assumes that the endianness of the encodings of pallas::Base // and pallas::Scalar are the same. They happen to be, but we need to // be careful if this is generalised. bits.value() .map(|value| pallas::Scalar::from_repr(value.to_repr()).unwrap()) }) .collect::<Vec<_>>(), } } /// The scalar decomposition is guaranteed to be in three-bit windows, so we construct /// `usize` indices from the lowest three bits of each window field element for /// convenient indexing into `u`-values. fn windows_usize(&self) -> Vec<Value<usize>> {
}
random_line_split
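The row above splits inside the running-sum machinery. The relation its gate constrains, a_i = z_i - z_{i+1} * 2^3, can be sanity-checked with plain integers; the sketch below (illustrative only, outside any field arithmetic) builds the z_i sequence for a toy scalar and recovers its 3-bit windows exactly as the gate does:

fn main() {
    const H: u64 = 8; // 2^3, matching the 3-bit window size noted above
    let scalar: u64 = 0o7421; // 3-bit windows 1, 2, 4, 7, least significant first
    // Running sum: z_0 = scalar, z_{i+1} = (z_i - a_i) / 8
    let mut zs = vec![scalar];
    while *zs.last().unwrap() != 0 {
        let z = *zs.last().unwrap();
        zs.push(z / H); // a_i = z_i mod 8 is removed by the integer division
    }
    // Recover each window as the gate does: a_i = z_i - 8 * z_{i+1}
    let recovered: Vec<u64> = zs.windows(2).map(|w| w[0] - H * w[1]).collect();
    assert_eq!(recovered, vec![1, 2, 4, 7]);
}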
mul_fixed.rs
Advice, Column, ConstraintSystem, Constraints, Error, Expression, Fixed, Selector, VirtualCells, }, poly::Rotation, }; use lazy_static::lazy_static; use pasta_curves::{arithmetic::CurveAffine, pallas}; pub mod base_field_elem; pub mod full_width; pub mod short; lazy_static! { static ref TWO_SCALAR: pallas::Scalar = pallas::Scalar::from(2); // H = 2^3 (3-bit window) static ref H_SCALAR: pallas::Scalar = pallas::Scalar::from(H as u64); static ref H_BASE: pallas::Base = pallas::Base::from(H as u64); } #[derive(Clone, Debug, Eq, PartialEq)] pub struct Config<FixedPoints: super::FixedPoints<pallas::Affine>> { running_sum_config: RunningSumConfig<pallas::Base, FIXED_BASE_WINDOW_SIZE>, // The fixed Lagrange interpolation coefficients for `x_p`. lagrange_coeffs: [Column<Fixed>; H], // The fixed `z` for each window such that `y + z = u^2`. fixed_z: Column<Fixed>, // Decomposition of an `n-1`-bit scalar into `k`-bit windows: // a = a_0 + 2^k(a_1) + 2^{2k}(a_2) +... + 2^{(n-1)k}(a_{n-1}) window: Column<Advice>, // y-coordinate of accumulator (only used in the final row). u: Column<Advice>, // Configuration for `add` add_config: add::Config, // Configuration for `add_incomplete` add_incomplete_config: add_incomplete::Config, _marker: PhantomData<FixedPoints>, } impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> { #[allow(clippy::too_many_arguments)] pub(super) fn configure( meta: &mut ConstraintSystem<pallas::Base>, lagrange_coeffs: [Column<Fixed>; H], window: Column<Advice>, u: Column<Advice>, add_config: add::Config, add_incomplete_config: add_incomplete::Config, ) -> Self { meta.enable_equality(window); meta.enable_equality(u); let q_running_sum = meta.selector(); let running_sum_config = RunningSumConfig::configure(meta, q_running_sum, window); let config = Self { running_sum_config, lagrange_coeffs, fixed_z: meta.fixed_column(), window, u, add_config, add_incomplete_config, _marker: PhantomData, }; // Check relationships between `add_config` and `add_incomplete_config`. assert_eq!( config.add_config.x_p, config.add_incomplete_config.x_p, "add and add_incomplete are used internally in mul_fixed." ); assert_eq!( config.add_config.y_p, config.add_incomplete_config.y_p, "add and add_incomplete are used internally in mul_fixed." ); for advice in [config.window, config.u].iter() { assert_ne!( *advice, config.add_config.x_qr, "Do not overlap with output columns of add." ); assert_ne!( *advice, config.add_config.y_qr, "Do not overlap with output columns of add." ); } config.running_sum_coords_gate(meta); config } /// Check that each window in the running sum decomposition uses the correct y_p /// and interpolated x_p. /// /// This gate is used both in the mul_fixed::base_field_elem and mul_fixed::short /// helpers, which decompose the scalar using a running sum. /// /// This gate is not used in the mul_fixed::full_width helper, since the full-width /// scalar is witnessed directly as three-bit windows instead of being decomposed /// via a running sum. 
fn running_sum_coords_gate(&self, meta: &mut ConstraintSystem<pallas::Base>) { meta.create_gate("Running sum coordinates check", |meta| { let q_mul_fixed_running_sum = meta.query_selector(self.running_sum_config.q_range_check()); let z_cur = meta.query_advice(self.window, Rotation::cur()); let z_next = meta.query_advice(self.window, Rotation::next()); // z_{i+1} = (z_i - a_i) / 2^3 // => a_i = z_i - z_{i+1} * 2^3 let word = z_cur - z_next * pallas::Base::from(H as u64); Constraints::with_selector(q_mul_fixed_running_sum, self.coords_check(meta, word)) }); } /// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-coordinates). #[allow(clippy::op_ref)] fn coords_check( &self, meta: &mut VirtualCells<'_, pallas::Base>, window: Expression<pallas::Base>, ) -> Vec<(&'static str, Expression<pallas::Base>)> { let y_p = meta.query_advice(self.add_config.y_p, Rotation::cur()); let x_p = meta.query_advice(self.add_config.x_p, Rotation::cur()); let z = meta.query_fixed(self.fixed_z); let u = meta.query_advice(self.u, Rotation::cur()); let window_pow: Vec<Expression<pallas::Base>> = (0..H) .map(|pow| { (0..pow).fold(Expression::Constant(pallas::Base::one()), |acc, _| { acc * window.clone() }) }) .collect(); let interpolated_x = window_pow.iter().zip(self.lagrange_coeffs.iter()).fold( Expression::Constant(pallas::Base::zero()), |acc, (window_pow, coeff)| acc + (window_pow.clone() * meta.query_fixed(*coeff)), ); // Check interpolation of x-coordinate let x_check = interpolated_x - x_p.clone(); // Check that `y + z = u^2`, where `z` is fixed and `u`, `y` are witnessed let y_check = u.square() - y_p.clone() - z; // Check that (x, y) is on the curve let on_curve = y_p.square() - x_p.clone().square() * x_p - Expression::Constant(pallas::Affine::b()); vec![ ("check x", x_check), ("check y", y_check), ("on-curve", on_curve), ] } #[allow(clippy::type_complexity)] fn assign_region_inner<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, scalar: &ScalarFixed, base: &F, coords_check_toggle: Selector, ) -> Result<(NonIdentityEccPoint, NonIdentityEccPoint), Error> { // Assign fixed columns for given fixed base self.assign_fixed_constants::<F, NUM_WINDOWS>(region, offset, base, coords_check_toggle)?; // Initialize accumulator let acc = self.initialize_accumulator::<F, NUM_WINDOWS>(region, offset, base, scalar)?; // Process all windows excluding least and most significant windows let acc = self.add_incomplete::<F, NUM_WINDOWS>(region, offset, acc, base, scalar)?; // Process most significant window let mul_b = self.process_msb::<F, NUM_WINDOWS>(region, offset, base, scalar)?; Ok((acc, mul_b)) } /// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-load-base). 
fn assign_fixed_constants<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, coords_check_toggle: Selector, ) -> Result<(), Error> { let mut constants = None; let build_constants = || { let lagrange_coeffs = base.lagrange_coeffs(); assert_eq!(lagrange_coeffs.len(), NUM_WINDOWS); let z = base.z(); assert_eq!(z.len(), NUM_WINDOWS); (lagrange_coeffs, z) }; // Assign fixed columns for given fixed base for window in 0..NUM_WINDOWS { coords_check_toggle.enable(region, window + offset)?; // Assign x-coordinate Lagrange interpolation coefficients for k in 0..H { region.assign_fixed( || { format!( "Lagrange interpolation coeff for window: {:?}, k: {:?}", window, k ) }, self.lagrange_coeffs[k], window + offset, || { if constants.as_ref().is_none() { constants = Some(build_constants()); } let lagrange_coeffs = &constants.as_ref().unwrap().0; Value::known(lagrange_coeffs[window][k]) }, )?; } // Assign z-values for each window region.assign_fixed( || format!("z-value for window: {:?}", window), self.fixed_z, window + offset, || { let z = &constants.as_ref().unwrap().1; Value::known(pallas::Base::from(z[window])) }, )?; } Ok(()) } /// Assigns the values used to process a window. fn process_window<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, w: usize, k_usize: Value<usize>, window_scalar: Value<pallas::Scalar>, base: &F, ) -> Result<NonIdentityEccPoint, Error> { let base_value = base.generator(); let base_u = base.u(); assert_eq!(base_u.len(), NUM_WINDOWS); // Compute [window_scalar]B let mul_b = { let mul_b = window_scalar.map(|scalar| base_value * scalar); let mul_b = mul_b.map(|mul_b| mul_b.to_affine().coordinates().unwrap()); let x = mul_b.map(|mul_b| { let x = *mul_b.x(); assert!(x != pallas::Base::zero()); x.into() }); let x = region.assign_advice( || format!("mul_b_x, window {}", w), self.add_config.x_p, offset + w, || x, )?; let y = mul_b.map(|mul_b| { let y = *mul_b.y(); assert!(y != pallas::Base::zero()); y.into() }); let y = region.assign_advice( || format!("mul_b_y, window {}", w), self.add_config.y_p, offset + w, || y, )?; NonIdentityEccPoint::from_coordinates_unchecked(x, y) }; // Assign u = (y_p + z_w).sqrt() let u_val = k_usize.map(|k| pallas::Base::from_repr(base_u[w][k]).unwrap()); region.assign_advice(|| "u", self.u, offset + w, || u_val)?; Ok(mul_b) } fn initialize_accumulator<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { // Recall that the message at each window `w` is represented as // `m_w = [(k_w + 2) ⋅ 8^w]B`. // When `w = 0`, we have `m_0 = [(k_0 + 2)]B`. let w = 0; let k0 = scalar.windows_field()[0]; let k0_usize = scalar.windows_usize()[0]; self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k0, k0_usize, base) } fn add_incomplete<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, mut acc: NonIdentityEccPoint, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { let scalar_windows_field = scalar.windows_field(); let scalar_windows_usize = scalar.windows_usize(); assert_eq!(scalar_windows_field.len(), NUM_WINDOWS); for (w, (k, k_usize)) in scalar_windows_field .into_iter() .zip(scalar_windows_usize) .enumerate() // The MSB is processed separately.
.take(NUM_WINDOWS - 1) // Skip k_0 (already processed). .skip(1) { // Compute [(k_w + 2) ⋅ 8^w]B // // This assigns the coordinates of the returned point into the input cells for // the incomplete addition gate, which will then copy them into themselves. let mul_b = self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k, k_usize, base)?; // Add to the accumulator. // // After the first loop, the accumulator will already be in the input cells // for the incomplete addition gate, and will be copied into themselves. acc = self .add_incomplete_config .assign_region(&mul_b, &acc, offset + w, region)?; } Ok(acc) } /// Assigns the values used to process a window that does not contain the MSB. fn process_lower_bits<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, w: usize, k: Value<pallas::Scalar>, k_usize: Value<usize>, base: &F, ) -> Result<NonIdentityEccPoint, Error> { // `scalar = [(k_w + 2) ⋅ 8^w] let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0])); self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base) } /// Assigns the values used to process the window containing the MSB. fn process_msb<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { let k_usize = scalar.windows_usize()[NUM_WINDOWS - 1]; // offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1} let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| { acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0]) }); // `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`. let scalar = scalar.windows_field()[scalar.windows_field().len() - 1] .map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc); self.process_window::<_, NUM_WINDOWS>( region, offset, NUM_WINDOWS - 1, k_usize, scalar, base, ) } } enum ScalarFixed { FullWidth(EccScalarFixed), Short(EccScalarFixedShort), BaseFieldElem(EccBaseFieldElemFixed), } impl From<&EccScalarFixed> for ScalarFixed { fn from(scalar_fixed: &EccScalarFixed) -> Self {
pl From<&EccScalarFixedShort> for ScalarFixed { fn from(scalar_fixed: &EccScalarFixedShort) -> Self { Self::Short(scalar_fixed.clone()) } } impl From<&EccBaseFieldElemFixed> for ScalarFixed { fn from(base_field_elem: &EccBaseFieldElemFixed) -> Self { Self::BaseFieldElem(base_field_elem.clone()) } } impl ScalarFixed { /// The scalar decomposition was done in the base field. For computation /// outside the circuit, we now convert them back into the scalar field. /// /// This function does not require that the base field fits inside the scalar field, /// because the window size fits into either field. fn windows_field(&self) -> Vec<Value<pallas::Scalar>> { let running_sum_to_windows = |zs: Vec<AssignedCell<pallas::Base, pallas::Base>>| { (0..(zs.len() - 1)) .map(|idx| { let z_cur = zs[idx].value(); let z_next = zs[idx + 1].value(); let word = z_cur - z_next * Value::known(*H_BASE); // This assumes that the endianness of the encodings of pallas::Base // and pallas::Scalar are the same. They happen to be, but we need to // be careful if this is generalised. word.map(|word| pallas::Scalar::from_repr(word.to_repr()).unwrap()) }) .collect::<Vec<_>>() }; match self { Self::BaseFieldElem(scalar) => running_sum_to_windows(scalar.running_sum.to_vec()), Self::Short(scalar) => running_sum_to_windows( scalar .running_sum .as_ref() .expect("EccScalarFixedShort has been constrained") .to_vec(), ), Self::FullWidth(scalar) => scalar .windows .as_ref() .expect("EccScalarFixed has been witnessed") .iter() .map(|bits| { // This assumes that the endianness of the encodings of pallas::Base // and pallas::Scalar are the same. They happen to be, but we need to // be careful if this is generalised. bits.value() .map(|value| pallas::Scalar::from_repr(value.to_repr()).unwrap()) }) .collect::<Vec<_>>(), } } /// The scalar decomposition is guaranteed to be in three-bit windows, so we construct /// `usize` indices from the lowest three bits of each window field element for /// convenient indexing into `u`-values. fn windows_usize(&self) -> Vec<Value<usize>> {
Self::FullWidth(scalar_fixed.clone()) } } im
identifier_body
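One step in the code above is worth spelling out: every non-MSB window is processed as [(k_w + 2) ⋅ 8^w]B, and `process_msb` compensates by subtracting offset_acc = \sum_{j=0}^{NUM_WINDOWS-2} 2^{3j+1}, which is exactly the total of the injected +2 offsets. A small integer check with toy windows (assuming FIXED_BASE_WINDOW_SIZE = 3, per the `H = 2^3` comment above):

fn main() {
    const NUM_WINDOWS: usize = 4;
    let ks: [u64; NUM_WINDOWS] = [5, 0, 3, 6]; // toy 3-bit windows
    let scalar: u64 = ks.iter().enumerate().map(|(w, &k)| k * 8u64.pow(w as u32)).sum();
    // Lower windows each contribute (k_w + 2) * 8^w ...
    let lower: u64 = ks[..NUM_WINDOWS - 1]
        .iter()
        .enumerate()
        .map(|(w, &k)| (k + 2) * 8u64.pow(w as u32))
        .sum();
    // ... and the MSB window subtracts offset_acc = sum_j 2^(3j + 1)
    let offset_acc: u64 = (0..NUM_WINDOWS - 1).map(|j| 1u64 << (3 * j + 1)).sum();
    let msb = ks[NUM_WINDOWS - 1] * 8u64.pow((NUM_WINDOWS - 1) as u32) - offset_acc;
    assert_eq!(lower + msb, scalar); // the +2 offsets cancel exactly
}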
mul_fixed.rs
Advice, Column, ConstraintSystem, Constraints, Error, Expression, Fixed, Selector, VirtualCells, }, poly::Rotation, }; use lazy_static::lazy_static; use pasta_curves::{arithmetic::CurveAffine, pallas}; pub mod base_field_elem; pub mod full_width; pub mod short; lazy_static! { static ref TWO_SCALAR: pallas::Scalar = pallas::Scalar::from(2); // H = 2^3 (3-bit window) static ref H_SCALAR: pallas::Scalar = pallas::Scalar::from(H as u64); static ref H_BASE: pallas::Base = pallas::Base::from(H as u64); } #[derive(Clone, Debug, Eq, PartialEq)] pub struct Config<FixedPoints: super::FixedPoints<pallas::Affine>> { running_sum_config: RunningSumConfig<pallas::Base, FIXED_BASE_WINDOW_SIZE>, // The fixed Lagrange interpolation coefficients for `x_p`. lagrange_coeffs: [Column<Fixed>; H], // The fixed `z` for each window such that `y + z = u^2`. fixed_z: Column<Fixed>, // Decomposition of an `n-1`-bit scalar into `k`-bit windows: // a = a_0 + 2^k(a_1) + 2^{2k}(a_2) +... + 2^{(n-1)k}(a_{n-1}) window: Column<Advice>, // y-coordinate of accumulator (only used in the final row). u: Column<Advice>, // Configuration for `add` add_config: add::Config, // Configuration for `add_incomplete` add_incomplete_config: add_incomplete::Config, _marker: PhantomData<FixedPoints>, } impl<FixedPoints: super::FixedPoints<pallas::Affine>> Config<FixedPoints> { #[allow(clippy::too_many_arguments)] pub(super) fn configure( meta: &mut ConstraintSystem<pallas::Base>, lagrange_coeffs: [Column<Fixed>; H], window: Column<Advice>, u: Column<Advice>, add_config: add::Config, add_incomplete_config: add_incomplete::Config, ) -> Self { meta.enable_equality(window); meta.enable_equality(u); let q_running_sum = meta.selector(); let running_sum_config = RunningSumConfig::configure(meta, q_running_sum, window); let config = Self { running_sum_config, lagrange_coeffs, fixed_z: meta.fixed_column(), window, u, add_config, add_incomplete_config, _marker: PhantomData, }; // Check relationships between `add_config` and `add_incomplete_config`. assert_eq!( config.add_config.x_p, config.add_incomplete_config.x_p, "add and add_incomplete are used internally in mul_fixed." ); assert_eq!( config.add_config.y_p, config.add_incomplete_config.y_p, "add and add_incomplete are used internally in mul_fixed." ); for advice in [config.window, config.u].iter() { assert_ne!( *advice, config.add_config.x_qr, "Do not overlap with output columns of add." ); assert_ne!( *advice, config.add_config.y_qr, "Do not overlap with output columns of add." ); } config.running_sum_coords_gate(meta); config } /// Check that each window in the running sum decomposition uses the correct y_p /// and interpolated x_p. /// /// This gate is used both in the mul_fixed::base_field_elem and mul_fixed::short /// helpers, which decompose the scalar using a running sum. /// /// This gate is not used in the mul_fixed::full_width helper, since the full-width /// scalar is witnessed directly as three-bit windows instead of being decomposed /// via a running sum. 
fn running_sum_coords_gate(&self, meta: &mut ConstraintSystem<pallas::Base>) { meta.create_gate("Running sum coordinates check", |meta| { let q_mul_fixed_running_sum = meta.query_selector(self.running_sum_config.q_range_check()); let z_cur = meta.query_advice(self.window, Rotation::cur()); let z_next = meta.query_advice(self.window, Rotation::next()); // z_{i+1} = (z_i - a_i) / 2^3 // => a_i = z_i - z_{i+1} * 2^3 let word = z_cur - z_next * pallas::Base::from(H as u64); Constraints::with_selector(q_mul_fixed_running_sum, self.coords_check(meta, word)) }); } /// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-coordinates). #[allow(clippy::op_ref)] fn coords_check( &self, meta: &mut VirtualCells<'_, pallas::Base>, window: Expression<pallas::Base>, ) -> Vec<(&'static str, Expression<pallas::Base>)> { let y_p = meta.query_advice(self.add_config.y_p, Rotation::cur()); let x_p = meta.query_advice(self.add_config.x_p, Rotation::cur()); let z = meta.query_fixed(self.fixed_z); let u = meta.query_advice(self.u, Rotation::cur()); let window_pow: Vec<Expression<pallas::Base>> = (0..H) .map(|pow| { (0..pow).fold(Expression::Constant(pallas::Base::one()), |acc, _| { acc * window.clone() }) }) .collect(); let interpolated_x = window_pow.iter().zip(self.lagrange_coeffs.iter()).fold( Expression::Constant(pallas::Base::zero()), |acc, (window_pow, coeff)| acc + (window_pow.clone() * meta.query_fixed(*coeff)), ); // Check interpolation of x-coordinate let x_check = interpolated_x - x_p.clone(); // Check that `y + z = u^2`, where `z` is fixed and `u`, `y` are witnessed let y_check = u.square() - y_p.clone() - z; // Check that (x, y) is on the curve let on_curve = y_p.square() - x_p.clone().square() * x_p - Expression::Constant(pallas::Affine::b()); vec![ ("check x", x_check), ("check y", y_check), ("on-curve", on_curve), ] } #[allow(clippy::type_complexity)] fn assign_region_inner<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, scalar: &ScalarFixed, base: &F, coords_check_toggle: Selector, ) -> Result<(NonIdentityEccPoint, NonIdentityEccPoint), Error> { // Assign fixed columns for given fixed base self.assign_fixed_constants::<F, NUM_WINDOWS>(region, offset, base, coords_check_toggle)?; // Initialize accumulator let acc = self.initialize_accumulator::<F, NUM_WINDOWS>(region, offset, base, scalar)?; // Process all windows excluding least and most significant windows let acc = self.add_incomplete::<F, NUM_WINDOWS>(region, offset, acc, base, scalar)?; // Process most significant window let mul_b = self.process_msb::<F, NUM_WINDOWS>(region, offset, base, scalar)?; Ok((acc, mul_b)) } /// [Specification](https://p.z.cash/halo2-0.1:ecc-fixed-mul-load-base). 
fn assign_fixed_constants<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, coords_check_toggle: Selector, ) -> Result<(), Error> { let mut constants = None; let build_constants = || { let lagrange_coeffs = base.lagrange_coeffs(); assert_eq!(lagrange_coeffs.len(), NUM_WINDOWS); let z = base.z(); assert_eq!(z.len(), NUM_WINDOWS); (lagrange_coeffs, z) }; // Assign fixed columns for given fixed base for window in 0..NUM_WINDOWS { coords_check_toggle.enable(region, window + offset)?; // Assign x-coordinate Lagrange interpolation coefficients for k in 0..H { region.assign_fixed( || { format!( "Lagrange interpolation coeff for window: {:?}, k: {:?}", window, k ) }, self.lagrange_coeffs[k], window + offset, || { if constants.as_ref().is_none() { constants = Some(build_constants()); } let lagrange_coeffs = &constants.as_ref().unwrap().0; Value::known(lagrange_coeffs[window][k]) }, )?; } // Assign z-values for each window region.assign_fixed( || format!("z-value for window: {:?}", window), self.fixed_z, window + offset, || { let z = &constants.as_ref().unwrap().1; Value::known(pallas::Base::from(z[window])) }, )?; } Ok(()) } /// Assigns the values used to process a window. fn process_window<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, w: usize, k_usize: Value<usize>, window_scalar: Value<pallas::Scalar>, base: &F, ) -> Result<NonIdentityEccPoint, Error> { let base_value = base.generator(); let base_u = base.u(); assert_eq!(base_u.len(), NUM_WINDOWS); // Compute [window_scalar]B let mul_b = { let mul_b = window_scalar.map(|scalar| base_value * scalar); let mul_b = mul_b.map(|mul_b| mul_b.to_affine().coordinates().unwrap()); let x = mul_b.map(|mul_b| { let x = *mul_b.x(); assert!(x != pallas::Base::zero()); x.into() }); let x = region.assign_advice( || format!("mul_b_x, window {}", w), self.add_config.x_p, offset + w, || x, )?; let y = mul_b.map(|mul_b| { let y = *mul_b.y(); assert!(y != pallas::Base::zero()); y.into() }); let y = region.assign_advice( || format!("mul_b_y, window {}", w), self.add_config.y_p, offset + w, || y, )?; NonIdentityEccPoint::from_coordinates_unchecked(x, y) }; // Assign u = (y_p + z_w).sqrt() let u_val = k_usize.map(|k| pallas::Base::from_repr(base_u[w][k]).unwrap()); region.assign_advice(|| "u", self.u, offset + w, || u_val)?; Ok(mul_b) } fn initialize_accumulator<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { // Recall that the message at each window `w` is represented as // `m_w = [(k_w + 2) ⋅ 8^w]B`. // When `w = 0`, we have `m_0 = [(k_0 + 2)]B`. let w = 0; let k0 = scalar.windows_field()[0]; let k0_usize = scalar.windows_usize()[0]; self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k0, k0_usize, base) } fn add_incomplete<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, mut acc: NonIdentityEccPoint, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { let scalar_windows_field = scalar.windows_field(); let scalar_windows_usize = scalar.windows_usize(); assert_eq!(scalar_windows_field.len(), NUM_WINDOWS); for (w, (k, k_usize)) in scalar_windows_field .into_iter() .zip(scalar_windows_usize) .enumerate() // The MSB is processed separately.
.take(NUM_WINDOWS - 1) // Skip k_0 (already processed). .skip(1) { // Compute [(k_w + 2) ⋅ 8^w]B // // This assigns the coordinates of the returned point into the input cells for // the incomplete addition gate, which will then copy them into themselves. let mul_b = self.process_lower_bits::<_, NUM_WINDOWS>(region, offset, w, k, k_usize, base)?; // Add to the accumulator. // // After the first loop, the accumulator will already be in the input cells // for the incomplete addition gate, and will be copied into themselves. acc = self .add_incomplete_config .assign_region(&mul_b, &acc, offset + w, region)?; } Ok(acc) } /// Assigns the values used to process a window that does not contain the MSB. fn process_lower_bits<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, w: usize, k: Value<pallas::Scalar>, k_usize: Value<usize>, base: &F, ) -> Result<NonIdentityEccPoint, Error> { // `scalar = [(k_w + 2) ⋅ 8^w] let scalar = k.map(|k| (k + *TWO_SCALAR) * (*H_SCALAR).pow(&[w as u64, 0, 0, 0])); self.process_window::<_, NUM_WINDOWS>(region, offset, w, k_usize, scalar, base) } /// Assigns the values used to process the window containing the MSB. fn process_msb<F: FixedPoint<pallas::Affine>, const NUM_WINDOWS: usize>( &self, region: &mut Region<'_, pallas::Base>, offset: usize, base: &F, scalar: &ScalarFixed, ) -> Result<NonIdentityEccPoint, Error> { let k_usize = scalar.windows_usize()[NUM_WINDOWS - 1]; // offset_acc = \sum_{j = 0}^{NUM_WINDOWS - 2} 2^{FIXED_BASE_WINDOW_SIZE*j + 1} let offset_acc = (0..(NUM_WINDOWS - 1)).fold(pallas::Scalar::zero(), |acc, w| { acc + (*TWO_SCALAR).pow(&[FIXED_BASE_WINDOW_SIZE as u64 * w as u64 + 1, 0, 0, 0]) }); // `scalar = [k * 8^(NUM_WINDOWS - 1) - offset_acc]`. let scalar = scalar.windows_field()[scalar.windows_field().len() - 1] .map(|k| k * (*H_SCALAR).pow(&[(NUM_WINDOWS - 1) as u64, 0, 0, 0]) - offset_acc); self.process_window::<_, NUM_WINDOWS>( region, offset, NUM_WINDOWS - 1, k_usize, scalar, base, ) } } enum ScalarFixed { FullWidth(EccScalarFixed), Short(EccScalarFixedShort), BaseFieldElem(EccBaseFieldElemFixed), } impl From<&EccScalarFixed> for ScalarFixed { fn from(scalar_fixed: &EccScalarFixed) -> Self { Self::FullWidth(scalar_fixed.clone()) } } impl From<&EccScalarFixedShort> for ScalarFixed { fn from(scalar_fixed: &EccScalarFixedShort) -> Self { Self::Short(scalar_fixed.clone()) } } impl From<&EccBaseFieldElemFixed> for ScalarFixed { fn from(base_field_elem: &EccBaseFieldElemFixed) -> Self { Self::BaseFieldElem(base_field_elem.clone()) } } impl ScalarFixed { /// The scalar decomposition was done in the base field. For computation /// outside the circuit, we now convert them back into the scalar field. /// /// This function does not require that the base field fits inside the scalar field, /// because the window size fits into either field. fn window
) -> Vec<Value<pallas::Scalar>> { let running_sum_to_windows = |zs: Vec<AssignedCell<pallas::Base, pallas::Base>>| { (0..(zs.len() - 1)) .map(|idx| { let z_cur = zs[idx].value(); let z_next = zs[idx + 1].value(); let word = z_cur - z_next * Value::known(*H_BASE); // This assumes that the endianness of the encodings of pallas::Base // and pallas::Scalar are the same. They happen to be, but we need to // be careful if this is generalised. word.map(|word| pallas::Scalar::from_repr(word.to_repr()).unwrap()) }) .collect::<Vec<_>>() }; match self { Self::BaseFieldElem(scalar) => running_sum_to_windows(scalar.running_sum.to_vec()), Self::Short(scalar) => running_sum_to_windows( scalar .running_sum .as_ref() .expect("EccScalarFixedShort has been constrained") .to_vec(), ), Self::FullWidth(scalar) => scalar .windows .as_ref() .expect("EccScalarFixed has been witnessed") .iter() .map(|bits| { // This assumes that the endianness of the encodings of pallas::Base // and pallas::Scalar are the same. They happen to be, but we need to // be careful if this is generalised. bits.value() .map(|value| pallas::Scalar::from_repr(value.to_repr()).unwrap()) }) .collect::<Vec<_>>(), } } /// The scalar decomposition is guaranteed to be in three-bit windows, so we construct /// `usize` indices from the lowest three bits of each window field element for /// convenient indexing into `u`-values. fn windows_usize(&self) -> Vec<Value<usize>> {
s_field(&self
identifier_name
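The masked identifier in the row above is `windows_field`; its sibling `windows_usize` keeps only the lowest three bits of each window to index into the `u` table. A trivial stand-alone sketch of that masking, with u64 standing in for field elements:

fn main() {
    // Windows are 3-bit by construction; the usize index is just the low bits.
    let windows: [u64; 4] = [5, 0, 7, 3];
    let indices: Vec<usize> = windows.iter().map(|w| (*w & 0b111) as usize).collect();
    assert_eq!(indices, vec![5, 0, 7, 3]);
}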
terminal.rs
//! Provides a low-level terminal interface use std::io; use std::time::Duration; use mortal::{self, PrepareConfig, PrepareState, TerminalReadGuard, TerminalWriteGuard}; use crate::sys; pub use mortal::{CursorMode, Signal, SignalSet, Size}; /// Default `Terminal` interface pub struct DefaultTerminal(mortal::Terminal); /// Represents the result of a `Terminal` read operation pub enum RawRead { /// `n` bytes were read from the device Bytes(usize), /// The terminal window was resized Resize(Size), /// A signal was received while waiting for input Signal(Signal), } /// Defines a low-level interface to the terminal pub trait Terminal: Sized + Send + Sync { // TODO: When generic associated types are implemented (and stabilized), // boxed trait objects may be replaced by `Reader` and `Writer`. /// Returned by `prepare`; passed to `restore` to restore state. type PrepareState; /* /// Holds an exclusive read lock and provides read operations type Reader: TerminalReader; /// Holds an exclusive write lock and provides write operations type Writer: TerminalWriter; */ /// Returns the name of the terminal. fn name(&self) -> &str; /// Acquires a lock on terminal read operations and returns a value holding /// that lock and granting access to such operations. /// /// The lock must not be released until the returned value is dropped. fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a>; /// Acquires a lock on terminal write operations and returns a value holding /// that lock and granting access to such operations. /// /// The lock must not be released until the returned value is dropped. fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a>; } /// Holds a lock on `Terminal` read operations pub trait TerminalReader<Term: Terminal> { /// Prepares the terminal for line reading and editing operations. /// /// If `block_signals` is `true`, the terminal will be configured to treat /// special characters that would otherwise be interpreted as signals as /// their literal value. /// /// If `block_signals` is `false`, a signal contained in the `report_signals` /// set may be returned. /// /// # Notes /// /// This method may be called more than once. However, if the state values /// are not restored in reverse order in which they were created, /// the state of the underlying terminal device becomes undefined. fn prepare(&mut self, block_signals: bool, report_signals: SignalSet) -> io::Result<Term::PrepareState>; /// Like `prepare`, but called when the write lock is already held. /// /// # Safety /// /// This method must be called with a `TerminalWriter` instance returned /// by the same `Terminal` instance to which this `TerminalReader` belongs. unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>, block_signals: bool, report_signals: SignalSet) -> io::Result<Term::PrepareState>; /// Restores the terminal state using the given state data. fn restore(&mut self, state: Term::PrepareState) -> io::Result<()>; /// Like `restore`, but called when the write lock is already held. /// /// # Safety /// /// This method must be called with a `TerminalWriter` instance returned /// by the same `Terminal` instance to which this `TerminalReader` belongs. unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>, state: Term::PrepareState) -> io::Result<()>; /// Reads some input from the terminal and appends it to the given buffer. fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead>; /// Waits `timeout` for user input. 
If `timeout` is `None`, waits indefinitely. /// /// Returns `Ok(true)` if input becomes available within the given timeout /// or if a signal is received. /// /// Returns `Ok(false)` if the timeout expires before input becomes available. fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool>; } /// Holds a lock on `Terminal` write operations pub trait TerminalWriter<Term: Terminal> { /// Returns the size of the terminal window fn size(&self) -> io::Result<Size>; /// Presents a clear terminal screen, with cursor at first row, first column. /// /// If the terminal possesses a scrolling window over a buffer, this shall /// have the effect of moving the visible window down such that it shows /// an empty view of the buffer, preserving some or all of existing buffer /// contents, where possible. fn clear_screen(&mut self) -> io::Result<()>; /// Clears characters on the line occupied by the cursor, beginning with the /// cursor and ending at the end of the line. Also clears all characters on /// all lines after the cursor. fn clear_to_screen_end(&mut self) -> io::Result<()>; /// Moves the cursor up `n` cells; `n` may be zero. fn move_up(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor down `n` cells; `n` may be zero. fn move_down(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor left `n` cells; `n` may be zero. fn move_left(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor right `n` cells; `n` may be zero. fn move_right(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor to the first column of the current line fn move_to_first_column(&mut self) -> io::Result<()>; /// Set the current cursor mode fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()>; /// Writes output to the terminal. /// /// For each carriage return `'\r'` written to the terminal, the cursor /// should be moved to the first column of the current line. /// /// For each newline `'\n'` written to the terminal, the cursor should /// be moved to the first column of the following line. /// /// The terminal interface shall not automatically move the cursor to the next /// line when `write` causes a character to be written to the final column. fn write(&mut self, s: &str) -> io::Result<()>; /// Flushes any currently buffered output data. /// /// `TerminalWriter` instances may not buffer data on all systems. /// /// Data must be flushed when the `TerminalWriter` instance is dropped. fn flush(&mut self) -> io::Result<()>; } impl DefaultTerminal { /// Opens access to the terminal device associated with standard output. pub fn new() -> io::Result<DefaultTerminal> { mortal::Terminal::new().map(DefaultTerminal) } /// Opens access to the terminal device associated with standard error. 
pub fn stderr() -> io::Result<DefaultTerminal> { mortal::Terminal::stderr().map(DefaultTerminal) } unsafe fn cast_writer<'a>(writer: &'a mut dyn TerminalWriter<Self>) -> &'a mut TerminalWriteGuard<'a> { &mut *(writer as *mut _ as *mut TerminalWriteGuard) } } impl Terminal for DefaultTerminal { type PrepareState = PrepareState; fn name(&self) -> &str { self.0.name() } fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a> { Box::new(self.0.lock_read().unwrap()) } fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a> { Box::new(self.0.lock_write().unwrap()) } } impl<'a> TerminalReader<DefaultTerminal> for TerminalReadGuard<'a> { fn prepare(&mut self, block_signals: bool, report_signals: SignalSet) -> io::Result<PrepareState> { self.prepare(PrepareConfig{ block_signals, enable_control_flow: !block_signals, enable_keypad: false, report_signals, .. PrepareConfig::default() }) } unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<DefaultTerminal>, block_signals: bool, report_signals: SignalSet) -> io::Result<PrepareState> { let lock = DefaultTerminal::cast_writer(lock); self.prepare_with_lock(lock, PrepareConfig{ block_signals, enable_control_flow: !block_signals, enable_keypad: false, report_signals, .. PrepareConfig::default() }) } fn restore(&mut self, state: PrepareState) -> io::Result<()> { self.restore(state) } unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<DefaultTerminal>, state: PrepareState) -> io::Result<()> { let lock = DefaultTerminal::cast_writer(lock); self.restore_with_lock(lock, state) } fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead> { sys::terminal_read(self, buf) } fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool> { self.wait_event(timeout) } } impl<'a> TerminalWriter<DefaultTerminal> for TerminalWriteGuard<'a> { fn size(&self) -> io::Result<Size> { self.size() } fn clear_screen(&mut self) -> io::Result<()> { self.clear_screen() } fn clear_to_screen_end(&mut self) -> io::Result<()> { self.clear_to_screen_end() } fn move_up(&mut self, n: usize) -> io::Result<()> { self.move_up(n) } fn move_down(&mut self, n: usize) -> io::Result<()> { self.move_down(n) } fn move_left(&mut self, n: usize) -> io::Result<()> { self.move_left(n) } fn move_right(&mut self, n: usize) -> io::Result<()> { self.move_right(n) } fn
(&mut self) -> io::Result<()> { self.move_to_first_column() } fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()> { self.set_cursor_mode(mode) } fn write(&mut self, s: &str) -> io::Result<()> { self.write_str(s) } fn flush(&mut self) -> io::Result<()> { self.flush() } }
move_to_first_column
identifier_name
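The `TerminalWriter::write` contract in the file above is precise about cursor motion: '\r' returns to the first column, '\n' moves to the first column of the next line, and writing into the final column must not auto-wrap. A toy cursor tracker (not part of mortal or this crate) modeling those rules, with the no-wrap case approximated by clamping at the last column:

// Toy model of the documented cursor rules for `write`: '\r' -> column 0,
// '\n' -> column 0 of the next line, no automatic wrap in the last column.
fn track_cursor(s: &str, cols: usize) -> (usize, usize) {
    let (mut row, mut col) = (0usize, 0usize);
    for ch in s.chars() {
        match ch {
            '\r' => col = 0,
            '\n' => {
                row += 1;
                col = 0;
            }
            _ => col = (col + 1).min(cols - 1), // clamp instead of wrapping
        }
    }
    (row, col)
}

fn main() {
    assert_eq!(track_cursor("ab\r", 80), (0, 0));
    assert_eq!(track_cursor("ab\ncd", 80), (1, 2));
}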
terminal.rs
//! Provides a low-level terminal interface use std::io; use std::time::Duration; use mortal::{self, PrepareConfig, PrepareState, TerminalReadGuard, TerminalWriteGuard}; use crate::sys; pub use mortal::{CursorMode, Signal, SignalSet, Size}; /// Default `Terminal` interface pub struct DefaultTerminal(mortal::Terminal); /// Represents the result of a `Terminal` read operation pub enum RawRead { /// `n` bytes were read from the device Bytes(usize), /// The terminal window was resized Resize(Size), /// A signal was received while waiting for input Signal(Signal), } /// Defines a low-level interface to the terminal pub trait Terminal: Sized + Send + Sync { // TODO: When generic associated types are implemented (and stabilized), // boxed trait objects may be replaced by `Reader` and `Writer`. /// Returned by `prepare`; passed to `restore` to restore state. type PrepareState; /* /// Holds an exclusive read lock and provides read operations type Reader: TerminalReader; /// Holds an exclusive write lock and provides write operations type Writer: TerminalWriter; */ /// Returns the name of the terminal. fn name(&self) -> &str; /// Acquires a lock on terminal read operations and returns a value holding /// that lock and granting access to such operations. /// /// The lock must not be released until the returned value is dropped. fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a>; /// Acquires a lock on terminal write operations and returns a value holding /// that lock and granting access to such operations. /// /// The lock must not be released until the returned value is dropped. fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a>; } /// Holds a lock on `Terminal` read operations pub trait TerminalReader<Term: Terminal> { /// Prepares the terminal for line reading and editing operations. /// /// If `block_signals` is `true`, the terminal will be configured to treat /// special characters that would otherwise be interpreted as signals as /// their literal value. /// /// If `block_signals` is `false`, a signal contained in the `report_signals` /// set may be returned. /// /// # Notes /// /// This method may be called more than once. However, if the state values /// are not restored in reverse order in which they were created, /// the state of the underlying terminal device becomes undefined. fn prepare(&mut self, block_signals: bool, report_signals: SignalSet) -> io::Result<Term::PrepareState>; /// Like `prepare`, but called when the write lock is already held. /// /// # Safety /// /// This method must be called with a `TerminalWriter` instance returned /// by the same `Terminal` instance to which this `TerminalReader` belongs. unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>, block_signals: bool, report_signals: SignalSet) -> io::Result<Term::PrepareState>; /// Restores the terminal state using the given state data. fn restore(&mut self, state: Term::PrepareState) -> io::Result<()>; /// Like `restore`, but called when the write lock is already held. /// /// # Safety /// /// This method must be called with a `TerminalWriter` instance returned /// by the same `Terminal` instance to which this `TerminalReader` belongs. unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>, state: Term::PrepareState) -> io::Result<()>; /// Reads some input from the terminal and appends it to the given buffer. fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead>; /// Waits `timeout` for user input. 
If `timeout` is `None`, waits indefinitely. /// /// Returns `Ok(true)` if input becomes available within the given timeout /// or if a signal is received. /// /// Returns `Ok(false)` if the timeout expires before input becomes available. fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool>; } /// Holds a lock on `Terminal` write operations pub trait TerminalWriter<Term: Terminal> { /// Returns the size of the terminal window fn size(&self) -> io::Result<Size>; /// Presents a clear terminal screen, with cursor at first row, first column. /// /// If the terminal possesses a scrolling window over a buffer, this shall /// have the effect of moving the visible window down such that it shows /// an empty view of the buffer, preserving some or all of existing buffer /// contents, where possible. fn clear_screen(&mut self) -> io::Result<()>; /// Clears characters on the line occupied by the cursor, beginning with the /// cursor and ending at the end of the line. Also clears all characters on /// all lines after the cursor. fn clear_to_screen_end(&mut self) -> io::Result<()>; /// Moves the cursor up `n` cells; `n` may be zero. fn move_up(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor down `n` cells; `n` may be zero. fn move_down(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor left `n` cells; `n` may be zero. fn move_left(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor right `n` cells; `n` may be zero. fn move_right(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor to the first column of the current line fn move_to_first_column(&mut self) -> io::Result<()>; /// Set the current cursor mode fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()>; /// Writes output to the terminal. /// /// For each carriage return `'\r'` written to the terminal, the cursor /// should be moved to the first column of the current line. /// /// For each newline `'\n'` written to the terminal, the cursor should /// be moved to the first column of the following line. /// /// The terminal interface shall not automatically move the cursor to the next /// line when `write` causes a character to be written to the final column. fn write(&mut self, s: &str) -> io::Result<()>; /// Flushes any currently buffered output data. /// /// `TerminalWriter` instances may not buffer data on all systems. /// /// Data must be flushed when the `TerminalWriter` instance is dropped. fn flush(&mut self) -> io::Result<()>; } impl DefaultTerminal { /// Opens access to the terminal device associated with standard output. pub fn new() -> io::Result<DefaultTerminal> { mortal::Terminal::new().map(DefaultTerminal) } /// Opens access to the terminal device associated with standard error. 
pub fn stderr() -> io::Result<DefaultTerminal> { mortal::Terminal::stderr().map(DefaultTerminal) } unsafe fn cast_writer<'a>(writer: &'a mut dyn TerminalWriter<Self>) -> &'a mut TerminalWriteGuard<'a> { &mut *(writer as *mut _ as *mut TerminalWriteGuard) } } impl Terminal for DefaultTerminal { type PrepareState = PrepareState; fn name(&self) -> &str { self.0.name() } fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a> { Box::new(self.0.lock_read().unwrap()) } fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a> { Box::new(self.0.lock_write().unwrap()) } } impl<'a> TerminalReader<DefaultTerminal> for TerminalReadGuard<'a> { fn prepare(&mut self, block_signals: bool, report_signals: SignalSet) -> io::Result<PrepareState> { self.prepare(PrepareConfig { block_signals, enable_control_flow: !block_signals, enable_keypad: false, report_signals, ..PrepareConfig::default() }) } unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<DefaultTerminal>, block_signals: bool, report_signals: SignalSet) -> io::Result<PrepareState> { let lock = DefaultTerminal::cast_writer(lock); self.prepare_with_lock(lock, PrepareConfig { block_signals, enable_control_flow: !block_signals, enable_keypad: false, report_signals, ..PrepareConfig::default() }) } fn restore(&mut self, state: PrepareState) -> io::Result<()> { self.restore(state) } unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<DefaultTerminal>, state: PrepareState) -> io::Result<()> { let lock = DefaultTerminal::cast_writer(lock); self.restore_with_lock(lock, state) } fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead> { sys::terminal_read(self, buf) } fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool> { self.wait_event(timeout) } } impl<'a> TerminalWriter<DefaultTerminal> for TerminalWriteGuard<'a> { fn size(&self) -> io::Result<Size> { self.size() } fn clear_screen(&mut self) -> io::Result<()> { self.clear_screen() } fn clear_to_screen_end(&mut self) -> io::Result<()> { self.clear_to_screen_end() } fn move_up(&mut self, n: usize) -> io::Result<()> { self.move_up(n) } fn move_down(&mut self, n: usize) -> io::Result<()>
fn move_left(&mut self, n: usize) -> io::Result<()> { self.move_left(n) } fn move_right(&mut self, n: usize) -> io::Result<()> { self.move_right(n) } fn move_to_first_column(&mut self) -> io::Result<()> { self.move_to_first_column() } fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()> { self.set_cursor_mode(mode) } fn write(&mut self, s: &str) -> io::Result<()> { self.write_str(s) } fn flush(&mut self) -> io::Result<()> { self.flush() } }
{ self.move_down(n) }
identifier_body
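A minimal sketch of how a caller might drive the `Terminal` trait defined above, illustrative only: it assumes the `mortal` re-exports provide `SignalSet::new()` (not shown in this file) and reuses the `io` and `Duration` imports from the top of terminal.rs.

fn read_once<T: Terminal>(term: &T) -> io::Result<Vec<u8>> {
    let mut reader = term.lock_read();
    // Treat would-be signal characters literally while we own the state.
    let state = reader.prepare(true, SignalSet::new())?;
    let mut buf = Vec::new();
    // Poll for up to one second, then perform a single raw read.
    if reader.wait_for_input(Some(Duration::from_secs(1)))? {
        reader.read(&mut buf)?;
    }
    // Restore state values in reverse order of creation, per the trait docs.
    reader.restore(state)?;
    Ok(buf)
}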
terminal.rs
//! Provides a low-level terminal interface use std::io; use std::time::Duration; use mortal::{self, PrepareConfig, PrepareState, TerminalReadGuard, TerminalWriteGuard}; use crate::sys; pub use mortal::{CursorMode, Signal, SignalSet, Size}; /// Default `Terminal` interface pub struct DefaultTerminal(mortal::Terminal); /// Represents the result of a `Terminal` read operation pub enum RawRead { /// `n` bytes were read from the device Bytes(usize), /// The terminal window was resized Resize(Size), /// A signal was received while waiting for input Signal(Signal), } /// Defines a low-level interface to the terminal pub trait Terminal: Sized + Send + Sync { // TODO: When generic associated types are implemented (and stabilized), // boxed trait objects may be replaced by `Reader` and `Writer`. /// Returned by `prepare`; passed to `restore` to restore state. type PrepareState; /* /// Holds an exclusive read lock and provides read operations type Reader: TerminalReader; /// Holds an exclusive write lock and provides write operations type Writer: TerminalWriter; */ /// Returns the name of the terminal. fn name(&self) -> &str; /// Acquires a lock on terminal read operations and returns a value holding /// that lock and granting access to such operations. /// /// The lock must not be released until the returned value is dropped. fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a>; /// Acquires a lock on terminal write operations and returns a value holding /// that lock and granting access to such operations. /// /// The lock must not be released until the returned value is dropped. fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a>; } /// Holds a lock on `Terminal` read operations pub trait TerminalReader<Term: Terminal> { /// Prepares the terminal for line reading and editing operations. /// /// If `block_signals` is `true`, the terminal will be configured to treat /// special characters that would otherwise be interpreted as signals as /// their literal value. /// /// If `block_signals` is `false`, a signal contained in the `report_signals` /// set may be returned. /// /// # Notes /// /// This method may be called more than once. However, if the state values /// are not restored in reverse order in which they were created, /// the state of the underlying terminal device becomes undefined. fn prepare(&mut self, block_signals: bool, report_signals: SignalSet) -> io::Result<Term::PrepareState>; /// Like `prepare`, but called when the write lock is already held. /// /// # Safety /// /// This method must be called with a `TerminalWriter` instance returned /// by the same `Terminal` instance to which this `TerminalReader` belongs. unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>, block_signals: bool, report_signals: SignalSet) -> io::Result<Term::PrepareState>; /// Restores the terminal state using the given state data. fn restore(&mut self, state: Term::PrepareState) -> io::Result<()>; /// Like `restore`, but called when the write lock is already held. /// /// # Safety /// /// This method must be called with a `TerminalWriter` instance returned /// by the same `Terminal` instance to which this `TerminalReader` belongs. unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<Term>, state: Term::PrepareState) -> io::Result<()>; /// Reads some input from the terminal and appends it to the given buffer. fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead>; /// Waits `timeout` for user input. 
If `timeout` is `None`, waits indefinitely. /// /// Returns `Ok(true)` if input becomes available within the given timeout /// or if a signal is received. /// /// Returns `Ok(false)` if the timeout expires before input becomes available. fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool>; } /// Holds a lock on `Terminal` write operations pub trait TerminalWriter<Term: Terminal> { /// Returns the size of the terminal window fn size(&self) -> io::Result<Size>; /// Presents a clear terminal screen, with cursor at first row, first column. /// /// If the terminal possesses a scrolling window over a buffer, this shall /// have the effect of moving the visible window down such that it shows /// an empty view of the buffer, preserving some or all of existing buffer /// contents, where possible. fn clear_screen(&mut self) -> io::Result<()>; /// Clears characters on the line occupied by the cursor, beginning with the /// cursor and ending at the end of the line. Also clears all characters on /// all lines after the cursor. fn clear_to_screen_end(&mut self) -> io::Result<()>; /// Moves the cursor up `n` cells; `n` may be zero. fn move_up(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor down `n` cells; `n` may be zero. fn move_down(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor left `n` cells; `n` may be zero. fn move_left(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor right `n` cells; `n` may be zero. fn move_right(&mut self, n: usize) -> io::Result<()>; /// Moves the cursor to the first column of the current line fn move_to_first_column(&mut self) -> io::Result<()>; /// Set the current cursor mode fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()>; /// Writes output to the terminal. /// /// For each carriage return `'\r'` written to the terminal, the cursor /// should be moved to the first column of the current line. /// /// For each newline `'\n'` written to the terminal, the cursor should /// be moved to the first column of the following line.
/// Flushes any currently buffered output data. /// /// `TerminalWriter` instances may not buffer data on all systems. /// /// Data must be flushed when the `TerminalWriter` instance is dropped. fn flush(&mut self) -> io::Result<()>; } impl DefaultTerminal { /// Opens access to the terminal device associated with standard output. pub fn new() -> io::Result<DefaultTerminal> { mortal::Terminal::new().map(DefaultTerminal) } /// Opens access to the terminal device associated with standard error. pub fn stderr() -> io::Result<DefaultTerminal> { mortal::Terminal::stderr().map(DefaultTerminal) } unsafe fn cast_writer<'a>(writer: &'a mut dyn TerminalWriter<Self>) -> &'a mut TerminalWriteGuard<'a> { &mut *(writer as *mut _ as *mut TerminalWriteGuard) } } impl Terminal for DefaultTerminal { type PrepareState = PrepareState; fn name(&self) -> &str { self.0.name() } fn lock_read<'a>(&'a self) -> Box<dyn TerminalReader<Self> + 'a> { Box::new(self.0.lock_read().unwrap()) } fn lock_write<'a>(&'a self) -> Box<dyn TerminalWriter<Self> + 'a> { Box::new(self.0.lock_write().unwrap()) } } impl<'a> TerminalReader<DefaultTerminal> for TerminalReadGuard<'a> { fn prepare(&mut self, block_signals: bool, report_signals: SignalSet) -> io::Result<PrepareState> { self.prepare(PrepareConfig { block_signals, enable_control_flow: !block_signals, enable_keypad: false, report_signals, ..PrepareConfig::default() }) } unsafe fn prepare_with_lock(&mut self, lock: &mut dyn TerminalWriter<DefaultTerminal>, block_signals: bool, report_signals: SignalSet) -> io::Result<PrepareState> { let lock = DefaultTerminal::cast_writer(lock); self.prepare_with_lock(lock, PrepareConfig { block_signals, enable_control_flow: !block_signals, enable_keypad: false, report_signals, ..PrepareConfig::default() }) } fn restore(&mut self, state: PrepareState) -> io::Result<()> { self.restore(state) } unsafe fn restore_with_lock(&mut self, lock: &mut dyn TerminalWriter<DefaultTerminal>, state: PrepareState) -> io::Result<()> { let lock = DefaultTerminal::cast_writer(lock); self.restore_with_lock(lock, state) } fn read(&mut self, buf: &mut Vec<u8>) -> io::Result<RawRead> { sys::terminal_read(self, buf) } fn wait_for_input(&mut self, timeout: Option<Duration>) -> io::Result<bool> { self.wait_event(timeout) } } impl<'a> TerminalWriter<DefaultTerminal> for TerminalWriteGuard<'a> { fn size(&self) -> io::Result<Size> { self.size() } fn clear_screen(&mut self) -> io::Result<()> { self.clear_screen() } fn clear_to_screen_end(&mut self) -> io::Result<()> { self.clear_to_screen_end() } fn move_up(&mut self, n: usize) -> io::Result<()> { self.move_up(n) } fn move_down(&mut self, n: usize) -> io::Result<()> { self.move_down(n) } fn move_left(&mut self, n: usize) -> io::Result<()> { self.move_left(n) } fn move_right(&mut self, n: usize) -> io::Result<()> { self.move_right(n) } fn move_to_first_column(&mut self) -> io::Result<()> { self.move_to_first_column() } fn set_cursor_mode(&mut self, mode: CursorMode) -> io::Result<()> { self.set_cursor_mode(mode) } fn write(&mut self, s: &str) -> io::Result<()> { self.write_str(s) } fn flush(&mut self) -> io::Result<()> { self.flush() } }
/// /// The terminal interface shall not automatically move the cursor to the next /// line when `write` causes a character to be written to the final column. fn write(&mut self, s: &str) -> io::Result<()>;
random_line_split
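The `RawRead` enum above is the other half of the read contract. A hypothetical consumer (not from the dataset), assuming `Size` and `Signal` implement `Debug` in the `mortal` re-exports:

fn describe(read: RawRead) -> String {
    match read {
        RawRead::Bytes(n) => format!("{} bytes appended to the buffer", n),
        RawRead::Resize(size) => format!("window resized to {:?}", size),
        RawRead::Signal(sig) => format!("received signal {:?}", sig),
    }
}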
test_expand.rs
use super::utils::check; use hex_literal::hex; #[test] fn aes128_expand_key_test() { use super::aes128::expand_key; let keys = [0x00; 16]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x6263636362636363, 0x6263636362636363], [0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa], [0x90973450696ccffa, 0xf2f457330b0fac99], [0xee06da7b876a1581, 0x759e42b27e91ee2b], [0x7f2e2b88f8443e09, 0x8dda7cbbf34b9290], [0xec614b851425758c, 0x99ff09376ab49ba7], [0x217517873550620b, 0xacaf6b3cc61bf09b], [0x0ef903333ba96138, 0x97060a04511dfa9f], [0xb1d4d8e28a7db9da, 0x1d7bb3de4c664941], [0xb4ef5bcb3e92e211, 0x23e951cf6f8f188e], ], ); let keys = [0xff; 16]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0xadaeae19bab8b80f, 0x525151e6454747f0], [0x090e2277b3b69a78, 0xe1e7cb9ea4a08c6e], [0xe16abd3e52dc2746, 0xb33becd8179b60b6], [0xe5baf3ceb766d488, 0x045d385013c658e6], [0x71d07db3c6b6a93b, 0xc2eb916bd12dc98d], [0xe90d208d2fbb89b6, 0xed5018dd3c7dd150], [0x96337366b988fad0, 0x54d8e20d68a5335d], [0x8bf03f233278c5f3, 0x66a027fe0e0514a3], [0xd60a3588e472f07b, 0x82d2d7858cd7c326], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0xd6aa74fdd2af72fa, 0xdaa678f1d6ab76fe], [0xb692cf0b643dbdf1, 0xbe9bc5006830b3fe], [0xb6ff744ed2c2c9bf, 0x6c590cbf0469bf41], [0x47f7f7bc95353e03, 0xf96c32bcfd058dfd], [0x3caaa3e8a99f9deb, 0x50f3af57adf622aa], [0x5e390f7df7a69296, 0xa7553dc10aa31f6b], [0x14f9701ae35fe28c, 0x440adf4d4ea9c026], [0x47438735a41c65b9, 0xe016baf4aebf7ad2], [0x549932d1f0855768, 0x1093ed9cbe2c974e], [0x13111d7fe3944a17, 0xf307a78b4d2b30c5],
], ); let keys = hex!("6920e299a5202a6d656e636869746f2a"); check( unsafe { &expand_key(&keys) }, &[ [0x6920e299a5202a6d, 0x656e636869746f2a], [0xfa8807605fa82d0d, 0x3ac64e6553b2214f], [0xcf75838d90ddae80, 0xaa1be0e5f9a9c1aa], [0x180d2f1488d08194, 0x22cb6171db62a0db], [0xbaed96ad323d1739, 0x10f67648cb94d693], [0x881b4ab2ba265d8b, 0xaad02bc36144fd50], [0xb34f195d096944d6, 0xa3b96f15c2fd9245], [0xa7007778ae6933ae, 0x0dd05cbbcf2dcefe], [0xff8bccf251e2ff5c, 0x5c32a3e7931f6d19], [0x24b7182e7555e772, 0x29674495ba78298c], [0xae127cdadb479ba8, 0xf220df3d4858f6b1], ], ); let keys = hex!("2b7e151628aed2a6abf7158809cf4f3c"); check( unsafe { &expand_key(&keys) }, &[ [0x2b7e151628aed2a6, 0xabf7158809cf4f3c], [0xa0fafe1788542cb1, 0x23a339392a6c7605], [0xf2c295f27a96b943, 0x5935807a7359f67f], [0x3d80477d4716fe3e, 0x1e237e446d7a883b], [0xef44a541a8525b7f, 0xb671253bdb0bad00], [0xd4d1c6f87c839d87, 0xcaf2b8bc11f915bc], [0x6d88a37a110b3efd, 0xdbf98641ca0093fd], [0x4e54f70e5f5fc9f3, 0x84a64fb24ea6dc4f], [0xead27321b58dbad2, 0x312bf5607f8d292f], [0xac7766f319fadc21, 0x28d12941575c006e], [0xd014f9a8c9ee2589, 0xe13f0cc8b6630ca6], ], ); } #[test] fn aes192_expand_key_test() { use super::aes192::expand_key; let keys = [0x00; 24]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x0000000000000000, 0x6263636362636363], [0x6263636362636363, 0x6263636362636363], [0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa], [0x9b9898c9f9fbfbaa, 0x90973450696ccffa], [0xf2f457330b0fac99, 0x90973450696ccffa], [0xc81d19a9a171d653, 0x53858160588a2df9], [0xc81d19a9a171d653, 0x7bebf49bda9a22c8], [0x891fa3a8d1958e51, 0x198897f8b8f941ab], [0xc26896f718f2b43f, 0x91ed1797407899c6], [0x59f00e3ee1094f95, 0x83ecbc0f9b1e0830], [0x0af31fa74a8b8661, 0x137b885ff272c7ca], [0x432ac886d834c0b6, 0xd2c7df11984c5970], ], ); let keys = [0xff; 24]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xffffffffffffffff, 0xe8e9e9e917161616], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0xadaeae19bab8b80f, 0x525151e6454747f0], [0xadaeae19bab8b80f, 0xc5c2d8ed7f7a60e2], [0x2d2b3104686c76f4, 0xc5c2d8ed7f7a60e2], [0x1712403f686820dd, 0x454311d92d2f672d], [0xe8edbfc09797df22, 0x8f8cd3b7e7e4f36a], [0xa2a7e2b38f88859e, 0x67653a5ef0f2e57c], [0x2655c33bc1b13051, 0x6316d2e2ec9e577c], [0x8bfb6d227b09885e, 0x67919b1aa620ab4b], [0xc53679a929a82ed5, 0xa25343f7d95acba9], [0x598e482fffaee364, 0x3a989acd1330b418], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f1011121314151617"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0x1011121314151617, 0x5846f2f95c43f4fe], [0x544afef55847f0fa, 0x4856e2e95c43f4fe], [0x40f949b31cbabd4d, 0x48f043b810b7b342], [0x58e151ab04a2a555, 0x7effb5416245080c], [0x2ab54bb43a02f8f6, 0x62e3a95d66410c08], [0xf501857297448d7e, 0xbdf1c6ca87f33e3c], [0xe510976183519b69, 0x34157c9ea351f1e0], [0x1ea0372a99530916, 0x7c439e77ff12051e], [0xdd7e0e887e2fff68, 0x608fc842f9dcc154], [0x859f5f237a8d5a3d, 0xc0c02952beefd63a], [0xde601e7827bcdf2c, 0xa223800fd8aeda32], [0xa4970a331a78dc09, 0xc418c271e3a41d5d], ], ); let keys = hex!("8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b"); check( unsafe { &expand_key(&keys) }, &[ [0x8e73b0f7da0e6452, 0xc810f32b809079e5], [0x62f8ead2522c6b7b, 0xfe0c91f72402f5a5], [0xec12068e6c827f6b, 0x0e7a95b95c56fec2], [0x4db7b4bd69b54118, 0x85a74796e92538fd], [0xe75fad44bb095386, 0x485af05721efb14f], [0xa448f6d94d6dce24, 0xaa326360113b30e6], [0xa25e7ed583b1cf9a, 0x27f939436a94f767], [0xc0a69407d19da4e1, 0xec1786eb6fa64971], 
[0x485f703222cb8755, 0xe26d135233f0b7b3], [0x40beeb282f18a259, 0x6747d26b458c553e], [0xa7e1466c9411f1df, 0x821f750aad07d753], [0xca4005388fcc5006, 0x282d166abc3ce7b5], [0xe98ba06f448c773c, 0x8ecc720401002202], ], ); } #[test] fn aes256_expand_key_test() { use super::aes256::expand_key; let keys = [0x00; 32]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x0000000000000000, 0x0000000000000000], [0x6263636362636363, 0x6263636362636363], [0xaafbfbfbaafbfbfb, 0xaafbfbfbaafbfbfb], [0x6f6c6ccf0d0f0fac, 0x6f6c6ccf0d0f0fac], [0x7d8d8d6ad7767691, 0x7d8d8d6ad7767691], [0x5354edc15e5be26d, 0x31378ea23c38810e], [0x968a81c141fcf750, 0x3c717a3aeb070cab], [0x9eaa8f28c0f16d45, 0xf1c6e3e7cdfe62e9], [0x2b312bdf6acddc8f, 0x56bca6b5bdbbaa1e], [0x6406fd52a4f79017, 0x553173f098cf1119], [0x6dbba90b07767584, 0x51cad331ec71792f], [0xe7b0e89c4347788b, 0x16760b7b8eb91a62], [0x74ed0ba1739b7e25, 0x2251ad14ce20d43b], [0x10f80a1753bf729c, 0x45c979e7cb706385], ], ); let keys = [0xff; 32]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xffffffffffffffff, 0xffffffffffffffff], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0x0fb8b8b8f0474747, 0x0fb8b8b8f0474747], [0x4a4949655d5f5f73, 0xb5b6b69aa2a0a08c], [0x355858dcc51f1f9b, 0xcaa7a7233ae0e064], [0xafa80ae5f2f75596, 0x4741e30ce5e14380], [0xeca0421129bf5d8a, 0xe318faa9d9f81acd], [0xe60ab7d014fde246, 0x53bc014ab65d42ca], [0xa2ec6e658b5333ef, 0x684bc946b1b3d38b], [0x9b6c8a188f91685e, 0xdc2d69146a702bde], [0xa0bd9f782beeac97, 0x43a565d1f216b65a], [0xfc22349173b35ccf, 0xaf9e35dbc5ee1e05], [0x0695ed132d7b4184, 0x6ede24559cc8920f], [0x546d424f27de1e80, 0x88402b5b4dae355e], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0x1011121314151617, 0x18191a1b1c1d1e1f], [0xa573c29fa176c498, 0xa97fce93a572c09c], [0x1651a8cd0244beda, 0x1a5da4c10640bade], [0xae87dff00ff11b68, 0xa68ed5fb03fc1567], [0x6de1f1486fa54f92, 0x75f8eb5373b8518d], [0xc656827fc9a79917, 0x6f294cec6cd5598b], [0x3de23a75524775e7, 0x27bf9eb45407cf39], [0x0bdc905fc27b0948, 0xad5245a4c1871c2f], [0x45f5a66017b2d387, 0x300d4d33640a820a], [0x7ccff71cbeb4fe54, 0x13e6bbf0d261a7df], [0xf01afafee7a82979, 0xd7a5644ab3afe640], [0x2541fe719bf50025, 0x8813bbd55a721c0a], [0x4e5a6699a9f24fe0, 0x7e572baacdf8cdea], [0x24fc79ccbf0979e9, 0x371ac23c6d68de36], ], ); let keys = hex!("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"); check( unsafe { &expand_key(&keys) }, &[ [0x603deb1015ca71be, 0x2b73aef0857d7781], [0x1f352c073b6108d7, 0x2d9810a30914dff4], [0x9ba354118e6925af, 0xa51a8b5f2067fcde], [0xa8b09c1a93d194cd, 0xbe49846eb75d5b9a], [0xd59aecb85bf3c917, 0xfee94248de8ebe96], [0xb5a9328a2678a647, 0x983122292f6c79b3], [0x812c81addadf48ba, 0x24360af2fab8b464], [0x98c5bfc9bebd198e, 0x268c3ba709e04214], [0x68007bacb2df3316, 0x96e939e46c518d80], [0xc814e20476a9fb8a, 0x5025c02d59c58239], [0xde1369676ccc5a71, 0xfa2563959674ee15], [0x5886ca5d2e2f31d7, 0x7e0af1fa27cf73c3], [0x749c47ab18501dda, 0xe2757e4f7401905a], [0xcafaaae3e4d59b34, 0x9adf6acebd10190d], [0xfe4890d1e6188d0b, 0x046df344706c631e], ], ); }
random_line_split
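The expected arrays above are the FIPS-197 key-schedule vectors. For readers without the SIMD-backed `expand_key` under test, here is a portable reference sketch that reproduces them; the helper names (`gmul`, `sbox`, `expand_key_128`) are mine, not the crate's, and the S-box is computed from the GF(2^8) inverse plus affine map instead of a lookup table.

// Multiply in GF(2^8) modulo the AES polynomial x^8 + x^4 + x^3 + x + 1.
fn gmul(mut a: u8, mut b: u8) -> u8 {
    let mut p = 0u8;
    for _ in 0..8 {
        if b & 1 != 0 { p ^= a; }
        let hi = a & 0x80;
        a <<= 1;
        if hi != 0 { a ^= 0x1b; }
        b >>= 1;
    }
    p
}

// S-box: multiplicative inverse (x^254, which maps 0 to 0) followed by the affine map.
fn sbox(x: u8) -> u8 {
    let mut inv = 1u8;
    let mut p = gmul(x, x); // x^2
    for _ in 0..7 {
        inv = gmul(inv, p); // accumulate x^2 * x^4 * ... * x^128 = x^254
        p = gmul(p, p);
    }
    inv ^ inv.rotate_left(1) ^ inv.rotate_left(2) ^ inv.rotate_left(3) ^ inv.rotate_left(4) ^ 0x63
}

// AES-128 key schedule: 4 key words expanded to 44 = 4 * (10 + 1) words.
fn expand_key_128(key: &[u8; 16]) -> [[u8; 4]; 44] {
    let mut w = [[0u8; 4]; 44];
    for i in 0..4 {
        w[i].copy_from_slice(&key[4 * i..4 * i + 4]);
    }
    let mut rcon = 1u8;
    for i in 4..44 {
        let mut t = w[i - 1];
        if i % 4 == 0 {
            t.rotate_left(1);                        // RotWord
            for b in t.iter_mut() { *b = sbox(*b); } // SubWord
            t[0] ^= rcon;
            rcon = gmul(rcon, 2);                    // next round constant
        }
        for j in 0..4 {
            w[i][j] = w[i - 4][j] ^ t[j];
        }
    }
    w
}

Sanity check against the all-zero vector above: for round 1, RotWord of the zero word is still zero, SubWord maps it to 0x63636363, and XORing Rcon(1) into the first byte gives 0x62636363, matching the second expected row.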
test_expand.rs
use super::utils::check; use hex_literal::hex; #[test] fn aes128_expand_key_test() { use super::aes128::expand_key; let keys = [0x00; 16]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x6263636362636363, 0x6263636362636363], [0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa], [0x90973450696ccffa, 0xf2f457330b0fac99], [0xee06da7b876a1581, 0x759e42b27e91ee2b], [0x7f2e2b88f8443e09, 0x8dda7cbbf34b9290], [0xec614b851425758c, 0x99ff09376ab49ba7], [0x217517873550620b, 0xacaf6b3cc61bf09b], [0x0ef903333ba96138, 0x97060a04511dfa9f], [0xb1d4d8e28a7db9da, 0x1d7bb3de4c664941], [0xb4ef5bcb3e92e211, 0x23e951cf6f8f188e], ], ); let keys = [0xff; 16]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0xadaeae19bab8b80f, 0x525151e6454747f0], [0x090e2277b3b69a78, 0xe1e7cb9ea4a08c6e], [0xe16abd3e52dc2746, 0xb33becd8179b60b6], [0xe5baf3ceb766d488, 0x045d385013c658e6], [0x71d07db3c6b6a93b, 0xc2eb916bd12dc98d], [0xe90d208d2fbb89b6, 0xed5018dd3c7dd150], [0x96337366b988fad0, 0x54d8e20d68a5335d], [0x8bf03f233278c5f3, 0x66a027fe0e0514a3], [0xd60a3588e472f07b, 0x82d2d7858cd7c326], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0xd6aa74fdd2af72fa, 0xdaa678f1d6ab76fe], [0xb692cf0b643dbdf1, 0xbe9bc5006830b3fe], [0xb6ff744ed2c2c9bf, 0x6c590cbf0469bf41], [0x47f7f7bc95353e03, 0xf96c32bcfd058dfd], [0x3caaa3e8a99f9deb, 0x50f3af57adf622aa], [0x5e390f7df7a69296, 0xa7553dc10aa31f6b], [0x14f9701ae35fe28c, 0x440adf4d4ea9c026], [0x47438735a41c65b9, 0xe016baf4aebf7ad2], [0x549932d1f0855768, 0x1093ed9cbe2c974e], [0x13111d7fe3944a17, 0xf307a78b4d2b30c5], ], ); let keys = hex!("6920e299a5202a6d656e636869746f2a"); check( unsafe { &expand_key(&keys) }, &[ [0x6920e299a5202a6d, 0x656e636869746f2a], [0xfa8807605fa82d0d, 0x3ac64e6553b2214f], [0xcf75838d90ddae80, 0xaa1be0e5f9a9c1aa], [0x180d2f1488d08194, 0x22cb6171db62a0db], [0xbaed96ad323d1739, 0x10f67648cb94d693], [0x881b4ab2ba265d8b, 0xaad02bc36144fd50], [0xb34f195d096944d6, 0xa3b96f15c2fd9245], [0xa7007778ae6933ae, 0x0dd05cbbcf2dcefe], [0xff8bccf251e2ff5c, 0x5c32a3e7931f6d19], [0x24b7182e7555e772, 0x29674495ba78298c], [0xae127cdadb479ba8, 0xf220df3d4858f6b1], ], ); let keys = hex!("2b7e151628aed2a6abf7158809cf4f3c"); check( unsafe { &expand_key(&keys) }, &[ [0x2b7e151628aed2a6, 0xabf7158809cf4f3c], [0xa0fafe1788542cb1, 0x23a339392a6c7605], [0xf2c295f27a96b943, 0x5935807a7359f67f], [0x3d80477d4716fe3e, 0x1e237e446d7a883b], [0xef44a541a8525b7f, 0xb671253bdb0bad00], [0xd4d1c6f87c839d87, 0xcaf2b8bc11f915bc], [0x6d88a37a110b3efd, 0xdbf98641ca0093fd], [0x4e54f70e5f5fc9f3, 0x84a64fb24ea6dc4f], [0xead27321b58dbad2, 0x312bf5607f8d292f], [0xac7766f319fadc21, 0x28d12941575c006e], [0xd014f9a8c9ee2589, 0xe13f0cc8b6630ca6], ], ); } #[test] fn aes192_expand_key_test()
], ); let keys = [0xff; 24]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xffffffffffffffff, 0xe8e9e9e917161616], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0xadaeae19bab8b80f, 0x525151e6454747f0], [0xadaeae19bab8b80f, 0xc5c2d8ed7f7a60e2], [0x2d2b3104686c76f4, 0xc5c2d8ed7f7a60e2], [0x1712403f686820dd, 0x454311d92d2f672d], [0xe8edbfc09797df22, 0x8f8cd3b7e7e4f36a], [0xa2a7e2b38f88859e, 0x67653a5ef0f2e57c], [0x2655c33bc1b13051, 0x6316d2e2ec9e577c], [0x8bfb6d227b09885e, 0x67919b1aa620ab4b], [0xc53679a929a82ed5, 0xa25343f7d95acba9], [0x598e482fffaee364, 0x3a989acd1330b418], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f1011121314151617"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0x1011121314151617, 0x5846f2f95c43f4fe], [0x544afef55847f0fa, 0x4856e2e95c43f4fe], [0x40f949b31cbabd4d, 0x48f043b810b7b342], [0x58e151ab04a2a555, 0x7effb5416245080c], [0x2ab54bb43a02f8f6, 0x62e3a95d66410c08], [0xf501857297448d7e, 0xbdf1c6ca87f33e3c], [0xe510976183519b69, 0x34157c9ea351f1e0], [0x1ea0372a99530916, 0x7c439e77ff12051e], [0xdd7e0e887e2fff68, 0x608fc842f9dcc154], [0x859f5f237a8d5a3d, 0xc0c02952beefd63a], [0xde601e7827bcdf2c, 0xa223800fd8aeda32], [0xa4970a331a78dc09, 0xc418c271e3a41d5d], ], ); let keys = hex!("8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b"); check( unsafe { &expand_key(&keys) }, &[ [0x8e73b0f7da0e6452, 0xc810f32b809079e5], [0x62f8ead2522c6b7b, 0xfe0c91f72402f5a5], [0xec12068e6c827f6b, 0x0e7a95b95c56fec2], [0x4db7b4bd69b54118, 0x85a74796e92538fd], [0xe75fad44bb095386, 0x485af05721efb14f], [0xa448f6d94d6dce24, 0xaa326360113b30e6], [0xa25e7ed583b1cf9a, 0x27f939436a94f767], [0xc0a69407d19da4e1, 0xec1786eb6fa64971], [0x485f703222cb8755, 0xe26d135233f0b7b3], [0x40beeb282f18a259, 0x6747d26b458c553e], [0xa7e1466c9411f1df, 0x821f750aad07d753], [0xca4005388fcc5006, 0x282d166abc3ce7b5], [0xe98ba06f448c773c, 0x8ecc720401002202], ], ); } #[test] fn aes256_expand_key_test() { use super::aes256::expand_key; let keys = [0x00; 32]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x0000000000000000, 0x0000000000000000], [0x6263636362636363, 0x6263636362636363], [0xaafbfbfbaafbfbfb, 0xaafbfbfbaafbfbfb], [0x6f6c6ccf0d0f0fac, 0x6f6c6ccf0d0f0fac], [0x7d8d8d6ad7767691, 0x7d8d8d6ad7767691], [0x5354edc15e5be26d, 0x31378ea23c38810e], [0x968a81c141fcf750, 0x3c717a3aeb070cab], [0x9eaa8f28c0f16d45, 0xf1c6e3e7cdfe62e9], [0x2b312bdf6acddc8f, 0x56bca6b5bdbbaa1e], [0x6406fd52a4f79017, 0x553173f098cf1119], [0x6dbba90b07767584, 0x51cad331ec71792f], [0xe7b0e89c4347788b, 0x16760b7b8eb91a62], [0x74ed0ba1739b7e25, 0x2251ad14ce20d43b], [0x10f80a1753bf729c, 0x45c979e7cb706385], ], ); let keys = [0xff; 32]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xffffffffffffffff, 0xffffffffffffffff], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0x0fb8b8b8f0474747, 0x0fb8b8b8f0474747], [0x4a4949655d5f5f73, 0xb5b6b69aa2a0a08c], [0x355858dcc51f1f9b, 0xcaa7a7233ae0e064], [0xafa80ae5f2f75596, 0x4741e30ce5e14380], [0xeca0421129bf5d8a, 0xe318faa9d9f81acd], [0xe60ab7d014fde246, 0x53bc014ab65d42ca], [0xa2ec6e658b5333ef, 0x684bc946b1b3d38b], [0x9b6c8a188f91685e, 0xdc2d69146a702bde], [0xa0bd9f782beeac97, 0x43a565d1f216b65a], [0xfc22349173b35ccf, 0xaf9e35dbc5ee1e05], [0x0695ed132d7b4184, 0x6ede24559cc8920f], [0x546d424f27de1e80, 0x88402b5b4dae355e], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"); check( unsafe { &expand_key(&keys) }, &[ 
[0x0001020304050607, 0x08090a0b0c0d0e0f], [0x1011121314151617, 0x18191a1b1c1d1e1f], [0xa573c29fa176c498, 0xa97fce93a572c09c], [0x1651a8cd0244beda, 0x1a5da4c10640bade], [0xae87dff00ff11b68, 0xa68ed5fb03fc1567], [0x6de1f1486fa54f92, 0x75f8eb5373b8518d], [0xc656827fc9a79917, 0x6f294cec6cd5598b], [0x3de23a75524775e7, 0x27bf9eb45407cf39], [0x0bdc905fc27b0948, 0xad5245a4c1871c2f], [0x45f5a66017b2d387, 0x300d4d33640a820a], [0x7ccff71cbeb4fe54, 0x13e6bbf0d261a7df], [0xf01afafee7a82979, 0xd7a5644ab3afe640], [0x2541fe719bf50025, 0x8813bbd55a721c0a], [0x4e5a6699a9f24fe0, 0x7e572baacdf8cdea], [0x24fc79ccbf0979e9, 0x371ac23c6d68de36], ], ); let keys = hex!("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"); check( unsafe { &expand_key(&keys) }, &[ [0x603deb1015ca71be, 0x2b73aef0857d7781], [0x1f352c073b6108d7, 0x2d9810a30914dff4], [0x9ba354118e6925af, 0xa51a8b5f2067fcde], [0xa8b09c1a93d194cd, 0xbe49846eb75d5b9a], [0xd59aecb85bf3c917, 0xfee94248de8ebe96], [0xb5a9328a2678a647, 0x983122292f6c79b3], [0x812c81addadf48ba, 0x24360af2fab8b464], [0x98c5bfc9bebd198e, 0x268c3ba709e04214], [0x68007bacb2df3316, 0x96e939e46c518d80], [0xc814e20476a9fb8a, 0x5025c02d59c58239], [0xde1369676ccc5a71, 0xfa2563959674ee15], [0x5886ca5d2e2f31d7, 0x7e0af1fa27cf73c3], [0x749c47ab18501dda, 0xe2757e4f7401905a], [0xcafaaae3e4d59b34, 0x9adf6acebd10190d], [0xfe4890d1e6188d0b, 0x046df344706c631e], ], ); }
{ use super::aes192::expand_key; let keys = [0x00; 24]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x0000000000000000, 0x6263636362636363], [0x6263636362636363, 0x6263636362636363], [0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa], [0x9b9898c9f9fbfbaa, 0x90973450696ccffa], [0xf2f457330b0fac99, 0x90973450696ccffa], [0xc81d19a9a171d653, 0x53858160588a2df9], [0xc81d19a9a171d653, 0x7bebf49bda9a22c8], [0x891fa3a8d1958e51, 0x198897f8b8f941ab], [0xc26896f718f2b43f, 0x91ed1797407899c6], [0x59f00e3ee1094f95, 0x83ecbc0f9b1e0830], [0x0af31fa74a8b8661, 0x137b885ff272c7ca], [0x432ac886d834c0b6, 0xd2c7df11984c5970],
identifier_body
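A small reminder of why the expected arrays have 11, 13, and 15 rows: each row is one 128-bit round key, and AES uses Nr + 1 of them (Nr = 10, 12, 14 for the three key sizes). A purely illustrative helper, not part of the crate:

fn round_key_count(key_bytes: usize) -> usize {
    match key_bytes {
        16 => 11, // AES-128
        24 => 13, // AES-192
        32 => 15, // AES-256
        n => panic!("unsupported AES key size: {} bytes", n),
    }
}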
test_expand.rs
use super::utils::check; use hex_literal::hex; #[test] fn aes128_expand_key_test() { use super::aes128::expand_key; let keys = [0x00; 16]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x6263636362636363, 0x6263636362636363], [0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa], [0x90973450696ccffa, 0xf2f457330b0fac99], [0xee06da7b876a1581, 0x759e42b27e91ee2b], [0x7f2e2b88f8443e09, 0x8dda7cbbf34b9290], [0xec614b851425758c, 0x99ff09376ab49ba7], [0x217517873550620b, 0xacaf6b3cc61bf09b], [0x0ef903333ba96138, 0x97060a04511dfa9f], [0xb1d4d8e28a7db9da, 0x1d7bb3de4c664941], [0xb4ef5bcb3e92e211, 0x23e951cf6f8f188e], ], ); let keys = [0xff; 16]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0xadaeae19bab8b80f, 0x525151e6454747f0], [0x090e2277b3b69a78, 0xe1e7cb9ea4a08c6e], [0xe16abd3e52dc2746, 0xb33becd8179b60b6], [0xe5baf3ceb766d488, 0x045d385013c658e6], [0x71d07db3c6b6a93b, 0xc2eb916bd12dc98d], [0xe90d208d2fbb89b6, 0xed5018dd3c7dd150], [0x96337366b988fad0, 0x54d8e20d68a5335d], [0x8bf03f233278c5f3, 0x66a027fe0e0514a3], [0xd60a3588e472f07b, 0x82d2d7858cd7c326], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0xd6aa74fdd2af72fa, 0xdaa678f1d6ab76fe], [0xb692cf0b643dbdf1, 0xbe9bc5006830b3fe], [0xb6ff744ed2c2c9bf, 0x6c590cbf0469bf41], [0x47f7f7bc95353e03, 0xf96c32bcfd058dfd], [0x3caaa3e8a99f9deb, 0x50f3af57adf622aa], [0x5e390f7df7a69296, 0xa7553dc10aa31f6b], [0x14f9701ae35fe28c, 0x440adf4d4ea9c026], [0x47438735a41c65b9, 0xe016baf4aebf7ad2], [0x549932d1f0855768, 0x1093ed9cbe2c974e], [0x13111d7fe3944a17, 0xf307a78b4d2b30c5], ], ); let keys = hex!("6920e299a5202a6d656e636869746f2a"); check( unsafe { &expand_key(&keys) }, &[ [0x6920e299a5202a6d, 0x656e636869746f2a], [0xfa8807605fa82d0d, 0x3ac64e6553b2214f], [0xcf75838d90ddae80, 0xaa1be0e5f9a9c1aa], [0x180d2f1488d08194, 0x22cb6171db62a0db], [0xbaed96ad323d1739, 0x10f67648cb94d693], [0x881b4ab2ba265d8b, 0xaad02bc36144fd50], [0xb34f195d096944d6, 0xa3b96f15c2fd9245], [0xa7007778ae6933ae, 0x0dd05cbbcf2dcefe], [0xff8bccf251e2ff5c, 0x5c32a3e7931f6d19], [0x24b7182e7555e772, 0x29674495ba78298c], [0xae127cdadb479ba8, 0xf220df3d4858f6b1], ], ); let keys = hex!("2b7e151628aed2a6abf7158809cf4f3c"); check( unsafe { &expand_key(&keys) }, &[ [0x2b7e151628aed2a6, 0xabf7158809cf4f3c], [0xa0fafe1788542cb1, 0x23a339392a6c7605], [0xf2c295f27a96b943, 0x5935807a7359f67f], [0x3d80477d4716fe3e, 0x1e237e446d7a883b], [0xef44a541a8525b7f, 0xb671253bdb0bad00], [0xd4d1c6f87c839d87, 0xcaf2b8bc11f915bc], [0x6d88a37a110b3efd, 0xdbf98641ca0093fd], [0x4e54f70e5f5fc9f3, 0x84a64fb24ea6dc4f], [0xead27321b58dbad2, 0x312bf5607f8d292f], [0xac7766f319fadc21, 0x28d12941575c006e], [0xd014f9a8c9ee2589, 0xe13f0cc8b6630ca6], ], ); } #[test] fn aes192_expand_key_test() { use super::aes192::expand_key; let keys = [0x00; 24]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x0000000000000000, 0x6263636362636363], [0x6263636362636363, 0x6263636362636363], [0x9b9898c9f9fbfbaa, 0x9b9898c9f9fbfbaa], [0x9b9898c9f9fbfbaa, 0x90973450696ccffa], [0xf2f457330b0fac99, 0x90973450696ccffa], [0xc81d19a9a171d653, 0x53858160588a2df9], [0xc81d19a9a171d653, 0x7bebf49bda9a22c8], [0x891fa3a8d1958e51, 0x198897f8b8f941ab], [0xc26896f718f2b43f, 0x91ed1797407899c6], [0x59f00e3ee1094f95, 0x83ecbc0f9b1e0830], [0x0af31fa74a8b8661, 0x137b885ff272c7ca], [0x432ac886d834c0b6, 0xd2c7df11984c5970], ], ); 
let keys = [0xff; 24]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xffffffffffffffff, 0xe8e9e9e917161616], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0xadaeae19bab8b80f, 0x525151e6454747f0], [0xadaeae19bab8b80f, 0xc5c2d8ed7f7a60e2], [0x2d2b3104686c76f4, 0xc5c2d8ed7f7a60e2], [0x1712403f686820dd, 0x454311d92d2f672d], [0xe8edbfc09797df22, 0x8f8cd3b7e7e4f36a], [0xa2a7e2b38f88859e, 0x67653a5ef0f2e57c], [0x2655c33bc1b13051, 0x6316d2e2ec9e577c], [0x8bfb6d227b09885e, 0x67919b1aa620ab4b], [0xc53679a929a82ed5, 0xa25343f7d95acba9], [0x598e482fffaee364, 0x3a989acd1330b418], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f1011121314151617"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0x1011121314151617, 0x5846f2f95c43f4fe], [0x544afef55847f0fa, 0x4856e2e95c43f4fe], [0x40f949b31cbabd4d, 0x48f043b810b7b342], [0x58e151ab04a2a555, 0x7effb5416245080c], [0x2ab54bb43a02f8f6, 0x62e3a95d66410c08], [0xf501857297448d7e, 0xbdf1c6ca87f33e3c], [0xe510976183519b69, 0x34157c9ea351f1e0], [0x1ea0372a99530916, 0x7c439e77ff12051e], [0xdd7e0e887e2fff68, 0x608fc842f9dcc154], [0x859f5f237a8d5a3d, 0xc0c02952beefd63a], [0xde601e7827bcdf2c, 0xa223800fd8aeda32], [0xa4970a331a78dc09, 0xc418c271e3a41d5d], ], ); let keys = hex!("8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b"); check( unsafe { &expand_key(&keys) }, &[ [0x8e73b0f7da0e6452, 0xc810f32b809079e5], [0x62f8ead2522c6b7b, 0xfe0c91f72402f5a5], [0xec12068e6c827f6b, 0x0e7a95b95c56fec2], [0x4db7b4bd69b54118, 0x85a74796e92538fd], [0xe75fad44bb095386, 0x485af05721efb14f], [0xa448f6d94d6dce24, 0xaa326360113b30e6], [0xa25e7ed583b1cf9a, 0x27f939436a94f767], [0xc0a69407d19da4e1, 0xec1786eb6fa64971], [0x485f703222cb8755, 0xe26d135233f0b7b3], [0x40beeb282f18a259, 0x6747d26b458c553e], [0xa7e1466c9411f1df, 0x821f750aad07d753], [0xca4005388fcc5006, 0x282d166abc3ce7b5], [0xe98ba06f448c773c, 0x8ecc720401002202], ], ); } #[test] fn
() { use super::aes256::expand_key; let keys = [0x00; 32]; check( unsafe { &expand_key(&keys) }, &[ [0x0000000000000000, 0x0000000000000000], [0x0000000000000000, 0x0000000000000000], [0x6263636362636363, 0x6263636362636363], [0xaafbfbfbaafbfbfb, 0xaafbfbfbaafbfbfb], [0x6f6c6ccf0d0f0fac, 0x6f6c6ccf0d0f0fac], [0x7d8d8d6ad7767691, 0x7d8d8d6ad7767691], [0x5354edc15e5be26d, 0x31378ea23c38810e], [0x968a81c141fcf750, 0x3c717a3aeb070cab], [0x9eaa8f28c0f16d45, 0xf1c6e3e7cdfe62e9], [0x2b312bdf6acddc8f, 0x56bca6b5bdbbaa1e], [0x6406fd52a4f79017, 0x553173f098cf1119], [0x6dbba90b07767584, 0x51cad331ec71792f], [0xe7b0e89c4347788b, 0x16760b7b8eb91a62], [0x74ed0ba1739b7e25, 0x2251ad14ce20d43b], [0x10f80a1753bf729c, 0x45c979e7cb706385], ], ); let keys = [0xff; 32]; check( unsafe { &expand_key(&keys) }, &[ [0xffffffffffffffff, 0xffffffffffffffff], [0xffffffffffffffff, 0xffffffffffffffff], [0xe8e9e9e917161616, 0xe8e9e9e917161616], [0x0fb8b8b8f0474747, 0x0fb8b8b8f0474747], [0x4a4949655d5f5f73, 0xb5b6b69aa2a0a08c], [0x355858dcc51f1f9b, 0xcaa7a7233ae0e064], [0xafa80ae5f2f75596, 0x4741e30ce5e14380], [0xeca0421129bf5d8a, 0xe318faa9d9f81acd], [0xe60ab7d014fde246, 0x53bc014ab65d42ca], [0xa2ec6e658b5333ef, 0x684bc946b1b3d38b], [0x9b6c8a188f91685e, 0xdc2d69146a702bde], [0xa0bd9f782beeac97, 0x43a565d1f216b65a], [0xfc22349173b35ccf, 0xaf9e35dbc5ee1e05], [0x0695ed132d7b4184, 0x6ede24559cc8920f], [0x546d424f27de1e80, 0x88402b5b4dae355e], ], ); let keys = hex!("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"); check( unsafe { &expand_key(&keys) }, &[ [0x0001020304050607, 0x08090a0b0c0d0e0f], [0x1011121314151617, 0x18191a1b1c1d1e1f], [0xa573c29fa176c498, 0xa97fce93a572c09c], [0x1651a8cd0244beda, 0x1a5da4c10640bade], [0xae87dff00ff11b68, 0xa68ed5fb03fc1567], [0x6de1f1486fa54f92, 0x75f8eb5373b8518d], [0xc656827fc9a79917, 0x6f294cec6cd5598b], [0x3de23a75524775e7, 0x27bf9eb45407cf39], [0x0bdc905fc27b0948, 0xad5245a4c1871c2f], [0x45f5a66017b2d387, 0x300d4d33640a820a], [0x7ccff71cbeb4fe54, 0x13e6bbf0d261a7df], [0xf01afafee7a82979, 0xd7a5644ab3afe640], [0x2541fe719bf50025, 0x8813bbd55a721c0a], [0x4e5a6699a9f24fe0, 0x7e572baacdf8cdea], [0x24fc79ccbf0979e9, 0x371ac23c6d68de36], ], ); let keys = hex!("603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4"); check( unsafe { &expand_key(&keys) }, &[ [0x603deb1015ca71be, 0x2b73aef0857d7781], [0x1f352c073b6108d7, 0x2d9810a30914dff4], [0x9ba354118e6925af, 0xa51a8b5f2067fcde], [0xa8b09c1a93d194cd, 0xbe49846eb75d5b9a], [0xd59aecb85bf3c917, 0xfee94248de8ebe96], [0xb5a9328a2678a647, 0x983122292f6c79b3], [0x812c81addadf48ba, 0x24360af2fab8b464], [0x98c5bfc9bebd198e, 0x268c3ba709e04214], [0x68007bacb2df3316, 0x96e939e46c518d80], [0xc814e20476a9fb8a, 0x5025c02d59c58239], [0xde1369676ccc5a71, 0xfa2563959674ee15], [0x5886ca5d2e2f31d7, 0x7e0af1fa27cf73c3], [0x749c47ab18501dda, 0xe2757e4f7401905a], [0xcafaaae3e4d59b34, 0x9adf6acebd10190d], [0xfe4890d1e6188d0b, 0x046df344706c631e], ], ); }
aes256_expand_key_test
identifier_name
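The AES-256 schedule exercised by the vectors above differs from AES-128 in one detail: with Nk = 8, every eighth word gets the usual RotWord + SubWord + Rcon, but words with i % 8 == 4 get a plain SubWord with no rotation and no round constant. A sketch reusing the `gmul` and `sbox` helpers from the AES-128 sketch earlier (again illustrative, not the crate's code):

// AES-256 key schedule: 8 key words expanded to 60 = 4 * (14 + 1) words.
fn expand_key_256(key: &[u8; 32]) -> [[u8; 4]; 60] {
    let mut w = [[0u8; 4]; 60];
    for i in 0..8 {
        w[i].copy_from_slice(&key[4 * i..4 * i + 4]);
    }
    let mut rcon = 1u8;
    for i in 8..60 {
        let mut t = w[i - 1];
        if i % 8 == 0 {
            t.rotate_left(1);                        // RotWord
            for b in t.iter_mut() { *b = sbox(*b); } // SubWord
            t[0] ^= rcon;
            rcon = gmul(rcon, 2);
        } else if i % 8 == 4 {
            for b in t.iter_mut() { *b = sbox(*b); } // extra SubWord, AES-256 only
        }
        for j in 0..4 {
            w[i][j] = w[i - 4][j] ^ t[j];
        }
    }
    w
}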
day11.rs
use std::{collections::HashSet, io::Write}; use itertools::Itertools; use snafu::Snafu; type Result<T> = std::result::Result<T, Error>; #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone)] enum Item { Chip(char), Generator(char), ChipAndGenerator, } impl std::str::FromStr for Item { type Err = Error; fn from_str(s: &str) -> Result<Self>
} impl std::fmt::Debug for Item { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Item::Chip(id) => write!(f, "{}M", id), Item::Generator(id) => write!(f, "{}G", id), Item::ChipAndGenerator => write!(f, "<>"), } } } #[derive(Debug, PartialEq, Eq, Clone)] struct State { elevator: usize, floors: Vec<HashSet<Item>>, } impl std::fmt::Display for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for (i, floor) in self.floors.iter().enumerate() { write!( f, "{} {}: {:?}\n", i, if self.elevator == i { "E" } else { " " }, floor )?; } Ok(()) } } fn would_fry(items: &HashSet<Item>) -> bool { for item in items { match item { Item::Chip(id) => { if items.contains(&Item::Generator(*id)) { // chip is protected by generator continue; } for other_item in items { if let Item::Generator(other_id) = other_item { // TODO: we might not need this if if other_id != id { // chip gets fried by another generator return true; } } } } Item::Generator(_) => {} Item::ChipAndGenerator => {} } } false } impl State { fn score(&self) -> usize { self.floors .iter() .enumerate() .map(|(i, f)| f.len() * (i + 1) * 10) .sum::<usize>() } fn is_success(&self) -> bool { for floor in &self.floors[..self.floors.len() - 1] { if !floor.is_empty() { return false; } } true } fn get_neighbors(&self) -> Vec<State> { let mut out = Vec::new(); // calculate valid floors that the elevator can move to let mut valid_destinations = Vec::new(); if self.elevator > 0 { valid_destinations.push(self.elevator - 1); }; if self.elevator < self.floors.len() - 1 { valid_destinations.push(self.elevator + 1); } for num_items in 1..=2 { // generate sets of items that can be taken from current floor - none, one, or two for moved_items in self.floors[self.elevator].iter().combinations(num_items) { let moved_items: HashSet<Item> = moved_items.into_iter().cloned().collect(); for destination in &valid_destinations { // take moved_items from self.elevator to destination let current_floor: HashSet<Item> = self.floors[self.elevator] .difference(&moved_items) .cloned() .collect(); let destination_floor: HashSet<Item> = self.floors[*destination] .union(&moved_items) .cloned() .collect(); // do not perform invalid moves if would_fry(&current_floor) || would_fry(&destination_floor) { continue; } let mut new_state: State = self.clone(); new_state.floors[self.elevator] = current_floor; new_state.floors[*destination] = destination_floor; new_state.elevator = *destination; out.push(new_state); } } } out } } impl std::hash::Hash for State { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.elevator.hash(state); let floors: Vec<Vec<Item>> = self .floors .iter() .map(|items| { // build list of ids for which both Chip and Generator are seen let merge: Vec<char> = items .iter() .filter_map(|i| { if let Item::Chip(id) = i { if items.contains(&Item::Generator(*id)) { return Some(*id); } } None }) .collect(); let mut items = items.clone(); for id in &merge { items.remove(&Item::Chip(*id)); items.remove(&Item::Generator(*id)); } let mut items: Vec<Item> = items.into_iter().collect(); for _ in &merge { items.push(Item::ChipAndGenerator); } items.sort(); items }) .collect(); floors.hash(state); } } #[derive(Debug, Snafu)] enum Error { #[snafu(display("I/O error: {}", source))] Io { source: std::io::Error }, #[snafu(display("Int format error for '{}': {}", data, source))] ParseInt { data: String, source: std::num::ParseIntError, }, #[snafu(display("Invalid item: '{}'", data))] ParseItem { data: String }, } fn
solve(input: &[&str]) -> Result<usize> { // -> Result<Vec<State>> { let start = State { elevator: 0, floors: input .iter() .map(|l| { if l.trim().is_empty() { Ok(HashSet::new()) } else { l.split(",") .map(|i| i.parse()) .collect::<Result<HashSet<Item>>>() } }) .collect::<Result<_>>()?, }; //let mut queue = vec![(0, Vec::new(), start.clone())]; let mut queue = vec![(0, start.clone())]; let mut seen: HashSet<State> = HashSet::new(); let mut max_steps = 0; let mut best_score = 0; while !queue.is_empty() { //let (steps, path, state) = queue.remove(0); let (steps, state) = queue.remove(0); //println!("{}\n{}", steps, state); if max_steps < steps { max_steps = steps; print!("."); std::io::stdout().flush().unwrap(); } for next_state in state.get_neighbors() { if seen.contains(&next_state) { continue; } let score = next_state.score(); if score > best_score { best_score = score; } // dirty heuristic: don't explore very bad states if score < best_score - 40 { continue; } //let mut next_path = path.clone(); //next_path.push(next_state.clone()); seen.insert(next_state.clone()); //queue.push((steps + 1, next_path, next_state)); queue.push((steps + 1, next_state)); } if state.is_success() { //return Ok(path); return Ok(steps); } } panic!("No solution") } fn main() -> Result<()> { //let input = vec![ // "HM,LM", // The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip. // "HG", // The second floor contains a hydrogen generator. // "LG", // The third floor contains a lithium generator. // "", // The fourth floor contains nothing relevant. //]; // p plutonium // P promethium // r ruthenium // s strontium // t thulium let input1 = vec![ "tG,tM,pG,sG", // The first floor contains a thulium generator, a thulium-compatible microchip, a plutonium generator, and a strontium generator. "pM,sM", // The second floor contains a plutonium-compatible microchip and a strontium-compatible microchip. "PG,PM,rG,rM", // The third floor contains a promethium generator, a promethium-compatible microchip, a ruthenium generator, and a ruthenium-compatible microchip. "", // The fourth floor contains nothing relevant. ]; if let Ok(path) = solve(&input1[..]) { //println!("\npart 1: solution in {} steps", path.len()); println!("\npart 1: solution in {} steps", path); //for (i, step) in path.into_iter().enumerate() { // println!("STEP {}:\n{}\n", i, step); //} } // d dilithium // e elerium // p plutonium // P promethium // r ruthenium // s strontium // t thulium let input2 = vec!["dG,dM,eG,eM,tG,tM,pG,sG", "pM,sM", "PG,PM,rG,rM", ""]; if let Ok(path) = solve(&input2[..]) { //println!("\npart 2: solution in {} steps", path.len()); println!("\npart 2: solution in {} steps", path); //for (i, step) in path.into_iter().enumerate() { // println!("STEP {}:\n{}\n", i, step); //} } Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn it_works() -> Result<()> { Ok(()) } }
{ let s: Vec<char> = s.chars().collect(); if s.len() != 2 { return Err(Error::ParseItem { data: s[0].to_string(), }); } match s[1] { 'M' => Ok(Item::Chip(s[0])), 'G' => Ok(Item::Generator(s[0])), _ => Err(Error::ParseItem { data: s[1].to_string(), }), } }
identifier_body
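The `solve` function above is a breadth-first search over puzzle states with a seen-set; its queue is a Vec popped from the front with remove(0), which costs O(n) per step. A generic skeleton of the same search shape (a sketch, not the puzzle code) using VecDeque for O(1) pops:

use std::collections::{HashSet, VecDeque};
use std::hash::Hash;

// FIFO queue plus a seen-set; returns the number of steps to the first
// state satisfying `success`, or None if the space is exhausted.
fn bfs<S, FN, FS>(start: S, neighbors: FN, success: FS) -> Option<usize>
where
    S: Clone + Eq + Hash,
    FN: Fn(&S) -> Vec<S>,
    FS: Fn(&S) -> bool,
{
    let mut queue = VecDeque::new();
    let mut seen = HashSet::new();
    seen.insert(start.clone());
    queue.push_back((0usize, start));
    while let Some((steps, state)) = queue.pop_front() {
        if success(&state) {
            return Some(steps);
        }
        for next in neighbors(&state) {
            // insert() returns false for already-seen states.
            if seen.insert(next.clone()) {
                queue.push_back((steps + 1, next));
            }
        }
    }
    None
}

The skeleton also tests the success predicate on dequeue rather than after expanding neighbors, which avoids generating successors of a goal state; both orderings return the same step count for a uniform-cost BFS.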
day11.rs
use std::{collections::HashSet, io::Write}; use itertools::Itertools; use snafu::Snafu; type Result<T> = std::result::Result<T, Error>; #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone)] enum Item { Chip(char), Generator(char), ChipAndGenerator, } impl std::str::FromStr for Item { type Err = Error; fn from_str(s: &str) -> Result<Self> { let s: Vec<char> = s.chars().collect(); if s.len() != 2 { return Err(Error::ParseItem { data: s[0].to_string(), }); } match s[1] { 'M' => Ok(Item::Chip(s[0])), 'G' => Ok(Item::Generator(s[0])), _ => Err(Error::ParseItem { data: s[1].to_string(), }), } } } impl std::fmt::Debug for Item { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Item::Chip(id) => write!(f, "{}M", id), Item::Generator(id) => write!(f, "{}G", id), Item::ChipAndGenerator => write!(f, "<>"), } } } #[derive(Debug, PartialEq, Eq, Clone)] struct State { elevator: usize, floors: Vec<HashSet<Item>>, } impl std::fmt::Display for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for (i, floor) in self.floors.iter().enumerate() { write!( f, "{} {}: {:?}\n", i, if self.elevator == i { "E" } else { " " }, floor )?; } Ok(()) } } fn would_fry(items: &HashSet<Item>) -> bool { for item in items { match item { Item::Chip(id) => { if items.contains(&Item::Generator(*id)) { // chip is protected by generator continue; } for other_item in items { if let Item::Generator(other_id) = other_item { // TODO: we might not need this if if other_id != id { // chip gets fried by another generator return true; } } } } Item::Generator(_) => {} Item::ChipAndGenerator => {} } } false } impl State { fn
(&self) -> usize { self.floors .iter() .enumerate() .map(|(i, f)| f.len() * (i + 1) * 10) .sum::<usize>() } fn is_success(&self) -> bool { for floor in &self.floors[..self.floors.len() - 1] { if !floor.is_empty() { return false; } } true } fn get_neighbors(&self) -> Vec<State> { let mut out = Vec::new(); // calculate valid floors that the elevator can move to let mut valid_destinations = Vec::new(); if self.elevator > 0 { valid_destinations.push(self.elevator - 1); }; if self.elevator < self.floors.len() - 1 { valid_destinations.push(self.elevator + 1); } for num_items in 1..=2 { // generate sets of items that can be taken from current floor - none, one, or two for moved_items in self.floors[self.elevator].iter().combinations(num_items) { let moved_items: HashSet<Item> = moved_items.into_iter().cloned().collect(); for destination in &valid_destinations { // take moved_items from self.elevator to destination let current_floor: HashSet<Item> = self.floors[self.elevator] .difference(&moved_items) .cloned() .collect(); let destination_floor: HashSet<Item> = self.floors[*destination] .union(&moved_items) .cloned() .collect(); // do not perform invalid moves if would_fry(&current_floor) || would_fry(&destination_floor) { continue; } let mut new_state: State = self.clone(); new_state.floors[self.elevator] = current_floor; new_state.floors[*destination] = destination_floor; new_state.elevator = *destination; out.push(new_state); } } } out } } impl std::hash::Hash for State { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.elevator.hash(state); let floors: Vec<Vec<Item>> = self .floors .iter() .map(|items| { // build list of ids for which both Chip and Generator are seen let merge: Vec<char> = items .iter() .filter_map(|i| { if let Item::Chip(id) = i { if items.contains(&Item::Generator(*id)) { return Some(*id); } } None }) .collect(); let mut items = items.clone(); for id in &merge { items.remove(&Item::Chip(*id)); items.remove(&Item::Generator(*id)); } let mut items: Vec<Item> = items.into_iter().collect(); for _ in &merge { items.push(Item::ChipAndGenerator); } items.sort(); items }) .collect(); floors.hash(state); } } #[derive(Debug, Snafu)] enum Error { #[snafu(display("I/O error: {}", source))] Io { source: std::io::Error }, #[snafu(display("Int format error for '{}': {}", data, source))] ParseInt { data: String, source: std::num::ParseIntError, }, #[snafu(display("Invalid item: '{}'", data))] ParseItem { data: String }, } fn solve(input: &[&str]) -> Result<usize> { // -> Result<Vec<State>> { let start = State { elevator: 0, floors: input .iter() .map(|l| { if l.trim().is_empty() { Ok(HashSet::new()) } else { l.split(",") .map(|i| i.parse()) .collect::<Result<HashSet<Item>>>() } }) .collect::<Result<_>>()?, }; //let mut queue = vec![(0, Vec::new(), start.clone())]; let mut queue = vec![(0, start.clone())]; let mut seen: HashSet<State> = HashSet::new(); let mut max_steps = 0; let mut best_score = 0; while !queue.is_empty() { //let (steps, path, state) = queue.remove(0); let (steps, state) = queue.remove(0); //println!("{}\n{}", steps, state); if max_steps < steps { max_steps = steps; print!("."); std::io::stdout().flush().unwrap(); } for next_state in state.get_neighbors() { if seen.contains(&next_state) { continue; } let score = next_state.score(); if score > best_score { best_score = score; } // dirty heuristic: don't explore very bad states if score < best_score - 40 { continue; } //let mut next_path = path.clone(); //next_path.push(next_state.clone());
seen.insert(next_state.clone()); //queue.push((steps + 1, next_path, next_state)); queue.push((steps + 1, next_state)); } if state.is_success() { //return Ok(path); return Ok(steps); } } panic!("No solution") } fn main() -> Result<()> { //let input = vec![ // "HM,LM", // The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip. // "HG", // The second floor contains a hydrogen generator. // "LG", // The third floor contains a lithium generator. // "", // The fourth floor contains nothing relevant. //]; // p plutonium // P promethium // r ruthenium // s strontium // t thulium let input1 = vec![ "tG,tM,pG,sG", // The first floor contains a thulium generator, a thulium-compatible microchip, a plutonium generator, and a strontium generator. "pM,sM", // The second floor contains a plutonium-compatible microchip and a strontium-compatible microchip. "PG,PM,rG,rM", // The third floor contains a promethium generator, a promethium-compatible microchip, a ruthenium generator, and a ruthenium-compatible microchip. "", // The fourth floor contains nothing relevant. ]; if let Ok(path) = solve(&input1[..]) { //println!("\npart 1: solution in {} steps", path.len()); println!("\npart 1: solution in {} steps", path); //for (i, step) in path.into_iter().enumerate() { // println!("STEP {}:\n{}\n", i, step); //} } // d dilithium // e elerium // p plutonium // P promethium // r ruthenium // s strontium // t thulium let input2 = vec!["dG,dM,eG,eM,tG,tM,pG,sG", "pM,sM", "PG,PM,rG,rM", ""]; if let Ok(path) = solve(&input2[..]) { //println!("\npart 2: solution in {} steps", path.len()); println!("\npart 2: solution in {} steps", path); //for (i, step) in path.into_iter().enumerate() { // println!("STEP {}:\n{}\n", i, step); //} } Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn it_works() -> Result<()> { Ok(()) } }
score
identifier_name
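The safety rule encoded in `would_fry` above is the crux of the puzzle: an unpaired chip fries next to any foreign generator, while a chip sharing a floor with its own generator is safe. Two hedged test sketches, written as if inside this day11.rs module (not part of the dataset):

#[cfg(test)]
mod fry_tests {
    use super::*;
    use std::collections::HashSet;

    #[test]
    fn unpaired_chip_fries_next_to_foreign_generator() {
        // Hydrogen chip, lithium generator, no hydrogen generator: fried.
        let floor: HashSet<Item> =
            vec![Item::Chip('h'), Item::Generator('l')].into_iter().collect();
        assert!(would_fry(&floor));
    }

    #[test]
    fn paired_chip_is_protected_by_its_own_generator() {
        // The hydrogen generator shields its chip from the lithium generator.
        let floor: HashSet<Item> =
            vec![Item::Chip('h'), Item::Generator('h'), Item::Generator('l')]
                .into_iter()
                .collect();
        assert!(!would_fry(&floor));
    }
}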
day11.rs
use std::{collections::HashSet, io::Write}; use itertools::Itertools; use snafu::Snafu; type Result<T> = std::result::Result<T, Error>; #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Clone)] enum Item { Chip(char), Generator(char), ChipAndGenerator, } impl std::str::FromStr for Item { type Err = Error; fn from_str(s: &str) -> Result<Self> { let s: Vec<char> = s.chars().collect(); if s.len() != 2 { return Err(Error::ParseItem { data: s[0].to_string(), }); } match s[1] { 'M' => Ok(Item::Chip(s[0])), 'G' => Ok(Item::Generator(s[0])), _ => Err(Error::ParseItem { data: s[1].to_string(), }), } } } impl std::fmt::Debug for Item { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Item::Chip(id) => write!(f, "{}M", id), Item::Generator(id) => write!(f, "{}G", id), Item::ChipAndGenerator => write!(f, "<>"), } } } #[derive(Debug, PartialEq, Eq, Clone)] struct State { elevator: usize, floors: Vec<HashSet<Item>>, } impl std::fmt::Display for State { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for (i, floor) in self.floors.iter().enumerate() { write!( f, "{} {}: {:?}\n", i, if self.elevator == i { "E" } else { " " }, floor )?; } Ok(()) } } fn would_fry(items: &HashSet<Item>) -> bool { for item in items { match item { Item::Chip(id) => { if items.contains(&Item::Generator(*id)) { // chip is protected by generator continue; } for other_item in items { if let Item::Generator(other_id) = other_item { // TODO: we might not need this if if other_id != id { // chip gets fried by another generator return true; } } } } Item::Generator(_) => {} Item::ChipAndGenerator => {} } } false } impl State { fn score(&self) -> usize { self.floors .iter() .enumerate()
for floor in &self.floors[..self.floors.len() - 1] { if !floor.is_empty() { return false; } } true } fn get_neighbors(&self) -> Vec<State> { let mut out = Vec::new(); // calculate valid floors that the elevator can move to let mut valid_destinations = Vec::new(); if self.elevator > 0 { valid_destinations.push(self.elevator - 1); }; if self.elevator < self.floors.len() - 1 { valid_destinations.push(self.elevator + 1); } for num_items in 1..=2 { // generate sets of items that can be taken from current floor - none, one, or two for moved_items in self.floors[self.elevator].iter().combinations(num_items) { let moved_items: HashSet<Item> = moved_items.into_iter().cloned().collect(); for destination in &valid_destinations { // take moved_items from self.elevator to destination let current_floor: HashSet<Item> = self.floors[self.elevator] .difference(&moved_items) .cloned() .collect(); let destination_floor: HashSet<Item> = self.floors[*destination] .union(&moved_items) .cloned() .collect(); // do not perform invalid moves if would_fry(&current_floor) || would_fry(&destination_floor) { continue; } let mut new_state: State = self.clone(); new_state.floors[self.elevator] = current_floor; new_state.floors[*destination] = destination_floor; new_state.elevator = *destination; out.push(new_state); } } } out } } impl std::hash::Hash for State { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.elevator.hash(state); let floors: Vec<Vec<Item>> = self .floors .iter() .map(|items| { // build list of ids for which both Chip and Generator are seen let merge: Vec<char> = items .iter() .filter_map(|i| { if let Item::Chip(id) = i { if items.contains(&Item::Generator(*id)) { return Some(*id); } } None }) .collect(); let mut items = items.clone(); for id in &merge { items.remove(&Item::Chip(*id)); items.remove(&Item::Generator(*id)); } let mut items: Vec<Item> = items.into_iter().collect(); for _ in &merge { items.push(Item::ChipAndGenerator); } items.sort(); items }) .collect(); floors.hash(state); } } #[derive(Debug, Snafu)] enum Error { #[snafu(display("I/O error: {}", source))] Io { source: std::io::Error }, #[snafu(display("Int format error for '{}': {}", data, source))] ParseInt { data: String, source: std::num::ParseIntError, }, #[snafu(display("Invalid item: '{}'", data))] ParseItem { data: String }, } fn solve(input: &[&str]) -> Result<usize> { // -> Result<Vec<State>> { let start = State { elevator: 0, floors: input .iter() .map(|l| { if l.trim().is_empty() { Ok(HashSet::new()) } else { l.split(",") .map(|i| i.parse()) .collect::<Result<HashSet<Item>>>() } }) .collect::<Result<_>>()?, }; //let mut queue = vec![(0, Vec::new(), start.clone())]; let mut queue = vec![(0, start.clone())]; let mut seen: HashSet<State> = HashSet::new(); let mut max_steps = 0; let mut best_score = 0; while !queue.is_empty() { //let (steps, path, state) = queue.remove(0); let (steps, state) = queue.remove(0); //println!("{}\n{}", steps, state); if max_steps < steps { max_steps = steps; print!("."); std::io::stdout().flush().unwrap(); } for next_state in state.get_neighbors() { if seen.contains(&next_state) { continue; } let score = next_state.score(); if score > best_score { best_score = score; } // dirty heuristic: don't explore very bad states if score < best_score - 40 { continue; } //let mut next_path = path.clone(); //next_path.push(next_state.clone()); seen.insert(next_state.clone()); //queue.push((steps + 1, next_path, next_state)); queue.push((steps + 1, next_state)); } if state.is_success() { //return Ok(path);
return Ok(steps); } } panic!("No solution") } fn main() -> Result<()> { //let input = vec![ // "HM,LM", // The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip. // "HG", // The second floor contains a hydrogen generator. // "LG", // The third floor contains a lithium generator. // "", // The fourth floor contains nothing relevant. //]; // p plutonium // P promethium // r ruthenium // s strontium // t thulium let input1 = vec![ "tG,tM,pG,sG", // The first floor contains a thulium generator, a thulium-compatible microchip, a plutonium generator, and a strontium generator. "pM,sM", // The second floor contains a plutonium-compatible microchip and a strontium-compatible microchip. "PG,PM,rG,rM", // The third floor contains a promethium generator, a promethium-compatible microchip, a ruthenium generator, and a ruthenium-compatible microchip. "", // The fourth floor contains nothing relevant. ]; if let Ok(path) = solve(&input1[..]) { //println!("\npart 1: solution in {} steps", path.len()); println!("\npart 1: solution in {} steps", path); //for (i, step) in path.into_iter().enumerate() { // println!("STEP {}:\n{}\n", i, step); //} } // d dilithium // e elerium // p plutonium // P promethium // r ruthenium // s strontium // t thulium let input2 = vec!["dG,dM,eG,eM,tG,tM,pG,sG", "pM,sM", "PG,PM,rG,rM", ""]; if let Ok(path) = solve(&input2[..]) { //println!("\npart 2: solution in {} steps", path.len()); println!("\npart 2: solution in {} steps", path); //for (i, step) in path.into_iter().enumerate() { // println!("STEP {}:\n{}\n", i, step); //} } Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn it_works() -> Result<()> { Ok(()) } }
.map(|(i, f)| f.len() * (i + 1) * 10) .sum::<usize>() } fn is_success(&self) -> bool {
random_line_split
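The Hash impl in the row above canonicalizes each floor so that states differing only in element labels collide: a chip standing next to its own generator is folded into an anonymous ChipAndGenerator token before hashing. A minimal standalone sketch of that canonicalization (the reduced Item mirror and the function name are hypothetical, not taken from the solver):

use std::collections::HashSet;

// Hypothetical mirror of the solver's Item type, reduced to what the
// canonicalization needs.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
enum Item {
    Chip(char),
    Generator(char),
    ChipAndGenerator,
}

// Collapse every matched Chip(id)/Generator(id) pair on a floor into the
// anonymous ChipAndGenerator token and return a sorted, label-free key.
fn canonical_floor(items: &HashSet<Item>) -> Vec<Item> {
    let paired: Vec<char> = items
        .iter()
        .filter_map(|i| match i {
            Item::Chip(id) if items.contains(&Item::Generator(*id)) => Some(*id),
            _ => None,
        })
        .collect();
    let mut out: Vec<Item> = items
        .iter()
        .filter(|i| match i {
            Item::Chip(id) | Item::Generator(id) => !paired.contains(id),
            Item::ChipAndGenerator => true,
        })
        .cloned()
        .collect();
    out.extend(std::iter::repeat(Item::ChipAndGenerator).take(paired.len()));
    out.sort();
    out
}

fn main() {
    // "HG,HM" and "LG,LM" differ only in element labels...
    let a: HashSet<Item> = [Item::Chip('H'), Item::Generator('H')].into_iter().collect();
    let b: HashSet<Item> = [Item::Chip('L'), Item::Generator('L')].into_iter().collect();
    // ...so they canonicalize to the same key and will hash identically.
    assert_eq!(canonical_floor(&a), canonical_floor(&b));
    println!("both floors reduce to {:?}", canonical_floor(&a));
}

Deduplicating label-permuted states this way is what keeps the breadth-first search tractable for the larger part-2 input: each equivalence class of states is explored only once.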
sink.rs
from(tmpfile_path.to_str().unwrap()); input_file_path.push_str("_input"); let input_file_path = PathBuf::from(input_file_path); let input_file = OpenOptions::new() .read(true) .write(true) .create(true) .truncate(true) .open(&input_file_path)?; // Replace the @@ marker in the args with the actual file path (if input type is File). if input_channel == InputChannel::File { if let Some(elem) = args.iter_mut().find(|e| **e == "@@") { *elem = input_file_path.to_str().unwrap().to_owned(); } else { return Err(anyhow!(format!("No @@ marker in args, even though the input channel is defined as file. args: {:#?}", args))); } } let mut stdout_file = None; if log_stdout { // Setup file for stdout logging. let mut path = workdir.clone(); path.push("stdout"); let file = OpenOptions::new() .read(true) .write(true) .create(true) .truncate(true) .open(&path) .unwrap(); stdout_file = Some((file, path)); } let mut stderr_file = None; if log_stderr { // Setup file for stdout logging. let mut path = workdir.clone(); path.push("stderr"); let file = OpenOptions::new() .read(true) .write(true) .create(true) .truncate(true) .open(&path) .unwrap(); stderr_file = Some((file, path)); } Ok(AflSink { path, args, workdir, input_channel, input_file: (input_file, input_file_path), forkserver_sid: None, bitmap: Bitmap::new_in_shm(BITMAP_DEFAULT_MAP_SIZE, 0x00), send_fd: None, receive_fd: None, log_stdout, log_stderr, stdout_file, stderr_file, config: config.cloned(), bitmap_was_resize: false, }) } pub fn from_config(config: &Config, id: Option<usize>) -> Result<AflSink> { let config_new = config.clone(); let mut workdir = config_new.general.work_dir.clone(); workdir.push( id.map(|id| id.to_string()) .unwrap_or_else(|| "0".to_owned()), ); let sink = AflSink::new( config_new.sink.bin_path, config_new.sink.arguments, workdir, config_new.sink.input_type, Some(config), config.sink.log_stdout, config.sink.log_stderr, )?; Ok(sink) } /// Wait for the given duration for the forkserver read fd to become ready. /// Returns Ok(true) if data becomes ready during the given `timeout`, else /// Ok(false). /// /// # Error /// /// Returns an Error if an unexpected error occurs. fn wait_for_data(&self, timeout: Duration) -> Result<()> { let pollfd = filedescriptor::pollfd { fd: self.receive_fd.unwrap(), events: filedescriptor::POLLIN, revents: 0, }; let mut pollfds = [pollfd]; let nready = filedescriptor::poll(&mut pollfds, Some(timeout)); match nready { Ok(1) => Ok(()), Ok(0) => Err(SinkError::CommunicationTimeoutError(format!( "Did not received data after {:?}", timeout )) .into()), Ok(n) => { unreachable!("Unexpected return value: {}", n); } Err(ref err) => { if let filedescriptor::Error::Poll(err) = err { if err.kind() == io::ErrorKind::Interrupted { return self.wait_for_data(timeout); } } Err(SinkError::FatalError(format!("Failed to poll fd: {:#?}", err)).into()) } } } pub fn start(&mut self) -> Result<()> { // send_pipe[1](we) -> send_pipe[0](forkserver). let send_pipe = [0i32; 2]; // receive_pipe[1](forkserver) -> receive_pipe[0](we). let receive_pipe = [0i32; 2]; // Create pipe for communicating with the forkserver. 
unsafe { let ret = libc::pipe(send_pipe.as_ptr() as *mut i32); assert_eq!(ret, 0); let ret = libc::pipe(receive_pipe.as_ptr() as *mut i32); assert_eq!(ret, 0); } self.send_fd = Some(send_pipe[1]); let child_receive_fd = send_pipe[0]; self.receive_fd = Some(receive_pipe[0]); let child_send_fd = receive_pipe[1]; let child_pid = unsafe { libc::fork() }; match child_pid { -1 => return Err(anyhow!("Fork failed!")), 0 => { /* Child Be aware that we are forking a potentially multithreaded application here. Since fork() only copies the calling thread, the environment might be left in a dirty state because of, e.g., mutexs that where locked at the time fork was called. Because of this it is only save to call async-signal-safe functions (https://man7.org/linux/man-pages/man7/signal-safety.7.html). Note that loggin function (debug!...) often internally use mutexes to lock the output buffer, thus using logging here is forbidden and likely causes deadlocks. */ let map_shm_id = self.bitmap.shm_id(); unsafe { let ret = libc::setsid(); assert!(ret >= 0); } // Setup args let path = self.path.to_str().map(|s| s.to_owned()).ok_or_else(|| { SinkError::Other(anyhow!("Invalid UTF-8 character in path")) })?; let mut args = self.args.clone(); args.insert(0, path.clone()); let argv_nonref: Vec<CString> = args .iter() .map(|arg| CString::new(arg.as_bytes()).unwrap()) .collect(); let mut argv: Vec<*const c_char> = argv_nonref.iter().map(|arg| arg.as_ptr()).collect(); argv.push(std::ptr::null()); // Setup environment let mut envp: Vec<*const c_char> = Vec::new(); let shm_env_var = CString::new(format!("{}={}", AFL_SHM_ENV_VAR_NAME, map_shm_id)).unwrap(); envp.push(shm_env_var.as_ptr()); let mut env_from_config = Vec::new(); if let Some(cfg) = self.config.as_ref() { cfg.sink.env.iter().for_each(|var| { env_from_config .push(CString::new(format!("{}={}", var.0, var.1).as_bytes()).unwrap()) }) } let afl_maps_size = CString::new(format!("AFL_MAP_SIZE={}", self.bitmap().size())).unwrap(); envp.push(afl_maps_size.as_bytes().as_ptr() as *const i8); env_from_config.iter().for_each(|e| { envp.push(e.as_bytes().as_ptr() as *const i8); }); envp.push(std::ptr::null()); let dev_null_fd = unsafe { let path = CString::new("/dev/null".as_bytes()).unwrap(); libc::open(path.as_ptr(), libc::O_RDONLY) }; if dev_null_fd < 0 { panic!("Failed to open /dev/null"); } match self.input_channel { InputChannel::Stdin => unsafe { libc::dup2(self.input_file.0.as_raw_fd(), 0); }, _ => unsafe { libc::dup2(dev_null_fd, 0); }, } if self.log_stdout { // unsafe { // let fd = self.stdout_file.as_ref().unwrap().0.as_raw_fd(); // libc::dup2(fd, libc::STDOUT_FILENO); // libc::close(fd); // } } else { unsafe { libc::dup2(dev_null_fd, libc::STDOUT_FILENO); } } if self.log_stderr
else { unsafe { libc::dup2(dev_null_fd, libc::STDERR_FILENO); } } unsafe { libc::close(dev_null_fd); } unsafe { // Close the pipe ends used by our parent. libc::close(self.receive_fd.unwrap()); libc::close(self.send_fd.unwrap()); // Remap fds to the ones used by the forkserver. // The fds might have by chance the correct value, in this case // dup2 & close would actually cause us to close the fd we intended to pass. if child_receive_fd!= AFL_READ_FROM_PARENT_FD { let ret = libc::dup2(child_receive_fd, AFL_READ_FROM_PARENT_FD); assert!(ret >= 0); libc::close(child_receive_fd); } if child_send_fd!= AFL_WRITE_TO_PARENT_FD { let ret = libc::dup2(child_send_fd, AFL_WRITE_TO_PARENT_FD); assert!(ret >= 0); libc::close(child_send_fd); } } unsafe { if!self.log_stdout &&!self.log_stderr { // if we log stderr or stdout, the limit will cause our // fuzzer to fail after some time. let mut rlim: libc::rlimit = std::mem::zeroed(); rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap(); rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap(); let ret = libc::setrlimit(libc::RLIMIT_FSIZE, &rlim as *const libc::rlimit); assert_eq!(ret, 0); } // Disable core dumps let limit_val: libc::rlimit = std::mem::zeroed(); let ret = libc::setrlimit(libc::RLIMIT_CORE, &limit_val); assert_eq!(ret, 0); // Max AS size. let mut rlim: libc::rlimit = std::mem::zeroed(); rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap(); rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap(); let ret = libc::setrlimit(libc::RLIMIT_AS, &rlim as *const libc::rlimit); assert_eq!(ret, 0); let ret = libc::personality(libc::ADDR_NO_RANDOMIZE as u64); assert_eq!(ret, 0); } if let Err(err) = self.drop_privileges() { log::error!("Failed to drop privileges: {:#?}", err); panic!(); } // Make sure that UID == EUID, since if this is not the case, // ld will ignore LD_PRELOAD which we need to use for targets // that normally load instrumented libraries during runtime. assert_eq!(nix::unistd::getuid(), nix::unistd::geteuid()); assert_eq!(nix::unistd::getegid(), nix::unistd::getegid()); let prog = CString::new(path.as_bytes()).unwrap(); unsafe { libc::execve(prog.as_ptr(), argv.as_ptr(), envp.as_ptr()); } unreachable!("Failed to call execve on '{}'", path); } _ => { /* The parent */ } } /* The parent */ log::info!("Forkserver has pid {}", child_pid); // Note th sid, thus we can kill the child later. // This is a sid since the child calls setsid(). self.forkserver_sid = Some(child_pid); // Close the pipe ends used by the child. unsafe { libc::close(child_receive_fd); libc::close(child_send_fd); } unsafe { libc::fcntl(self.send_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC); libc::fcntl(self.receive_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC); } // Wait for for hello from the child. self.wait_for_data(AFL_DEFAULT_TIMEOUT) .context("Timeout while waiting for forkserver to come up.")?; // Read the available data. let buffer = [0u8; 4]; unsafe { let ret = libc::read( self.receive_fd.unwrap(), buffer.as_ptr() as *mut libc::c_void, 4, ); if ret!= 4 { return Err(anyhow!(format!( "Failed to do handshake with forkserver. ret={}", ret ))); } // Process extended attributes used by AFL++. 
// See src/afl-forkserver.c:689 (afl_fsrv_start)
            let status = u32::from_ne_bytes(buffer);
            log::info!("Forkserver status: 0x{:x}", status);
            if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE {
                log::info!("Got extended option FS_OPT_MAPSIZE from forkserver");
                let new_map_size = ((status & 0x00fffffe) >> 1) + 1;
                log::info!("Target requests a map of size {} bytes", new_map_size);
                log::info!("Current map size is {} bytes", self.bitmap().size());
                if self.bitmap_was_resize {
                    log::info!("Already resized, skipping...");
                    return Ok(());
                }

                let new_map_size = new_map_size.next_power_of_two() as usize;
                if new_map_size > self.bitmap().size() {
                    log::info!("Resizing bitmap to {} bytes", new_map_size);
                    self.stop();
                    let new_map = Bitmap::new_in_shm(new_map_size, 0x00);
                    let _ = mem::replace(self.bitmap(), new_map);
                    self.bitmap_was_resize = true;
                    return self.start();
{ //unsafe { // let fd = self.stderr_file.as_ref().unwrap().0.as_raw_fd(); // libc::dup2(fd, libc::STDERR_FILENO); // libc::close(fd); //} }
conditional_block
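start() decodes the forkserver's 4-byte status word to honor the FS_OPT_MAPSIZE extension shown above. A sketch of just that decoding step; the FS_OPT_MAPSIZE value is taken from AFL++'s include/forkserver.h and should be treated as an assumption if your AFL++ version differs:

// AFL++ forkserver extended-option decoding, as exercised in start() above.
const FS_OPT_MAPSIZE: u32 = 0x40000000; // assumed from AFL++ headers

/// Extract the map size a target requests via the handshake status word:
/// bits 1..=23 carry (size - 1), so mask, shift right once, and add one back.
fn requested_map_size(status: u32) -> Option<usize> {
    if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE {
        Some((((status & 0x00fffffe) >> 1) + 1) as usize)
    } else {
        None
    }
}

fn main() {
    // A status word announcing a 65536-byte map: (65536 - 1) << 1, plus the flag.
    let status = FS_OPT_MAPSIZE | ((65536u32 - 1) << 1);
    assert_eq!(requested_map_size(status), Some(65536));
    // The sink then rounds up to the next power of two before resizing.
    assert_eq!(65536usize.next_power_of_two(), 65536);
    println!("target wants a {} byte map", requested_map_size(status).unwrap());
}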
sink.rs
.create(true) .truncate(true) .open(&path) .unwrap(); stderr_file = Some((file, path)); } Ok(AflSink { path, args, workdir, input_channel, input_file: (input_file, input_file_path), forkserver_sid: None, bitmap: Bitmap::new_in_shm(BITMAP_DEFAULT_MAP_SIZE, 0x00), send_fd: None, receive_fd: None, log_stdout, log_stderr, stdout_file, stderr_file, config: config.cloned(), bitmap_was_resize: false, }) } pub fn from_config(config: &Config, id: Option<usize>) -> Result<AflSink> { let config_new = config.clone(); let mut workdir = config_new.general.work_dir.clone(); workdir.push( id.map(|id| id.to_string()) .unwrap_or_else(|| "0".to_owned()), ); let sink = AflSink::new( config_new.sink.bin_path, config_new.sink.arguments, workdir, config_new.sink.input_type, Some(config), config.sink.log_stdout, config.sink.log_stderr, )?; Ok(sink) } /// Wait for the given duration for the forkserver read fd to become ready. /// Returns Ok(true) if data becomes ready during the given `timeout`, else /// Ok(false). /// /// # Error /// /// Returns an Error if an unexpected error occurs. fn wait_for_data(&self, timeout: Duration) -> Result<()> { let pollfd = filedescriptor::pollfd { fd: self.receive_fd.unwrap(), events: filedescriptor::POLLIN, revents: 0, }; let mut pollfds = [pollfd]; let nready = filedescriptor::poll(&mut pollfds, Some(timeout)); match nready { Ok(1) => Ok(()), Ok(0) => Err(SinkError::CommunicationTimeoutError(format!( "Did not received data after {:?}", timeout )) .into()), Ok(n) => { unreachable!("Unexpected return value: {}", n); } Err(ref err) => { if let filedescriptor::Error::Poll(err) = err { if err.kind() == io::ErrorKind::Interrupted { return self.wait_for_data(timeout); } } Err(SinkError::FatalError(format!("Failed to poll fd: {:#?}", err)).into()) } } } pub fn start(&mut self) -> Result<()> { // send_pipe[1](we) -> send_pipe[0](forkserver). let send_pipe = [0i32; 2]; // receive_pipe[1](forkserver) -> receive_pipe[0](we). let receive_pipe = [0i32; 2]; // Create pipe for communicating with the forkserver. unsafe { let ret = libc::pipe(send_pipe.as_ptr() as *mut i32); assert_eq!(ret, 0); let ret = libc::pipe(receive_pipe.as_ptr() as *mut i32); assert_eq!(ret, 0); } self.send_fd = Some(send_pipe[1]); let child_receive_fd = send_pipe[0]; self.receive_fd = Some(receive_pipe[0]); let child_send_fd = receive_pipe[1]; let child_pid = unsafe { libc::fork() }; match child_pid { -1 => return Err(anyhow!("Fork failed!")), 0 => { /* Child Be aware that we are forking a potentially multithreaded application here. Since fork() only copies the calling thread, the environment might be left in a dirty state because of, e.g., mutexs that where locked at the time fork was called. Because of this it is only save to call async-signal-safe functions (https://man7.org/linux/man-pages/man7/signal-safety.7.html). Note that loggin function (debug!...) often internally use mutexes to lock the output buffer, thus using logging here is forbidden and likely causes deadlocks. 
*/ let map_shm_id = self.bitmap.shm_id(); unsafe { let ret = libc::setsid(); assert!(ret >= 0); } // Setup args let path = self.path.to_str().map(|s| s.to_owned()).ok_or_else(|| { SinkError::Other(anyhow!("Invalid UTF-8 character in path")) })?; let mut args = self.args.clone(); args.insert(0, path.clone()); let argv_nonref: Vec<CString> = args .iter() .map(|arg| CString::new(arg.as_bytes()).unwrap()) .collect(); let mut argv: Vec<*const c_char> = argv_nonref.iter().map(|arg| arg.as_ptr()).collect(); argv.push(std::ptr::null()); // Setup environment let mut envp: Vec<*const c_char> = Vec::new(); let shm_env_var = CString::new(format!("{}={}", AFL_SHM_ENV_VAR_NAME, map_shm_id)).unwrap(); envp.push(shm_env_var.as_ptr()); let mut env_from_config = Vec::new(); if let Some(cfg) = self.config.as_ref() { cfg.sink.env.iter().for_each(|var| { env_from_config .push(CString::new(format!("{}={}", var.0, var.1).as_bytes()).unwrap()) }) } let afl_maps_size = CString::new(format!("AFL_MAP_SIZE={}", self.bitmap().size())).unwrap(); envp.push(afl_maps_size.as_bytes().as_ptr() as *const i8); env_from_config.iter().for_each(|e| { envp.push(e.as_bytes().as_ptr() as *const i8); }); envp.push(std::ptr::null()); let dev_null_fd = unsafe { let path = CString::new("/dev/null".as_bytes()).unwrap(); libc::open(path.as_ptr(), libc::O_RDONLY) }; if dev_null_fd < 0 { panic!("Failed to open /dev/null"); } match self.input_channel { InputChannel::Stdin => unsafe { libc::dup2(self.input_file.0.as_raw_fd(), 0); }, _ => unsafe { libc::dup2(dev_null_fd, 0); }, } if self.log_stdout { // unsafe { // let fd = self.stdout_file.as_ref().unwrap().0.as_raw_fd(); // libc::dup2(fd, libc::STDOUT_FILENO); // libc::close(fd); // } } else { unsafe { libc::dup2(dev_null_fd, libc::STDOUT_FILENO); } } if self.log_stderr { //unsafe { // let fd = self.stderr_file.as_ref().unwrap().0.as_raw_fd(); // libc::dup2(fd, libc::STDERR_FILENO); // libc::close(fd); //} } else { unsafe { libc::dup2(dev_null_fd, libc::STDERR_FILENO); } } unsafe { libc::close(dev_null_fd); } unsafe { // Close the pipe ends used by our parent. libc::close(self.receive_fd.unwrap()); libc::close(self.send_fd.unwrap()); // Remap fds to the ones used by the forkserver. // The fds might have by chance the correct value, in this case // dup2 & close would actually cause us to close the fd we intended to pass. if child_receive_fd!= AFL_READ_FROM_PARENT_FD { let ret = libc::dup2(child_receive_fd, AFL_READ_FROM_PARENT_FD); assert!(ret >= 0); libc::close(child_receive_fd); } if child_send_fd!= AFL_WRITE_TO_PARENT_FD { let ret = libc::dup2(child_send_fd, AFL_WRITE_TO_PARENT_FD); assert!(ret >= 0); libc::close(child_send_fd); } } unsafe { if!self.log_stdout &&!self.log_stderr { // if we log stderr or stdout, the limit will cause our // fuzzer to fail after some time. let mut rlim: libc::rlimit = std::mem::zeroed(); rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap(); rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap(); let ret = libc::setrlimit(libc::RLIMIT_FSIZE, &rlim as *const libc::rlimit); assert_eq!(ret, 0); } // Disable core dumps let limit_val: libc::rlimit = std::mem::zeroed(); let ret = libc::setrlimit(libc::RLIMIT_CORE, &limit_val); assert_eq!(ret, 0); // Max AS size. 
let mut rlim: libc::rlimit = std::mem::zeroed(); rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap(); rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap(); let ret = libc::setrlimit(libc::RLIMIT_AS, &rlim as *const libc::rlimit); assert_eq!(ret, 0); let ret = libc::personality(libc::ADDR_NO_RANDOMIZE as u64); assert_eq!(ret, 0); } if let Err(err) = self.drop_privileges() { log::error!("Failed to drop privileges: {:#?}", err); panic!(); } // Make sure that UID == EUID, since if this is not the case, // ld will ignore LD_PRELOAD which we need to use for targets // that normally load instrumented libraries during runtime. assert_eq!(nix::unistd::getuid(), nix::unistd::geteuid()); assert_eq!(nix::unistd::getegid(), nix::unistd::getegid()); let prog = CString::new(path.as_bytes()).unwrap(); unsafe { libc::execve(prog.as_ptr(), argv.as_ptr(), envp.as_ptr()); } unreachable!("Failed to call execve on '{}'", path); } _ => { /* The parent */ } } /* The parent */ log::info!("Forkserver has pid {}", child_pid); // Note th sid, thus we can kill the child later. // This is a sid since the child calls setsid(). self.forkserver_sid = Some(child_pid); // Close the pipe ends used by the child. unsafe { libc::close(child_receive_fd); libc::close(child_send_fd); } unsafe { libc::fcntl(self.send_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC); libc::fcntl(self.receive_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC); } // Wait for for hello from the child. self.wait_for_data(AFL_DEFAULT_TIMEOUT) .context("Timeout while waiting for forkserver to come up.")?; // Read the available data. let buffer = [0u8; 4]; unsafe { let ret = libc::read( self.receive_fd.unwrap(), buffer.as_ptr() as *mut libc::c_void, 4, ); if ret!= 4 { return Err(anyhow!(format!( "Failed to do handshake with forkserver. ret={}", ret ))); } // Process extended attributes used by AFL++. // Sett src/afl-forkserver.c:689 (afl_fsrv_start) let status = u32::from_ne_bytes(buffer); log::info!("Forkserver status: 0x{:x}", status); if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE { log::info!("Got extended option FS_OPT_MAPSIZE from forkserver"); let new_map_size = ((status & 0x00fffffe) >> 1) + 1; log::info!("Target requests a map of size {} bytes", new_map_size); log::info!("Current map size is {} bytes", self.bitmap().size()); if self.bitmap_was_resize { log::info!("Already resized, skipping...."); return Ok(()); } let new_map_size = new_map_size.next_power_of_two() as usize; if new_map_size > self.bitmap().size() { log::info!("Resizing bitmap to {} bytes", new_map_size); self.stop(); let new_map = Bitmap::new_in_shm(new_map_size, 0x00); let _ = mem::replace(self.bitmap(), new_map); self.bitmap_was_resize = true; return self.start(); } } } // if self.stdout_file.is_some() { // // Take the the stdout file thus its fd gets dropped. // self.stdout_file.take(); // } // if self.stderr_file.is_some() { // // Take the the stderr file thus its fd gets dropped. // self.stderr_file.take(); // } // We are ready to fuzz! Ok(()) } fn drop_privileges(&mut self) -> Result<()> { let uid_gid = self .config .as_ref() .map(|config| config.general.jail_uid_gid()) .unwrap_or(None); if uid_gid.is_some() { jail::acquire_privileges()?; } if let Some((uid, gid)) = uid_gid { jail::drop_privileges(uid, gid, true)?; } Ok(()) } /// Stops the forksever. Must be called before calling start() again. /// It is save to call this function multiple times. 
pub fn stop(&mut self) {
        if let Some(sid) = self.forkserver_sid.take() {
            unsafe {
                libc::close(self.send_fd.unwrap());
                libc::close(self.receive_fd.unwrap());
                let ret = libc::killpg(sid, SIGKILL);
                assert!(ret == 0);
                // reap it
                libc::waitpid(sid, std::ptr::null_mut() as *mut libc::c_int, 0);
            }
        }
    }

    /// Write the given bytes into the sink's input channel. This function
    /// may only be called on sinks whose input channel is InputChannel::Stdin
    /// or InputChannel::File.
    pub fn
write
identifier_name
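The write() method that begins after the `pub fn` cut above delivers each test case by rewinding and truncating a pre-opened input file rather than reopening it per run, so the target (which inherited the fd) always reads the fresh case from offset 0. A self-contained sketch of that delivery pattern (the file path is illustrative):

// Standalone sketch of the input-delivery pattern used by AflSink::write().
use std::fs::OpenOptions;
use std::io::{Seek, SeekFrom, Write};

fn main() -> std::io::Result<()> {
    let mut input = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .truncate(true)
        .open("/tmp/fuzz_input_demo")?;

    for case in [&b"AAAA"[..], &b"BB"[..]] {
        input.seek(SeekFrom::Start(0))?; // rewind before writing
        input.set_len(0)?;               // drop the previous (possibly longer) case
        input.write_all(case)?;          // write the new test case
        input.seek(SeekFrom::Start(0))?; // rewind so the consumer reads from 0
        input.sync_all()?;               // flush so the forked child sees it
        println!("delivered {} bytes", case.len());
    }
    Ok(())
}

The set_len(0) step matters: without it a short case would leave trailing bytes of the previous, longer case visible to the target.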
sink.rs
child_pid { -1 => return Err(anyhow!("Fork failed!")), 0 => { /* Child Be aware that we are forking a potentially multithreaded application here. Since fork() only copies the calling thread, the environment might be left in a dirty state because of, e.g., mutexs that where locked at the time fork was called. Because of this it is only save to call async-signal-safe functions (https://man7.org/linux/man-pages/man7/signal-safety.7.html). Note that loggin function (debug!...) often internally use mutexes to lock the output buffer, thus using logging here is forbidden and likely causes deadlocks. */ let map_shm_id = self.bitmap.shm_id(); unsafe { let ret = libc::setsid(); assert!(ret >= 0); } // Setup args let path = self.path.to_str().map(|s| s.to_owned()).ok_or_else(|| { SinkError::Other(anyhow!("Invalid UTF-8 character in path")) })?; let mut args = self.args.clone(); args.insert(0, path.clone()); let argv_nonref: Vec<CString> = args .iter() .map(|arg| CString::new(arg.as_bytes()).unwrap()) .collect(); let mut argv: Vec<*const c_char> = argv_nonref.iter().map(|arg| arg.as_ptr()).collect(); argv.push(std::ptr::null()); // Setup environment let mut envp: Vec<*const c_char> = Vec::new(); let shm_env_var = CString::new(format!("{}={}", AFL_SHM_ENV_VAR_NAME, map_shm_id)).unwrap(); envp.push(shm_env_var.as_ptr()); let mut env_from_config = Vec::new(); if let Some(cfg) = self.config.as_ref() { cfg.sink.env.iter().for_each(|var| { env_from_config .push(CString::new(format!("{}={}", var.0, var.1).as_bytes()).unwrap()) }) } let afl_maps_size = CString::new(format!("AFL_MAP_SIZE={}", self.bitmap().size())).unwrap(); envp.push(afl_maps_size.as_bytes().as_ptr() as *const i8); env_from_config.iter().for_each(|e| { envp.push(e.as_bytes().as_ptr() as *const i8); }); envp.push(std::ptr::null()); let dev_null_fd = unsafe { let path = CString::new("/dev/null".as_bytes()).unwrap(); libc::open(path.as_ptr(), libc::O_RDONLY) }; if dev_null_fd < 0 { panic!("Failed to open /dev/null"); } match self.input_channel { InputChannel::Stdin => unsafe { libc::dup2(self.input_file.0.as_raw_fd(), 0); }, _ => unsafe { libc::dup2(dev_null_fd, 0); }, } if self.log_stdout { // unsafe { // let fd = self.stdout_file.as_ref().unwrap().0.as_raw_fd(); // libc::dup2(fd, libc::STDOUT_FILENO); // libc::close(fd); // } } else { unsafe { libc::dup2(dev_null_fd, libc::STDOUT_FILENO); } } if self.log_stderr { //unsafe { // let fd = self.stderr_file.as_ref().unwrap().0.as_raw_fd(); // libc::dup2(fd, libc::STDERR_FILENO); // libc::close(fd); //} } else { unsafe { libc::dup2(dev_null_fd, libc::STDERR_FILENO); } } unsafe { libc::close(dev_null_fd); } unsafe { // Close the pipe ends used by our parent. libc::close(self.receive_fd.unwrap()); libc::close(self.send_fd.unwrap()); // Remap fds to the ones used by the forkserver. // The fds might have by chance the correct value, in this case // dup2 & close would actually cause us to close the fd we intended to pass. if child_receive_fd!= AFL_READ_FROM_PARENT_FD { let ret = libc::dup2(child_receive_fd, AFL_READ_FROM_PARENT_FD); assert!(ret >= 0); libc::close(child_receive_fd); } if child_send_fd!= AFL_WRITE_TO_PARENT_FD { let ret = libc::dup2(child_send_fd, AFL_WRITE_TO_PARENT_FD); assert!(ret >= 0); libc::close(child_send_fd); } } unsafe { if!self.log_stdout &&!self.log_stderr { // if we log stderr or stdout, the limit will cause our // fuzzer to fail after some time. 
let mut rlim: libc::rlimit = std::mem::zeroed(); rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap(); rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap(); let ret = libc::setrlimit(libc::RLIMIT_FSIZE, &rlim as *const libc::rlimit); assert_eq!(ret, 0); } // Disable core dumps let limit_val: libc::rlimit = std::mem::zeroed(); let ret = libc::setrlimit(libc::RLIMIT_CORE, &limit_val); assert_eq!(ret, 0); // Max AS size. let mut rlim: libc::rlimit = std::mem::zeroed(); rlim.rlim_cur = n_mib_bytes!(512).try_into().unwrap(); rlim.rlim_max = n_mib_bytes!(512).try_into().unwrap(); let ret = libc::setrlimit(libc::RLIMIT_AS, &rlim as *const libc::rlimit); assert_eq!(ret, 0); let ret = libc::personality(libc::ADDR_NO_RANDOMIZE as u64); assert_eq!(ret, 0); } if let Err(err) = self.drop_privileges() { log::error!("Failed to drop privileges: {:#?}", err); panic!(); } // Make sure that UID == EUID, since if this is not the case, // ld will ignore LD_PRELOAD which we need to use for targets // that normally load instrumented libraries during runtime. assert_eq!(nix::unistd::getuid(), nix::unistd::geteuid()); assert_eq!(nix::unistd::getegid(), nix::unistd::getegid()); let prog = CString::new(path.as_bytes()).unwrap(); unsafe { libc::execve(prog.as_ptr(), argv.as_ptr(), envp.as_ptr()); } unreachable!("Failed to call execve on '{}'", path); } _ => { /* The parent */ } } /* The parent */ log::info!("Forkserver has pid {}", child_pid); // Note th sid, thus we can kill the child later. // This is a sid since the child calls setsid(). self.forkserver_sid = Some(child_pid); // Close the pipe ends used by the child. unsafe { libc::close(child_receive_fd); libc::close(child_send_fd); } unsafe { libc::fcntl(self.send_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC); libc::fcntl(self.receive_fd.unwrap(), libc::F_SETFD, libc::FD_CLOEXEC); } // Wait for for hello from the child. self.wait_for_data(AFL_DEFAULT_TIMEOUT) .context("Timeout while waiting for forkserver to come up.")?; // Read the available data. let buffer = [0u8; 4]; unsafe { let ret = libc::read( self.receive_fd.unwrap(), buffer.as_ptr() as *mut libc::c_void, 4, ); if ret!= 4 { return Err(anyhow!(format!( "Failed to do handshake with forkserver. ret={}", ret ))); } // Process extended attributes used by AFL++. // Sett src/afl-forkserver.c:689 (afl_fsrv_start) let status = u32::from_ne_bytes(buffer); log::info!("Forkserver status: 0x{:x}", status); if status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE { log::info!("Got extended option FS_OPT_MAPSIZE from forkserver"); let new_map_size = ((status & 0x00fffffe) >> 1) + 1; log::info!("Target requests a map of size {} bytes", new_map_size); log::info!("Current map size is {} bytes", self.bitmap().size()); if self.bitmap_was_resize { log::info!("Already resized, skipping...."); return Ok(()); } let new_map_size = new_map_size.next_power_of_two() as usize; if new_map_size > self.bitmap().size() { log::info!("Resizing bitmap to {} bytes", new_map_size); self.stop(); let new_map = Bitmap::new_in_shm(new_map_size, 0x00); let _ = mem::replace(self.bitmap(), new_map); self.bitmap_was_resize = true; return self.start(); } } } // if self.stdout_file.is_some() { // // Take the the stdout file thus its fd gets dropped. // self.stdout_file.take(); // } // if self.stderr_file.is_some() { // // Take the the stderr file thus its fd gets dropped. // self.stderr_file.take(); // } // We are ready to fuzz! 
Ok(())
    }

    fn drop_privileges(&mut self) -> Result<()> {
        let uid_gid = self
            .config
            .as_ref()
            .map(|config| config.general.jail_uid_gid())
            .unwrap_or(None);
        if uid_gid.is_some() {
            jail::acquire_privileges()?;
        }
        if let Some((uid, gid)) = uid_gid {
            jail::drop_privileges(uid, gid, true)?;
        }
        Ok(())
    }

    /// Stops the forkserver. Must be called before calling start() again.
    /// It is safe to call this function multiple times.
    pub fn stop(&mut self) {
        if let Some(sid) = self.forkserver_sid.take() {
            unsafe {
                libc::close(self.send_fd.unwrap());
                libc::close(self.receive_fd.unwrap());
                let ret = libc::killpg(sid, SIGKILL);
                assert!(ret == 0);
                // reap it
                libc::waitpid(sid, std::ptr::null_mut() as *mut libc::c_int, 0);
            }
        }
    }

    /// Write the given bytes into the sink's input channel. This function
    /// may only be called on sinks whose input channel is InputChannel::Stdin
    /// or InputChannel::File.
    pub fn write(&mut self, data: &[u8]) {
        debug_assert!(
            self.input_channel == InputChannel::Stdin || self.input_channel == InputChannel::File
        );

        self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
        self.input_file.0.set_len(0).unwrap();
        self.input_file.0.write_all(data).unwrap();
        self.input_file.0.seek(SeekFrom::Start(0)).unwrap();
        self.input_file.0.sync_all().unwrap();
    }

    pub fn run(&mut self, timeout: Duration) -> Result<RunResult> {
        self.bitmap().reset();
        let buffer = [0u8; 4];
        let buf_ptr = buffer.as_ptr() as *mut libc::c_void;

        // Tell the forkserver to fork.
        log::trace!("Requesting fork");
        let ret = repeat_on_interrupt(|| unsafe { libc::write(self.send_fd.unwrap(), buf_ptr, 4) });
        if ret != 4 {
            error!("Fork request failed");
            return Err(anyhow!("Failed to write to send_fd: {}", ret));
        }

        log::trace!("Waiting for child pid");
        self.wait_for_data(AFL_DEFAULT_TIMEOUT)
            .context("Failed to retrieve child pid from forkserver")?;
        let ret = repeat_on_interrupt(|| unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) });
        if ret != 4 {
            error!("Failed to retrieve child pid");
            return Err(anyhow!("Failed to read from receive_non_blocking_fd"));
        }
        let child_pid = i32::from_le_bytes(buffer);
        log::trace!("Got child pid {}", child_pid);

        if child_pid <= 0 {
            log::error!("Child pid '{}' is invalid", child_pid);
            return Err(anyhow!(
                "Failed to parse child_pid. child_pid={}, bytes={:?}",
                child_pid,
                buffer
            ));
        }

        log::trace!("Waiting for child termination");
        match self.wait_for_data(timeout) {
            Ok(_) => (),
            Err(err) => {
                log::trace!("Child timed out: {:#?}", err);
                // Kill the child since it appears to have timed out.
                let kill_ret = nix::sys::signal::kill(
                    nix::unistd::Pid::from_raw(child_pid),
                    nix::sys::signal::SIGKILL,
                );
                if let Err(ref err) = kill_ret {
                    // This might just be caused by the fact that the child won the race
                    // and terminated before we killed it.
                    log::trace!("Failed to kill child: {:#?}", err);
                }

                if let Err(err) = self
                    .wait_for_data(AFL_DEFAULT_TIMEOUT)
                    .context("Child did not acknowledge termination request")
                {
                    let reason = try_get_child_exit_reason(self.forkserver_sid.unwrap());
                    log::error!(
                        "Exit reason: {:#?}, child_pid={:?}, kill_ret={:?}",
                        reason,
                        child_pid,
                        kill_ret
                    );
                    return Err(err.context(format!("child_exit_reason={:#?}", reason)));
                }

                // Consume exit status.
                let ret = unsafe { libc::read(self.receive_fd.unwrap(), buf_ptr, 4) };
                if ret != 4 {
                    log::error!("Expected 4 bytes but read {}", ret);
                }
                return Ok(RunResult::TimedOut);
            }
        }
log::trace!("Child terminated, getting exit status");
random_line_split
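run() above retries its raw pipe reads and writes through a repeat_on_interrupt() helper that is defined elsewhere in the crate; its signature here is assumed, not taken from the source. A plausible minimal version for raw libc calls:

// Assumed sketch of a repeat_on_interrupt() helper for raw libc calls.
fn repeat_on_interrupt<F: FnMut() -> isize>(mut f: F) -> isize {
    loop {
        let ret = f();
        // Raw libc calls report failure as -1 with errno set; retry only when
        // the failure was an interrupted syscall (EINTR), otherwise pass the
        // result straight through.
        if ret == -1 && std::io::Error::last_os_error().raw_os_error() == Some(libc::EINTR) {
            continue;
        }
        return ret;
    }
}

fn main() {
    // Writing to stdout through the helper; with no signal pending this
    // behaves exactly like a single write(2).
    let msg = b"hello from repeat_on_interrupt\n";
    let n = repeat_on_interrupt(|| unsafe {
        libc::write(1, msg.as_ptr() as *const libc::c_void, msg.len())
    });
    assert_eq!(n, msg.len() as isize);
}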
player.rs
use std::collections::HashMap; use std::convert::TryInto; use std::path::PathBuf; use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use dbus::arg::RefArg; use dbus::blocking::stdintf::org_freedesktop_dbus::Properties; use dbus::blocking::BlockingSender; use dbus::blocking::{Connection, Proxy}; use dbus::{arg, Message}; use log::{debug, info, warn}; use url::Url; const MPRIS2_PREFIX: &str = "org.mpris.MediaPlayer2."; const MPRIS2_PATH: &str = "/org/mpris/MediaPlayer2"; type DbusStringMap = HashMap<String, arg::Variant<Box<dyn arg::RefArg>>>; pub type ConnectionProxy<'a> = Proxy<'a, &'a Connection>; #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum PlaybackStatus { Playing, Paused, Stopped, } #[derive(Clone, Debug)] pub struct Metadata { album: Option<String>, title: String, artists: Option<Vec<String>>, file_path: PathBuf, length: i64, } impl Metadata { #[allow(dead_code)] pub fn album(&self) -> &Option<String> { &self.album } #[allow(dead_code)] pub fn title(&self) -> &String { &self.title } #[allow(dead_code)] pub fn artists(&self) -> &Option<Vec<String>> { &self.artists } pub fn file_path(&self) -> &PathBuf { &self.file_path } #[allow(dead_code)] pub fn length(&self) -> i64 { self.length } } #[derive(Debug)] pub enum Event { PlayerShutDown, PlaybackStatusChange(PlaybackStatus), Seeked { position: Duration }, MetadataChange(Option<Metadata>), } #[derive(Debug)] pub struct Progress { /// If player is stopped, metadata will be None metadata: Option<Metadata>, playback_status: PlaybackStatus, /// When this Progress was constructed, in order to calculate how old it is. instant: Instant, /// Position at the time of construction position: Duration, } impl Progress { pub fn new( playback_status: PlaybackStatus, position: Duration, metadata: Option<Metadata>, ) -> Progress { Progress { metadata, playback_status, instant: Instant::now(), position, } } pub fn metadata(&self) -> &Option<Metadata> { &self.metadata } pub fn take_metadata(self) -> Option<Metadata> { self.metadata } pub fn playback_status(&self) -> PlaybackStatus { self.playback_status } pub fn instant(&self) -> Instant { self.instant } pub fn position(&self) -> Duration { self.position } } fn query_player_property<T>(p: &ConnectionProxy, name: &str) -> Result<T, String> where for<'b> T: dbus::arg::Get<'b>, { p.get("org.mpris.MediaPlayer2.Player", name) .map_err(|e| e.to_string()) } pub fn query_player_position(p: &ConnectionProxy) -> Result<Duration, String> { let v = query_player_property::<i64>(p, "Position")?; if v < 0 { panic!("Wrong position value"); } Ok(Duration::from_micros(v.try_into().unwrap())) } fn query_player_playback_status(p: &ConnectionProxy) -> Result<PlaybackStatus, String> { query_player_property::<String>(p, "PlaybackStatus").map(|v| parse_playback_status(&v)) } fn parse_player_metadata<T: arg::RefArg>( metadata_map: HashMap<String, T>, ) -> Result<Option<Metadata>, String> { debug!("metadata_map = {:?}", metadata_map); let file_path_encoded = match metadata_map.get("xesam:url") { Some(url) => url .as_str() .ok_or("url metadata should be string")? 
.to_string(), // If playlist has reached end, new metadata event is sent, // but it doesn't contain any of the following keys None => return Ok(None), }; let file_path_url = Url::parse(&file_path_encoded) .map_err(|e| format!("invalid format of url metadata: {}", e.to_string()))?; let file_path = file_path_url .to_file_path() .map_err(|_| format!("invalid format of url metadata: {}", file_path_url))?; let album = metadata_map .get("xesam:album") .map(|v| { v.as_str() .ok_or("album metadata should be string") .map(|x| x.to_string()) }) .transpose()?; let title = metadata_map["xesam:title"] .as_str() .ok_or("title metadata should be string")? .to_string(); let length = metadata_map["mpris:length"] .as_i64() .ok_or("length metadata should be i64")?; let artists = metadata_map .get("xesam:artist") .map(|v| { v.as_iter() .ok_or("artist metadata should be iterator")? .next() .ok_or("artist metadata should contain at least one entry")? .as_iter() .ok_or("artist metadata should have nested iterator")? .map(|x| { Ok(x.as_str() .ok_or("artist metadata values should be string")? .to_string()) }) .collect::<Result<Vec<String>, &'static str>>() }) .transpose()?; Ok(Some(Metadata { album, title, artists, file_path, length, })) } fn query_player_metadata(p: &ConnectionProxy) -> Result<Option<Metadata>, String> { query_player_property::<DbusStringMap>(p, "Metadata").and_then(parse_player_metadata) } pub fn query_progress(p: &ConnectionProxy) -> Result<Progress, String> { let playback_status = query_player_playback_status(p)?; let position = query_player_position(p)?; let instant = Instant::now(); let metadata = if playback_status!= PlaybackStatus::Stopped { query_player_metadata(p)? } else { None }; Ok(Progress { metadata, playback_status, instant, position, }) } fn parse_playback_status(playback_status: &str) -> PlaybackStatus { match playback_status { "Playing" => PlaybackStatus::Playing, "Paused" => PlaybackStatus::Paused, "Stopped" => PlaybackStatus::Stopped, _ => panic!(""), } } fn query_unique_owner_name<S: Into<String>>(c: &Connection, bus_name: S) -> Result<String, String> { let get_name_owner = Message::new_method_call( "org.freedesktop.DBus", "/", "org.freedesktop.DBus", "GetNameOwner", ) .map_err(|e| e.to_string())? .append1(bus_name.into());
.map(|reply| { reply .get1() .expect("GetNameOwner must have name as first member") }) } fn query_all_player_buses(c: &Connection) -> Result<Vec<String>, String> { let list_names = Message::new_method_call( "org.freedesktop.DBus", "/", "org.freedesktop.DBus", "ListNames", )?; let reply = c .send_with_reply_and_block(list_names, Duration::from_millis(500)) .map_err(|e| e.to_string())?; let names: arg::Array<&str, _> = reply.read1().map_err(|e| e.to_string())?; Ok(names .filter(|name| name.starts_with(MPRIS2_PREFIX)) .map(|str_ref| str_ref.to_owned()) .collect()) } fn get_message_item_dict( a: &arg::Variant<Box<dyn arg::RefArg>>, ) -> HashMap<String, Box<&dyn arg::RefArg>> { let mut it = a.as_iter().unwrap(); let d_variant = it.next().unwrap(); let d_it = d_variant.as_iter().unwrap(); let v = d_it.collect::<Vec<_>>(); v.chunks(2) .map(|c| { let key = c[0].as_str().unwrap(); (key.to_string(), Box::new(c[1])) }) .collect() } #[derive(Debug)] pub struct DbusPropertiesChangedHappened { pub interface_name: String, pub changed_properties: DbusStringMap, pub invalidated_properties: Vec<String>, } impl dbus::message::SignalArgs for DbusPropertiesChangedHappened { const NAME: &'static str = "PropertiesChanged"; const INTERFACE: &'static str = "org.freedesktop.DBus.Properties"; } impl arg::ReadAll for DbusPropertiesChangedHappened { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { interface_name: i.read()?, changed_properties: i.read()?, invalidated_properties: i.read()?, }) } } #[derive(Debug)] pub struct MediaPlayer2SeekedHappened { pub position_us: i64, } impl dbus::message::SignalArgs for MediaPlayer2SeekedHappened { const NAME: &'static str = "Seeked"; const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player"; } impl arg::ReadAll for MediaPlayer2SeekedHappened { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { position_us: i.read()?, }) } } #[derive(Debug)] pub struct DbusNameOwnedChanged { pub name: String, pub new_owner: String, pub old_owner: String, } impl dbus::message::SignalArgs for DbusNameOwnedChanged { const NAME: &'static str = "NameOwnerChanged"; const INTERFACE: &'static str = "org.freedesktop.DBus"; } impl arg::ReadAll for DbusNameOwnedChanged { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { name: i.read()?, new_owner: i.read()?, old_owner: i.read()?, }) } } pub fn get_connection_proxy<'a>( c: &'a Connection, player_owner_name: &'a str, ) -> ConnectionProxy<'a> { c.with_proxy(player_owner_name, MPRIS2_PATH, Duration::from_millis(5000)) } fn get_mediaplayer2_seeked_handler( sender: Sender<Event>, ) -> impl Fn(MediaPlayer2SeekedHappened, &Connection) -> bool { move |e: MediaPlayer2SeekedHappened, _: &Connection| { debug!("Seek happened: {:?}", e); if e.position_us < 0 { panic!( "Position value must be positive number, found {}", e.position_us ); } sender .send(Event::Seeked { position: Duration::from_micros(e.position_us as u64), }) .unwrap(); true } } fn get_dbus_properties_changed_handler( sender: Sender<Event>, ) -> impl Fn(DbusPropertiesChangedHappened, &Connection) -> bool { move |e: DbusPropertiesChangedHappened, _: &Connection| { debug!("DBus.Properties happened: {:?}", e); if e.interface_name == "org.mpris.MediaPlayer2.Player" { for (k, v) in &e.changed_properties { match k.as_ref() { "PlaybackStatus" => { let playback_status = v.as_str().unwrap(); debug!("playback_status = {:?}", playback_status); sender .send(Event::PlaybackStatusChange(parse_playback_status( &playback_status, 
))) .unwrap(); } "Metadata" => { let metadata_map = get_message_item_dict(v); debug!("metadata_map = {:?}", metadata_map); let metadata = parse_player_metadata(metadata_map).unwrap(); sender.send(Event::MetadataChange(metadata)).unwrap(); } _ => { warn!("Unknown PropertiesChanged event:"); for p in &e.changed_properties { warn!(" changed_property = {:?}", p); } warn!( " invalidated_properties = {:?}", e.invalidated_properties ); } } } } true } } fn get_dbus_name_owned_changed_handler( sender: Sender<Event>, player_owner_name: String, ) -> impl Fn(DbusNameOwnedChanged, &Connection) -> bool { move |e: DbusNameOwnedChanged, _: &Connection| { debug!("DbusNameOwnedChanged happened: {:?}", e); if e.name == player_owner_name && e.old_owner.is_empty() && e.new_owner == player_owner_name { sender.send(Event::PlayerShutDown).unwrap(); } true } } pub fn subscribe<'a>( c: &'a Connection, player: &str, sender: &Sender<Event>, ) -> Result<String, String> { let all_player_buses = query_all_player_buses(&c)?; let player_bus = format!("{}{}", MPRIS2_PREFIX, player); if!all_player_buses.contains(&player_bus) { info!("all players = {:?}", all_player_buses); return Err("Player not running".to_owned()); } let player_owner_name = query_unique_owner_name(&c, player_bus)?; debug!("player_owner_name = {:?}", player_owner_name); let p = get_connection_proxy(c, &player_owner_name); p.match_signal(get_dbus_properties_changed_handler(sender.clone())) .map_err(|e| e.to_string())?; p.match_signal(get_mediaplayer2_seeked_handler(sender.clone())) .map_err(|e| e.to_string())?; // p.match_signal(|_: MediaPlayer2TrackListChangeHappened, _: &Connection| { // debug!("TrackList happened"); // true // }).map_err(|e| e.to_string())?; let proxy_generic_dbus = c.with_proxy( "org.freedesktop.DBus", "/org/freedesktop/DBus", Duration::from_millis(5000), ); proxy_generic_dbus .match_signal(get_dbus_name_owned_changed_handler( sender.clone(), player_owner_name.clone(), )) .map_err(|e| e.to_string())?; Ok(player_owner_name) }
c.send_with_reply_and_block(get_name_owner, Duration::from_millis(100)) .map_err(|e| e.to_string())
random_line_split
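player.rs reads MPRIS properties through the dbus crate's blocking Properties trait. A minimal end-to-end query using the same call shape as query_player_property above (the bus name org.mpris.MediaPlayer2.vlc is only an example; any running MPRIS2 player works):

use std::time::Duration;

use dbus::blocking::stdintf::org_freedesktop_dbus::Properties;
use dbus::blocking::Connection;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let conn = Connection::new_session()?;
    let proxy = conn.with_proxy(
        "org.mpris.MediaPlayer2.vlc",
        "/org/mpris/MediaPlayer2",
        Duration::from_millis(5000),
    );
    // Same call shape as query_player_property::<String> in the module.
    let status: String = proxy.get("org.mpris.MediaPlayer2.Player", "PlaybackStatus")?;
    println!("PlaybackStatus = {}", status);
    Ok(())
}

query_player_position builds on the same call with T = i64 and then converts the microsecond count into a Duration.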
player.rs
use std::collections::HashMap; use std::convert::TryInto; use std::path::PathBuf; use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use dbus::arg::RefArg; use dbus::blocking::stdintf::org_freedesktop_dbus::Properties; use dbus::blocking::BlockingSender; use dbus::blocking::{Connection, Proxy}; use dbus::{arg, Message}; use log::{debug, info, warn}; use url::Url; const MPRIS2_PREFIX: &str = "org.mpris.MediaPlayer2."; const MPRIS2_PATH: &str = "/org/mpris/MediaPlayer2"; type DbusStringMap = HashMap<String, arg::Variant<Box<dyn arg::RefArg>>>; pub type ConnectionProxy<'a> = Proxy<'a, &'a Connection>; #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum PlaybackStatus { Playing, Paused, Stopped, } #[derive(Clone, Debug)] pub struct Metadata { album: Option<String>, title: String, artists: Option<Vec<String>>, file_path: PathBuf, length: i64, } impl Metadata { #[allow(dead_code)] pub fn album(&self) -> &Option<String> { &self.album } #[allow(dead_code)] pub fn title(&self) -> &String { &self.title } #[allow(dead_code)] pub fn artists(&self) -> &Option<Vec<String>> { &self.artists } pub fn file_path(&self) -> &PathBuf { &self.file_path } #[allow(dead_code)] pub fn length(&self) -> i64 { self.length } } #[derive(Debug)] pub enum Event { PlayerShutDown, PlaybackStatusChange(PlaybackStatus), Seeked { position: Duration }, MetadataChange(Option<Metadata>), } #[derive(Debug)] pub struct Progress { /// If player is stopped, metadata will be None metadata: Option<Metadata>, playback_status: PlaybackStatus, /// When this Progress was constructed, in order to calculate how old it is. instant: Instant, /// Position at the time of construction position: Duration, } impl Progress { pub fn new( playback_status: PlaybackStatus, position: Duration, metadata: Option<Metadata>, ) -> Progress { Progress { metadata, playback_status, instant: Instant::now(), position, } } pub fn metadata(&self) -> &Option<Metadata> { &self.metadata } pub fn take_metadata(self) -> Option<Metadata> { self.metadata } pub fn playback_status(&self) -> PlaybackStatus { self.playback_status } pub fn instant(&self) -> Instant { self.instant } pub fn position(&self) -> Duration { self.position } } fn query_player_property<T>(p: &ConnectionProxy, name: &str) -> Result<T, String> where for<'b> T: dbus::arg::Get<'b>, { p.get("org.mpris.MediaPlayer2.Player", name) .map_err(|e| e.to_string()) } pub fn query_player_position(p: &ConnectionProxy) -> Result<Duration, String> { let v = query_player_property::<i64>(p, "Position")?; if v < 0 { panic!("Wrong position value"); } Ok(Duration::from_micros(v.try_into().unwrap())) } fn query_player_playback_status(p: &ConnectionProxy) -> Result<PlaybackStatus, String>
fn parse_player_metadata<T: arg::RefArg>( metadata_map: HashMap<String, T>, ) -> Result<Option<Metadata>, String> { debug!("metadata_map = {:?}", metadata_map); let file_path_encoded = match metadata_map.get("xesam:url") { Some(url) => url .as_str() .ok_or("url metadata should be string")? .to_string(), // If playlist has reached end, new metadata event is sent, // but it doesn't contain any of the following keys None => return Ok(None), }; let file_path_url = Url::parse(&file_path_encoded) .map_err(|e| format!("invalid format of url metadata: {}", e.to_string()))?; let file_path = file_path_url .to_file_path() .map_err(|_| format!("invalid format of url metadata: {}", file_path_url))?; let album = metadata_map .get("xesam:album") .map(|v| { v.as_str() .ok_or("album metadata should be string") .map(|x| x.to_string()) }) .transpose()?; let title = metadata_map["xesam:title"] .as_str() .ok_or("title metadata should be string")? .to_string(); let length = metadata_map["mpris:length"] .as_i64() .ok_or("length metadata should be i64")?; let artists = metadata_map .get("xesam:artist") .map(|v| { v.as_iter() .ok_or("artist metadata should be iterator")? .next() .ok_or("artist metadata should contain at least one entry")? .as_iter() .ok_or("artist metadata should have nested iterator")? .map(|x| { Ok(x.as_str() .ok_or("artist metadata values should be string")? .to_string()) }) .collect::<Result<Vec<String>, &'static str>>() }) .transpose()?; Ok(Some(Metadata { album, title, artists, file_path, length, })) } fn query_player_metadata(p: &ConnectionProxy) -> Result<Option<Metadata>, String> { query_player_property::<DbusStringMap>(p, "Metadata").and_then(parse_player_metadata) } pub fn query_progress(p: &ConnectionProxy) -> Result<Progress, String> { let playback_status = query_player_playback_status(p)?; let position = query_player_position(p)?; let instant = Instant::now(); let metadata = if playback_status!= PlaybackStatus::Stopped { query_player_metadata(p)? } else { None }; Ok(Progress { metadata, playback_status, instant, position, }) } fn parse_playback_status(playback_status: &str) -> PlaybackStatus { match playback_status { "Playing" => PlaybackStatus::Playing, "Paused" => PlaybackStatus::Paused, "Stopped" => PlaybackStatus::Stopped, _ => panic!(""), } } fn query_unique_owner_name<S: Into<String>>(c: &Connection, bus_name: S) -> Result<String, String> { let get_name_owner = Message::new_method_call( "org.freedesktop.DBus", "/", "org.freedesktop.DBus", "GetNameOwner", ) .map_err(|e| e.to_string())? 
.append1(bus_name.into()); c.send_with_reply_and_block(get_name_owner, Duration::from_millis(100)) .map_err(|e| e.to_string()) .map(|reply| { reply .get1() .expect("GetNameOwner must have name as first member") }) } fn query_all_player_buses(c: &Connection) -> Result<Vec<String>, String> { let list_names = Message::new_method_call( "org.freedesktop.DBus", "/", "org.freedesktop.DBus", "ListNames", )?; let reply = c .send_with_reply_and_block(list_names, Duration::from_millis(500)) .map_err(|e| e.to_string())?; let names: arg::Array<&str, _> = reply.read1().map_err(|e| e.to_string())?; Ok(names .filter(|name| name.starts_with(MPRIS2_PREFIX)) .map(|str_ref| str_ref.to_owned()) .collect()) } fn get_message_item_dict( a: &arg::Variant<Box<dyn arg::RefArg>>, ) -> HashMap<String, Box<&dyn arg::RefArg>> { let mut it = a.as_iter().unwrap(); let d_variant = it.next().unwrap(); let d_it = d_variant.as_iter().unwrap(); let v = d_it.collect::<Vec<_>>(); v.chunks(2) .map(|c| { let key = c[0].as_str().unwrap(); (key.to_string(), Box::new(c[1])) }) .collect() } #[derive(Debug)] pub struct DbusPropertiesChangedHappened { pub interface_name: String, pub changed_properties: DbusStringMap, pub invalidated_properties: Vec<String>, } impl dbus::message::SignalArgs for DbusPropertiesChangedHappened { const NAME: &'static str = "PropertiesChanged"; const INTERFACE: &'static str = "org.freedesktop.DBus.Properties"; } impl arg::ReadAll for DbusPropertiesChangedHappened { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { interface_name: i.read()?, changed_properties: i.read()?, invalidated_properties: i.read()?, }) } } #[derive(Debug)] pub struct MediaPlayer2SeekedHappened { pub position_us: i64, } impl dbus::message::SignalArgs for MediaPlayer2SeekedHappened { const NAME: &'static str = "Seeked"; const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player"; } impl arg::ReadAll for MediaPlayer2SeekedHappened { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { position_us: i.read()?, }) } } #[derive(Debug)] pub struct DbusNameOwnedChanged { pub name: String, pub new_owner: String, pub old_owner: String, } impl dbus::message::SignalArgs for DbusNameOwnedChanged { const NAME: &'static str = "NameOwnerChanged"; const INTERFACE: &'static str = "org.freedesktop.DBus"; } impl arg::ReadAll for DbusNameOwnedChanged { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { name: i.read()?, new_owner: i.read()?, old_owner: i.read()?, }) } } pub fn get_connection_proxy<'a>( c: &'a Connection, player_owner_name: &'a str, ) -> ConnectionProxy<'a> { c.with_proxy(player_owner_name, MPRIS2_PATH, Duration::from_millis(5000)) } fn get_mediaplayer2_seeked_handler( sender: Sender<Event>, ) -> impl Fn(MediaPlayer2SeekedHappened, &Connection) -> bool { move |e: MediaPlayer2SeekedHappened, _: &Connection| { debug!("Seek happened: {:?}", e); if e.position_us < 0 { panic!( "Position value must be positive number, found {}", e.position_us ); } sender .send(Event::Seeked { position: Duration::from_micros(e.position_us as u64), }) .unwrap(); true } } fn get_dbus_properties_changed_handler( sender: Sender<Event>, ) -> impl Fn(DbusPropertiesChangedHappened, &Connection) -> bool { move |e: DbusPropertiesChangedHappened, _: &Connection| { debug!("DBus.Properties happened: {:?}", e); if e.interface_name == "org.mpris.MediaPlayer2.Player" { for (k, v) in &e.changed_properties { match k.as_ref() { "PlaybackStatus" => { let playback_status = v.as_str().unwrap(); 
debug!("playback_status = {:?}", playback_status); sender .send(Event::PlaybackStatusChange(parse_playback_status( &playback_status, ))) .unwrap(); } "Metadata" => { let metadata_map = get_message_item_dict(v); debug!("metadata_map = {:?}", metadata_map); let metadata = parse_player_metadata(metadata_map).unwrap(); sender.send(Event::MetadataChange(metadata)).unwrap(); } _ => { warn!("Unknown PropertiesChanged event:"); for p in &e.changed_properties { warn!(" changed_property = {:?}", p); } warn!( " invalidated_properties = {:?}", e.invalidated_properties ); } } } } true } } fn get_dbus_name_owned_changed_handler( sender: Sender<Event>, player_owner_name: String, ) -> impl Fn(DbusNameOwnedChanged, &Connection) -> bool { move |e: DbusNameOwnedChanged, _: &Connection| { debug!("DbusNameOwnedChanged happened: {:?}", e); if e.name == player_owner_name && e.old_owner.is_empty() && e.new_owner == player_owner_name { sender.send(Event::PlayerShutDown).unwrap(); } true } } pub fn subscribe<'a>( c: &'a Connection, player: &str, sender: &Sender<Event>, ) -> Result<String, String> { let all_player_buses = query_all_player_buses(&c)?; let player_bus = format!("{}{}", MPRIS2_PREFIX, player); if!all_player_buses.contains(&player_bus) { info!("all players = {:?}", all_player_buses); return Err("Player not running".to_owned()); } let player_owner_name = query_unique_owner_name(&c, player_bus)?; debug!("player_owner_name = {:?}", player_owner_name); let p = get_connection_proxy(c, &player_owner_name); p.match_signal(get_dbus_properties_changed_handler(sender.clone())) .map_err(|e| e.to_string())?; p.match_signal(get_mediaplayer2_seeked_handler(sender.clone())) .map_err(|e| e.to_string())?; // p.match_signal(|_: MediaPlayer2TrackListChangeHappened, _: &Connection| { // debug!("TrackList happened"); // true // }).map_err(|e| e.to_string())?; let proxy_generic_dbus = c.with_proxy( "org.freedesktop.DBus", "/org/freedesktop/DBus", Duration::from_millis(5000), ); proxy_generic_dbus .match_signal(get_dbus_name_owned_changed_handler( sender.clone(), player_owner_name.clone(), )) .map_err(|e| e.to_string())?; Ok(player_owner_name) }
{ query_player_property::<String>(p, "PlaybackStatus").map(|v| parse_playback_status(&v)) }
identifier_body
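The SignalArgs + ReadAll pattern above is what lets subscribe() register plain closures with match_signal. A hypothetical, self-contained sketch of the same registration for the Seeked signal, including the connection pumping that a caller of this module is expected to do (destination name is an example; substitute a unique owner name as subscribe() does):

use std::time::Duration;

use dbus::arg;
use dbus::blocking::Connection;
use dbus::message::SignalArgs;

#[derive(Debug)]
struct Seeked {
    position_us: i64,
}

impl SignalArgs for Seeked {
    const NAME: &'static str = "Seeked";
    const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player";
}

impl arg::ReadAll for Seeked {
    fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> {
        Ok(Seeked { position_us: i.read()? })
    }
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let conn = Connection::new_session()?;
    let proxy = conn.with_proxy(
        "org.mpris.MediaPlayer2.vlc",
        "/org/mpris/MediaPlayer2",
        Duration::from_millis(5000),
    );
    proxy.match_signal(|s: Seeked, _: &Connection| {
        println!("seeked to {:?}", Duration::from_micros(s.position_us.max(0) as u64));
        true // keep the match registered
    })?;
    // Pump the connection; process() waits up to the timeout for one batch
    // of messages and dispatches the registered handlers.
    loop {
        conn.process(Duration::from_millis(1000))?;
    }
}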
player.rs
use std::collections::HashMap; use std::convert::TryInto; use std::path::PathBuf; use std::sync::mpsc::Sender; use std::time::{Duration, Instant}; use dbus::arg::RefArg; use dbus::blocking::stdintf::org_freedesktop_dbus::Properties; use dbus::blocking::BlockingSender; use dbus::blocking::{Connection, Proxy}; use dbus::{arg, Message}; use log::{debug, info, warn}; use url::Url; const MPRIS2_PREFIX: &str = "org.mpris.MediaPlayer2."; const MPRIS2_PATH: &str = "/org/mpris/MediaPlayer2"; type DbusStringMap = HashMap<String, arg::Variant<Box<dyn arg::RefArg>>>; pub type ConnectionProxy<'a> = Proxy<'a, &'a Connection>; #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum PlaybackStatus { Playing, Paused, Stopped, } #[derive(Clone, Debug)] pub struct Metadata { album: Option<String>, title: String, artists: Option<Vec<String>>, file_path: PathBuf, length: i64, } impl Metadata { #[allow(dead_code)] pub fn album(&self) -> &Option<String> { &self.album } #[allow(dead_code)] pub fn title(&self) -> &String { &self.title } #[allow(dead_code)] pub fn artists(&self) -> &Option<Vec<String>> { &self.artists } pub fn file_path(&self) -> &PathBuf { &self.file_path } #[allow(dead_code)] pub fn length(&self) -> i64 { self.length } } #[derive(Debug)] pub enum Event { PlayerShutDown, PlaybackStatusChange(PlaybackStatus), Seeked { position: Duration }, MetadataChange(Option<Metadata>), } #[derive(Debug)] pub struct Progress { /// If player is stopped, metadata will be None metadata: Option<Metadata>, playback_status: PlaybackStatus, /// When this Progress was constructed, in order to calculate how old it is. instant: Instant, /// Position at the time of construction position: Duration, } impl Progress { pub fn new( playback_status: PlaybackStatus, position: Duration, metadata: Option<Metadata>, ) -> Progress { Progress { metadata, playback_status, instant: Instant::now(), position, } } pub fn metadata(&self) -> &Option<Metadata> { &self.metadata } pub fn take_metadata(self) -> Option<Metadata> { self.metadata } pub fn playback_status(&self) -> PlaybackStatus { self.playback_status } pub fn instant(&self) -> Instant { self.instant } pub fn
(&self) -> Duration { self.position } } fn query_player_property<T>(p: &ConnectionProxy, name: &str) -> Result<T, String> where for<'b> T: dbus::arg::Get<'b>, { p.get("org.mpris.MediaPlayer2.Player", name) .map_err(|e| e.to_string()) } pub fn query_player_position(p: &ConnectionProxy) -> Result<Duration, String> { let v = query_player_property::<i64>(p, "Position")?; if v < 0 { panic!("Wrong position value"); } Ok(Duration::from_micros(v.try_into().unwrap())) } fn query_player_playback_status(p: &ConnectionProxy) -> Result<PlaybackStatus, String> { query_player_property::<String>(p, "PlaybackStatus").map(|v| parse_playback_status(&v)) } fn parse_player_metadata<T: arg::RefArg>( metadata_map: HashMap<String, T>, ) -> Result<Option<Metadata>, String> { debug!("metadata_map = {:?}", metadata_map); let file_path_encoded = match metadata_map.get("xesam:url") { Some(url) => url .as_str() .ok_or("url metadata should be string")? .to_string(), // If playlist has reached end, new metadata event is sent, // but it doesn't contain any of the following keys None => return Ok(None), }; let file_path_url = Url::parse(&file_path_encoded) .map_err(|e| format!("invalid format of url metadata: {}", e.to_string()))?; let file_path = file_path_url .to_file_path() .map_err(|_| format!("invalid format of url metadata: {}", file_path_url))?; let album = metadata_map .get("xesam:album") .map(|v| { v.as_str() .ok_or("album metadata should be string") .map(|x| x.to_string()) }) .transpose()?; let title = metadata_map["xesam:title"] .as_str() .ok_or("title metadata should be string")? .to_string(); let length = metadata_map["mpris:length"] .as_i64() .ok_or("length metadata should be i64")?; let artists = metadata_map .get("xesam:artist") .map(|v| { v.as_iter() .ok_or("artist metadata should be iterator")? .next() .ok_or("artist metadata should contain at least one entry")? .as_iter() .ok_or("artist metadata should have nested iterator")? .map(|x| { Ok(x.as_str() .ok_or("artist metadata values should be string")? .to_string()) }) .collect::<Result<Vec<String>, &'static str>>() }) .transpose()?; Ok(Some(Metadata { album, title, artists, file_path, length, })) } fn query_player_metadata(p: &ConnectionProxy) -> Result<Option<Metadata>, String> { query_player_property::<DbusStringMap>(p, "Metadata").and_then(parse_player_metadata) } pub fn query_progress(p: &ConnectionProxy) -> Result<Progress, String> { let playback_status = query_player_playback_status(p)?; let position = query_player_position(p)?; let instant = Instant::now(); let metadata = if playback_status!= PlaybackStatus::Stopped { query_player_metadata(p)? } else { None }; Ok(Progress { metadata, playback_status, instant, position, }) } fn parse_playback_status(playback_status: &str) -> PlaybackStatus { match playback_status { "Playing" => PlaybackStatus::Playing, "Paused" => PlaybackStatus::Paused, "Stopped" => PlaybackStatus::Stopped, _ => panic!(""), } } fn query_unique_owner_name<S: Into<String>>(c: &Connection, bus_name: S) -> Result<String, String> { let get_name_owner = Message::new_method_call( "org.freedesktop.DBus", "/", "org.freedesktop.DBus", "GetNameOwner", ) .map_err(|e| e.to_string())? 
.append1(bus_name.into()); c.send_with_reply_and_block(get_name_owner, Duration::from_millis(100)) .map_err(|e| e.to_string()) .map(|reply| { reply .get1() .expect("GetNameOwner must have name as first member") }) } fn query_all_player_buses(c: &Connection) -> Result<Vec<String>, String> { let list_names = Message::new_method_call( "org.freedesktop.DBus", "/", "org.freedesktop.DBus", "ListNames", )?; let reply = c .send_with_reply_and_block(list_names, Duration::from_millis(500)) .map_err(|e| e.to_string())?; let names: arg::Array<&str, _> = reply.read1().map_err(|e| e.to_string())?; Ok(names .filter(|name| name.starts_with(MPRIS2_PREFIX)) .map(|str_ref| str_ref.to_owned()) .collect()) } fn get_message_item_dict( a: &arg::Variant<Box<dyn arg::RefArg>>, ) -> HashMap<String, Box<&dyn arg::RefArg>> { let mut it = a.as_iter().unwrap(); let d_variant = it.next().unwrap(); let d_it = d_variant.as_iter().unwrap(); let v = d_it.collect::<Vec<_>>(); v.chunks(2) .map(|c| { let key = c[0].as_str().unwrap(); (key.to_string(), Box::new(c[1])) }) .collect() } #[derive(Debug)] pub struct DbusPropertiesChangedHappened { pub interface_name: String, pub changed_properties: DbusStringMap, pub invalidated_properties: Vec<String>, } impl dbus::message::SignalArgs for DbusPropertiesChangedHappened { const NAME: &'static str = "PropertiesChanged"; const INTERFACE: &'static str = "org.freedesktop.DBus.Properties"; } impl arg::ReadAll for DbusPropertiesChangedHappened { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { interface_name: i.read()?, changed_properties: i.read()?, invalidated_properties: i.read()?, }) } } #[derive(Debug)] pub struct MediaPlayer2SeekedHappened { pub position_us: i64, } impl dbus::message::SignalArgs for MediaPlayer2SeekedHappened { const NAME: &'static str = "Seeked"; const INTERFACE: &'static str = "org.mpris.MediaPlayer2.Player"; } impl arg::ReadAll for MediaPlayer2SeekedHappened { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { position_us: i.read()?, }) } } #[derive(Debug)] pub struct DbusNameOwnedChanged { // Field order mirrors the signal's argument order: name, old_owner, new_owner. pub name: String, pub old_owner: String, pub new_owner: String, } impl dbus::message::SignalArgs for DbusNameOwnedChanged { const NAME: &'static str = "NameOwnerChanged"; const INTERFACE: &'static str = "org.freedesktop.DBus"; } impl arg::ReadAll for DbusNameOwnedChanged { fn read(i: &mut arg::Iter) -> Result<Self, arg::TypeMismatchError> { Ok(Self { name: i.read()?, old_owner: i.read()?, new_owner: i.read()?, }) } } pub fn get_connection_proxy<'a>( c: &'a Connection, player_owner_name: &'a str, ) -> ConnectionProxy<'a> { c.with_proxy(player_owner_name, MPRIS2_PATH, Duration::from_millis(5000)) } fn get_mediaplayer2_seeked_handler( sender: Sender<Event>, ) -> impl Fn(MediaPlayer2SeekedHappened, &Connection) -> bool { move |e: MediaPlayer2SeekedHappened, _: &Connection| { debug!("Seek happened: {:?}", e); if e.position_us < 0 { panic!( "Position value must be non-negative, found {}", e.position_us ); } sender .send(Event::Seeked { position: Duration::from_micros(e.position_us as u64), }) .unwrap(); true } } fn get_dbus_properties_changed_handler( sender: Sender<Event>, ) -> impl Fn(DbusPropertiesChangedHappened, &Connection) -> bool { move |e: DbusPropertiesChangedHappened, _: &Connection| { debug!("DBus.Properties happened: {:?}", e); if e.interface_name == "org.mpris.MediaPlayer2.Player" { for (k, v) in &e.changed_properties { match k.as_ref() { "PlaybackStatus" => { let playback_status = v.as_str().unwrap();
debug!("playback_status = {:?}", playback_status); sender .send(Event::PlaybackStatusChange(parse_playback_status( playback_status, ))) .unwrap(); } "Metadata" => { let metadata_map = get_message_item_dict(v); debug!("metadata_map = {:?}", metadata_map); let metadata = parse_player_metadata(metadata_map).unwrap(); sender.send(Event::MetadataChange(metadata)).unwrap(); } _ => { warn!("Unknown PropertiesChanged event:"); for p in &e.changed_properties { warn!(" changed_property = {:?}", p); } warn!( " invalidated_properties = {:?}", e.invalidated_properties ); } } } } true } } fn get_dbus_name_owned_changed_handler( sender: Sender<Event>, player_owner_name: String, ) -> impl Fn(DbusNameOwnedChanged, &Connection) -> bool { move |e: DbusNameOwnedChanged, _: &Connection| { debug!("DbusNameOwnedChanged happened: {:?}", e); // The player's unique name just lost its owner: the player shut down. if e.name == player_owner_name && e.new_owner.is_empty() && e.old_owner == player_owner_name { sender.send(Event::PlayerShutDown).unwrap(); } true } } pub fn subscribe<'a>( c: &'a Connection, player: &str, sender: &Sender<Event>, ) -> Result<String, String> { let all_player_buses = query_all_player_buses(c)?; let player_bus = format!("{}{}", MPRIS2_PREFIX, player); if !all_player_buses.contains(&player_bus) { info!("all players = {:?}", all_player_buses); return Err("Player not running".to_owned()); } let player_owner_name = query_unique_owner_name(c, player_bus)?; debug!("player_owner_name = {:?}", player_owner_name); let p = get_connection_proxy(c, &player_owner_name); p.match_signal(get_dbus_properties_changed_handler(sender.clone())) .map_err(|e| e.to_string())?; p.match_signal(get_mediaplayer2_seeked_handler(sender.clone())) .map_err(|e| e.to_string())?; // p.match_signal(|_: MediaPlayer2TrackListChangeHappened, _: &Connection| { // debug!("TrackList happened"); // true // }).map_err(|e| e.to_string())?; let proxy_generic_dbus = c.with_proxy( "org.freedesktop.DBus", "/org/freedesktop/DBus", Duration::from_millis(5000), ); proxy_generic_dbus .match_signal(get_dbus_name_owned_changed_handler( sender.clone(), player_owner_name.clone(), )) .map_err(|e| e.to_string())?; Ok(player_owner_name) }
position
identifier_name
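The `Progress` type in the file above stores an `Instant` alongside the sampled `position` precisely so a caller can work out how stale the snapshot is. Below is a minimal, hypothetical sketch of that idea (the `estimated_position` helper and the local `PlaybackStatus` enum are illustrative stand-ins, not part of the file): while the player reports `Playing`, the true position is roughly the sampled position plus the time elapsed since the sample was taken.

use std::time::{Duration, Instant};

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PlaybackStatus {
    Playing,
    Paused,
    Stopped,
}

// Extrapolate the current position from a snapshot taken at `instant`.
// Only a playing player advances; paused/stopped positions are returned as-is.
fn estimated_position(status: PlaybackStatus, instant: Instant, position: Duration) -> Duration {
    match status {
        PlaybackStatus::Playing => position + instant.elapsed(),
        PlaybackStatus::Paused | PlaybackStatus::Stopped => position,
    }
}

fn main() {
    let (status, instant, position) =
        (PlaybackStatus::Playing, Instant::now(), Duration::from_secs(42));
    std::thread::sleep(Duration::from_millis(10));
    let now = estimated_position(status, instant, position);
    assert!(now >= Duration::from_secs(42));
    println!("estimated position: {:?}", now);
}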
did.rs
#[cfg(feature = "alloc")] use alloc::string::String; #[cfg(feature = "alloc")] use alloc::string::ToString as _; use core::cmp::Ordering; use core::convert::TryFrom; use core::fmt::Debug; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::hash::Hash; use core::hash::Hasher; use core::str::FromStr; use crate::core::Core; use crate::error::Error; use crate::error::Result; #[derive(Clone, Copy)] pub struct Inspect<'a>(&'a DID); impl Debug for Inspect<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.debug_struct("DID") .field("method", &self.0.method()) .field("method_id", &self.0.method_id()) .field("path", &self.0.path()) .field("query", &self.0.query()) .field("fragment", &self.0.fragment()) .finish() } } /// A Decentralized Identifier (DID). /// /// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/) #[derive(Clone)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))] pub struct DID { data: String, core: Core, } impl DID { /// The URL scheme for Decentralized Identifiers. pub const SCHEME: &'static str = "did"; /// Parses a [`DID`] from the provided `input`. /// /// # Errors /// /// Returns `Err` if any DID segments are invalid. pub fn parse(input: impl AsRef<str>) -> Result<Self> { Ok(Self { data: input.as_ref().to_string(), core: Core::parse(input)?, }) } /// Returns a wrapped `DID` with a more detailed `Debug` implementation. #[inline] pub const fn inspect(&self) -> Inspect { Inspect(self) } /// Returns the serialized [`DID`]. /// /// This is fast since the serialized value is stored in the [`DID`]. #[inline] pub fn as_str(&self) -> &str { &*self.data } /// Consumes the [`DID`] and returns the serialization. #[cfg(feature = "alloc")] #[inline] pub fn into_string(self) -> String { self.data } /// Returns the [`DID`] scheme. See [`DID::SCHEME`]. #[inline] pub const fn scheme(&self) -> &'static str { DID::SCHEME } /// Returns the [`DID`] authority. #[inline] pub fn authority(&self) -> &str { self.core.authority(self.as_str()) } /// Returns the [`DID`] method name. #[inline] pub fn method(&self) -> &str { self.core.method(self.as_str()) } /// Returns the [`DID`] method-specific ID. #[inline] pub fn method_id(&self) -> &str { self.core.method_id(self.as_str()) } /// Returns the [`DID`] path. #[inline] pub fn path(&self) -> &str { self.core.path(self.as_str()) } /// Returns the [`DID`] method query, if any. #[inline] pub fn query(&self) -> Option<&str> { self.core.query(self.as_str()) } /// Returns the [`DID`] method fragment, if any. #[inline] pub fn fragment(&self) -> Option<&str> { self.core.fragment(self.as_str()) } /// Parses the [`DID`] query and returns an iterator of (key, value) pairs. #[inline] pub fn query_pairs(&self) -> form_urlencoded::Parse { self.core.query_pairs(self.as_str()) } /// Change the method of the [`DID`]. #[inline] pub fn set_method(&mut self, value: impl AsRef<str>) { self.core.set_method(&mut self.data, value.as_ref()); } /// Change the method-specific-id of the [`DID`]. #[inline] pub fn set_method_id(&mut self, value: impl AsRef<str>) { self.core.set_method_id(&mut self.data, value.as_ref()); } /// Change the path of the [`DID`]. #[inline] pub fn set_path(&mut self, value: impl AsRef<str>) { self.core.set_path(&mut self.data, value.as_ref()); } /// Change the query of the [`DID`]. /// /// No serialization is performed. 
#[inline] pub fn set_query(&mut self, value: Option<&str>) { self.core.set_query(&mut self.data, value); } /// Change the fragment of the [`DID`]. /// /// No serialization is performed. #[inline] pub fn set_fragment(&mut self, value: Option<&str>) { self.core.set_fragment(&mut self.data, value); } /// Creates a new [`DID`] by joining `self` with the relative DID `other`. /// /// # Errors /// /// Returns `Err` if any base or relative DID segments are invalid. #[cfg(feature = "alloc")] pub fn join(&self, other: impl AsRef<str>) -> Result<Self> { let data: &str = other.as_ref(); let core: Core = Core::parse_relative(data)?; resolution::transform_references(self, (data, &core)) } } impl Hash for DID { fn hash<H>(&self, hasher: &mut H) where H: Hasher, { self.as_str().hash(hasher) } } impl PartialEq for DID { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } impl Eq for DID {} impl PartialOrd for DID { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_str().partial_cmp(other.as_str()) } } impl Ord for DID { fn cmp(&self, other: &Self) -> Ordering { self.as_str().cmp(other.as_str()) } } impl PartialEq<str> for DID { fn eq(&self, other: &str) -> bool { self.as_str() == other } } impl PartialEq<&'_ str> for DID { fn eq(&self, other: &&'_ str) -> bool { self == *other } } impl Debug for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{:?}", self.as_str())) } } impl Display for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{}", self.as_str())) } } impl AsRef<str> for DID { fn as_ref(&self) -> &str { self.data.as_ref() } } impl FromStr for DID { type Err = Error; fn from_str(string: &str) -> Result<Self, Self::Err> { Self::parse(string) } } #[cfg(feature = "alloc")] impl TryFrom<String> for DID { type Error = Error; fn try_from(other: String) -> Result<Self, Self::Error> { Self::parse(other) } } #[cfg(feature = "alloc")] impl From<DID> for String { fn from(other: DID) -> Self { other.into_string() } } // ============================================================================= // Reference Resolution // See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5 // ============================================================================= #[cfg(feature = "alloc")] mod resolution { use alloc::borrow::Cow; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::str::from_utf8_unchecked; use crate::core::Core; use crate::did::DID; use crate::error::Error; use crate::error::Result; #[derive(Debug)] #[repr(transparent)] pub struct Path<'a>(Cow<'a, str>); impl<'a> Path<'a> { pub const fn new() -> Self { Self(Cow::Borrowed("")) } pub fn push(&mut self, value: impl AsRef<[u8]>) { self .0 .to_mut() .push_str(unsafe { from_utf8_unchecked(value.as_ref()) }); } pub fn pop(&mut self) { if self.0.is_empty() { return; } if let Some(index) = self.0.rfind('/') { self.0.to_mut().replace_range(index.., ""); } } } impl<'a> From<Path<'a>> for Cow<'a, str> { fn from(other: Path<'a>) -> Self { other.0 } } impl Display for Path<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) } } /// Transform References. /// /// Transforms a DID reference into its target DID. 
/// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2) #[allow(non_snake_case)] pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> { let P: &str = core.path(data); let Q: Option<&str> = core.query(data); let mut T: DID = base.clone(); if P.is_empty() { T.set_path(base.path()); T.set_query(Q.or_else(|| base.query())); } else { if P.starts_with('/') { T.set_path(remove_dot_segments(P)); } else { T.set_path(remove_dot_segments(&merge_paths(base, P)?)); } T.set_query(Q); } T.set_method(base.method()); // TODO: Remove? This is inherited via clone T.set_method_id(base.method_id()); // TODO: Remove? This is inherited via clone T.set_fragment(core.fragment(data)); Ok(T) } /// Merge Paths. /// /// Merges a relative-path reference with the path of the base DID. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3) pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> { // Ensure the base DID has an authority component. // // The DID authority is `<method>:<method-specific-id>` so it should always // be present for non-relative DIDs. if base.method().is_empty() || base.method_id().is_empty() { return Err(Error::InvalidAuthority); } // 1. If the base URI has a defined authority component and an empty // path, then return a string consisting of "/" concatenated with the // reference's path. if base.path().is_empty() { return Ok(data.into()); } // 2. Return a string consisting of the reference's path component // appended to all but the last segment of the base URI's path (i.e., // excluding any characters after the right-most "/" in the base URI // path, or excluding the entire base URI path if it does not contain // any "/" characters). let mut path: &str = base.path(); if let Some(index) = path.rfind('/') { path = &path[..=index]; } Ok([path, data].join("").into()) } /// Remove Dot Segments. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4) pub fn remove_dot_segments(path: &str) -> Cow<str> { fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> { match input.as_ref() { [b'/', input @ ..] => next_segment(input).map(|index| index + 1), input => input.iter().position(|byte| *byte == b'/'), } } let mut output: Path = Path::new(); let mut input: &[u8] = path.as_bytes(); loop { match input { // Remove prefix ../ [b'.', b'.', b'/', ..] => { input = &input[3..]; } // Remove prefix ./ [b'.', b'/', ..] => { input = &input[2..]; } // Replace prefix /./ [b'/', b'.', b'/', ..] =>
// Replace prefix /. [b'/', b'.'] => { input = &input[..1]; } // Replace prefix /../ [b'/', b'.', b'.', b'/', ..] => { input = &input[3..]; output.pop(); } // Replace prefix /.. [b'/', b'.', b'.'] => { input = &input[..2]; output.pop(); } // Remove . [b'.'] => { input = &input[1..]; } // Remove .. [b'.', b'.'] => { input = &input[2..]; } _ => { if let Some(index) = next_segment(input) { output.push(&input[..index]); input = &input[index..]; } else { output.push(input); break; } } } } output.into() } }
{ input = &input[2..]; }
conditional_block
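The record above revolves around `remove_dot_segments`, the crate's byte-level implementation of RFC 3986 §5.2.4. To make the intended behaviour concrete, here is a standalone, segment-based re-implementation exercised against the RFC's own worked examples; it is an illustrative sketch (under the assumption that split-and-rejoin semantics match the byte matcher for these inputs), not a drop-in replacement.

// Segment-based dot-segment removal (RFC 3986 §5.2.4), for illustration only.
fn normalize(path: &str) -> String {
    let absolute = path.starts_with('/');
    let mut out: Vec<&str> = Vec::new();
    for seg in path.split('/') {
        match seg {
            "" | "." => {}         // empty and "." segments contribute nothing
            ".." => { out.pop(); } // ".." removes the previous segment, if any
            s => out.push(s),
        }
    }
    let mut result = out.join("/");
    if absolute {
        result.insert(0, '/');
    }
    // A trailing "/", "/." or "/.." leaves a trailing slash in the output.
    if (path.ends_with('/') || path.ends_with("/.") || path.ends_with("/.."))
        && !result.ends_with('/')
    {
        result.push('/');
    }
    result
}

fn main() {
    // The two worked examples from RFC 3986 §5.2.4:
    assert_eq!(normalize("/a/b/c/./../../g"), "/a/g");
    assert_eq!(normalize("mid/content=5/../6"), "mid/6");
}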
did.rs
#[cfg(feature = "alloc")] use alloc::string::String; #[cfg(feature = "alloc")] use alloc::string::ToString as _; use core::cmp::Ordering; use core::convert::TryFrom; use core::fmt::Debug; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::hash::Hash; use core::hash::Hasher; use core::str::FromStr; use crate::core::Core; use crate::error::Error; use crate::error::Result; #[derive(Clone, Copy)] pub struct Inspect<'a>(&'a DID); impl Debug for Inspect<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.debug_struct("DID") .field("method", &self.0.method()) .field("method_id", &self.0.method_id()) .field("path", &self.0.path()) .field("query", &self.0.query()) .field("fragment", &self.0.fragment()) .finish() } } /// A Decentralized Identifier (DID). /// /// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/) #[derive(Clone)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))] pub struct DID { data: String, core: Core, } impl DID { /// The URL scheme for Decentralized Identifiers. pub const SCHEME: &'static str = "did"; /// Parses a [`DID`] from the provided `input`. /// /// # Errors /// /// Returns `Err` if any DID segments are invalid. pub fn parse(input: impl AsRef<str>) -> Result<Self> { Ok(Self { data: input.as_ref().to_string(), core: Core::parse(input)?, }) } /// Returns a wrapped `DID` with a more detailed `Debug` implementation. #[inline] pub const fn inspect(&self) -> Inspect { Inspect(self) } /// Returns the serialized [`DID`]. /// /// This is fast since the serialized value is stored in the [`DID`]. #[inline] pub fn as_str(&self) -> &str { &*self.data } /// Consumes the [`DID`] and returns the serialization. #[cfg(feature = "alloc")] #[inline] pub fn into_string(self) -> String { self.data } /// Returns the [`DID`] scheme. See [`DID::SCHEME`]. #[inline] pub const fn scheme(&self) -> &'static str { DID::SCHEME } /// Returns the [`DID`] authority. #[inline] pub fn authority(&self) -> &str { self.core.authority(self.as_str()) } /// Returns the [`DID`] method name. #[inline] pub fn method(&self) -> &str { self.core.method(self.as_str()) } /// Returns the [`DID`] method-specific ID. #[inline] pub fn method_id(&self) -> &str { self.core.method_id(self.as_str()) } /// Returns the [`DID`] path. #[inline] pub fn path(&self) -> &str { self.core.path(self.as_str()) } /// Returns the [`DID`] method query, if any. #[inline] pub fn query(&self) -> Option<&str> { self.core.query(self.as_str()) } /// Returns the [`DID`] method fragment, if any. #[inline] pub fn fragment(&self) -> Option<&str> { self.core.fragment(self.as_str()) } /// Parses the [`DID`] query and returns an iterator of (key, value) pairs. #[inline] pub fn query_pairs(&self) -> form_urlencoded::Parse { self.core.query_pairs(self.as_str()) } /// Change the method of the [`DID`]. #[inline] pub fn set_method(&mut self, value: impl AsRef<str>) { self.core.set_method(&mut self.data, value.as_ref()); } /// Change the method-specific-id of the [`DID`]. #[inline] pub fn set_method_id(&mut self, value: impl AsRef<str>) { self.core.set_method_id(&mut self.data, value.as_ref()); } /// Change the path of the [`DID`]. #[inline] pub fn set_path(&mut self, value: impl AsRef<str>) { self.core.set_path(&mut self.data, value.as_ref()); } /// Change the query of the [`DID`]. /// /// No serialization is performed. 
#[inline] pub fn set_query(&mut self, value: Option<&str>) { self.core.set_query(&mut self.data, value); } /// Change the fragment of the [`DID`]. /// /// No serialization is performed. #[inline] pub fn set_fragment(&mut self, value: Option<&str>) { self.core.set_fragment(&mut self.data, value); } /// Creates a new [`DID`] by joining `self` with the relative DID `other`. /// /// # Errors /// /// Returns `Err` if any base or relative DID segments are invalid. #[cfg(feature = "alloc")] pub fn join(&self, other: impl AsRef<str>) -> Result<Self> { let data: &str = other.as_ref(); let core: Core = Core::parse_relative(data)?; resolution::transform_references(self, (data, &core)) } } impl Hash for DID { fn hash<H>(&self, hasher: &mut H) where H: Hasher, { self.as_str().hash(hasher) } } impl PartialEq for DID { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } impl Eq for DID {} impl PartialOrd for DID { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_str().partial_cmp(other.as_str()) } } impl Ord for DID { fn cmp(&self, other: &Self) -> Ordering { self.as_str().cmp(other.as_str()) } } impl PartialEq<str> for DID { fn eq(&self, other: &str) -> bool { self.as_str() == other } } impl PartialEq<&'_ str> for DID { fn eq(&self, other: &&'_ str) -> bool { self == *other } } impl Debug for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult
} impl Display for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{}", self.as_str())) } } impl AsRef<str> for DID { fn as_ref(&self) -> &str { self.data.as_ref() } } impl FromStr for DID { type Err = Error; fn from_str(string: &str) -> Result<Self, Self::Err> { Self::parse(string) } } #[cfg(feature = "alloc")] impl TryFrom<String> for DID { type Error = Error; fn try_from(other: String) -> Result<Self, Self::Error> { Self::parse(other) } } #[cfg(feature = "alloc")] impl From<DID> for String { fn from(other: DID) -> Self { other.into_string() } } // ============================================================================= // Reference Resolution // See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5 // ============================================================================= #[cfg(feature = "alloc")] mod resolution { use alloc::borrow::Cow; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::str::from_utf8_unchecked; use crate::core::Core; use crate::did::DID; use crate::error::Error; use crate::error::Result; #[derive(Debug)] #[repr(transparent)] pub struct Path<'a>(Cow<'a, str>); impl<'a> Path<'a> { pub const fn new() -> Self { Self(Cow::Borrowed("")) } pub fn push(&mut self, value: impl AsRef<[u8]>) { self .0 .to_mut() .push_str(unsafe { from_utf8_unchecked(value.as_ref()) }); } pub fn pop(&mut self) { if self.0.is_empty() { return; } if let Some(index) = self.0.rfind('/') { self.0.to_mut().replace_range(index.., ""); } } } impl<'a> From<Path<'a>> for Cow<'a, str> { fn from(other: Path<'a>) -> Self { other.0 } } impl Display for Path<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) } } /// Transform References. /// /// Transforms a DID reference into its target DID. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2) #[allow(non_snake_case)] pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> { let P: &str = core.path(data); let Q: Option<&str> = core.query(data); let mut T: DID = base.clone(); if P.is_empty() { T.set_path(base.path()); T.set_query(Q.or_else(|| base.query())); } else { if P.starts_with('/') { T.set_path(remove_dot_segments(P)); } else { T.set_path(remove_dot_segments(&merge_paths(base, P)?)); } T.set_query(Q); } T.set_method(base.method()); // TODO: Remove? This in inherited via clone T.set_method_id(base.method_id()); // TODO: Remove? This in inherited via clone T.set_fragment(core.fragment(data)); Ok(T) } /// Merge Paths. /// /// Merges a relative-path reference with the path of the base DID. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3) pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> { // Ensure the base DID has an authority component. // // The DID authority is `<method>:<method-specific-id>` so it should always // be present for non-relative DIDs. if base.method().is_empty() || base.method_id().is_empty() { return Err(Error::InvalidAuthority); } // 1. If the base URI has a defined authority component and an empty // path, then return a string consisting of "/" concatenated with the // reference's path. if base.path().is_empty() { return Ok(data.into()); } // 2. 
Return a string consisting of the reference's path component // appended to all but the last segment of the base URI's path (i.e., // excluding any characters after the right-most "/" in the base URI // path, or excluding the entire base URI path if it does not contain // any "/" characters). let mut path: &str = base.path(); if let Some(index) = path.rfind('/') { path = &path[..=index]; } Ok([path, data].join("").into()) } /// Remove Dot Segments. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4) pub fn remove_dot_segments(path: &str) -> Cow<str> { fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> { match input.as_ref() { [b'/', input @..] => next_segment(input).map(|index| index + 1), input => input.iter().position(|byte| *byte == b'/'), } } let mut output: Path = Path::new(); let mut input: &[u8] = path.as_bytes(); loop { match input { // Remove prefix../ [b'.', b'.', b'/',..] => { input = &input[3..]; } // Remove prefix./ [b'.', b'/',..] => { input = &input[2..]; } // Replace prefix /./ [b'/', b'.', b'/',..] => { input = &input[2..]; } // Replace prefix /. [b'/', b'.'] => { input = &input[..1]; } // Replace prefix /../ [b'/', b'.', b'.', b'/',..] => { input = &input[3..]; output.pop(); } // Replace prefix /.. [b'/', b'.', b'.'] => { input = &input[..2]; output.pop(); } // Remove. [b'.'] => { input = &input[1..]; } // Remove.. [b'.', b'.'] => { input = &input[2..]; } _ => { if let Some(index) = next_segment(input) { output.push(&input[..index]); input = &input[index..]; } else { output.push(input); break; } } } } output.into() } }
{ f.write_fmt(format_args!("{:?}", self.as_str())) }
identifier_body
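One detail worth noting in the impls above: `Hash`, `PartialEq`, `Ord` and the `PartialEq<str>` shims all delegate to `as_str()`, so hashed and ordered collections stay consistent with plain string comparison. The sketch below demonstrates the pattern on a hypothetical stand-in wrapper (`Id` is illustrative, not the crate's type).

use std::collections::HashSet;
use std::hash::{Hash, Hasher};

#[derive(Clone, Debug)]
struct Id {
    data: String,
}

impl Id {
    fn as_str(&self) -> &str {
        &self.data
    }
}

// Equality and hashing both look at the same serialized view,
// which is what keeps `HashSet`/`HashMap` lookups coherent.
impl PartialEq for Id {
    fn eq(&self, other: &Self) -> bool {
        self.as_str() == other.as_str()
    }
}
impl Eq for Id {}
impl Hash for Id {
    fn hash<H: Hasher>(&self, h: &mut H) {
        self.as_str().hash(h)
    }
}
impl PartialEq<str> for Id {
    fn eq(&self, other: &str) -> bool {
        self.as_str() == other
    }
}

fn main() {
    let a = Id { data: "did:example:123".to_string() };
    let b = a.clone();
    let mut set = HashSet::new();
    set.insert(a);
    assert!(set.contains(&b));        // Hash and Eq agree
    assert!(b == *"did:example:123"); // comparable against plain strings
}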
did.rs
#[cfg(feature = "alloc")] use alloc::string::String; #[cfg(feature = "alloc")] use alloc::string::ToString as _; use core::cmp::Ordering; use core::convert::TryFrom; use core::fmt::Debug; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::hash::Hash; use core::hash::Hasher; use core::str::FromStr; use crate::core::Core; use crate::error::Error; use crate::error::Result; #[derive(Clone, Copy)] pub struct Inspect<'a>(&'a DID); impl Debug for Inspect<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.debug_struct("DID") .field("method", &self.0.method()) .field("method_id", &self.0.method_id()) .field("path", &self.0.path()) .field("query", &self.0.query()) .field("fragment", &self.0.fragment()) .finish() } } /// A Decentralized Identifier (DID). /// /// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/) #[derive(Clone)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))] pub struct DID { data: String, core: Core, } impl DID { /// The URL scheme for Decentralized Identifiers. pub const SCHEME: &'static str = "did"; /// Parses a [`DID`] from the provided `input`. /// /// # Errors /// /// Returns `Err` if any DID segments are invalid. pub fn parse(input: impl AsRef<str>) -> Result<Self> { Ok(Self { data: input.as_ref().to_string(), core: Core::parse(input)?, }) } /// Returns a wrapped `DID` with a more detailed `Debug` implementation. #[inline] pub const fn inspect(&self) -> Inspect { Inspect(self) } /// Returns the serialized [`DID`]. /// /// This is fast since the serialized value is stored in the [`DID`]. #[inline] pub fn as_str(&self) -> &str { &*self.data } /// Consumes the [`DID`] and returns the serialization. #[cfg(feature = "alloc")] #[inline] pub fn into_string(self) -> String { self.data } /// Returns the [`DID`] scheme. See [`DID::SCHEME`]. #[inline] pub const fn scheme(&self) -> &'static str { DID::SCHEME } /// Returns the [`DID`] authority. #[inline] pub fn authority(&self) -> &str { self.core.authority(self.as_str()) } /// Returns the [`DID`] method name. #[inline] pub fn method(&self) -> &str { self.core.method(self.as_str()) } /// Returns the [`DID`] method-specific ID. #[inline] pub fn method_id(&self) -> &str { self.core.method_id(self.as_str()) } /// Returns the [`DID`] path.
self.core.path(self.as_str()) } /// Returns the [`DID`] method query, if any. #[inline] pub fn query(&self) -> Option<&str> { self.core.query(self.as_str()) } /// Returns the [`DID`] method fragment, if any. #[inline] pub fn fragment(&self) -> Option<&str> { self.core.fragment(self.as_str()) } /// Parses the [`DID`] query and returns an iterator of (key, value) pairs. #[inline] pub fn query_pairs(&self) -> form_urlencoded::Parse { self.core.query_pairs(self.as_str()) } /// Change the method of the [`DID`]. #[inline] pub fn set_method(&mut self, value: impl AsRef<str>) { self.core.set_method(&mut self.data, value.as_ref()); } /// Change the method-specific-id of the [`DID`]. #[inline] pub fn set_method_id(&mut self, value: impl AsRef<str>) { self.core.set_method_id(&mut self.data, value.as_ref()); } /// Change the path of the [`DID`]. #[inline] pub fn set_path(&mut self, value: impl AsRef<str>) { self.core.set_path(&mut self.data, value.as_ref()); } /// Change the query of the [`DID`]. /// /// No serialization is performed. #[inline] pub fn set_query(&mut self, value: Option<&str>) { self.core.set_query(&mut self.data, value); } /// Change the fragment of the [`DID`]. /// /// No serialization is performed. #[inline] pub fn set_fragment(&mut self, value: Option<&str>) { self.core.set_fragment(&mut self.data, value); } /// Creates a new [`DID`] by joining `self` with the relative DID `other`. /// /// # Errors /// /// Returns `Err` if any base or relative DID segments are invalid. #[cfg(feature = "alloc")] pub fn join(&self, other: impl AsRef<str>) -> Result<Self> { let data: &str = other.as_ref(); let core: Core = Core::parse_relative(data)?; resolution::transform_references(self, (data, &core)) } } impl Hash for DID { fn hash<H>(&self, hasher: &mut H) where H: Hasher, { self.as_str().hash(hasher) } } impl PartialEq for DID { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } impl Eq for DID {} impl PartialOrd for DID { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_str().partial_cmp(other.as_str()) } } impl Ord for DID { fn cmp(&self, other: &Self) -> Ordering { self.as_str().cmp(other.as_str()) } } impl PartialEq<str> for DID { fn eq(&self, other: &str) -> bool { self.as_str() == other } } impl PartialEq<&'_ str> for DID { fn eq(&self, other: &&'_ str) -> bool { self == *other } } impl Debug for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{:?}", self.as_str())) } } impl Display for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{}", self.as_str())) } } impl AsRef<str> for DID { fn as_ref(&self) -> &str { self.data.as_ref() } } impl FromStr for DID { type Err = Error; fn from_str(string: &str) -> Result<Self, Self::Err> { Self::parse(string) } } #[cfg(feature = "alloc")] impl TryFrom<String> for DID { type Error = Error; fn try_from(other: String) -> Result<Self, Self::Error> { Self::parse(other) } } #[cfg(feature = "alloc")] impl From<DID> for String { fn from(other: DID) -> Self { other.into_string() } } // ============================================================================= // Reference Resolution // See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5 // ============================================================================= #[cfg(feature = "alloc")] mod resolution { use alloc::borrow::Cow; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::str::from_utf8_unchecked; use crate::core::Core; use crate::did::DID; use 
crate::error::Error; use crate::error::Result; #[derive(Debug)] #[repr(transparent)] pub struct Path<'a>(Cow<'a, str>); impl<'a> Path<'a> { pub const fn new() -> Self { Self(Cow::Borrowed("")) } pub fn push(&mut self, value: impl AsRef<[u8]>) { self .0 .to_mut() .push_str(unsafe { from_utf8_unchecked(value.as_ref()) }); } pub fn pop(&mut self) { if self.0.is_empty() { return; } if let Some(index) = self.0.rfind('/') { self.0.to_mut().replace_range(index.., ""); } } } impl<'a> From<Path<'a>> for Cow<'a, str> { fn from(other: Path<'a>) -> Self { other.0 } } impl Display for Path<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) } } /// Transform References. /// /// Transforms a DID reference into its target DID. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2) #[allow(non_snake_case)] pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> { let P: &str = core.path(data); let Q: Option<&str> = core.query(data); let mut T: DID = base.clone(); if P.is_empty() { T.set_path(base.path()); T.set_query(Q.or_else(|| base.query())); } else { if P.starts_with('/') { T.set_path(remove_dot_segments(P)); } else { T.set_path(remove_dot_segments(&merge_paths(base, P)?)); } T.set_query(Q); } T.set_method(base.method()); // TODO: Remove? This in inherited via clone T.set_method_id(base.method_id()); // TODO: Remove? This in inherited via clone T.set_fragment(core.fragment(data)); Ok(T) } /// Merge Paths. /// /// Merges a relative-path reference with the path of the base DID. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3) pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> { // Ensure the base DID has an authority component. // // The DID authority is `<method>:<method-specific-id>` so it should always // be present for non-relative DIDs. if base.method().is_empty() || base.method_id().is_empty() { return Err(Error::InvalidAuthority); } // 1. If the base URI has a defined authority component and an empty // path, then return a string consisting of "/" concatenated with the // reference's path. if base.path().is_empty() { return Ok(data.into()); } // 2. Return a string consisting of the reference's path component // appended to all but the last segment of the base URI's path (i.e., // excluding any characters after the right-most "/" in the base URI // path, or excluding the entire base URI path if it does not contain // any "/" characters). let mut path: &str = base.path(); if let Some(index) = path.rfind('/') { path = &path[..=index]; } Ok([path, data].join("").into()) } /// Remove Dot Segments. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4) pub fn remove_dot_segments(path: &str) -> Cow<str> { fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> { match input.as_ref() { [b'/', input @..] => next_segment(input).map(|index| index + 1), input => input.iter().position(|byte| *byte == b'/'), } } let mut output: Path = Path::new(); let mut input: &[u8] = path.as_bytes(); loop { match input { // Remove prefix../ [b'.', b'.', b'/',..] => { input = &input[3..]; } // Remove prefix./ [b'.', b'/',..] => { input = &input[2..]; } // Replace prefix /./ [b'/', b'.', b'/',..] => { input = &input[2..]; } // Replace prefix /. [b'/', b'.'] => { input = &input[..1]; } // Replace prefix /../ [b'/', b'.', b'.', b'/',..] => { input = &input[3..]; output.pop(); } // Replace prefix /.. [b'/', b'.', b'.'] => { input = &input[..2]; output.pop(); } // Remove. 
[b'.'] => { input = &input[1..]; } // Remove.. [b'.', b'.'] => { input = &input[2..]; } _ => { if let Some(index) = next_segment(input) { output.push(&input[..index]); input = &input[index..]; } else { output.push(input); break; } } } } output.into() } }
#[inline] pub fn path(&self) -> &str {
random_line_split
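The `merge_paths` routine in the record above follows RFC 3986 §5.2.3. The standalone sketch below restates that rule on plain strings for clarity; note that it follows the RFC text literally for the empty-base-path case (prepending "/"), whereas the crate's version returns the reference unchanged at that step. All names here are illustrative.

// RFC 3986 §5.2.3 path merging, restated on plain strings.
fn merge(base_path: &str, reference: &str) -> String {
    // 1. Base has an authority and an empty path: "/" + reference.
    if base_path.is_empty() {
        return format!("/{}", reference);
    }
    // 2. Otherwise the reference replaces everything after the
    //    right-most '/' of the base path (or the whole path if
    //    the base contains no '/').
    match base_path.rfind('/') {
        Some(i) => format!("{}{}", &base_path[..=i], reference),
        None => reference.to_string(),
    }
}

fn main() {
    assert_eq!(merge("", "g"), "/g");
    assert_eq!(merge("/b/c/d", "g"), "/b/c/g");
    assert_eq!(merge("b", "g"), "g");
}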
did.rs
#[cfg(feature = "alloc")] use alloc::string::String; #[cfg(feature = "alloc")] use alloc::string::ToString as _; use core::cmp::Ordering; use core::convert::TryFrom; use core::fmt::Debug; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::hash::Hash; use core::hash::Hasher; use core::str::FromStr; use crate::core::Core; use crate::error::Error; use crate::error::Result; #[derive(Clone, Copy)] pub struct Inspect<'a>(&'a DID); impl Debug for Inspect<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.debug_struct("DID") .field("method", &self.0.method()) .field("method_id", &self.0.method_id()) .field("path", &self.0.path()) .field("query", &self.0.query()) .field("fragment", &self.0.fragment()) .finish() } } /// A Decentralized Identifier (DID). /// /// [More Info (W3C DID Core)](https://www.w3.org/TR/did-core/) #[derive(Clone)] #[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] #[cfg_attr(feature = "serde", serde(into = "String", try_from = "String"))] pub struct DID { data: String, core: Core, } impl DID { /// The URL scheme for Decentralized Identifiers. pub const SCHEME: &'static str = "did"; /// Parses a [`DID`] from the provided `input`. /// /// # Errors /// /// Returns `Err` if any DID segments are invalid. pub fn parse(input: impl AsRef<str>) -> Result<Self> { Ok(Self { data: input.as_ref().to_string(), core: Core::parse(input)?, }) } /// Returns a wrapped `DID` with a more detailed `Debug` implementation. #[inline] pub const fn inspect(&self) -> Inspect { Inspect(self) } /// Returns the serialized [`DID`]. /// /// This is fast since the serialized value is stored in the [`DID`]. #[inline] pub fn as_str(&self) -> &str { &*self.data } /// Consumes the [`DID`] and returns the serialization. #[cfg(feature = "alloc")] #[inline] pub fn into_string(self) -> String { self.data } /// Returns the [`DID`] scheme. See [`DID::SCHEME`]. #[inline] pub const fn scheme(&self) -> &'static str { DID::SCHEME } /// Returns the [`DID`] authority. #[inline] pub fn authority(&self) -> &str { self.core.authority(self.as_str()) } /// Returns the [`DID`] method name. #[inline] pub fn method(&self) -> &str { self.core.method(self.as_str()) } /// Returns the [`DID`] method-specific ID. #[inline] pub fn method_id(&self) -> &str { self.core.method_id(self.as_str()) } /// Returns the [`DID`] path. #[inline] pub fn path(&self) -> &str { self.core.path(self.as_str()) } /// Returns the [`DID`] method query, if any. #[inline] pub fn query(&self) -> Option<&str> { self.core.query(self.as_str()) } /// Returns the [`DID`] method fragment, if any. #[inline] pub fn fragment(&self) -> Option<&str> { self.core.fragment(self.as_str()) } /// Parses the [`DID`] query and returns an iterator of (key, value) pairs. #[inline] pub fn query_pairs(&self) -> form_urlencoded::Parse { self.core.query_pairs(self.as_str()) } /// Change the method of the [`DID`]. #[inline] pub fn set_method(&mut self, value: impl AsRef<str>) { self.core.set_method(&mut self.data, value.as_ref()); } /// Change the method-specific-id of the [`DID`]. #[inline] pub fn set_method_id(&mut self, value: impl AsRef<str>) { self.core.set_method_id(&mut self.data, value.as_ref()); } /// Change the path of the [`DID`]. #[inline] pub fn set_path(&mut self, value: impl AsRef<str>) { self.core.set_path(&mut self.data, value.as_ref()); } /// Change the query of the [`DID`]. /// /// No serialization is performed. 
#[inline] pub fn set_query(&mut self, value: Option<&str>) { self.core.set_query(&mut self.data, value); } /// Change the fragment of the [`DID`]. /// /// No serialization is performed. #[inline] pub fn set_fragment(&mut self, value: Option<&str>) { self.core.set_fragment(&mut self.data, value); } /// Creates a new [`DID`] by joining `self` with the relative DID `other`. /// /// # Errors /// /// Returns `Err` if any base or relative DID segments are invalid. #[cfg(feature = "alloc")] pub fn join(&self, other: impl AsRef<str>) -> Result<Self> { let data: &str = other.as_ref(); let core: Core = Core::parse_relative(data)?; resolution::transform_references(self, (data, &core)) } } impl Hash for DID { fn hash<H>(&self, hasher: &mut H) where H: Hasher, { self.as_str().hash(hasher) } } impl PartialEq for DID { fn eq(&self, other: &Self) -> bool { self.as_str() == other.as_str() } } impl Eq for DID {} impl PartialOrd for DID { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.as_str().partial_cmp(other.as_str()) } } impl Ord for DID { fn cmp(&self, other: &Self) -> Ordering { self.as_str().cmp(other.as_str()) } } impl PartialEq<str> for DID { fn eq(&self, other: &str) -> bool { self.as_str() == other } } impl PartialEq<&'_ str> for DID { fn eq(&self, other: &&'_ str) -> bool { self == *other } } impl Debug for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{:?}", self.as_str())) } } impl Display for DID { fn fmt(&self, f: &mut Formatter) -> FmtResult { f.write_fmt(format_args!("{}", self.as_str())) } } impl AsRef<str> for DID { fn as_ref(&self) -> &str { self.data.as_ref() } } impl FromStr for DID { type Err = Error; fn from_str(string: &str) -> Result<Self, Self::Err> { Self::parse(string) } } #[cfg(feature = "alloc")] impl TryFrom<String> for DID { type Error = Error; fn try_from(other: String) -> Result<Self, Self::Error> { Self::parse(other) } } #[cfg(feature = "alloc")] impl From<DID> for String { fn from(other: DID) -> Self { other.into_string() } } // ============================================================================= // Reference Resolution // See RFC 3986 - https://tools.ietf.org/html/rfc3986#section-5 // ============================================================================= #[cfg(feature = "alloc")] mod resolution { use alloc::borrow::Cow; use core::fmt::Display; use core::fmt::Formatter; use core::fmt::Result as FmtResult; use core::str::from_utf8_unchecked; use crate::core::Core; use crate::did::DID; use crate::error::Error; use crate::error::Result; #[derive(Debug)] #[repr(transparent)] pub struct Path<'a>(Cow<'a, str>); impl<'a> Path<'a> { pub const fn new() -> Self { Self(Cow::Borrowed("")) } pub fn push(&mut self, value: impl AsRef<[u8]>) { self .0 .to_mut() .push_str(unsafe { from_utf8_unchecked(value.as_ref()) }); } pub fn pop(&mut self) { if self.0.is_empty() { return; } if let Some(index) = self.0.rfind('/') { self.0.to_mut().replace_range(index.., ""); } } } impl<'a> From<Path<'a>> for Cow<'a, str> { fn from(other: Path<'a>) -> Self { other.0 } } impl Display for Path<'_> { fn fmt(&self, f: &mut Formatter) -> FmtResult { Display::fmt(&self.0, f) } } /// Transform References. /// /// Transforms a DID reference into its target DID. 
/// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.2) #[allow(non_snake_case)] pub fn transform_references(base: &DID, (data, core): (&str, &Core)) -> Result<DID> { let P: &str = core.path(data); let Q: Option<&str> = core.query(data); let mut T: DID = base.clone(); if P.is_empty() { T.set_path(base.path()); T.set_query(Q.or_else(|| base.query())); } else { if P.starts_with('/') { T.set_path(remove_dot_segments(P)); } else { T.set_path(remove_dot_segments(&merge_paths(base, P)?)); } T.set_query(Q); } T.set_method(base.method()); // TODO: Remove? This in inherited via clone T.set_method_id(base.method_id()); // TODO: Remove? This in inherited via clone T.set_fragment(core.fragment(data)); Ok(T) } /// Merge Paths. /// /// Merges a relative-path reference with the path of the base DID. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.3) pub fn merge_paths<'a>(base: &'a DID, data: &'a str) -> Result<Cow<'a, str>> { // Ensure the base DID has an authority component. // // The DID authority is `<method>:<method-specific-id>` so it should always // be present for non-relative DIDs. if base.method().is_empty() || base.method_id().is_empty() { return Err(Error::InvalidAuthority); } // 1. If the base URI has a defined authority component and an empty // path, then return a string consisting of "/" concatenated with the // reference's path. if base.path().is_empty() { return Ok(data.into()); } // 2. Return a string consisting of the reference's path component // appended to all but the last segment of the base URI's path (i.e., // excluding any characters after the right-most "/" in the base URI // path, or excluding the entire base URI path if it does not contain // any "/" characters). let mut path: &str = base.path(); if let Some(index) = path.rfind('/') { path = &path[..=index]; } Ok([path, data].join("").into()) } /// Remove Dot Segments. /// /// [More Info](https://tools.ietf.org/html/rfc3986#section-5.2.4) pub fn
(path: &str) -> Cow<str> { fn next_segment(input: impl AsRef<[u8]>) -> Option<usize> { match input.as_ref() { [b'/', input @..] => next_segment(input).map(|index| index + 1), input => input.iter().position(|byte| *byte == b'/'), } } let mut output: Path = Path::new(); let mut input: &[u8] = path.as_bytes(); loop { match input { // Remove prefix../ [b'.', b'.', b'/',..] => { input = &input[3..]; } // Remove prefix./ [b'.', b'/',..] => { input = &input[2..]; } // Replace prefix /./ [b'/', b'.', b'/',..] => { input = &input[2..]; } // Replace prefix /. [b'/', b'.'] => { input = &input[..1]; } // Replace prefix /../ [b'/', b'.', b'.', b'/',..] => { input = &input[3..]; output.pop(); } // Replace prefix /.. [b'/', b'.', b'.'] => { input = &input[..2]; output.pop(); } // Remove. [b'.'] => { input = &input[1..]; } // Remove.. [b'.', b'.'] => { input = &input[2..]; } _ => { if let Some(index) = next_segment(input) { output.push(&input[..index]); input = &input[index..]; } else { output.push(input); break; } } } } output.into() } }
remove_dot_segments
identifier_name
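`DID::query_pairs` above returns `form_urlencoded::Parse`, the percent-decoding pair iterator from the `form_urlencoded` crate (part of the `url` project). A small usage sketch follows, assuming that crate is available as a dependency; the query string itself is made up for illustration.

fn main() {
    let query = "service=agent&relative-ref=%2Fsome%2Fpath";
    // Each item is a (Cow<str>, Cow<str>) pair with percent-decoding applied.
    for (key, value) in form_urlencoded::parse(query.as_bytes()) {
        println!("{} = {}", key, value);
    }
    // `into_owned()` upgrades the borrowed pairs to (String, String).
    let pairs: Vec<(String, String)> = form_urlencoded::parse(query.as_bytes())
        .into_owned()
        .collect();
    assert_eq!(pairs[0], ("service".to_string(), "agent".to_string()));
    assert_eq!(pairs[1].1, "/some/path");
}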
main.rs
use ash::extensions::{DebugReport, Surface, Swapchain}; #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] use ash::extensions::{WaylandSurface, XlibSurface}; use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0, V1_0}; use ash::vk::Image; use ash::vk::PhysicalDevice; use ash::vk::Semaphore; use ash::vk::SwapchainKHR; use ash::Entry; use ash::{vk, vk_make_version, Device, Instance}; use std::ffi::{CStr, CString}; use std::ptr; const WIDTH: f64 = 800.0; const HEIGHT: f64 = 600.0; fn main() { let entry = create_entry(); let instance = create_instance(&entry); let physical_device = pick_physical_device(&instance); let props = instance.get_physical_device_properties(physical_device); println!("GPU chosen: {:?}", &unsafe { CStr::from_ptr(&props.device_name[0]) }); let device = create_device(&instance, &physical_device); let queue_family_index: u32 = 0; let present_queue = unsafe { device.get_device_queue(queue_family_index, 0) }; let mut events_loop = winit::EventsLoop::new(); let window = winit::WindowBuilder::new() .with_title("Ash - Example") .with_dimensions(winit::dpi::LogicalSize { width: WIDTH, height: HEIGHT, }) .build(&events_loop) .unwrap(); let (swapchain_loader, swapchain) = unsafe { create_swapchain(&entry, &instance, &window, physical_device, &device).unwrap() }; let present_images = unsafe { get_present_images(&swapchain_loader, swapchain).unwrap() }; let present_complete_semaphore = unsafe { create_semaphore(&device).unwrap() }; let rendering_complete_semaphore = unsafe { create_semaphore(&device).unwrap() }; let mut closed = false; while!closed { events_loop.poll_events(|event| match event { winit::Event::WindowEvent { event,.. } => match event { winit::WindowEvent::CloseRequested => closed = true, _ => {} }, _ => {} }); let present_index = unsafe { swapchain_loader .acquire_next_image_khr( swapchain, std::u64::MAX, present_complete_semaphore, vk::Fence::null(), ) .unwrap() }; let present_info = vk::PresentInfoKHR { s_type: vk::StructureType::PresentInfoKhr, p_next: ptr::null(), wait_semaphore_count: 0, // p_wait_semaphores: &rendering_complete_semaphore, p_wait_semaphores: ptr::null(), swapchain_count: 1, p_swapchains: &swapchain, p_image_indices: &present_index, p_results: ptr::null_mut(), }; unsafe { swapchain_loader .queue_present_khr(present_queue, &present_info) .unwrap(); } } } fn create_entry() -> Entry<V1_0> { Entry::new().unwrap() } fn create_instance(entry: &Entry<V1_0>) -> Instance<V1_0> { let app_name = CString::new("Niagara-rs").unwrap(); let raw_name = app_name.as_ptr(); let appinfo = vk::ApplicationInfo { s_type: vk::StructureType::ApplicationInfo, api_version: vk_make_version!(1, 0, 36), p_application_name: raw_name, p_engine_name: raw_name, application_version: 0, engine_version: 0, p_next: ptr::null(), }; let layer_names = [CString::new("VK_LAYER_LUNARG_standard_validation").unwrap()]; let layers_names_raw: Vec<*const i8> = layer_names .iter() .map(|raw_name| raw_name.as_ptr()) .collect(); let extension_names_raw = extension_names(); let create_info = vk::InstanceCreateInfo { s_type: vk::StructureType::InstanceCreateInfo, p_next: ptr::null(), flags: Default::default(), p_application_info: &appinfo, pp_enabled_layer_names: layers_names_raw.as_ptr(), enabled_layer_count: layers_names_raw.len() as u32, pp_enabled_extension_names: extension_names_raw.as_ptr(), enabled_extension_count: extension_names_raw.len() as u32, }; unsafe { let instance = entry .create_instance(&create_info, None) .expect("Instance creation error"); let debug_info = 
vk::DebugReportCallbackCreateInfoEXT { s_type: vk::StructureType::DebugReportCallbackCreateInfoExt, p_next: ptr::null(), flags: vk::DEBUG_REPORT_ERROR_BIT_EXT | vk::DEBUG_REPORT_WARNING_BIT_EXT | vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, pfn_callback: vulkan_debug_callback, p_user_data: ptr::null_mut(), }; let debug_report_loader = DebugReport::new(entry, &instance).expect("Unable to load debug report"); let _debug_call_back = debug_report_loader .create_debug_report_callback_ext(&debug_info, None) .unwrap(); return instance; } } fn pick_physical_device(instance: &Instance<V1_0>) -> vk::PhysicalDevice { let physical_devices = instance .enumerate_physical_devices() .expect("Physical device error"); if physical_devices.len() == 0 { panic!("No GPU found!"); } let physical_device = physical_devices .iter() .max_by_key(|physical_device| { let props = instance.get_physical_device_properties(**physical_device); match props.device_type { vk::PhysicalDeviceType::DiscreteGpu => 2, vk::PhysicalDeviceType::IntegratedGpu => 1, _ => 0, } }) .expect("No suitable device found!"); return *physical_device; } #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] fn extension_names() -> Vec<*const i8> { vec![ Surface::name().as_ptr(), XlibSurface::name().as_ptr(), DebugReport::name().as_ptr(), ] } fn create_device(instance: &Instance<V1_0>, physical_device: &vk::PhysicalDevice) -> Device<V1_0> { let queue_family_index = 0 as u32; let priorities = [1.0]; let queue_info = vk::types::DeviceQueueCreateInfo { s_type: vk::StructureType::DeviceQueueCreateInfo, p_next: ptr::null(), flags: Default::default(), queue_family_index: queue_family_index as u32, p_queue_priorities: priorities.as_ptr(), queue_count: priorities.len() as u32, }; let device_extension_names_raw = [Swapchain::name().as_ptr()]; let features = vk::PhysicalDeviceFeatures { shader_clip_distance: 1, ..Default::default() }; let device_create_info = vk::DeviceCreateInfo { s_type: vk::StructureType::DeviceCreateInfo, p_next: ptr::null(), flags: Default::default(), p_queue_create_infos: &queue_info, queue_create_info_count: 1, pp_enabled_layer_names: ptr::null(), enabled_layer_count: 0, pp_enabled_extension_names: device_extension_names_raw.as_ptr(), enabled_extension_count: device_extension_names_raw.len() as u32, p_enabled_features: &features, }; unsafe { let device: Device<V1_0> = instance .create_device(*physical_device, &device_create_info, None) .unwrap(); return device; } } #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] unsafe fn create_surface<E: EntryV1_0, I: InstanceV1_0>( entry: &E, instance: &I, window: &winit::Window, ) -> Result<vk::SurfaceKHR, vk::Result> { use winit::os::unix::WindowExt; let x11_display = window.get_xlib_display().unwrap(); let x11_window = window.get_xlib_window().unwrap(); let x11_create_info = vk::XlibSurfaceCreateInfoKHR { s_type: vk::StructureType::XlibSurfaceCreateInfoKhr, p_next: ptr::null(), flags: Default::default(), window: x11_window as vk::Window, dpy: x11_display as *mut vk::Display, }; let xlib_surface_loader = XlibSurface::new(entry, instance).expect("Unable to load xlib surface"); xlib_surface_loader.create_xlib_surface_khr(&x11_create_info, None) } unsafe fn
( entry: &Entry<V1_0>, instance: &Instance<V1_0>, window: &winit::Window, physical_device: PhysicalDevice, device: &Device<V1_0>, ) -> Result<(Swapchain, SwapchainKHR), vk::Result> { let surface = create_surface(entry, instance, window).unwrap(); let surface_loader = Surface::new(entry, instance).expect("Unable to load the Surface extension"); let surface_formats = surface_loader .get_physical_device_surface_formats_khr(physical_device, surface) .unwrap(); let surface_format = surface_formats .iter() .map(|sfmt| match sfmt.format { vk::Format::Undefined => vk::SurfaceFormatKHR { format: vk::Format::B8g8r8Unorm, color_space: sfmt.color_space, }, _ => sfmt.clone(), }) .nth(0) .expect("Unable to find suitable surface format."); let surface_capabilities = surface_loader .get_physical_device_surface_capabilities_khr(physical_device, surface) .unwrap(); let mut desired_image_count = surface_capabilities.min_image_count + 1; if surface_capabilities.max_image_count > 0 && desired_image_count > surface_capabilities.max_image_count { desired_image_count = surface_capabilities.max_image_count; } let surface_resolution = match surface_capabilities.current_extent.width { std::u32::MAX => vk::Extent2D { width: WIDTH as u32, height: HEIGHT as u32, }, _ => surface_capabilities.current_extent, }; let pre_transform = if surface_capabilities .supported_transforms .subset(vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR } else { surface_capabilities.current_transform }; let present_modes = surface_loader .get_physical_device_surface_present_modes_khr(physical_device, surface) .unwrap(); let present_mode = present_modes .iter() .cloned() .find(|&mode| mode == vk::PresentModeKHR::Mailbox) .unwrap_or(vk::PresentModeKHR::Fifo); let swapchain_create_info = vk::SwapchainCreateInfoKHR { s_type: vk::StructureType::SwapchainCreateInfoKhr, p_next: ptr::null(), flags: Default::default(), surface: surface, min_image_count: desired_image_count, image_color_space: surface_format.color_space, image_format: surface_format.format, image_extent: surface_resolution.clone(), image_usage: vk::IMAGE_USAGE_COLOR_ATTACHMENT_BIT, image_sharing_mode: vk::SharingMode::Exclusive, pre_transform: pre_transform, composite_alpha: vk::COMPOSITE_ALPHA_OPAQUE_BIT_KHR, present_mode: present_mode, clipped: 1, old_swapchain: vk::SwapchainKHR::null(), image_array_layers: 1, p_queue_family_indices: ptr::null(), queue_family_index_count: 0, }; let swapchain_loader = Swapchain::new(instance, device).expect("Unable to load swapchain"); let swapchain = swapchain_loader .create_swapchain_khr(&swapchain_create_info, None) .unwrap(); Ok((swapchain_loader, swapchain)) } unsafe fn get_present_images( swapchain_loader: &Swapchain, swapchain: SwapchainKHR, ) -> Result<Vec<Image>, vk::Result> { swapchain_loader.get_swapchain_images_khr(swapchain) } unsafe fn create_semaphore(device: &Device<V1_0>) -> Result<Semaphore, vk::Result> { let semaphore_create_info = vk::SemaphoreCreateInfo { s_type: vk::StructureType::SemaphoreCreateInfo, p_next: ptr::null(), flags: Default::default(), }; device.create_semaphore(&semaphore_create_info, None) } unsafe extern "system" fn vulkan_debug_callback( _: vk::DebugReportFlagsEXT, _: vk::DebugReportObjectTypeEXT, _: vk::uint64_t, _: vk::size_t, _: vk::int32_t, _: *const vk::c_char, p_message: *const vk::c_char, _: *mut vk::c_void, ) -> u32 { println!("{:?}", CStr::from_ptr(p_message)); vk::VK_FALSE }
create_swapchain
identifier_name
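`pick_physical_device` in the file above ranks GPUs with `max_by_key`, so a discrete GPU beats an integrated one and anything else is a last resort. The same selection pattern is shown standalone below; `DeviceType` is a stand-in for `vk::PhysicalDeviceType`, not the ash type.

#[derive(Debug, Clone, Copy, PartialEq)]
enum DeviceType {
    DiscreteGpu,
    IntegratedGpu,
    Cpu,
}

// Higher score wins; unknown device types rank lowest.
fn score(ty: DeviceType) -> u32 {
    match ty {
        DeviceType::DiscreteGpu => 2,
        DeviceType::IntegratedGpu => 1,
        _ => 0,
    }
}

fn main() {
    let devices = [DeviceType::Cpu, DeviceType::IntegratedGpu, DeviceType::DiscreteGpu];
    let best = devices
        .iter()
        .copied()
        .max_by_key(|ty| score(*ty))
        .expect("No suitable device found!");
    assert_eq!(best, DeviceType::DiscreteGpu);
}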
main.rs
use ash::extensions::{DebugReport, Surface, Swapchain}; #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] use ash::extensions::{WaylandSurface, XlibSurface}; use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0, V1_0}; use ash::vk::Image; use ash::vk::PhysicalDevice; use ash::vk::Semaphore; use ash::vk::SwapchainKHR; use ash::Entry; use ash::{vk, vk_make_version, Device, Instance}; use std::ffi::{CStr, CString}; use std::ptr; const WIDTH: f64 = 800.0; const HEIGHT: f64 = 600.0; fn main() { let entry = create_entry(); let instance = create_instance(&entry); let physical_device = pick_physical_device(&instance); let props = instance.get_physical_device_properties(physical_device); println!("GPU chosen: {:?}", &unsafe { CStr::from_ptr(&props.device_name[0]) }); let device = create_device(&instance, &physical_device); let queue_family_index: u32 = 0; let present_queue = unsafe { device.get_device_queue(queue_family_index, 0) }; let mut events_loop = winit::EventsLoop::new(); let window = winit::WindowBuilder::new() .with_title("Ash - Example") .with_dimensions(winit::dpi::LogicalSize { width: WIDTH, height: HEIGHT, }) .build(&events_loop) .unwrap(); let (swapchain_loader, swapchain) = unsafe { create_swapchain(&entry, &instance, &window, physical_device, &device).unwrap() }; let present_images = unsafe { get_present_images(&swapchain_loader, swapchain).unwrap() }; let present_complete_semaphore = unsafe { create_semaphore(&device).unwrap() }; let rendering_complete_semaphore = unsafe { create_semaphore(&device).unwrap() }; let mut closed = false; while!closed { events_loop.poll_events(|event| match event { winit::Event::WindowEvent { event,.. } => match event { winit::WindowEvent::CloseRequested => closed = true, _ => {} }, _ => {} }); let present_index = unsafe { swapchain_loader .acquire_next_image_khr( swapchain, std::u64::MAX, present_complete_semaphore, vk::Fence::null(), ) .unwrap() }; let present_info = vk::PresentInfoKHR { s_type: vk::StructureType::PresentInfoKhr, p_next: ptr::null(), wait_semaphore_count: 0, // p_wait_semaphores: &rendering_complete_semaphore, p_wait_semaphores: ptr::null(), swapchain_count: 1, p_swapchains: &swapchain, p_image_indices: &present_index, p_results: ptr::null_mut(), }; unsafe { swapchain_loader .queue_present_khr(present_queue, &present_info) .unwrap(); } } } fn create_entry() -> Entry<V1_0> { Entry::new().unwrap() } fn create_instance(entry: &Entry<V1_0>) -> Instance<V1_0> { let app_name = CString::new("Niagara-rs").unwrap(); let raw_name = app_name.as_ptr(); let appinfo = vk::ApplicationInfo { s_type: vk::StructureType::ApplicationInfo, api_version: vk_make_version!(1, 0, 36), p_application_name: raw_name, p_engine_name: raw_name, application_version: 0, engine_version: 0, p_next: ptr::null(), }; let layer_names = [CString::new("VK_LAYER_LUNARG_standard_validation").unwrap()]; let layers_names_raw: Vec<*const i8> = layer_names .iter() .map(|raw_name| raw_name.as_ptr()) .collect(); let extension_names_raw = extension_names(); let create_info = vk::InstanceCreateInfo { s_type: vk::StructureType::InstanceCreateInfo, p_next: ptr::null(), flags: Default::default(), p_application_info: &appinfo, pp_enabled_layer_names: layers_names_raw.as_ptr(), enabled_layer_count: layers_names_raw.len() as u32, pp_enabled_extension_names: extension_names_raw.as_ptr(), enabled_extension_count: extension_names_raw.len() as u32, }; unsafe { let instance = entry .create_instance(&create_info, None) .expect("Instance creation error"); let debug_info = 
vk::DebugReportCallbackCreateInfoEXT { s_type: vk::StructureType::DebugReportCallbackCreateInfoExt, p_next: ptr::null(), flags: vk::DEBUG_REPORT_ERROR_BIT_EXT | vk::DEBUG_REPORT_WARNING_BIT_EXT | vk::DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, pfn_callback: vulkan_debug_callback, p_user_data: ptr::null_mut(), }; let debug_report_loader = DebugReport::new(entry, &instance).expect("Unable to load debug report"); let _debug_call_back = debug_report_loader .create_debug_report_callback_ext(&debug_info, None) .unwrap(); return instance; } } fn pick_physical_device(instance: &Instance<V1_0>) -> vk::PhysicalDevice { let physical_devices = instance .enumerate_physical_devices() .expect("Physical device error"); if physical_devices.len() == 0 { panic!("No GPU found!"); } let physical_device = physical_devices .iter() .max_by_key(|physical_device| { let props = instance.get_physical_device_properties(**physical_device); match props.device_type { vk::PhysicalDeviceType::DiscreteGpu => 2, vk::PhysicalDeviceType::IntegratedGpu => 1, _ => 0, } }) .expect("No suitable device found!"); return *physical_device; } #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] fn extension_names() -> Vec<*const i8> { vec![ Surface::name().as_ptr(), XlibSurface::name().as_ptr(), DebugReport::name().as_ptr(), ] } fn create_device(instance: &Instance<V1_0>, physical_device: &vk::PhysicalDevice) -> Device<V1_0> { let queue_family_index = 0 as u32; let priorities = [1.0]; let queue_info = vk::types::DeviceQueueCreateInfo { s_type: vk::StructureType::DeviceQueueCreateInfo, p_next: ptr::null(), flags: Default::default(), queue_family_index: queue_family_index as u32, p_queue_priorities: priorities.as_ptr(), queue_count: priorities.len() as u32, }; let device_extension_names_raw = [Swapchain::name().as_ptr()]; let features = vk::PhysicalDeviceFeatures { shader_clip_distance: 1, ..Default::default() }; let device_create_info = vk::DeviceCreateInfo { s_type: vk::StructureType::DeviceCreateInfo, p_next: ptr::null(), flags: Default::default(), p_queue_create_infos: &queue_info, queue_create_info_count: 1, pp_enabled_layer_names: ptr::null(), enabled_layer_count: 0, pp_enabled_extension_names: device_extension_names_raw.as_ptr(), enabled_extension_count: device_extension_names_raw.len() as u32, p_enabled_features: &features, }; unsafe { let device: Device<V1_0> = instance .create_device(*physical_device, &device_create_info, None) .unwrap(); return device; } } #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))] unsafe fn create_surface<E: EntryV1_0, I: InstanceV1_0>( entry: &E, instance: &I, window: &winit::Window, ) -> Result<vk::SurfaceKHR, vk::Result> { use winit::os::unix::WindowExt; let x11_display = window.get_xlib_display().unwrap(); let x11_window = window.get_xlib_window().unwrap(); let x11_create_info = vk::XlibSurfaceCreateInfoKHR { s_type: vk::StructureType::XlibSurfaceCreateInfoKhr, p_next: ptr::null(), flags: Default::default(), window: x11_window as vk::Window, dpy: x11_display as *mut vk::Display, }; let xlib_surface_loader = XlibSurface::new(entry, instance).expect("Unable to load xlib surface"); xlib_surface_loader.create_xlib_surface_khr(&x11_create_info, None) } unsafe fn create_swapchain( entry: &Entry<V1_0>, instance: &Instance<V1_0>, window: &winit::Window, physical_device: PhysicalDevice, device: &Device<V1_0>, ) -> Result<(Swapchain, SwapchainKHR), vk::Result> { let surface = create_surface(entry, instance, window).unwrap(); let surface_loader = Surface::new(entry, 
instance).expect("Unable to load the Surface extension"); let surface_formats = surface_loader .get_physical_device_surface_formats_khr(physical_device, surface) .unwrap(); let surface_format = surface_formats .iter() .map(|sfmt| match sfmt.format { vk::Format::Undefined => vk::SurfaceFormatKHR { format: vk::Format::B8g8r8Unorm, color_space: sfmt.color_space, }, _ => sfmt.clone(), }) .nth(0) .expect("Unable to find suitable surface format."); let surface_capabilities = surface_loader .get_physical_device_surface_capabilities_khr(physical_device, surface) .unwrap(); let mut desired_image_count = surface_capabilities.min_image_count + 1; if surface_capabilities.max_image_count > 0 && desired_image_count > surface_capabilities.max_image_count { desired_image_count = surface_capabilities.max_image_count; } let surface_resolution = match surface_capabilities.current_extent.width { std::u32::MAX => vk::Extent2D { width: WIDTH as u32, height: HEIGHT as u32, }, _ => surface_capabilities.current_extent, }; let pre_transform = if surface_capabilities .supported_transforms .subset(vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR) { vk::SURFACE_TRANSFORM_IDENTITY_BIT_KHR } else { surface_capabilities.current_transform }; let present_modes = surface_loader .get_physical_device_surface_present_modes_khr(physical_device, surface) .unwrap(); let present_mode = present_modes .iter() .cloned() .find(|&mode| mode == vk::PresentModeKHR::Mailbox) .unwrap_or(vk::PresentModeKHR::Fifo); let swapchain_create_info = vk::SwapchainCreateInfoKHR { s_type: vk::StructureType::SwapchainCreateInfoKhr, p_next: ptr::null(), flags: Default::default(), surface: surface, min_image_count: desired_image_count, image_color_space: surface_format.color_space, image_format: surface_format.format, image_extent: surface_resolution.clone(), image_usage: vk::IMAGE_USAGE_COLOR_ATTACHMENT_BIT, image_sharing_mode: vk::SharingMode::Exclusive, pre_transform: pre_transform, composite_alpha: vk::COMPOSITE_ALPHA_OPAQUE_BIT_KHR, present_mode: present_mode, clipped: 1, old_swapchain: vk::SwapchainKHR::null(), image_array_layers: 1, p_queue_family_indices: ptr::null(), queue_family_index_count: 0, }; let swapchain_loader = Swapchain::new(instance, device).expect("Unable to load swapchain"); let swapchain = swapchain_loader .create_swapchain_khr(&swapchain_create_info, None) .unwrap(); Ok((swapchain_loader, swapchain)) } unsafe fn get_present_images( swapchain_loader: &Swapchain, swapchain: SwapchainKHR, ) -> Result<Vec<Image>, vk::Result> { swapchain_loader.get_swapchain_images_khr(swapchain) } unsafe fn create_semaphore(device: &Device<V1_0>) -> Result<Semaphore, vk::Result> {
let semaphore_create_info = vk::SemaphoreCreateInfo { s_type: vk::StructureType::SemaphoreCreateInfo, p_next: ptr::null(), flags: Default::default(), }; device.create_semaphore(&semaphore_create_info, None) } unsafe extern "system" fn vulkan_debug_callback( _: vk::DebugReportFlagsEXT, _: vk::DebugReportObjectTypeEXT, _: vk::uint64_t, _: vk::size_t, _: vk::int32_t, _: *const vk::c_char, p_message: *const vk::c_char, _: *mut vk::c_void, ) -> u32 { println!("{:?}", CStr::from_ptr(p_message)); vk::VK_FALSE }
random_line_split
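The create_swapchain function above bundles two independent parameter choices: it requests one image more than the surface's minimum (clamped to the maximum, where a maximum of 0 means unlimited), and it prefers the low-latency Mailbox present mode while falling back to Fifo, the only mode Vulkan guarantees. Below is a minimal standalone sketch of those two rules using plain types rather than the ash API (PresentMode and the choose_* names are illustrative, not from any crate):

// Sketch of the swapchain parameter selection logic from create_swapchain.
#[derive(Clone, Copy, Debug, PartialEq)]
enum PresentMode {
    Mailbox,
    Fifo,
}

// One more image than the minimum avoids stalling on the driver;
// a max_image_count of 0 means "no upper limit" in Vulkan.
fn choose_image_count(min_count: u32, max_count: u32) -> u32 {
    let desired = min_count + 1;
    if max_count > 0 && desired > max_count {
        max_count
    } else {
        desired
    }
}

// Prefer Mailbox (low latency); Fifo (vsync) is always available
// per the Vulkan spec, so it is a safe fallback.
fn choose_present_mode(available: &[PresentMode]) -> PresentMode {
    available
        .iter()
        .copied()
        .find(|&mode| mode == PresentMode::Mailbox)
        .unwrap_or(PresentMode::Fifo)
}

fn main() {
    assert_eq!(choose_image_count(2, 0), 3); // uncapped
    assert_eq!(choose_image_count(2, 3), 3); // clamped to the maximum
    assert_eq!(choose_present_mode(&[PresentMode::Fifo]), PresentMode::Fifo);
}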
tweetnacl.rs
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; use zeroize::Zeroize; use super::FieldImplementation; pub type Limbs = [i64; 16]; /// Element of the base field of the elliptic curve #[derive(Clone, Copy, Debug, Default, Zeroize)] pub struct FieldElement(pub Limbs); impl ConditionallySelectable for FieldElement { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { let mut selection = Self::default(); for i in 0..16 { selection.0[i] = i64::conditional_select(&a.0[i], &b.0[i], choice); } selection } fn
(a: &mut Self, b: &mut Self, choice: Choice) { // what TweetNacl originally does // let mask: i64 = !(b - 1); // TweetNacl translated to Choice language // let mask: i64 = !((choice.unwrap_u8() as i64) - 1); // `subtle` definition, which is equivalent // let mask: i64 = -(choice.unwrap_u8() as i64); for (ai, bi) in a.0.iter_mut().zip(b.0.iter_mut()) { // let t = mask & (*ai ^ *bi); // *ai ^= t; // *bi ^= t; i64::conditional_swap(ai, bi, choice); } } } impl FieldImplementation for FieldElement { type Limbs = Limbs; const ZERO: Self = Self([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const ONE: Self = Self([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const D: Self = Self([ 0x78a3, 0x1359, 0x4dca, 0x75eb, 0xd8ab, 0x4141, 0x0a4d, 0x0070, 0xe898, 0x7779, 0x4079, 0x8cc7, 0xfe73, 0x2b6f, 0x6cee, 0x5203, ]); const D2: Self = Self([ 0xf159, 0x26b2, 0x9b94, 0xebd6, 0xb156, 0x8283, 0x149a, 0x00e0, 0xd130, 0xeef3, 0x80f2, 0x198e, 0xfce7, 0x56df, 0xd9dc, 0x2406, ]); const EDWARDS_BASEPOINT_X: Self = Self([ 0xd51a, 0x8f25, 0x2d60, 0xc956, 0xa7b2, 0x9525, 0xc760, 0x692c, 0xdc5c, 0xfdd6, 0xe231, 0xc0a4, 0x53fe, 0xcd6e, 0x36d3, 0x2169, ]); const EDWARDS_BASEPOINT_Y: Self = Self([ 0x6658, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, ]); const I: Self = Self([ 0xa0b0, 0x4a0e, 0x1b27, 0xc4ee, 0xe478, 0xad2f, 0x1806, 0x2f43, 0xd7a7, 0x3dfb, 0x0099, 0x2b4d, 0xdf0b, 0x4fc1, 0x2480, 0x2b83, ]); const APLUS2_OVER_FOUR: Self = Self([121666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const MONTGOMERY_BASEPOINT_U: Self = Self([9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); fn to_bytes(&self) -> [u8; 32] { // make our own private copy let mut fe = *self; // third time's the charm?? // TODO: figure out why :) fe.carry(); fe.carry(); fe.carry(); // let m_buf: FieldElementBuffer = Default::default(); // let mut m: FieldElement = FieldElement(m_buf); let mut m: Limbs = Default::default(); for _j in 0..2 { m[0] = fe.0[0] - 0xffed; for i in 1..15 { m[i] = fe.0[i] - 0xffff - ((m[i - 1] >> 16) & 1); m[i - 1] &= 0xffff; } m[15] = fe.0[15] - 0x7fff - ((m[14] >> 16) & 1); let b = (m[15] >> 16) & 1; m[14] &= 0xffff; FieldElement::conditional_swap(&mut fe, &mut FieldElement(m), ((1 - b) as u8).into()); } let mut bytes: [u8; 32] = Default::default(); for i in 0..16 { bytes[2 * i] = fe.0[i] as u8; //& 0xff; bytes[2 * i + 1] = (fe.0[i] >> 8) as u8; } bytes } fn from_bytes_unchecked(bytes: &[u8; 32]) -> FieldElement { let mut limbs = Limbs::default(); for i in 0..16 { limbs[i] = (bytes[2 * i] as i64) + ((bytes[2 * i + 1] as i64) << 8); } // some kind of safety check // but: also clears the x-coordinate sign bit limbs[15] &= 0x7fff; FieldElement(limbs) } // sv inv25519(gf o,const gf i) // { // // want: o = 1/i in base field // gf c; // int a; // FOR(a,16) c[a]=i[a]; // // exponentiate with 2^255 - 21 // // same as inversion by Fermat's little theorem // for(a=253;a>=0;a--) { // S(c,c); // if(a!=2&&a!=4) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } fn inverse(&self) -> FieldElement { // TODO: possibly assert! that fe != 0?
// make our own private copy let mut inverse = *self; // exponentiate with 2**255 - 21, // which by Fermat's little theorem is the same as inversion for i in (0..=253).rev() { inverse = inverse.squared(); if i != 2 && i != 4 { inverse = &inverse * self; } } inverse } // sv pow2523(gf o,const gf i) // // the naming here means "to the power of 2^252 - 3" // // again by Fermat's little theorem, this is the same // // as taking the square root, which is needed for // // point decompression // { // gf c; // int a; // FOR(a,16) c[a]=i[a]; // for(a=250;a>=0;a--) { // S(c,c); // if(a!=1) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } /// TODO: figure out why this doesn't pass the test at the end fn pow2523(&self) -> FieldElement { let mut sqrt = *self; for i in (0..=250).rev() { sqrt = sqrt.squared(); if i != 1 { sqrt = &sqrt * self; } } sqrt } } impl ConstantTimeEq for FieldElement { fn ct_eq(&self, other: &Self) -> Choice { let canonical_self = self.to_bytes(); let canonical_other = other.to_bytes(); canonical_self.ct_eq(&canonical_other) } } impl PartialEq for FieldElement { fn eq(&self, other: &Self) -> bool { bool::from(self.ct_eq(other)) } } impl<'a, 'b> Add<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? /// Addition of field elements fn add(self, other: &'b FieldElement) -> FieldElement { let mut sum = *self; sum += other; sum } } impl<'b> AddAssign<&'b FieldElement> for FieldElement { fn add_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x += y; } } } impl<'a> Neg for &'a FieldElement { type Output = FieldElement; /// Negation of field elements fn neg(self) -> FieldElement { let mut negation = *self; for (i, xi) in self.0.iter().enumerate() { negation.0[i] = -xi; } negation } } impl<'a, 'b> Sub<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? /// Subtraction of field elements fn sub(self, other: &'b FieldElement) -> FieldElement { let mut difference = *self; difference -= other; difference } } impl<'b> SubAssign<&'b FieldElement> for FieldElement { fn sub_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x -= y; } } } impl<'a, 'b> Mul<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; fn mul(self, other: &'b FieldElement) -> FieldElement { // start with so-called "schoolbook multiplication" // TODO: nicer way to do this with iterators? let mut pre_product: [i64; 31] = Default::default(); for i in 0..16 { for j in 0..16 { pre_product[i + j] += self.0[i] * other.0[j]; } } // reduce modulo 2**256 - 38 // (en route to reduction modulo 2**255 - 19) for i in 0..15 { pre_product[i] += 38 * pre_product[i + 16]; } // ble, would prefer to call pre_product just product, // but the two-step initialize then copy doesn't seem // to work syntactically. // also: really hope the initialization of `product` // is optimized away... let mut product: Limbs = Default::default(); product.copy_from_slice(&pre_product[..16]); let mut fe = FieldElement(product); // normalize such that all limbs lie in [0, 2^16) // TODO: why twice? why is twice enough? fe.carry(); fe.carry(); fe } } impl<'b> MulAssign<&'b FieldElement> for FieldElement { fn mul_assign(&mut self, other: &'b FieldElement) { let result = (self as &FieldElement) * other; self.0 = result.0; } } impl FieldElement { fn carry(&mut self) { // TODO: multiplication calls this twice!!
// TODO: to_bytes calls this thrice!!! // // What exactly are the guarantees here? // Why don't we do this twice or thrice if it's needed? for i in 0..16 { // add 2**16 self.0[i] += 1 << 16; // "carry" part, everything over radix 2**16 let carry = self.0[i] >> 16; // a) i < 15: add carry bit, subtract 1 to compensate addition of 2^16 // --> o[i + 1] += c - 1 // add carry bit, subtract // b) i == 15: wraps around to index 0 via 2^256 = 38 // --> o[0] += 38 * (c - 1) self.0[(i + 1) * ((i < 15) as usize)] += carry - 1 + 37 * (carry - 1) * ((i == 15) as i64); // get rid of carry bit // TODO: why not get rid of it immediately. kinda clearer self.0[i] -= carry << 16; } } } #[cfg(test)] mod tests { use super::FieldElement; use crate::field::FieldImplementation; use subtle::ConstantTimeEq; #[test] fn test_one_plus_one() { let one = FieldElement::ONE; let two = &one + &one; let expected = FieldElement([2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); // TODO: Implement PartialEq (hopefully in constant time!) assert_eq!(two.0, expected.0); assert!(bool::from(two.ct_eq(&expected))) } #[test] fn test_one_times_zero() { let one = FieldElement::ONE; let zero = FieldElement::ZERO; let result = &one * &zero; // TODO: Implement PartialEq (hopefully in constant time!) assert_eq!(result.0, zero.0); assert!(bool::from(result.ct_eq(&zero))) } #[test] fn test_two_times_three_is_six() { let one = FieldElement::ONE; let two = &one + &one; let three = &two + &one; let two_times_three = &two * &three; // no multiplications, just sum up ONEs let six = (1..=6).fold(FieldElement::ZERO, |partial_sum, _| { &partial_sum + &FieldElement::ONE }); assert_eq!(two_times_three.to_bytes(), six.to_bytes()); assert!(bool::from(two_times_three.ct_eq(&six))); } #[test] fn test_negation() { let d2 = FieldElement::D2; let minus_d2 = -&d2; let maybe_zero = &d2 + &minus_d2; assert_eq!(FieldElement::ZERO.to_bytes(), maybe_zero.to_bytes()); } #[test] fn test_inversion() { let d2 = FieldElement::D2; let maybe_inverse = d2.inverse(); let maybe_one = &d2 * &maybe_inverse; assert_eq!(maybe_one.to_bytes(), FieldElement::ONE.to_bytes()); assert!(bool::from(maybe_one.ct_eq(&FieldElement::ONE))); assert_eq!(maybe_one, FieldElement::ONE); } #[test] fn test_imaginary() { let minus_one = -&FieldElement::ONE; let i_squared = &FieldElement::I * &FieldElement::I; assert_eq!(minus_one, i_squared); } #[test] fn test_square_roots() { let two = &FieldElement::ONE + &FieldElement::ONE; // four has Legendre symbol of minus one let four = &two * &two; let sqrt_minus_four = &four.pow2523() * &four; assert_eq!(&sqrt_minus_four * &sqrt_minus_four, -&four); let sqrt_four = &FieldElement::I * &sqrt_minus_four; assert_eq!(&sqrt_four * &sqrt_four, four); let three = &two + &FieldElement::ONE; // nine has Legendre symbol of one let nine = &three * &three; let sqrt_nine = &nine.pow2523() * &nine; assert_eq!(&sqrt_nine * &sqrt_nine, nine); } }
conditional_swap
identifier_name
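The middle filled in above is `conditional_swap`, which delegates to the `subtle` crate so that the swap costs the same time whether or not it happens (the commented-out mask arithmetic is what TweetNaCl does by hand). A small self-contained demonstration of that primitive on plain i64 values, using subtle's actual ConditionallySelectable API:

use subtle::{Choice, ConditionallySelectable};

fn main() {
    let mut a: i64 = 0x1234;
    let mut b: i64 = 0x5678;
    // A Choice of 1 swaps, 0 leaves the values in place; the same masked
    // XOR sequence executes either way, so timing does not leak the bit.
    i64::conditional_swap(&mut a, &mut b, Choice::from(1u8));
    assert_eq!((a, b), (0x5678, 0x1234));
    i64::conditional_swap(&mut a, &mut b, Choice::from(0u8));
    assert_eq!((a, b), (0x5678, 0x1234)); // unchanged
}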
tweetnacl.rs
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; use zeroize::Zeroize; use super::FieldImplementation; pub type Limbs = [i64; 16]; /// Element of the base field of the elliptic curve #[derive(Clone, Copy, Debug, Default, Zeroize)] pub struct FieldElement(pub Limbs); impl ConditionallySelectable for FieldElement { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { let mut selection = Self::default(); for i in 0..16 { selection.0[i] = i64::conditional_select(&a.0[i], &b.0[i], choice); } selection } fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { // what TweetNacl originally does // let mask: i64 =!(b - 1); // TweetNacl translated to Choice language // let mask: i64 =!(choice.unwrap_u8() as i64) - 1); // `subtle` definition, which is equivalent // let mask: i64 = -(choice.unwrap_u8() as i64); for (ai, bi) in a.0.iter_mut().zip(b.0.iter_mut()) { // let t = mask & (*ai ^ *bi); // *ai ^= t; // *bi ^= t; i64::conditional_swap(ai, bi, choice); } } } impl FieldImplementation for FieldElement { type Limbs = Limbs; const ZERO: Self = Self([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const ONE: Self = Self([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const D: Self = Self([ 0x78a3, 0x1359, 0x4dca, 0x75eb, 0xd8ab, 0x4141, 0x0a4d, 0x0070, 0xe898, 0x7779, 0x4079, 0x8cc7, 0xfe73, 0x2b6f, 0x6cee, 0x5203, ]); const D2: Self = Self([ 0xf159, 0x26b2, 0x9b94, 0xebd6, 0xb156, 0x8283, 0x149a, 0x00e0, 0xd130, 0xeef3, 0x80f2, 0x198e, 0xfce7, 0x56df, 0xd9dc, 0x2406, ]); const EDWARDS_BASEPOINT_X: Self = Self([ 0xd51a, 0x8f25, 0x2d60, 0xc956, 0xa7b2, 0x9525, 0xc760, 0x692c, 0xdc5c, 0xfdd6, 0xe231, 0xc0a4, 0x53fe, 0xcd6e, 0x36d3, 0x2169, ]); const EDWARDS_BASEPOINT_Y: Self = Self([ 0x6658, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, ]); const I: Self = Self([ 0xa0b0, 0x4a0e, 0x1b27, 0xc4ee, 0xe478, 0xad2f, 0x1806, 0x2f43, 0xd7a7, 0x3dfb, 0x0099, 0x2b4d, 0xdf0b, 0x4fc1, 0x2480, 0x2b83, ]); const APLUS2_OVER_FOUR: Self = Self([121666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const MONTGOMERY_BASEPOINT_U: Self = Self([9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); fn to_bytes(&self) -> [u8; 32] { // make our own private copy let mut fe = *self; // three times' the charm?? 
// TODO: figure out why :) fe.carry(); fe.carry(); fe.carry(); // let m_buf: FieldElementBuffer = Default::default(); // let mut m: FieldElement = FieldElement(m_buf); let mut m: Limbs = Default::default(); for _j in 0..2 { m[0] = fe.0[0] - 0xffed; for i in 1..15 { m[i] = fe.0[i] - 0xffff - ((m[i - 1] >> 16) & 1); m[i - 1] &= 0xffff; } m[15] = fe.0[15] - 0x7fff - ((m[14] >> 16) & 1); let b = (m[15] >> 16) & 1; m[14] &= 0xffff; FieldElement::conditional_swap(&mut fe, &mut FieldElement(m), ((1 - b) as u8).into()); } let mut bytes: [u8; 32] = Default::default(); for i in 0..16 { bytes[2 * i] = fe.0[i] as u8; //& 0xff; bytes[2 * i + 1] = (fe.0[i] >> 8) as u8; } bytes } fn from_bytes_unchecked(bytes: &[u8; 32]) -> FieldElement { let mut limbs = Limbs::default(); for i in 0..16 { limbs[i] = (bytes[2 * i] as i64) + ((bytes[2 * i + 1] as i64) << 8); } // some kind of safety check // but: also clears the x-coordinate sign bit limbs[15] &= 0x7fff; FieldElement(limbs) } // sv inv25519(gf o,const gf i) // { // // want: o = 1/i in base field // gf c; // int a; // FOR(a,16) c[a]=i[a]; // // exponentiate with 2^255 - 21 // // same as inversion by Fermat's little theorem // for(a=253;a>=0;a--) { // S(c,c); // if(a!=2&&a!=4) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } fn inverse(&self) -> FieldElement { // TODO: possibly assert! that fe!= 0? // make our own private copy let mut inverse = *self; // exponentiate with 2**255 - 21, // which by Fermat's little theorem is the same as inversion for i in (0..=253).rev() { inverse = inverse.squared(); if i!= 2 && i!= 4 { inverse = &inverse * self; } } inverse } // sv pow2523(gf o,const gf i) // // the naming here means "to the power of 2^252 - 3 // // again by Fermat's little theorem, this is the same // // as taking the square root, which is needed for // // point decompression // { // gf c; // int a; // FOR(a,16) c[a]=i[a]; // for(a=250;a>=0;a--) { // S(c,c); // if(a!=1) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } /// TODO: figure out why this doesn't pass the test at the end fn pow2523(&self) -> FieldElement { let mut sqrt = *self; for i in (0..=250).rev() { sqrt = sqrt.squared(); if i!= 1 { sqrt = &sqrt * self; } } sqrt } } impl ConstantTimeEq for FieldElement { fn ct_eq(&self, other: &Self) -> Choice { let canonical_self = self.to_bytes(); let canonical_other = other.to_bytes(); canonical_self.ct_eq(&canonical_other) } } impl PartialEq for FieldElement { fn eq(&self, other: &Self) -> bool { bool::from(self.ct_eq(other)) } } impl<'a, 'b> Add<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? /// Addition of field elements fn add(self, other: &'b FieldElement) -> FieldElement { let mut sum = *self; sum += other; sum } } impl<'b> AddAssign<&'b FieldElement> for FieldElement { fn add_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x += y; } } } impl<'a> Neg for &'a FieldElement { type Output = FieldElement; /// Subition of field elements fn neg(self) -> FieldElement { let mut negation = *self; for (i, xi) in self.0.iter().enumerate() { negation.0[i] = -xi; } negation } } impl<'a, 'b> Sub<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? 
/// Subition of field elements fn sub(self, other: &'b FieldElement) -> FieldElement { let mut difference = *self; difference -= other; difference } } impl<'b> SubAssign<&'b FieldElement> for FieldElement { fn sub_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x -= y; } } } impl<'a, 'b> Mul<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; fn mul(self, other: &'b FieldElement) -> FieldElement { // start with so-called "schoolbook multiplication" // TODO: nicer way to do this with iterators? let mut pre_product: [i64; 31] = Default::default(); for i in 0..16 { for j in 0..16 { pre_product[i + j] += self.0[i] * other.0[j]; } } // reduce modulo 2**256 - 38 // (en route to reduction modulo 2**255 - 19) for i in 0..15 { pre_product[i] += 38 * pre_product[i + 16]; } // ble, would prefer to call pre_product just product, // but the two-step initialize then copy doesn't seem // to work syntactically. // also: really hope the initialization of `product` // is optimized away... let mut product: Limbs = Default::default(); product.copy_from_slice(&pre_product[..16]); let mut fe = FieldElement(product); // normalize such that all limbs lie in [0, 2^16) // TODO: why twice? why is twice enough? fe.carry(); fe.carry(); fe } } impl<'b> MulAssign<&'b FieldElement> for FieldElement { fn mul_assign(&mut self, other: &'b FieldElement) { let result = (self as &FieldElement) * other; self.0 = result.0; } } impl FieldElement { fn carry(&mut self) { // TODO: multiplication calls this twice!! // TODO: to_bytes calls this thrice!!! // // What exactly are the guarantees here? // Why don't we do this twice or thrice if it's needed? for i in 0..16 { // add 2**16 self.0[i] += 1 << 16; // "carry" part, everything over radix 2**16 let carry = self.0[i] >> 16; // a) i < 15: add carry bit, subtract 1 to compensate addition of 2^16 // --> o[i + 1] += c - 1 // add carry bit, subtract // b) i == 15: wraps around to index 0 via 2^256 = 38 // --> o[0] += 38 * (c - 1) self.0[(i + 1) * ((i < 15) as usize)] += carry - 1 + 37 * (carry - 1) * ((i == 15) as i64); // get rid of carry bit // TODO: why not get rid of it immediately. kinda clearer self.0[i] -= carry << 16; } } } #[cfg(test)] mod tests { use super::FieldElement; use crate::field::FieldImplementation; use subtle::ConstantTimeEq; #[test] fn test_one_plus_one() { let one = FieldElement::ONE; let two = &one + &one; let expected = FieldElement([2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); // TODO: Implement PartialEq (hopefully in constant time!) assert_eq!(two.0, expected.0); assert!(bool::from(two.ct_eq(&expected))) } #[test] fn test_one_times_zero() { let one = FieldElement::ONE; let zero = FieldElement::ZERO; let result = &one * &zero; // TODO: Implement PartialEq (hopefully in constant time!) 
assert_eq!(result.0, zero.0); assert!(bool::from(result.ct_eq(&zero))) } #[test] fn test_two_times_three_is_six() { let one = FieldElement::ONE; let two = &one + &one; let three = &two + &one; let two_times_three = &two * &three; // no multiplications, just sum up ONEs let six = (1..=6).fold(FieldElement::ZERO, |partial_sum, _| { &partial_sum + &FieldElement::ONE }); assert_eq!(two_times_three.to_bytes(), six.to_bytes()); assert!(bool::from(two_times_three.ct_eq(&six))); } #[test] fn test_negation() { let d2 = FieldElement::D2; let minus_d2 = -&d2; let maybe_zero = &d2 + &minus_d2; assert_eq!(FieldElement::ZERO.to_bytes(), maybe_zero.to_bytes()); } #[test] fn test_inversion() { let d2 = FieldElement::D2; let maybe_inverse = d2.inverse(); let maybe_one = &d2 * &maybe_inverse;
assert!(bool::from(maybe_one.ct_eq(&FieldElement::ONE))); assert_eq!(maybe_one, FieldElement::ONE); } #[test] fn test_imaginary() { let minus_one = -&FieldElement::ONE; let i_squared = &FieldElement::I * &FieldElement::I; assert_eq!(minus_one, i_squared); } #[test] fn test_square_roots() { let two = &FieldElement::ONE + &FieldElement::ONE; // four has Legendre symbol of minus one let four = &two * &two; let sqrt_minus_four = &four.pow2523() * &four; assert_eq!(&sqrt_minus_four * &sqrt_minus_four, -&four); let sqrt_four = &FieldElement::I * &sqrt_minus_four; assert_eq!(&sqrt_four * &sqrt_four, four); let three = &two + &FieldElement::ONE; // nine has Legendre symbol of one let nine = &three * &three; let sqrt_nine = &nine.pow2523() * &nine; assert_eq!(&sqrt_nine * &sqrt_nine, nine); } }
assert_eq!(maybe_one.to_bytes(), FieldElement::ONE.to_bytes());
random_line_split
tweetnacl.rs
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; use zeroize::Zeroize; use super::FieldImplementation; pub type Limbs = [i64; 16]; /// Element of the base field of the elliptic curve #[derive(Clone, Copy, Debug, Default, Zeroize)] pub struct FieldElement(pub Limbs); impl ConditionallySelectable for FieldElement { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { let mut selection = Self::default(); for i in 0..16 { selection.0[i] = i64::conditional_select(&a.0[i], &b.0[i], choice); } selection } fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { // what TweetNacl originally does // let mask: i64 =!(b - 1); // TweetNacl translated to Choice language // let mask: i64 =!(choice.unwrap_u8() as i64) - 1); // `subtle` definition, which is equivalent // let mask: i64 = -(choice.unwrap_u8() as i64); for (ai, bi) in a.0.iter_mut().zip(b.0.iter_mut()) { // let t = mask & (*ai ^ *bi); // *ai ^= t; // *bi ^= t; i64::conditional_swap(ai, bi, choice); } } } impl FieldImplementation for FieldElement { type Limbs = Limbs; const ZERO: Self = Self([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const ONE: Self = Self([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const D: Self = Self([ 0x78a3, 0x1359, 0x4dca, 0x75eb, 0xd8ab, 0x4141, 0x0a4d, 0x0070, 0xe898, 0x7779, 0x4079, 0x8cc7, 0xfe73, 0x2b6f, 0x6cee, 0x5203, ]); const D2: Self = Self([ 0xf159, 0x26b2, 0x9b94, 0xebd6, 0xb156, 0x8283, 0x149a, 0x00e0, 0xd130, 0xeef3, 0x80f2, 0x198e, 0xfce7, 0x56df, 0xd9dc, 0x2406, ]); const EDWARDS_BASEPOINT_X: Self = Self([ 0xd51a, 0x8f25, 0x2d60, 0xc956, 0xa7b2, 0x9525, 0xc760, 0x692c, 0xdc5c, 0xfdd6, 0xe231, 0xc0a4, 0x53fe, 0xcd6e, 0x36d3, 0x2169, ]); const EDWARDS_BASEPOINT_Y: Self = Self([ 0x6658, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, ]); const I: Self = Self([ 0xa0b0, 0x4a0e, 0x1b27, 0xc4ee, 0xe478, 0xad2f, 0x1806, 0x2f43, 0xd7a7, 0x3dfb, 0x0099, 0x2b4d, 0xdf0b, 0x4fc1, 0x2480, 0x2b83, ]); const APLUS2_OVER_FOUR: Self = Self([121666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const MONTGOMERY_BASEPOINT_U: Self = Self([9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); fn to_bytes(&self) -> [u8; 32] { // make our own private copy let mut fe = *self; // three times' the charm?? 
// TODO: figure out why :) fe.carry(); fe.carry(); fe.carry(); // let m_buf: FieldElementBuffer = Default::default(); // let mut m: FieldElement = FieldElement(m_buf); let mut m: Limbs = Default::default(); for _j in 0..2 { m[0] = fe.0[0] - 0xffed; for i in 1..15 { m[i] = fe.0[i] - 0xffff - ((m[i - 1] >> 16) & 1); m[i - 1] &= 0xffff; } m[15] = fe.0[15] - 0x7fff - ((m[14] >> 16) & 1); let b = (m[15] >> 16) & 1; m[14] &= 0xffff; FieldElement::conditional_swap(&mut fe, &mut FieldElement(m), ((1 - b) as u8).into()); } let mut bytes: [u8; 32] = Default::default(); for i in 0..16 { bytes[2 * i] = fe.0[i] as u8; //& 0xff; bytes[2 * i + 1] = (fe.0[i] >> 8) as u8; } bytes } fn from_bytes_unchecked(bytes: &[u8; 32]) -> FieldElement { let mut limbs = Limbs::default(); for i in 0..16 { limbs[i] = (bytes[2 * i] as i64) + ((bytes[2 * i + 1] as i64) << 8); } // some kind of safety check // but: also clears the x-coordinate sign bit limbs[15] &= 0x7fff; FieldElement(limbs) } // sv inv25519(gf o,const gf i) // { // // want: o = 1/i in base field // gf c; // int a; // FOR(a,16) c[a]=i[a]; // // exponentiate with 2^255 - 21 // // same as inversion by Fermat's little theorem // for(a=253;a>=0;a--) { // S(c,c); // if(a!=2&&a!=4) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } fn inverse(&self) -> FieldElement { // TODO: possibly assert! that fe!= 0? // make our own private copy let mut inverse = *self; // exponentiate with 2**255 - 21, // which by Fermat's little theorem is the same as inversion for i in (0..=253).rev() { inverse = inverse.squared(); if i!= 2 && i!= 4 { inverse = &inverse * self; } } inverse } // sv pow2523(gf o,const gf i) // // the naming here means "to the power of 2^252 - 3 // // again by Fermat's little theorem, this is the same // // as taking the square root, which is needed for // // point decompression // { // gf c; // int a; // FOR(a,16) c[a]=i[a]; // for(a=250;a>=0;a--) { // S(c,c); // if(a!=1) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } /// TODO: figure out why this doesn't pass the test at the end fn pow2523(&self) -> FieldElement { let mut sqrt = *self; for i in (0..=250).rev() { sqrt = sqrt.squared(); if i!= 1
} sqrt } } impl ConstantTimeEq for FieldElement { fn ct_eq(&self, other: &Self) -> Choice { let canonical_self = self.to_bytes(); let canonical_other = other.to_bytes(); canonical_self.ct_eq(&canonical_other) } } impl PartialEq for FieldElement { fn eq(&self, other: &Self) -> bool { bool::from(self.ct_eq(other)) } } impl<'a, 'b> Add<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? /// Addition of field elements fn add(self, other: &'b FieldElement) -> FieldElement { let mut sum = *self; sum += other; sum } } impl<'b> AddAssign<&'b FieldElement> for FieldElement { fn add_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x += y; } } } impl<'a> Neg for &'a FieldElement { type Output = FieldElement; /// Subition of field elements fn neg(self) -> FieldElement { let mut negation = *self; for (i, xi) in self.0.iter().enumerate() { negation.0[i] = -xi; } negation } } impl<'a, 'b> Sub<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? /// Subition of field elements fn sub(self, other: &'b FieldElement) -> FieldElement { let mut difference = *self; difference -= other; difference } } impl<'b> SubAssign<&'b FieldElement> for FieldElement { fn sub_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x -= y; } } } impl<'a, 'b> Mul<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; fn mul(self, other: &'b FieldElement) -> FieldElement { // start with so-called "schoolbook multiplication" // TODO: nicer way to do this with iterators? let mut pre_product: [i64; 31] = Default::default(); for i in 0..16 { for j in 0..16 { pre_product[i + j] += self.0[i] * other.0[j]; } } // reduce modulo 2**256 - 38 // (en route to reduction modulo 2**255 - 19) for i in 0..15 { pre_product[i] += 38 * pre_product[i + 16]; } // ble, would prefer to call pre_product just product, // but the two-step initialize then copy doesn't seem // to work syntactically. // also: really hope the initialization of `product` // is optimized away... let mut product: Limbs = Default::default(); product.copy_from_slice(&pre_product[..16]); let mut fe = FieldElement(product); // normalize such that all limbs lie in [0, 2^16) // TODO: why twice? why is twice enough? fe.carry(); fe.carry(); fe } } impl<'b> MulAssign<&'b FieldElement> for FieldElement { fn mul_assign(&mut self, other: &'b FieldElement) { let result = (self as &FieldElement) * other; self.0 = result.0; } } impl FieldElement { fn carry(&mut self) { // TODO: multiplication calls this twice!! // TODO: to_bytes calls this thrice!!! // // What exactly are the guarantees here? // Why don't we do this twice or thrice if it's needed? for i in 0..16 { // add 2**16 self.0[i] += 1 << 16; // "carry" part, everything over radix 2**16 let carry = self.0[i] >> 16; // a) i < 15: add carry bit, subtract 1 to compensate addition of 2^16 // --> o[i + 1] += c - 1 // add carry bit, subtract // b) i == 15: wraps around to index 0 via 2^256 = 38 // --> o[0] += 38 * (c - 1) self.0[(i + 1) * ((i < 15) as usize)] += carry - 1 + 37 * (carry - 1) * ((i == 15) as i64); // get rid of carry bit // TODO: why not get rid of it immediately. 
kinda clearer self.0[i] -= carry << 16; } } } #[cfg(test)] mod tests { use super::FieldElement; use crate::field::FieldImplementation; use subtle::ConstantTimeEq; #[test] fn test_one_plus_one() { let one = FieldElement::ONE; let two = &one + &one; let expected = FieldElement([2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); // TODO: Implement PartialEq (hopefully in constant time!) assert_eq!(two.0, expected.0); assert!(bool::from(two.ct_eq(&expected))) } #[test] fn test_one_times_zero() { let one = FieldElement::ONE; let zero = FieldElement::ZERO; let result = &one * &zero; // TODO: Implement PartialEq (hopefully in constant time!) assert_eq!(result.0, zero.0); assert!(bool::from(result.ct_eq(&zero))) } #[test] fn test_two_times_three_is_six() { let one = FieldElement::ONE; let two = &one + &one; let three = &two + &one; let two_times_three = &two * &three; // no multiplications, just sum up ONEs let six = (1..=6).fold(FieldElement::ZERO, |partial_sum, _| { &partial_sum + &FieldElement::ONE }); assert_eq!(two_times_three.to_bytes(), six.to_bytes()); assert!(bool::from(two_times_three.ct_eq(&six))); } #[test] fn test_negation() { let d2 = FieldElement::D2; let minus_d2 = -&d2; let maybe_zero = &d2 + &minus_d2; assert_eq!(FieldElement::ZERO.to_bytes(), maybe_zero.to_bytes()); } #[test] fn test_inversion() { let d2 = FieldElement::D2; let maybe_inverse = d2.inverse(); let maybe_one = &d2 * &maybe_inverse; assert_eq!(maybe_one.to_bytes(), FieldElement::ONE.to_bytes()); assert!(bool::from(maybe_one.ct_eq(&FieldElement::ONE))); assert_eq!(maybe_one, FieldElement::ONE); } #[test] fn test_imaginary() { let minus_one = -&FieldElement::ONE; let i_squared = &FieldElement::I * &FieldElement::I; assert_eq!(minus_one, i_squared); } #[test] fn test_square_roots() { let two = &FieldElement::ONE + &FieldElement::ONE; // four has Legendre symbol of minus one let four = &two * &two; let sqrt_minus_four = &four.pow2523() * &four; assert_eq!(&sqrt_minus_four * &sqrt_minus_four, -&four); let sqrt_four = &FieldElement::I * &sqrt_minus_four; assert_eq!(&sqrt_four * &sqrt_four, four); let three = &two + &FieldElement::ONE; // nine has Legendre symbol of one let nine = &three * &three; let sqrt_nine = &nine.pow2523() * &nine; assert_eq!(&sqrt_nine * &sqrt_nine, nine); } }
{ sqrt = &sqrt * self; }
conditional_block
tweetnacl.rs
use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign}; use subtle::{Choice, ConditionallySelectable, ConstantTimeEq}; use zeroize::Zeroize; use super::FieldImplementation; pub type Limbs = [i64; 16]; /// Element of the base field of the elliptic curve #[derive(Clone, Copy, Debug, Default, Zeroize)] pub struct FieldElement(pub Limbs); impl ConditionallySelectable for FieldElement { fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self { let mut selection = Self::default(); for i in 0..16 { selection.0[i] = i64::conditional_select(&a.0[i], &b.0[i], choice); } selection } fn conditional_swap(a: &mut Self, b: &mut Self, choice: Choice) { // what TweetNacl originally does // let mask: i64 =!(b - 1); // TweetNacl translated to Choice language // let mask: i64 =!(choice.unwrap_u8() as i64) - 1); // `subtle` definition, which is equivalent // let mask: i64 = -(choice.unwrap_u8() as i64); for (ai, bi) in a.0.iter_mut().zip(b.0.iter_mut()) { // let t = mask & (*ai ^ *bi); // *ai ^= t; // *bi ^= t; i64::conditional_swap(ai, bi, choice); } } } impl FieldImplementation for FieldElement { type Limbs = Limbs; const ZERO: Self = Self([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const ONE: Self = Self([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const D: Self = Self([ 0x78a3, 0x1359, 0x4dca, 0x75eb, 0xd8ab, 0x4141, 0x0a4d, 0x0070, 0xe898, 0x7779, 0x4079, 0x8cc7, 0xfe73, 0x2b6f, 0x6cee, 0x5203, ]); const D2: Self = Self([ 0xf159, 0x26b2, 0x9b94, 0xebd6, 0xb156, 0x8283, 0x149a, 0x00e0, 0xd130, 0xeef3, 0x80f2, 0x198e, 0xfce7, 0x56df, 0xd9dc, 0x2406, ]); const EDWARDS_BASEPOINT_X: Self = Self([ 0xd51a, 0x8f25, 0x2d60, 0xc956, 0xa7b2, 0x9525, 0xc760, 0x692c, 0xdc5c, 0xfdd6, 0xe231, 0xc0a4, 0x53fe, 0xcd6e, 0x36d3, 0x2169, ]); const EDWARDS_BASEPOINT_Y: Self = Self([ 0x6658, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, 0x6666, ]); const I: Self = Self([ 0xa0b0, 0x4a0e, 0x1b27, 0xc4ee, 0xe478, 0xad2f, 0x1806, 0x2f43, 0xd7a7, 0x3dfb, 0x0099, 0x2b4d, 0xdf0b, 0x4fc1, 0x2480, 0x2b83, ]); const APLUS2_OVER_FOUR: Self = Self([121666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); const MONTGOMERY_BASEPOINT_U: Self = Self([9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); fn to_bytes(&self) -> [u8; 32] { // make our own private copy let mut fe = *self; // three times' the charm?? 
// TODO: figure out why :) fe.carry(); fe.carry(); fe.carry(); // let m_buf: FieldElementBuffer = Default::default(); // let mut m: FieldElement = FieldElement(m_buf); let mut m: Limbs = Default::default(); for _j in 0..2 { m[0] = fe.0[0] - 0xffed; for i in 1..15 { m[i] = fe.0[i] - 0xffff - ((m[i - 1] >> 16) & 1); m[i - 1] &= 0xffff; } m[15] = fe.0[15] - 0x7fff - ((m[14] >> 16) & 1); let b = (m[15] >> 16) & 1; m[14] &= 0xffff; FieldElement::conditional_swap(&mut fe, &mut FieldElement(m), ((1 - b) as u8).into()); } let mut bytes: [u8; 32] = Default::default(); for i in 0..16 { bytes[2 * i] = fe.0[i] as u8; //& 0xff; bytes[2 * i + 1] = (fe.0[i] >> 8) as u8; } bytes } fn from_bytes_unchecked(bytes: &[u8; 32]) -> FieldElement { let mut limbs = Limbs::default(); for i in 0..16 { limbs[i] = (bytes[2 * i] as i64) + ((bytes[2 * i + 1] as i64) << 8); } // some kind of safety check // but: also clears the x-coordinate sign bit limbs[15] &= 0x7fff; FieldElement(limbs) } // sv inv25519(gf o,const gf i) // { // // want: o = 1/i in base field // gf c; // int a; // FOR(a,16) c[a]=i[a]; // // exponentiate with 2^255 - 21 // // same as inversion by Fermat's little theorem // for(a=253;a>=0;a--) { // S(c,c); // if(a!=2&&a!=4) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } fn inverse(&self) -> FieldElement { // TODO: possibly assert! that fe!= 0? // make our own private copy let mut inverse = *self; // exponentiate with 2**255 - 21, // which by Fermat's little theorem is the same as inversion for i in (0..=253).rev() { inverse = inverse.squared(); if i!= 2 && i!= 4 { inverse = &inverse * self; } } inverse } // sv pow2523(gf o,const gf i) // // the naming here means "to the power of 2^252 - 3 // // again by Fermat's little theorem, this is the same // // as taking the square root, which is needed for // // point decompression // { // gf c; // int a; // FOR(a,16) c[a]=i[a]; // for(a=250;a>=0;a--) { // S(c,c); // if(a!=1) M(c,c,i); // } // FOR(a,16) o[a]=c[a]; // } /// TODO: figure out why this doesn't pass the test at the end fn pow2523(&self) -> FieldElement { let mut sqrt = *self; for i in (0..=250).rev() { sqrt = sqrt.squared(); if i!= 1 { sqrt = &sqrt * self; } } sqrt } } impl ConstantTimeEq for FieldElement { fn ct_eq(&self, other: &Self) -> Choice { let canonical_self = self.to_bytes(); let canonical_other = other.to_bytes(); canonical_self.ct_eq(&canonical_other) } } impl PartialEq for FieldElement { fn eq(&self, other: &Self) -> bool { bool::from(self.ct_eq(other)) } } impl<'a, 'b> Add<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? /// Addition of field elements fn add(self, other: &'b FieldElement) -> FieldElement { let mut sum = *self; sum += other; sum } } impl<'b> AddAssign<&'b FieldElement> for FieldElement { fn add_assign(&mut self, other: &'b FieldElement) { for (x, y) in self.0.iter_mut().zip(other.0.iter()) { *x += y; } } } impl<'a> Neg for &'a FieldElement { type Output = FieldElement; /// Subition of field elements fn neg(self) -> FieldElement { let mut negation = *self; for (i, xi) in self.0.iter().enumerate() { negation.0[i] = -xi; } negation } } impl<'a, 'b> Sub<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; // TODO: TweetNaCl doesn't do any reduction here, why not? 
/// Subition of field elements fn sub(self, other: &'b FieldElement) -> FieldElement { let mut difference = *self; difference -= other; difference } } impl<'b> SubAssign<&'b FieldElement> for FieldElement { fn sub_assign(&mut self, other: &'b FieldElement)
} impl<'a, 'b> Mul<&'b FieldElement> for &'a FieldElement { type Output = FieldElement; fn mul(self, other: &'b FieldElement) -> FieldElement { // start with so-called "schoolbook multiplication" // TODO: nicer way to do this with iterators? let mut pre_product: [i64; 31] = Default::default(); for i in 0..16 { for j in 0..16 { pre_product[i + j] += self.0[i] * other.0[j]; } } // reduce modulo 2**256 - 38 // (en route to reduction modulo 2**255 - 19) for i in 0..15 { pre_product[i] += 38 * pre_product[i + 16]; } // ble, would prefer to call pre_product just product, // but the two-step initialize then copy doesn't seem // to work syntactically. // also: really hope the initialization of `product` // is optimized away... let mut product: Limbs = Default::default(); product.copy_from_slice(&pre_product[..16]); let mut fe = FieldElement(product); // normalize such that all limbs lie in [0, 2^16) // TODO: why twice? why is twice enough? fe.carry(); fe.carry(); fe } } impl<'b> MulAssign<&'b FieldElement> for FieldElement { fn mul_assign(&mut self, other: &'b FieldElement) { let result = (self as &FieldElement) * other; self.0 = result.0; } } impl FieldElement { fn carry(&mut self) { // TODO: multiplication calls this twice!! // TODO: to_bytes calls this thrice!!! // // What exactly are the guarantees here? // Why don't we do this twice or thrice if it's needed? for i in 0..16 { // add 2**16 self.0[i] += 1 << 16; // "carry" part, everything over radix 2**16 let carry = self.0[i] >> 16; // a) i < 15: add carry bit, subtract 1 to compensate addition of 2^16 // --> o[i + 1] += c - 1 // add carry bit, subtract // b) i == 15: wraps around to index 0 via 2^256 = 38 // --> o[0] += 38 * (c - 1) self.0[(i + 1) * ((i < 15) as usize)] += carry - 1 + 37 * (carry - 1) * ((i == 15) as i64); // get rid of carry bit // TODO: why not get rid of it immediately. kinda clearer self.0[i] -= carry << 16; } } } #[cfg(test)] mod tests { use super::FieldElement; use crate::field::FieldImplementation; use subtle::ConstantTimeEq; #[test] fn test_one_plus_one() { let one = FieldElement::ONE; let two = &one + &one; let expected = FieldElement([2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); // TODO: Implement PartialEq (hopefully in constant time!) assert_eq!(two.0, expected.0); assert!(bool::from(two.ct_eq(&expected))) } #[test] fn test_one_times_zero() { let one = FieldElement::ONE; let zero = FieldElement::ZERO; let result = &one * &zero; // TODO: Implement PartialEq (hopefully in constant time!) 
assert_eq!(result.0, zero.0); assert!(bool::from(result.ct_eq(&zero))) } #[test] fn test_two_times_three_is_six() { let one = FieldElement::ONE; let two = &one + &one; let three = &two + &one; let two_times_three = &two * &three; // no multiplications, just sum up ONEs let six = (1..=6).fold(FieldElement::ZERO, |partial_sum, _| { &partial_sum + &FieldElement::ONE }); assert_eq!(two_times_three.to_bytes(), six.to_bytes()); assert!(bool::from(two_times_three.ct_eq(&six))); } #[test] fn test_negation() { let d2 = FieldElement::D2; let minus_d2 = -&d2; let maybe_zero = &d2 + &minus_d2; assert_eq!(FieldElement::ZERO.to_bytes(), maybe_zero.to_bytes()); } #[test] fn test_inversion() { let d2 = FieldElement::D2; let maybe_inverse = d2.inverse(); let maybe_one = &d2 * &maybe_inverse; assert_eq!(maybe_one.to_bytes(), FieldElement::ONE.to_bytes()); assert!(bool::from(maybe_one.ct_eq(&FieldElement::ONE))); assert_eq!(maybe_one, FieldElement::ONE); } #[test] fn test_imaginary() { let minus_one = -&FieldElement::ONE; let i_squared = &FieldElement::I * &FieldElement::I; assert_eq!(minus_one, i_squared); } #[test] fn test_square_roots() { let two = &FieldElement::ONE + &FieldElement::ONE; // four has Legendre symbol of minus one let four = &two * &two; let sqrt_minus_four = &four.pow2523() * &four; assert_eq!(&sqrt_minus_four * &sqrt_minus_four, -&four); let sqrt_four = &FieldElement::I * &sqrt_minus_four; assert_eq!(&sqrt_four * &sqrt_four, four); let three = &two + &FieldElement::ONE; // nine has Legendre symbol of one let nine = &three * &three; let sqrt_nine = &nine.pow2523() * &nine; assert_eq!(&sqrt_nine * &sqrt_nine, nine); } }
{
    // limb-wise subtraction; any negative limbs are normalized later by `carry`
    for (x, y) in self.0.iter_mut().zip(other.0.iter()) {
        *x -= y;
    }
}
identifier_body
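The multiply-reduce-carry flow above rests on the identity 2^256 = 38 (mod 2^255 - 19): the upper 15 limbs of the 31-limb schoolbook product fold back into the low half with a factor of 38. As for the file's "why twice?" TODO, one carry pass leaves a fresh wrap-around contribution in limb 0 and a second pass settles it, but the result is near-normalized rather than canonical: limb 0 may still overshoot the radix by less than 38, and the final canonicalization is left to to_bytes, which the comments note calls carry three times. A standalone sketch checking this on a worst-case input; all names are local to the example, not the crate's API:

type Limbs16 = [i64; 16];

fn mul_reduce(a: &Limbs16, b: &Limbs16) -> Limbs16 {
    // schoolbook multiply into 31 partial limbs
    let mut pre = [0i64; 31];
    for i in 0..16 {
        for j in 0..16 {
            pre[i + j] += a[i] * b[j];
        }
    }
    // fold the high half down: 2^256 = 38 (mod 2^255 - 19)
    for i in 0..15 {
        pre[i] += 38 * pre[i + 16];
    }
    let mut out = [0i64; 16];
    out.copy_from_slice(&pre[..16]);
    out
}

fn carry(l: &mut Limbs16) {
    for i in 0..16 {
        // bias by 2^16 so `c - 1` also handles slightly negative limbs
        l[i] += 1 << 16;
        let c = l[i] >> 16;
        // limb 15 wraps to limb 0 with factor 38; other limbs carry forward
        l[(i + 1) * ((i < 15) as usize)] += c - 1 + 37 * (c - 1) * ((i == 15) as i64);
        l[i] -= c << 16;
    }
}

fn main() {
    // worst case: every limb at 2^16 - 1
    let a: Limbs16 = [0xffff; 16];
    let mut p = mul_reduce(&a, &a);
    carry(&mut p);
    carry(&mut p);
    // limbs 1..16 are back in [0, 2^16); limb 0 may overshoot by at most 37
    assert!(p[1..].iter().all(|&l| (0..1 << 16).contains(&l)));
    assert!((0..(1 << 16) + 38).contains(&p[0]));
}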
control.rs
use crossbeam_utils::thread::scope; use cursive::{ backend::Backend as CursiveBackend, backends::crossterm, event::Key, traits::Nameable, view::ViewWrapper, views::{LayerPosition, NamedView}, View, }; use cursive::{traits::Resizable, views::ResizedView, Cursive}; use cursive_buffered_backend::BufferedBackend; use dirs::config_dir; use serde::{Deserialize, Serialize}; use crate::{ align::{AlignAlgorithm, AlignMode}, backend::{send_cross_actions, Action, Cross, Dummy}, cursor::CursorState, dialog, doublehex::DoubleHexContext, file::FileState, style::Style, view::{self, Aligned, AlignedMessage}, }; use std::{ error::Error, fs::read_to_string, ops::Range, path::PathBuf, sync::mpsc::{channel, Receiver, Sender}, }; type CursiveCallback = Box<dyn Fn(&mut Cursive) +'static + Send>; /// This is the main loop, here we switch between our custom backend and the cursive backend /// when opening dialog boxes. This is done because initially, the cursive backend was too flickery. /// However, this was fixed by using cursive_buffered_backend, so now this is only a minor optimization. pub fn run(x: FileState, y: FileState) { let mut settings = Settings::from_config().unwrap_or_default(); let digits = x.address_digits().max(y.address_digits()); settings.style.addr_width = digits; let mut hv = HexView::new(x, y); loop { *match hv { HexView::Aligned(ref mut v, _, _) => &mut v.dh.style, HexView::Unaligned(ref mut v) => &mut v.dh.style, } = settings.style; let mut cross = Cross::init(); let (hv_new, quit) = hv.process_cross(&mut cross, &settings); hv = hv_new; cross.uninit(); // the column setting can be changed during the non-dialog, // so we need to keep it updated here settings.style = match &hv { HexView::Aligned(v, _, _) => v.dh.style, HexView::Unaligned(v) => v.dh.style, }; let (hv_new, settings_new) = match quit { DelegateEvent::Quit => break, DelegateEvent::OpenDialog(dia) => hv.show_dialog(dia, settings), _ => (hv, settings), }; hv = hv_new; settings = settings_new; } } #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct Settings { pub algo: AlignAlgorithm, pub style: Style, } impl Settings { fn config_path() -> Result<PathBuf, std::io::Error> { match std::env::var_os("BIODIFF_CONFIG_DIR") { Some(p) => Ok(PathBuf::from(p)), None => match config_dir() { Some(mut p) => { p.push("biodiff"); Ok(p) } None => Err(std::io::Error::new( std::io::ErrorKind::NotFound, "Could not find configuration directory", )), }, } } fn settings_file() -> Result<PathBuf, std::io::Error> { let mut path = Self::config_path()?; path.push("config.json"); Ok(path) } pub fn from_config() -> Option<Self> { let config = read_to_string(Self::settings_file().ok()?).ok()?; serde_json::from_str(&config).ok() } pub fn save_config(&self) -> Result<(), Box<dyn Error +'static>> { let config = serde_json::to_string(self)?; let r = std::fs::create_dir_all(Self::config_path()?); if let Err(ref e) = r { match e.kind() { std::io::ErrorKind::AlreadyExists => (), _ => r?, } } std::fs::write(Self::settings_file()?, config)?; Ok(()) } } /// An enum containing either an aligned or unaligned hexview, without /// a backend for painting. /// The aligned view also contains a channel for messages, as the alignment /// algorithms need to dynamically append/prepend new blocks to the view /// and the crossbeam backend also sends user events over that. 
pub enum HexView { Aligned( view::Aligned, Sender<AlignedMessage>, Receiver<AlignedMessage>, ), Unaligned(view::Unaligned), } impl HexView { /// Creates a new unaligned view from two files with given indexes and cursor /// size 16x16. pub fn new(left: FileState, right: FileState) -> Self { HexView::Unaligned(view::Unaligned::new( left, right, DoubleHexContext::new((16, 16)), )) } /// Turns a hexview into an aligned view using the given algorithm parameters fn into_aligned(self, algo: &AlignAlgorithm, select: [Option<Range<usize>>; 2]) -> HexView { let (send, recv) = channel(); match match self { // first destruct our old hexview into its parts HexView::Aligned(a, send, recv) => { a.destruct().map_err(|a| HexView::Aligned(a, send, recv)) } HexView::Unaligned(u) => u.destruct().map_err(HexView::Unaligned), } { // if the cursor was not placed on any index, we currently do nothing // maybe one could think up some better values to align at here or something Err(hv) => hv, Ok((left, right, mut dh)) => { if matches!(algo.mode, AlignMode::Local | AlignMode::Global) { dh.cursor = CursorState::new((dh.cursor.get_size_x(), dh.cursor.get_size_y())) }; HexView::Aligned( view::Aligned::new(left, right, dh, algo, select, send.clone()), send, recv, ) } } } /// Turns a hexview into an unaligned view at the current cursor fn into_unaligned(self) -> HexView { match self { HexView::Aligned(a, send, recv) => match a.destruct() { Ok((left, right, cursor)) => { HexView::Unaligned(view::Unaligned::new(left, right, cursor)) } Err(a) => HexView::Aligned(a, send, recv), }, // we don't need to change anything for unaligned views HexView::Unaligned(_) => self, } } /// Call the relevant event processing functions for the crossterm backend fn event_proc(&mut self, cross: &mut Cross) -> DelegateEvent { match self { HexView::Aligned(ref mut a, ref mut send, ref mut recv) => { aligned_cross(a, cross, send, recv) } HexView::Unaligned(ref mut u) => unaligned_cross(u, cross), } } fn selection(&self) -> [Option<Range<usize>>; 2] { match self { HexView::Aligned(a, _, _) => a.selection_file_ranges(), HexView::Unaligned(u) => u.selection_file_ranges(), } } /// control loop for crossbeam backend, switches the view between aligned and unaligned when /// requested and runs event loops fn process_cross(self, cross: &mut Cross, settings: &Settings) -> (Self, DelegateEvent) { let mut view = self; let mut quit; let quit_reason = loop { let q = view.event_proc(cross); view = match q { // delegate to top-level control loop DelegateEvent::Quit | DelegateEvent::OpenDialog(_) => { quit = match &mut view { HexView::Aligned(v, _, _) =>!v.process_escape(cross), HexView::Unaligned(v) =>!v.process_escape(cross), } .then_some(q); view } DelegateEvent::SwitchToAlign => { quit = None; let select = view.selection(); view.into_aligned(&settings.algo, select) } DelegateEvent::SwitchToUnalign => { quit = None; view.into_unaligned() } }; if let Some(q) = quit { break q; } }; (view, quit_reason) } /// Setup a cursive instance and shows a dialog constructed through the callback given in `dialog`. /// /// Note that the settings are placed into the user_data of the cursive instace and can be modified /// by the callback. 
fn show_dialog(self, dialog: CursiveCallback, settings: Settings) -> (Self, Settings) { let mut siv = cursive::default(); // this theme is the default theme except that the background color is black siv.set_theme(cursiv_theme()); siv.add_global_callback(Key::Esc, dialog::close_top_maybe_quit); siv.set_user_data(settings); match self { HexView::Aligned(a, send, mut recv) => { siv.add_fullscreen_layer(a.with_name("aligned").full_screen()); let mut sink = siv.cb_sink().clone(); // we create a new thread that converts the `AlignedMessage`s coming from // the alignment threads to callbacks on the cursive instance, so this case // is a bit more complicated than the unaligned one. scope(|s| { let join_handle = s.spawn(|_| cursiv_align_relay(&mut recv, &mut sink)); dialog(&mut siv); siv.try_run_with(|| { // use the buffered backend as it involves way less flickering crossterm::Backend::init() .map(|x| Box::new(BufferedBackend::new(x)) as Box<dyn CursiveBackend>) }) .expect("Could not run"); // misuse the Action::Quit as a signal for the thread to exit send.send(AlignedMessage::UserEvent(Action::Quit)) .expect("Could not tell align relay thread to quit"); join_handle .join() .expect("Could not join align relay thread"); }) .expect("Could not join align relay thread"); // extract the view from the cursive instance match peel_onion(&mut siv) { Some(x) => ( HexView::Aligned(x, send, recv), siv.take_user_data().unwrap(), ), None => panic!("Internal error, could not downcast view"), } } HexView::Unaligned(u) => { siv.add_fullscreen_layer(u.with_name("unaligned").full_screen()); dialog(&mut siv); siv.try_run_with(|| { crossterm::Backend::init() .map(|x| Box::new(BufferedBackend::new(x)) as Box<dyn CursiveBackend>) }) .expect("Could not run"); // extract the view from the cursive instance match peel_onion(&mut siv) { Some(v) => (HexView::Unaligned(v), siv.take_user_data().unwrap()), None => panic!("Internal error, could not downcast view"), } } } } } // this one causes tears to come from my eyes fn peel_onion<V: View>(siv: &mut Cursive) -> Option<V> { siv.screen_mut() .remove_layer(LayerPosition::FromBack(0)) .downcast::<ResizedView<NamedView<V>>>() .ok() .and_then(|view| view.into_inner().ok()) .and_then(|view| view.into_inner().ok()) } /// Default Cursive theme except that the background color is black fn cursiv_theme() -> cursive::theme::Theme { use cursive::theme::{BaseColor::*, Color::*, PaletteColor::*}; let mut cursiv_theme = cursive::theme::load_default(); cursiv_theme.palette[Background] = Dark(Black); cursiv_theme } /// Forwards `AlignedMessage`s from the alignment thread into callbacks for the cursive instance fn cursiv_align_relay(recv: &mut Receiver<AlignedMessage>, sink: &mut cursive::CbSink) { for ev in recv.iter() { match ev { AlignedMessage::UserEvent(Action::Quit) => break, otherwise => { sink.send(Box::new(|siv: &mut Cursive| { siv.call_on_name("aligned", |view: &mut Aligned| { view.process_action(&mut Dummy, otherwise); }) .expect("Could not send new data to view"); })) .expect("Could not send event to view"); } } } } /// This enum is used for delegating actions to higher level event loops. 
enum DelegateEvent { Quit, SwitchToAlign, SwitchToUnalign, OpenDialog(CursiveCallback), } /// Converts an event to a delegation fn delegate_action(action: Action) -> Option<DelegateEvent> { match action { Action::Quit => Some(DelegateEvent::Quit), Action::Align => Some(DelegateEvent::SwitchToAlign), Action::Unalign => Some(DelegateEvent::SwitchToUnalign), Action::Algorithm => Some(DelegateEvent::OpenDialog(Box::new(dialog::settings))), Action::Goto => Some(DelegateEvent::OpenDialog(Box::new(dialog::goto))), Action::Search => Some(DelegateEvent::OpenDialog(Box::new(dialog::search))), Action::SetOffset => Some(DelegateEvent::OpenDialog(Box::new(dialog::set_offset))), Action::Help => Some(DelegateEvent::OpenDialog(Box::new(dialog::help_window( dialog::MAIN_HELP, )))), _otherwise => None, } } /// This function is the one that processes actions sent by the event reader loop /// setup in `unaligned_cross`. Note that the event reader loop has to stay in the same /// thread, so this process is chosen to not be in the main thread instead. fn unaligned_cross_recv( unaligned: &mut view::Unaligned, cross: &mut Cross, recv: Receiver<Action>, ) -> DelegateEvent { unaligned.refresh(cross); for action in recv.iter() { if let Some(q) = delegate_action(action) { return q; } unaligned.process_action(cross, action); } DelegateEvent::Quit } /// This setups the event processing thread for the crossterm backend and reads crossterm's events fn unaligned_cross(unaligned: &mut view::Unaligned, cross: &mut Cross) -> DelegateEvent { unaligned.refresh(cross); let (mut send, recv) = channel(); let mut quit = DelegateEvent::Quit; scope(|s| { // both this thread and the send_cross_actions function determine when to quit by // checking the output of delegate_action, so make sure this is the same let receiver_thread = s.spawn(|_| unaligned_cross_recv(unaligned, cross, recv)); send_cross_actions(|action| delegate_action(action).is_some(), &mut send); quit = receiver_thread.join().unwrap(); }) .unwrap(); quit } /// This function is the one that processes actions sent by the event reader loop /// setup in `aligned_cross`, and also the ones sent by the alignment process. /// Note that the event reader loop has to stay in the same thread, so this /// process is chosen to not be in the main thread instead. fn aligned_cross_recv( aligned: &mut view::Aligned, cross: &mut Cross, recv: &mut Receiver<AlignedMessage>, ) -> DelegateEvent { for msg in recv.iter() { let msg = match msg { AlignedMessage::UserEvent(action) => { if let Some(q) = delegate_action(action) { return q; } msg } _ => msg, }; aligned.process_action(cross, msg); } DelegateEvent::Quit } /// Using the existing message channel (send, recv), setup a thread that /// processes the messages and also read the crossterm events in the main thread. /// The channel should be the same one used when setting up the Aligned view. fn aligned_cross( aligned: &mut view::Aligned, cross: &mut Cross, send: &mut Sender<AlignedMessage>, recv: &mut Receiver<AlignedMessage>, ) -> DelegateEvent { aligned.refresh(cross); let mut quit = DelegateEvent::Quit; scope(|s| { // both the thread and the send_cross_actions function determine when to quit by // checking the output of delegate_action, so make sure this is the same. let receiver_thread = s.spawn(|_| aligned_cross_recv(aligned, cross, recv)); send_cross_actions(|action| delegate_action(action).is_some(), send); quit = receiver_thread.join().unwrap(); }) .unwrap(); quit
}
random_line_split
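Settings persistence above is a plain JSON round-trip with an environment override for the directory. Below is a self-contained sketch of the same shape, assuming serde's derive feature plus the dirs and serde_json crates already imported by this file; MyConfig is a stand-in for Settings, and the env variable mirrors BIODIFF_CONFIG_DIR:

use serde::{Deserialize, Serialize};
use std::path::PathBuf;

#[derive(Debug, Default, Serialize, Deserialize)]
struct MyConfig {
    columns: u16,
}

fn config_dir_with_override() -> Option<PathBuf> {
    // an env var beats the platform default, mirroring BIODIFF_CONFIG_DIR
    std::env::var_os("BIODIFF_CONFIG_DIR")
        .map(PathBuf::from)
        .or_else(|| {
            dirs::config_dir().map(|mut p| {
                p.push("biodiff");
                p
            })
        })
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut path = config_dir_with_override().ok_or("no config dir")?;
    std::fs::create_dir_all(&path)?;
    path.push("config.json");
    std::fs::write(&path, serde_json::to_string(&MyConfig { columns: 16 })?)?;
    let cfg: MyConfig = serde_json::from_str(&std::fs::read_to_string(&path)?)?;
    println!("loaded {:?} from {}", cfg, path.display());
    Ok(())
}

One design note: std::fs::create_dir_all already returns Ok when the directory exists, so the explicit AlreadyExists match in save_config is defensive rather than required.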
control.rs
use crossbeam_utils::thread::scope; use cursive::{ backend::Backend as CursiveBackend, backends::crossterm, event::Key, traits::Nameable, view::ViewWrapper, views::{LayerPosition, NamedView}, View, }; use cursive::{traits::Resizable, views::ResizedView, Cursive}; use cursive_buffered_backend::BufferedBackend; use dirs::config_dir; use serde::{Deserialize, Serialize}; use crate::{ align::{AlignAlgorithm, AlignMode}, backend::{send_cross_actions, Action, Cross, Dummy}, cursor::CursorState, dialog, doublehex::DoubleHexContext, file::FileState, style::Style, view::{self, Aligned, AlignedMessage}, }; use std::{ error::Error, fs::read_to_string, ops::Range, path::PathBuf, sync::mpsc::{channel, Receiver, Sender}, }; type CursiveCallback = Box<dyn Fn(&mut Cursive) +'static + Send>; /// This is the main loop, here we switch between our custom backend and the cursive backend /// when opening dialog boxes. This is done because initially, the cursive backend was too flickery. /// However, this was fixed by using cursive_buffered_backend, so now this is only a minor optimization. pub fn run(x: FileState, y: FileState) { let mut settings = Settings::from_config().unwrap_or_default(); let digits = x.address_digits().max(y.address_digits()); settings.style.addr_width = digits; let mut hv = HexView::new(x, y); loop { *match hv { HexView::Aligned(ref mut v, _, _) => &mut v.dh.style, HexView::Unaligned(ref mut v) => &mut v.dh.style, } = settings.style; let mut cross = Cross::init(); let (hv_new, quit) = hv.process_cross(&mut cross, &settings); hv = hv_new; cross.uninit(); // the column setting can be changed during the non-dialog, // so we need to keep it updated here settings.style = match &hv { HexView::Aligned(v, _, _) => v.dh.style, HexView::Unaligned(v) => v.dh.style, }; let (hv_new, settings_new) = match quit { DelegateEvent::Quit => break, DelegateEvent::OpenDialog(dia) => hv.show_dialog(dia, settings), _ => (hv, settings), }; hv = hv_new; settings = settings_new; } } #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct Settings { pub algo: AlignAlgorithm, pub style: Style, } impl Settings { fn config_path() -> Result<PathBuf, std::io::Error> { match std::env::var_os("BIODIFF_CONFIG_DIR") { Some(p) => Ok(PathBuf::from(p)), None => match config_dir() { Some(mut p) => { p.push("biodiff"); Ok(p) } None => Err(std::io::Error::new( std::io::ErrorKind::NotFound, "Could not find configuration directory", )), }, } } fn settings_file() -> Result<PathBuf, std::io::Error> { let mut path = Self::config_path()?; path.push("config.json"); Ok(path) } pub fn from_config() -> Option<Self> { let config = read_to_string(Self::settings_file().ok()?).ok()?; serde_json::from_str(&config).ok() } pub fn save_config(&self) -> Result<(), Box<dyn Error +'static>> { let config = serde_json::to_string(self)?; let r = std::fs::create_dir_all(Self::config_path()?); if let Err(ref e) = r { match e.kind() { std::io::ErrorKind::AlreadyExists => (), _ => r?, } } std::fs::write(Self::settings_file()?, config)?; Ok(()) } } /// An enum containing either an aligned or unaligned hexview, without /// a backend for painting. /// The aligned view also contains a channel for messages, as the alignment /// algorithms need to dynamically append/prepend new blocks to the view /// and the crossbeam backend also sends user events over that. 
pub enum HexView { Aligned( view::Aligned, Sender<AlignedMessage>, Receiver<AlignedMessage>, ), Unaligned(view::Unaligned), } impl HexView { /// Creates a new unaligned view from two files with given indexes and cursor /// size 16x16. pub fn new(left: FileState, right: FileState) -> Self { HexView::Unaligned(view::Unaligned::new( left, right, DoubleHexContext::new((16, 16)), )) } /// Turns a hexview into an aligned view using the given algorithm parameters fn into_aligned(self, algo: &AlignAlgorithm, select: [Option<Range<usize>>; 2]) -> HexView { let (send, recv) = channel(); match match self { // first destruct our old hexview into its parts HexView::Aligned(a, send, recv) => { a.destruct().map_err(|a| HexView::Aligned(a, send, recv)) } HexView::Unaligned(u) => u.destruct().map_err(HexView::Unaligned), } { // if the cursor was not placed on any index, we currently do nothing // maybe one could think up some better values to align at here or something Err(hv) => hv, Ok((left, right, mut dh)) => { if matches!(algo.mode, AlignMode::Local | AlignMode::Global) { dh.cursor = CursorState::new((dh.cursor.get_size_x(), dh.cursor.get_size_y())) }; HexView::Aligned( view::Aligned::new(left, right, dh, algo, select, send.clone()), send, recv, ) } } } /// Turns a hexview into an unaligned view at the current cursor fn into_unaligned(self) -> HexView { match self { HexView::Aligned(a, send, recv) => match a.destruct() { Ok((left, right, cursor)) => { HexView::Unaligned(view::Unaligned::new(left, right, cursor)) } Err(a) => HexView::Aligned(a, send, recv), }, // we don't need to change anything for unaligned views HexView::Unaligned(_) => self, } } /// Call the relevant event processing functions for the crossterm backend fn event_proc(&mut self, cross: &mut Cross) -> DelegateEvent { match self { HexView::Aligned(ref mut a, ref mut send, ref mut recv) => { aligned_cross(a, cross, send, recv) } HexView::Unaligned(ref mut u) => unaligned_cross(u, cross), } } fn selection(&self) -> [Option<Range<usize>>; 2] { match self { HexView::Aligned(a, _, _) => a.selection_file_ranges(), HexView::Unaligned(u) => u.selection_file_ranges(), } } /// control loop for crossbeam backend, switches the view between aligned and unaligned when /// requested and runs event loops fn process_cross(self, cross: &mut Cross, settings: &Settings) -> (Self, DelegateEvent) { let mut view = self; let mut quit; let quit_reason = loop { let q = view.event_proc(cross); view = match q { // delegate to top-level control loop DelegateEvent::Quit | DelegateEvent::OpenDialog(_) => { quit = match &mut view { HexView::Aligned(v, _, _) =>!v.process_escape(cross), HexView::Unaligned(v) =>!v.process_escape(cross), } .then_some(q); view } DelegateEvent::SwitchToAlign => { quit = None; let select = view.selection(); view.into_aligned(&settings.algo, select) } DelegateEvent::SwitchToUnalign => { quit = None; view.into_unaligned() } }; if let Some(q) = quit { break q; } }; (view, quit_reason) } /// Setup a cursive instance and shows a dialog constructed through the callback given in `dialog`. /// /// Note that the settings are placed into the user_data of the cursive instace and can be modified /// by the callback. 
fn show_dialog(self, dialog: CursiveCallback, settings: Settings) -> (Self, Settings) { let mut siv = cursive::default(); // this theme is the default theme except that the background color is black siv.set_theme(cursiv_theme()); siv.add_global_callback(Key::Esc, dialog::close_top_maybe_quit); siv.set_user_data(settings); match self { HexView::Aligned(a, send, mut recv) => { siv.add_fullscreen_layer(a.with_name("aligned").full_screen()); let mut sink = siv.cb_sink().clone(); // we create a new thread that converts the `AlignedMessage`s coming from // the alignment threads to callbacks on the cursive instance, so this case // is a bit more complicated than the unaligned one. scope(|s| { let join_handle = s.spawn(|_| cursiv_align_relay(&mut recv, &mut sink)); dialog(&mut siv); siv.try_run_with(|| { // use the buffered backend as it involves way less flickering crossterm::Backend::init() .map(|x| Box::new(BufferedBackend::new(x)) as Box<dyn CursiveBackend>) }) .expect("Could not run"); // misuse the Action::Quit as a signal for the thread to exit send.send(AlignedMessage::UserEvent(Action::Quit)) .expect("Could not tell align relay thread to quit"); join_handle .join() .expect("Could not join align relay thread"); }) .expect("Could not join align relay thread"); // extract the view from the cursive instance match peel_onion(&mut siv) { Some(x) => ( HexView::Aligned(x, send, recv), siv.take_user_data().unwrap(), ), None => panic!("Internal error, could not downcast view"), } } HexView::Unaligned(u) => { siv.add_fullscreen_layer(u.with_name("unaligned").full_screen()); dialog(&mut siv); siv.try_run_with(|| { crossterm::Backend::init() .map(|x| Box::new(BufferedBackend::new(x)) as Box<dyn CursiveBackend>) }) .expect("Could not run"); // extract the view from the cursive instance match peel_onion(&mut siv) { Some(v) => (HexView::Unaligned(v), siv.take_user_data().unwrap()), None => panic!("Internal error, could not downcast view"), } } } } } // this one causes tears to come from my eyes fn peel_onion<V: View>(siv: &mut Cursive) -> Option<V> { siv.screen_mut() .remove_layer(LayerPosition::FromBack(0)) .downcast::<ResizedView<NamedView<V>>>() .ok() .and_then(|view| view.into_inner().ok()) .and_then(|view| view.into_inner().ok()) } /// Default Cursive theme except that the background color is black fn cursiv_theme() -> cursive::theme::Theme { use cursive::theme::{BaseColor::*, Color::*, PaletteColor::*}; let mut cursiv_theme = cursive::theme::load_default(); cursiv_theme.palette[Background] = Dark(Black); cursiv_theme } /// Forwards `AlignedMessage`s from the alignment thread into callbacks for the cursive instance fn cursiv_align_relay(recv: &mut Receiver<AlignedMessage>, sink: &mut cursive::CbSink) { for ev in recv.iter() { match ev { AlignedMessage::UserEvent(Action::Quit) => break, otherwise =>
} } } /// This enum is used for delegating actions to higher level event loops. enum DelegateEvent { Quit, SwitchToAlign, SwitchToUnalign, OpenDialog(CursiveCallback), } /// Converts an event to a delegation fn delegate_action(action: Action) -> Option<DelegateEvent> { match action { Action::Quit => Some(DelegateEvent::Quit), Action::Align => Some(DelegateEvent::SwitchToAlign), Action::Unalign => Some(DelegateEvent::SwitchToUnalign), Action::Algorithm => Some(DelegateEvent::OpenDialog(Box::new(dialog::settings))), Action::Goto => Some(DelegateEvent::OpenDialog(Box::new(dialog::goto))), Action::Search => Some(DelegateEvent::OpenDialog(Box::new(dialog::search))), Action::SetOffset => Some(DelegateEvent::OpenDialog(Box::new(dialog::set_offset))), Action::Help => Some(DelegateEvent::OpenDialog(Box::new(dialog::help_window( dialog::MAIN_HELP, )))), _otherwise => None, } } /// This function is the one that processes actions sent by the event reader loop /// setup in `unaligned_cross`. Note that the event reader loop has to stay in the same /// thread, so this process is chosen to not be in the main thread instead. fn unaligned_cross_recv( unaligned: &mut view::Unaligned, cross: &mut Cross, recv: Receiver<Action>, ) -> DelegateEvent { unaligned.refresh(cross); for action in recv.iter() { if let Some(q) = delegate_action(action) { return q; } unaligned.process_action(cross, action); } DelegateEvent::Quit } /// This setups the event processing thread for the crossterm backend and reads crossterm's events fn unaligned_cross(unaligned: &mut view::Unaligned, cross: &mut Cross) -> DelegateEvent { unaligned.refresh(cross); let (mut send, recv) = channel(); let mut quit = DelegateEvent::Quit; scope(|s| { // both this thread and the send_cross_actions function determine when to quit by // checking the output of delegate_action, so make sure this is the same let receiver_thread = s.spawn(|_| unaligned_cross_recv(unaligned, cross, recv)); send_cross_actions(|action| delegate_action(action).is_some(), &mut send); quit = receiver_thread.join().unwrap(); }) .unwrap(); quit } /// This function is the one that processes actions sent by the event reader loop /// setup in `aligned_cross`, and also the ones sent by the alignment process. /// Note that the event reader loop has to stay in the same thread, so this /// process is chosen to not be in the main thread instead. fn aligned_cross_recv( aligned: &mut view::Aligned, cross: &mut Cross, recv: &mut Receiver<AlignedMessage>, ) -> DelegateEvent { for msg in recv.iter() { let msg = match msg { AlignedMessage::UserEvent(action) => { if let Some(q) = delegate_action(action) { return q; } msg } _ => msg, }; aligned.process_action(cross, msg); } DelegateEvent::Quit } /// Using the existing message channel (send, recv), setup a thread that /// processes the messages and also read the crossterm events in the main thread. /// The channel should be the same one used when setting up the Aligned view. fn aligned_cross( aligned: &mut view::Aligned, cross: &mut Cross, send: &mut Sender<AlignedMessage>, recv: &mut Receiver<AlignedMessage>, ) -> DelegateEvent { aligned.refresh(cross); let mut quit = DelegateEvent::Quit; scope(|s| { // both the thread and the send_cross_actions function determine when to quit by // checking the output of delegate_action, so make sure this is the same. 
let receiver_thread = s.spawn(|_| aligned_cross_recv(aligned, cross, recv)); send_cross_actions(|action| delegate_action(action).is_some(), send); quit = receiver_thread.join().unwrap(); }) .unwrap(); quit }
{
    // `move` is needed so the callback takes ownership of `otherwise`;
    // a borrowing closure could not satisfy the 'static bound on the
    // boxed callbacks that the cursive sink expects.
    sink.send(Box::new(move |siv: &mut Cursive| {
        siv.call_on_name("aligned", |view: &mut Aligned| {
            view.process_action(&mut Dummy, otherwise);
        })
        .expect("Could not send new data to view");
    }))
    .expect("Could not send event to view");
}
conditional_block
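The arm filled in above is an instance of a general relay pattern: a worker thread drains domain messages and re-posts them to the UI thread as boxed FnOnce callbacks, with a sentinel message (Action::Quit here) to stop the relay. A std-only sketch under that reading, with Ui standing in for Cursive and Msg for AlignedMessage:

use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

enum Msg {
    Data(u64),
    Quit,
}

struct Ui {
    total: u64,
}

type Callback = Box<dyn FnOnce(&mut Ui) + Send>;

fn relay(recv: Receiver<Msg>, sink: Sender<Callback>) {
    for msg in recv.iter() {
        match msg {
            Msg::Quit => break, // sentinel: shut the relay down
            Msg::Data(n) => {
                // `move` so the callback owns `n` past this iteration
                sink.send(Box::new(move |ui: &mut Ui| ui.total += n)).unwrap();
            }
        }
    }
}

fn main() {
    let (msg_tx, msg_rx) = channel();
    let (cb_tx, cb_rx) = channel::<Callback>();
    let handle = thread::spawn(move || relay(msg_rx, cb_tx));
    for n in 1..=3 {
        msg_tx.send(Msg::Data(n)).unwrap();
    }
    msg_tx.send(Msg::Quit).unwrap();
    handle.join().unwrap();
    // the "UI thread": drain the callbacks and apply them
    let mut ui = Ui { total: 0 };
    for cb in cb_rx.iter() {
        cb(&mut ui);
    }
    assert_eq!(ui.total, 6);
}

The sentinel doubles as the shutdown protocol: the sender never closes the channel explicitly, it just posts Quit, exactly like the Action::Quit "misuse" called out in show_dialog.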
control.rs
use crossbeam_utils::thread::scope; use cursive::{ backend::Backend as CursiveBackend, backends::crossterm, event::Key, traits::Nameable, view::ViewWrapper, views::{LayerPosition, NamedView}, View, }; use cursive::{traits::Resizable, views::ResizedView, Cursive}; use cursive_buffered_backend::BufferedBackend; use dirs::config_dir; use serde::{Deserialize, Serialize}; use crate::{ align::{AlignAlgorithm, AlignMode}, backend::{send_cross_actions, Action, Cross, Dummy}, cursor::CursorState, dialog, doublehex::DoubleHexContext, file::FileState, style::Style, view::{self, Aligned, AlignedMessage}, }; use std::{ error::Error, fs::read_to_string, ops::Range, path::PathBuf, sync::mpsc::{channel, Receiver, Sender}, }; type CursiveCallback = Box<dyn Fn(&mut Cursive) +'static + Send>; /// This is the main loop, here we switch between our custom backend and the cursive backend /// when opening dialog boxes. This is done because initially, the cursive backend was too flickery. /// However, this was fixed by using cursive_buffered_backend, so now this is only a minor optimization. pub fn run(x: FileState, y: FileState) { let mut settings = Settings::from_config().unwrap_or_default(); let digits = x.address_digits().max(y.address_digits()); settings.style.addr_width = digits; let mut hv = HexView::new(x, y); loop { *match hv { HexView::Aligned(ref mut v, _, _) => &mut v.dh.style, HexView::Unaligned(ref mut v) => &mut v.dh.style, } = settings.style; let mut cross = Cross::init(); let (hv_new, quit) = hv.process_cross(&mut cross, &settings); hv = hv_new; cross.uninit(); // the column setting can be changed during the non-dialog, // so we need to keep it updated here settings.style = match &hv { HexView::Aligned(v, _, _) => v.dh.style, HexView::Unaligned(v) => v.dh.style, }; let (hv_new, settings_new) = match quit { DelegateEvent::Quit => break, DelegateEvent::OpenDialog(dia) => hv.show_dialog(dia, settings), _ => (hv, settings), }; hv = hv_new; settings = settings_new; } } #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct Settings { pub algo: AlignAlgorithm, pub style: Style, } impl Settings { fn config_path() -> Result<PathBuf, std::io::Error> { match std::env::var_os("BIODIFF_CONFIG_DIR") { Some(p) => Ok(PathBuf::from(p)), None => match config_dir() { Some(mut p) => { p.push("biodiff"); Ok(p) } None => Err(std::io::Error::new( std::io::ErrorKind::NotFound, "Could not find configuration directory", )), }, } } fn settings_file() -> Result<PathBuf, std::io::Error> { let mut path = Self::config_path()?; path.push("config.json"); Ok(path) } pub fn from_config() -> Option<Self> { let config = read_to_string(Self::settings_file().ok()?).ok()?; serde_json::from_str(&config).ok() } pub fn save_config(&self) -> Result<(), Box<dyn Error +'static>> { let config = serde_json::to_string(self)?; let r = std::fs::create_dir_all(Self::config_path()?); if let Err(ref e) = r { match e.kind() { std::io::ErrorKind::AlreadyExists => (), _ => r?, } } std::fs::write(Self::settings_file()?, config)?; Ok(()) } } /// An enum containing either an aligned or unaligned hexview, without /// a backend for painting. /// The aligned view also contains a channel for messages, as the alignment /// algorithms need to dynamically append/prepend new blocks to the view /// and the crossbeam backend also sends user events over that. 
pub enum HexView { Aligned( view::Aligned, Sender<AlignedMessage>, Receiver<AlignedMessage>, ), Unaligned(view::Unaligned), } impl HexView { /// Creates a new unaligned view from two files with given indexes and cursor /// size 16x16. pub fn new(left: FileState, right: FileState) -> Self { HexView::Unaligned(view::Unaligned::new( left, right, DoubleHexContext::new((16, 16)), )) } /// Turns a hexview into an aligned view using the given algorithm parameters fn into_aligned(self, algo: &AlignAlgorithm, select: [Option<Range<usize>>; 2]) -> HexView { let (send, recv) = channel(); match match self { // first destruct our old hexview into its parts HexView::Aligned(a, send, recv) => { a.destruct().map_err(|a| HexView::Aligned(a, send, recv)) } HexView::Unaligned(u) => u.destruct().map_err(HexView::Unaligned), } { // if the cursor was not placed on any index, we currently do nothing // maybe one could think up some better values to align at here or something Err(hv) => hv, Ok((left, right, mut dh)) => { if matches!(algo.mode, AlignMode::Local | AlignMode::Global) { dh.cursor = CursorState::new((dh.cursor.get_size_x(), dh.cursor.get_size_y())) }; HexView::Aligned( view::Aligned::new(left, right, dh, algo, select, send.clone()), send, recv, ) } } } /// Turns a hexview into an unaligned view at the current cursor fn into_unaligned(self) -> HexView { match self { HexView::Aligned(a, send, recv) => match a.destruct() { Ok((left, right, cursor)) => { HexView::Unaligned(view::Unaligned::new(left, right, cursor)) } Err(a) => HexView::Aligned(a, send, recv), }, // we don't need to change anything for unaligned views HexView::Unaligned(_) => self, } } /// Call the relevant event processing functions for the crossterm backend fn event_proc(&mut self, cross: &mut Cross) -> DelegateEvent { match self { HexView::Aligned(ref mut a, ref mut send, ref mut recv) => { aligned_cross(a, cross, send, recv) } HexView::Unaligned(ref mut u) => unaligned_cross(u, cross), } } fn selection(&self) -> [Option<Range<usize>>; 2] { match self { HexView::Aligned(a, _, _) => a.selection_file_ranges(), HexView::Unaligned(u) => u.selection_file_ranges(), } } /// control loop for crossbeam backend, switches the view between aligned and unaligned when /// requested and runs event loops fn process_cross(self, cross: &mut Cross, settings: &Settings) -> (Self, DelegateEvent) { let mut view = self; let mut quit; let quit_reason = loop { let q = view.event_proc(cross); view = match q { // delegate to top-level control loop DelegateEvent::Quit | DelegateEvent::OpenDialog(_) => { quit = match &mut view { HexView::Aligned(v, _, _) =>!v.process_escape(cross), HexView::Unaligned(v) =>!v.process_escape(cross), } .then_some(q); view } DelegateEvent::SwitchToAlign => { quit = None; let select = view.selection(); view.into_aligned(&settings.algo, select) } DelegateEvent::SwitchToUnalign => { quit = None; view.into_unaligned() } }; if let Some(q) = quit { break q; } }; (view, quit_reason) } /// Setup a cursive instance and shows a dialog constructed through the callback given in `dialog`. /// /// Note that the settings are placed into the user_data of the cursive instace and can be modified /// by the callback. 
fn show_dialog(self, dialog: CursiveCallback, settings: Settings) -> (Self, Settings) { let mut siv = cursive::default(); // this theme is the default theme except that the background color is black siv.set_theme(cursiv_theme()); siv.add_global_callback(Key::Esc, dialog::close_top_maybe_quit); siv.set_user_data(settings); match self { HexView::Aligned(a, send, mut recv) => { siv.add_fullscreen_layer(a.with_name("aligned").full_screen()); let mut sink = siv.cb_sink().clone(); // we create a new thread that converts the `AlignedMessage`s coming from // the alignment threads to callbacks on the cursive instance, so this case // is a bit more complicated than the unaligned one. scope(|s| { let join_handle = s.spawn(|_| cursiv_align_relay(&mut recv, &mut sink)); dialog(&mut siv); siv.try_run_with(|| { // use the buffered backend as it involves way less flickering crossterm::Backend::init() .map(|x| Box::new(BufferedBackend::new(x)) as Box<dyn CursiveBackend>) }) .expect("Could not run"); // misuse the Action::Quit as a signal for the thread to exit send.send(AlignedMessage::UserEvent(Action::Quit)) .expect("Could not tell align relay thread to quit"); join_handle .join() .expect("Could not join align relay thread"); }) .expect("Could not join align relay thread"); // extract the view from the cursive instance match peel_onion(&mut siv) { Some(x) => ( HexView::Aligned(x, send, recv), siv.take_user_data().unwrap(), ), None => panic!("Internal error, could not downcast view"), } } HexView::Unaligned(u) => { siv.add_fullscreen_layer(u.with_name("unaligned").full_screen()); dialog(&mut siv); siv.try_run_with(|| { crossterm::Backend::init() .map(|x| Box::new(BufferedBackend::new(x)) as Box<dyn CursiveBackend>) }) .expect("Could not run"); // extract the view from the cursive instance match peel_onion(&mut siv) { Some(v) => (HexView::Unaligned(v), siv.take_user_data().unwrap()), None => panic!("Internal error, could not downcast view"), } } } } } // this one causes tears to come from my eyes fn peel_onion<V: View>(siv: &mut Cursive) -> Option<V> { siv.screen_mut() .remove_layer(LayerPosition::FromBack(0)) .downcast::<ResizedView<NamedView<V>>>() .ok() .and_then(|view| view.into_inner().ok()) .and_then(|view| view.into_inner().ok()) } /// Default Cursive theme except that the background color is black fn
() -> cursive::theme::Theme { use cursive::theme::{BaseColor::*, Color::*, PaletteColor::*}; let mut cursiv_theme = cursive::theme::load_default(); cursiv_theme.palette[Background] = Dark(Black); cursiv_theme } /// Forwards `AlignedMessage`s from the alignment thread into callbacks for the cursive instance fn cursiv_align_relay(recv: &mut Receiver<AlignedMessage>, sink: &mut cursive::CbSink) { for ev in recv.iter() { match ev { AlignedMessage::UserEvent(Action::Quit) => break, otherwise => { sink.send(Box::new(|siv: &mut Cursive| { siv.call_on_name("aligned", |view: &mut Aligned| { view.process_action(&mut Dummy, otherwise); }) .expect("Could not send new data to view"); })) .expect("Could not send event to view"); } } } } /// This enum is used for delegating actions to higher level event loops. enum DelegateEvent { Quit, SwitchToAlign, SwitchToUnalign, OpenDialog(CursiveCallback), } /// Converts an event to a delegation fn delegate_action(action: Action) -> Option<DelegateEvent> { match action { Action::Quit => Some(DelegateEvent::Quit), Action::Align => Some(DelegateEvent::SwitchToAlign), Action::Unalign => Some(DelegateEvent::SwitchToUnalign), Action::Algorithm => Some(DelegateEvent::OpenDialog(Box::new(dialog::settings))), Action::Goto => Some(DelegateEvent::OpenDialog(Box::new(dialog::goto))), Action::Search => Some(DelegateEvent::OpenDialog(Box::new(dialog::search))), Action::SetOffset => Some(DelegateEvent::OpenDialog(Box::new(dialog::set_offset))), Action::Help => Some(DelegateEvent::OpenDialog(Box::new(dialog::help_window( dialog::MAIN_HELP, )))), _otherwise => None, } } /// This function is the one that processes actions sent by the event reader loop /// setup in `unaligned_cross`. Note that the event reader loop has to stay in the same /// thread, so this process is chosen to not be in the main thread instead. fn unaligned_cross_recv( unaligned: &mut view::Unaligned, cross: &mut Cross, recv: Receiver<Action>, ) -> DelegateEvent { unaligned.refresh(cross); for action in recv.iter() { if let Some(q) = delegate_action(action) { return q; } unaligned.process_action(cross, action); } DelegateEvent::Quit } /// This setups the event processing thread for the crossterm backend and reads crossterm's events fn unaligned_cross(unaligned: &mut view::Unaligned, cross: &mut Cross) -> DelegateEvent { unaligned.refresh(cross); let (mut send, recv) = channel(); let mut quit = DelegateEvent::Quit; scope(|s| { // both this thread and the send_cross_actions function determine when to quit by // checking the output of delegate_action, so make sure this is the same let receiver_thread = s.spawn(|_| unaligned_cross_recv(unaligned, cross, recv)); send_cross_actions(|action| delegate_action(action).is_some(), &mut send); quit = receiver_thread.join().unwrap(); }) .unwrap(); quit } /// This function is the one that processes actions sent by the event reader loop /// setup in `aligned_cross`, and also the ones sent by the alignment process. /// Note that the event reader loop has to stay in the same thread, so this /// process is chosen to not be in the main thread instead. 
fn aligned_cross_recv( aligned: &mut view::Aligned, cross: &mut Cross, recv: &mut Receiver<AlignedMessage>, ) -> DelegateEvent { for msg in recv.iter() { let msg = match msg { AlignedMessage::UserEvent(action) => { if let Some(q) = delegate_action(action) { return q; } msg } _ => msg, }; aligned.process_action(cross, msg); } DelegateEvent::Quit } /// Using the existing message channel (send, recv), setup a thread that /// processes the messages and also read the crossterm events in the main thread. /// The channel should be the same one used when setting up the Aligned view. fn aligned_cross( aligned: &mut view::Aligned, cross: &mut Cross, send: &mut Sender<AlignedMessage>, recv: &mut Receiver<AlignedMessage>, ) -> DelegateEvent { aligned.refresh(cross); let mut quit = DelegateEvent::Quit; scope(|s| { // both the thread and the send_cross_actions function determine when to quit by // checking the output of delegate_action, so make sure this is the same. let receiver_thread = s.spawn(|_| aligned_cross_recv(aligned, cross, recv)); send_cross_actions(|action| delegate_action(action).is_some(), send); quit = receiver_thread.join().unwrap(); }) .unwrap(); quit }
cursiv_theme
identifier_name
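unaligned_cross and aligned_cross both rely on crossbeam's scoped threads so that the receiver thread can borrow the view mutably while the calling thread keeps reading terminal events; the scope guarantees the borrow ends before the function returns. A minimal sketch of that shape, with a Vec standing in for the view and integers for events:

use crossbeam_utils::thread::scope;
use std::sync::mpsc::channel;

fn main() {
    let mut state = Vec::new(); // stands in for the view being updated
    let (send, recv) = channel();
    scope(|s| {
        // the spawned thread borrows `state` for the scope's duration only
        let handle = s.spawn(|_| {
            for ev in recv.iter() {
                state.push(ev);
            }
        });
        for ev in 0..3 {
            send.send(ev).unwrap();
        }
        drop(send); // close the channel so the receiver's loop ends
        handle.join().unwrap();
    })
    .unwrap();
    assert_eq!(state, vec![0, 1, 2]);
}

In the real functions the receiver instead returns as soon as delegate_action produces a delegation, so the channel never needs to close.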
push_active_set.rs
use { crate::weighted_shuffle::WeightedShuffle, indexmap::IndexMap, rand::Rng, solana_bloom::bloom::{AtomicBloom, Bloom}, solana_sdk::{native_token::LAMPORTS_PER_SOL, pubkey::Pubkey}, std::collections::HashMap, }; const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25; // Each entry corresponds to a stake bucket for // min stake of { this node, crds value owner } // The entry represents set of gossip nodes to actively // push to for crds values belonging to the bucket. #[derive(Default)] pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES]); // Keys are gossip nodes to push messages to. // Values are which origins the node has pruned. #[derive(Default)] struct PushActiveSetEntry(IndexMap</*node:*/ Pubkey, /*origins:*/ AtomicBloom<Pubkey>>); impl PushActiveSet { #[cfg(debug_assertions)] const MIN_NUM_BLOOM_ITEMS: usize = 512; #[cfg(not(debug_assertions))] const MIN_NUM_BLOOM_ITEMS: usize = crate::cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY; pub(crate) fn get_nodes<'a>( &'a self, pubkey: &Pubkey, // This node. origin: &'a Pubkey, // CRDS value owner. // If true forces gossip push even if the node has pruned the origin. should_force_push: impl FnMut(&Pubkey) -> bool + 'a, stakes: &HashMap<Pubkey, u64>, ) -> impl Iterator<Item = &Pubkey> + 'a { let stake = stakes.get(pubkey).min(stakes.get(origin)); self.get_entry(stake).get_nodes(origin, should_force_push) } // Prunes origins for the given gossip node. // We will stop pushing messages from the specified origins to the node. pub(crate) fn prune( &self, pubkey: &Pubkey, // This node. node: &Pubkey, // Gossip node. origins: &[Pubkey], // CRDS value owners. stakes: &HashMap<Pubkey, u64>, ) { let stake = stakes.get(pubkey); for origin in origins { if origin == pubkey { continue; } let stake = stake.min(stakes.get(origin)); self.get_entry(stake).prune(node, origin) } } pub(crate) fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain in each active-set entry. cluster_size: usize, // Gossip nodes to be sampled for each push active set. nodes: &[Pubkey], stakes: &HashMap<Pubkey, u64>, ) { let num_bloom_filter_items = cluster_size.max(Self::MIN_NUM_BLOOM_ITEMS); // Active set of nodes to push to are sampled from these gossip nodes, // using sampling probabilities obtained from the stake bucket of each // node. let buckets: Vec<_> = nodes .iter() .map(|node| get_stake_bucket(stakes.get(node))) .collect(); // (k, entry) represents push active set where the stake bucket of // min stake of {this node, crds value owner} // is equal to `k`. The `entry` maintains set of gossip nodes to // actively push to for crds values belonging to this bucket. for (k, entry) in self.0.iter_mut().enumerate() { let weights: Vec<u64> = buckets .iter() .map(|&bucket| { // bucket <- get_stake_bucket(min stake of { // this node, crds value owner and gossip peer // }) // weight <- (bucket + 1)^2 // min stake of {...} is a proxy for how much we care about // the link, and tries to mirror similar logic on the // receiving end when pruning incoming links: // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105 let bucket = bucket.min(k) as u64; bucket.saturating_add(1).saturating_pow(2) }) .collect(); entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); } } fn get_entry(&self, stake: Option<&u64>) -> &PushActiveSetEntry
} impl PushActiveSetEntry { const BLOOM_FALSE_RATE: f64 = 0.1; const BLOOM_MAX_BITS: usize = 1024 * 8 * 4; fn get_nodes<'a>( &'a self, origin: &'a Pubkey, // If true forces gossip push even if the node has pruned the origin. mut should_force_push: impl FnMut(&Pubkey) -> bool + 'a, ) -> impl Iterator<Item = &Pubkey> + 'a { self.0 .iter() .filter(move |(node, bloom_filter)| { !bloom_filter.contains(origin) || should_force_push(node) }) .map(|(node, _bloom_filter)| node) } fn prune( &self, node: &Pubkey, // Gossip node. origin: &Pubkey, // CRDS value owner ) { if let Some(bloom_filter) = self.0.get(node) { bloom_filter.add(origin); } } fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain. num_bloom_filter_items: usize, nodes: &[Pubkey], weights: &[u64], ) { debug_assert_eq!(nodes.len(), weights.len()); debug_assert!(weights.iter().all(|&weight| weight!= 0u64)); let shuffle = WeightedShuffle::new("rotate-active-set", weights).shuffle(rng); for node in shuffle.map(|k| &nodes[k]) { // We intend to discard the oldest/first entry in the index-map. if self.0.len() > size { break; } if self.0.contains_key(node) { continue; } let bloom = AtomicBloom::from(Bloom::random( num_bloom_filter_items, Self::BLOOM_FALSE_RATE, Self::BLOOM_MAX_BITS, )); bloom.add(node); self.0.insert(*node, bloom); } // Drop the oldest entry while preserving the ordering of others. while self.0.len() > size { self.0.shift_remove_index(0); } } } // Maps stake to bucket index. fn get_stake_bucket(stake: Option<&u64>) -> usize { let stake = stake.copied().unwrap_or_default() / LAMPORTS_PER_SOL; let bucket = u64::BITS - stake.leading_zeros(); (bucket as usize).min(NUM_PUSH_ACTIVE_SET_ENTRIES - 1) } #[cfg(test)] mod tests { use {super::*, rand::SeedableRng, rand_chacha::ChaChaRng, std::iter::repeat_with}; #[test] fn test_get_stake_bucket() { assert_eq!(get_stake_bucket(None), 0); let buckets = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5]; for (k, bucket) in buckets.into_iter().enumerate() { let stake = (k as u64) * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } for (stake, bucket) in [ (4_194_303, 22), (4_194_304, 23), (8_388_607, 23), (8_388_608, 24), ] { let stake = stake * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } assert_eq!( get_stake_bucket(Some(&u64::MAX)), NUM_PUSH_ACTIVE_SET_ENTRIES - 1 ); } #[test] fn test_push_active_set() { const CLUSTER_SIZE: usize = 117; const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL; let mut rng = ChaChaRng::from_seed([189u8; 32]); let pubkey = Pubkey::new_unique(); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE)); let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect(); stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE)); let mut active_set = PushActiveSet::default(); assert!(active_set.0.iter().all(|entry| entry.0.is_empty())); active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 5)); // Assert that for all entries, each filter already prunes the key. 
for entry in &active_set.0 { for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } } let other = &nodes[5]; let origin = &nodes[17]; assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 5, 18, 16, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.prune(&pubkey, &nodes[5], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[3], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[16], &[*origin], &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 18, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 7)); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([18, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([18, 16, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); let origins = [*origin, *other]; active_set.prune(&pubkey, &nodes[18], &origins, &stakes); active_set.prune(&pubkey, &nodes[0], &origins, &stakes); active_set.prune(&pubkey, &nodes[15], &origins, &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([7, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([16, 7, 11].into_iter().map(|k| &nodes[k]))); } #[test] fn test_push_active_set_entry() { const NUM_BLOOM_FILTER_ITEMS: usize = 100; let mut rng = ChaChaRng::from_seed([147u8; 32]); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let weights: Vec<_> = repeat_with(|| rng.gen_range(1..1000)).take(20).collect(); let mut entry = PushActiveSetEntry::default(); entry.rotate( &mut rng, 5, // size NUM_BLOOM_FILTER_ITEMS, &nodes, &weights, ); assert_eq!(entry.0.len(), 5); let keys = [&nodes[16], &nodes[11], &nodes[17], &nodes[14], &nodes[5]]; assert!(entry.0.keys().eq(keys)); for origin in &nodes { if!keys.contains(&origin) { assert!(entry.get_nodes(origin, |_| false).eq(keys)); } else { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&key| key!= origin))); } } // Assert that each filter already prunes the key. for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } for origin in keys { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&node| node!= origin))); } // Assert that prune excludes node from get. let origin = &nodes[3]; entry.prune(&nodes[11], origin); entry.prune(&nodes[14], origin); entry.prune(&nodes[19], origin); assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry.get_nodes(origin, |_| false).eq(keys .into_iter() .filter(|&&node| node!= nodes[11] && node!= nodes[14]))); // Assert that rotate adds new nodes. 
entry.rotate(&mut rng, 5, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[11], &nodes[17], &nodes[14], &nodes[5], &nodes[7]]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 6, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [ &nodes[17], &nodes[14], &nodes[5], &nodes[7], &nodes[1], &nodes[13], ]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 4, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[5], &nodes[7], &nodes[1], &nodes[13]]; assert!(entry.0.keys().eq(keys)); } }
{
    // one entry per stake bucket; pick the one for this (min) stake
    &self.0[get_stake_bucket(stake)]
}
identifier_body
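get_stake_bucket, defined near the end of the file, maps a stake to the bit length of its whole-SOL amount, capped at the last entry index; that is what makes the (bucket + 1)^2 weights in rotate grow with the square of the log of stake. A standalone sketch of just the bucketing math, mirroring the unit tests further down:

const NUM_ENTRIES: usize = 25;
const LAMPORTS_PER_SOL: u64 = 1_000_000_000;

fn stake_bucket(stake_lamports: u64) -> usize {
    let sol = stake_lamports / LAMPORTS_PER_SOL;
    // bit length: 0 SOL -> 0, 1 SOL -> 1, 2..=3 -> 2, 4..=7 -> 3, ...
    let bucket = (u64::BITS - sol.leading_zeros()) as usize;
    bucket.min(NUM_ENTRIES - 1)
}

fn main() {
    assert_eq!(stake_bucket(0), 0);
    assert_eq!(stake_bucket(LAMPORTS_PER_SOL), 1);
    assert_eq!(stake_bucket(3 * LAMPORTS_PER_SOL), 2);
    assert_eq!(stake_bucket(8 * LAMPORTS_PER_SOL), 4);
    // everything from 2^24 SOL upward lands in the final bucket
    assert_eq!(stake_bucket(u64::MAX), NUM_ENTRIES - 1);
}

With buckets in 0..=24, the (bucket.min(k) + 1)^2 weights in rotate span 1 to 625, so high-stake peers are strongly but not overwhelmingly favored.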
push_active_set.rs
use { crate::weighted_shuffle::WeightedShuffle, indexmap::IndexMap, rand::Rng, solana_bloom::bloom::{AtomicBloom, Bloom}, solana_sdk::{native_token::LAMPORTS_PER_SOL, pubkey::Pubkey}, std::collections::HashMap, }; const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25; // Each entry corresponds to a stake bucket for // min stake of { this node, crds value owner } // The entry represents set of gossip nodes to actively // push to for crds values belonging to the bucket. #[derive(Default)] pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES]); // Keys are gossip nodes to push messages to. // Values are which origins the node has pruned. #[derive(Default)] struct PushActiveSetEntry(IndexMap</*node:*/ Pubkey, /*origins:*/ AtomicBloom<Pubkey>>); impl PushActiveSet { #[cfg(debug_assertions)] const MIN_NUM_BLOOM_ITEMS: usize = 512; #[cfg(not(debug_assertions))] const MIN_NUM_BLOOM_ITEMS: usize = crate::cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY; pub(crate) fn get_nodes<'a>( &'a self, pubkey: &Pubkey, // This node. origin: &'a Pubkey, // CRDS value owner. // If true forces gossip push even if the node has pruned the origin. should_force_push: impl FnMut(&Pubkey) -> bool + 'a, stakes: &HashMap<Pubkey, u64>, ) -> impl Iterator<Item = &Pubkey> + 'a { let stake = stakes.get(pubkey).min(stakes.get(origin)); self.get_entry(stake).get_nodes(origin, should_force_push) } // Prunes origins for the given gossip node. // We will stop pushing messages from the specified origins to the node. pub(crate) fn prune( &self, pubkey: &Pubkey, // This node. node: &Pubkey, // Gossip node. origins: &[Pubkey], // CRDS value owners. stakes: &HashMap<Pubkey, u64>, ) { let stake = stakes.get(pubkey); for origin in origins { if origin == pubkey { continue; } let stake = stake.min(stakes.get(origin)); self.get_entry(stake).prune(node, origin) } } pub(crate) fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain in each active-set entry. cluster_size: usize, // Gossip nodes to be sampled for each push active set. nodes: &[Pubkey], stakes: &HashMap<Pubkey, u64>, ) { let num_bloom_filter_items = cluster_size.max(Self::MIN_NUM_BLOOM_ITEMS); // Active set of nodes to push to are sampled from these gossip nodes, // using sampling probabilities obtained from the stake bucket of each // node. let buckets: Vec<_> = nodes .iter() .map(|node| get_stake_bucket(stakes.get(node))) .collect(); // (k, entry) represents push active set where the stake bucket of // min stake of {this node, crds value owner} // is equal to `k`. The `entry` maintains set of gossip nodes to // actively push to for crds values belonging to this bucket. 
for (k, entry) in self.0.iter_mut().enumerate() { let weights: Vec<u64> = buckets .iter() .map(|&bucket| { // bucket <- get_stake_bucket(min stake of { // this node, crds value owner and gossip peer // }) // weight <- (bucket + 1)^2 // min stake of {...} is a proxy for how much we care about // the link, and tries to mirror similar logic on the // receiving end when pruning incoming links: // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105 let bucket = bucket.min(k) as u64; bucket.saturating_add(1).saturating_pow(2) }) .collect(); entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); } } fn get_entry(&self, stake: Option<&u64>) -> &PushActiveSetEntry { &self.0[get_stake_bucket(stake)] } } impl PushActiveSetEntry { const BLOOM_FALSE_RATE: f64 = 0.1; const BLOOM_MAX_BITS: usize = 1024 * 8 * 4; fn get_nodes<'a>( &'a self, origin: &'a Pubkey, // If true forces gossip push even if the node has pruned the origin. mut should_force_push: impl FnMut(&Pubkey) -> bool + 'a, ) -> impl Iterator<Item = &Pubkey> + 'a { self.0 .iter() .filter(move |(node, bloom_filter)| { !bloom_filter.contains(origin) || should_force_push(node) }) .map(|(node, _bloom_filter)| node) } fn
( &self, node: &Pubkey, // Gossip node. origin: &Pubkey, // CRDS value owner ) { if let Some(bloom_filter) = self.0.get(node) { bloom_filter.add(origin); } } fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain. num_bloom_filter_items: usize, nodes: &[Pubkey], weights: &[u64], ) { debug_assert_eq!(nodes.len(), weights.len()); debug_assert!(weights.iter().all(|&weight| weight!= 0u64)); let shuffle = WeightedShuffle::new("rotate-active-set", weights).shuffle(rng); for node in shuffle.map(|k| &nodes[k]) { // We intend to discard the oldest/first entry in the index-map. if self.0.len() > size { break; } if self.0.contains_key(node) { continue; } let bloom = AtomicBloom::from(Bloom::random( num_bloom_filter_items, Self::BLOOM_FALSE_RATE, Self::BLOOM_MAX_BITS, )); bloom.add(node); self.0.insert(*node, bloom); } // Drop the oldest entry while preserving the ordering of others. while self.0.len() > size { self.0.shift_remove_index(0); } } } // Maps stake to bucket index. fn get_stake_bucket(stake: Option<&u64>) -> usize { let stake = stake.copied().unwrap_or_default() / LAMPORTS_PER_SOL; let bucket = u64::BITS - stake.leading_zeros(); (bucket as usize).min(NUM_PUSH_ACTIVE_SET_ENTRIES - 1) } #[cfg(test)] mod tests { use {super::*, rand::SeedableRng, rand_chacha::ChaChaRng, std::iter::repeat_with}; #[test] fn test_get_stake_bucket() { assert_eq!(get_stake_bucket(None), 0); let buckets = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5]; for (k, bucket) in buckets.into_iter().enumerate() { let stake = (k as u64) * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } for (stake, bucket) in [ (4_194_303, 22), (4_194_304, 23), (8_388_607, 23), (8_388_608, 24), ] { let stake = stake * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } assert_eq!( get_stake_bucket(Some(&u64::MAX)), NUM_PUSH_ACTIVE_SET_ENTRIES - 1 ); } #[test] fn test_push_active_set() { const CLUSTER_SIZE: usize = 117; const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL; let mut rng = ChaChaRng::from_seed([189u8; 32]); let pubkey = Pubkey::new_unique(); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE)); let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect(); stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE)); let mut active_set = PushActiveSet::default(); assert!(active_set.0.iter().all(|entry| entry.0.is_empty())); active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 5)); // Assert that for all entries, each filter already prunes the key. 
for entry in &active_set.0 { for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } } let other = &nodes[5]; let origin = &nodes[17]; assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 5, 18, 16, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.prune(&pubkey, &nodes[5], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[3], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[16], &[*origin], &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 18, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 7)); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([18, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([18, 16, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); let origins = [*origin, *other]; active_set.prune(&pubkey, &nodes[18], &origins, &stakes); active_set.prune(&pubkey, &nodes[0], &origins, &stakes); active_set.prune(&pubkey, &nodes[15], &origins, &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([7, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([16, 7, 11].into_iter().map(|k| &nodes[k]))); } #[test] fn test_push_active_set_entry() { const NUM_BLOOM_FILTER_ITEMS: usize = 100; let mut rng = ChaChaRng::from_seed([147u8; 32]); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let weights: Vec<_> = repeat_with(|| rng.gen_range(1..1000)).take(20).collect(); let mut entry = PushActiveSetEntry::default(); entry.rotate( &mut rng, 5, // size NUM_BLOOM_FILTER_ITEMS, &nodes, &weights, ); assert_eq!(entry.0.len(), 5); let keys = [&nodes[16], &nodes[11], &nodes[17], &nodes[14], &nodes[5]]; assert!(entry.0.keys().eq(keys)); for origin in &nodes { if!keys.contains(&origin) { assert!(entry.get_nodes(origin, |_| false).eq(keys)); } else { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&key| key!= origin))); } } // Assert that each filter already prunes the key. for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } for origin in keys { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&node| node!= origin))); } // Assert that prune excludes node from get. let origin = &nodes[3]; entry.prune(&nodes[11], origin); entry.prune(&nodes[14], origin); entry.prune(&nodes[19], origin); assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry.get_nodes(origin, |_| false).eq(keys .into_iter() .filter(|&&node| node!= nodes[11] && node!= nodes[14]))); // Assert that rotate adds new nodes. 
entry.rotate(&mut rng, 5, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[11], &nodes[17], &nodes[14], &nodes[5], &nodes[7]]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 6, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [ &nodes[17], &nodes[14], &nodes[5], &nodes[7], &nodes[1], &nodes[13], ]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 4, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[5], &nodes[7], &nodes[1], &nodes[13]]; assert!(entry.0.keys().eq(keys)); } }
prune
identifier_name
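Editor's note: the stake-bucket arithmetic in this sample is terse, so here is a standalone sketch of the same computation. LAMPORTS_PER_SOL is written out as a literal so the snippet compiles on its own (the sample imports it from solana_sdk); the closing assertion shows the (min(bucket, k) + 1)^2 weighting that rotate derives from it.

// Sketch of the stake-bucket arithmetic used by PushActiveSet.
const LAMPORTS_PER_SOL: u64 = 1_000_000_000;
const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25;

fn get_stake_bucket(stake: Option<&u64>) -> usize {
    let stake = stake.copied().unwrap_or_default() / LAMPORTS_PER_SOL;
    // Bucket index is the bit length of the SOL-denominated stake,
    // i.e. roughly log2(stake) + 1, capped at the last bucket.
    let bucket = u64::BITS - stake.leading_zeros();
    (bucket as usize).min(NUM_PUSH_ACTIVE_SET_ENTRIES - 1)
}

fn main() {
    // 0 SOL -> bucket 0, 1 SOL -> bucket 1, 2..=3 SOL -> bucket 2, ...
    for sol in [0u64, 1, 2, 3, 4, 16] {
        let lamports = sol * LAMPORTS_PER_SOL;
        println!("{} SOL -> bucket {}", sol, get_stake_bucket(Some(&lamports)));
    }
    // Per-entry push weight computed in rotate: (min(bucket, k) + 1)^2.
    let (bucket, k) = (5u64, 3usize);
    assert_eq!(bucket.min(k as u64).saturating_add(1).saturating_pow(2), 16);
}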
push_active_set.rs
use { crate::weighted_shuffle::WeightedShuffle, indexmap::IndexMap, rand::Rng, solana_bloom::bloom::{AtomicBloom, Bloom}, solana_sdk::{native_token::LAMPORTS_PER_SOL, pubkey::Pubkey}, std::collections::HashMap, }; const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25; // Each entry corresponds to a stake bucket for // min stake of { this node, crds value owner } // The entry represents set of gossip nodes to actively // push to for crds values belonging to the bucket. #[derive(Default)] pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES]); // Keys are gossip nodes to push messages to. // Values are which origins the node has pruned. #[derive(Default)] struct PushActiveSetEntry(IndexMap</*node:*/ Pubkey, /*origins:*/ AtomicBloom<Pubkey>>); impl PushActiveSet { #[cfg(debug_assertions)] const MIN_NUM_BLOOM_ITEMS: usize = 512; #[cfg(not(debug_assertions))] const MIN_NUM_BLOOM_ITEMS: usize = crate::cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY; pub(crate) fn get_nodes<'a>( &'a self, pubkey: &Pubkey, // This node. origin: &'a Pubkey, // CRDS value owner. // If true forces gossip push even if the node has pruned the origin. should_force_push: impl FnMut(&Pubkey) -> bool + 'a, stakes: &HashMap<Pubkey, u64>, ) -> impl Iterator<Item = &Pubkey> + 'a { let stake = stakes.get(pubkey).min(stakes.get(origin)); self.get_entry(stake).get_nodes(origin, should_force_push) } // Prunes origins for the given gossip node. // We will stop pushing messages from the specified origins to the node. pub(crate) fn prune( &self, pubkey: &Pubkey, // This node. node: &Pubkey, // Gossip node. origins: &[Pubkey], // CRDS value owners. stakes: &HashMap<Pubkey, u64>, ) { let stake = stakes.get(pubkey); for origin in origins { if origin == pubkey { continue; } let stake = stake.min(stakes.get(origin)); self.get_entry(stake).prune(node, origin) } } pub(crate) fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain in each active-set entry. cluster_size: usize, // Gossip nodes to be sampled for each push active set. nodes: &[Pubkey], stakes: &HashMap<Pubkey, u64>, ) { let num_bloom_filter_items = cluster_size.max(Self::MIN_NUM_BLOOM_ITEMS); // Active set of nodes to push to are sampled from these gossip nodes, // using sampling probabilities obtained from the stake bucket of each // node. let buckets: Vec<_> = nodes .iter() .map(|node| get_stake_bucket(stakes.get(node))) .collect(); // (k, entry) represents push active set where the stake bucket of // min stake of {this node, crds value owner} // is equal to `k`. The `entry` maintains set of gossip nodes to // actively push to for crds values belonging to this bucket. 
for (k, entry) in self.0.iter_mut().enumerate() { let weights: Vec<u64> = buckets .iter() .map(|&bucket| { // bucket <- get_stake_bucket(min stake of { // this node, crds value owner and gossip peer // }) // weight <- (bucket + 1)^2 // min stake of {...} is a proxy for how much we care about // the link, and tries to mirror similar logic on the // receiving end when pruning incoming links: // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105 let bucket = bucket.min(k) as u64; bucket.saturating_add(1).saturating_pow(2) }) .collect(); entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); } } fn get_entry(&self, stake: Option<&u64>) -> &PushActiveSetEntry { &self.0[get_stake_bucket(stake)] } } impl PushActiveSetEntry { const BLOOM_FALSE_RATE: f64 = 0.1; const BLOOM_MAX_BITS: usize = 1024 * 8 * 4; fn get_nodes<'a>( &'a self, origin: &'a Pubkey, // If true forces gossip push even if the node has pruned the origin. mut should_force_push: impl FnMut(&Pubkey) -> bool + 'a, ) -> impl Iterator<Item = &Pubkey> + 'a { self.0 .iter() .filter(move |(node, bloom_filter)| { !bloom_filter.contains(origin) || should_force_push(node) }) .map(|(node, _bloom_filter)| node) } fn prune( &self, node: &Pubkey, // Gossip node. origin: &Pubkey, // CRDS value owner. ) { if let Some(bloom_filter) = self.0.get(node) { bloom_filter.add(origin); } } fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain. num_bloom_filter_items: usize, nodes: &[Pubkey], weights: &[u64], ) { debug_assert_eq!(nodes.len(), weights.len()); debug_assert!(weights.iter().all(|&weight| weight != 0u64)); let shuffle = WeightedShuffle::new("rotate-active-set", weights).shuffle(rng); for node in shuffle.map(|k| &nodes[k]) { // We intend to discard the oldest/first entry in the index-map. if self.0.len() > size { break; } if self.0.contains_key(node) { continue; } let bloom = AtomicBloom::from(Bloom::random( num_bloom_filter_items, Self::BLOOM_FALSE_RATE, Self::BLOOM_MAX_BITS, )); bloom.add(node); self.0.insert(*node, bloom); } // Drop the oldest entry while preserving the ordering of others. while self.0.len() > size { self.0.shift_remove_index(0); } } } // Maps stake to bucket index.
fn get_stake_bucket(stake: Option<&u64>) -> usize { let stake = stake.copied().unwrap_or_default() / LAMPORTS_PER_SOL; let bucket = u64::BITS - stake.leading_zeros(); (bucket as usize).min(NUM_PUSH_ACTIVE_SET_ENTRIES - 1) } #[cfg(test)] mod tests { use {super::*, rand::SeedableRng, rand_chacha::ChaChaRng, std::iter::repeat_with}; #[test] fn test_get_stake_bucket() { assert_eq!(get_stake_bucket(None), 0); let buckets = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5]; for (k, bucket) in buckets.into_iter().enumerate() { let stake = (k as u64) * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } for (stake, bucket) in [ (4_194_303, 22), (4_194_304, 23), (8_388_607, 23), (8_388_608, 24), ] { let stake = stake * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } assert_eq!( get_stake_bucket(Some(&u64::MAX)), NUM_PUSH_ACTIVE_SET_ENTRIES - 1 ); } #[test] fn test_push_active_set() { const CLUSTER_SIZE: usize = 117; const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL; let mut rng = ChaChaRng::from_seed([189u8; 32]); let pubkey = Pubkey::new_unique(); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE)); let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect(); stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE)); let mut active_set = PushActiveSet::default(); assert!(active_set.0.iter().all(|entry| entry.0.is_empty())); active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 5)); // Assert that for all entries, each filter already prunes the key. for entry in &active_set.0 { for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } } let other = &nodes[5]; let origin = &nodes[17]; assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 5, 18, 16, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.prune(&pubkey, &nodes[5], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[3], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[16], &[*origin], &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 18, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 7)); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([18, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([18, 16, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); let origins = [*origin, *other]; active_set.prune(&pubkey, &nodes[18], &origins, &stakes); active_set.prune(&pubkey, &nodes[0], &origins, &stakes); active_set.prune(&pubkey, &nodes[15], &origins, &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([7, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([16, 7, 11].into_iter().map(|k| &nodes[k]))); }
#[test] fn test_push_active_set_entry() { const NUM_BLOOM_FILTER_ITEMS: usize = 100; let mut rng = ChaChaRng::from_seed([147u8; 32]); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let weights: Vec<_> = repeat_with(|| rng.gen_range(1..1000)).take(20).collect(); let mut entry = PushActiveSetEntry::default(); entry.rotate( &mut rng, 5, // size NUM_BLOOM_FILTER_ITEMS, &nodes, &weights, ); assert_eq!(entry.0.len(), 5); let keys = [&nodes[16], &nodes[11], &nodes[17], &nodes[14], &nodes[5]]; assert!(entry.0.keys().eq(keys)); for origin in &nodes { if !keys.contains(&origin) { assert!(entry.get_nodes(origin, |_| false).eq(keys)); } else { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&key| key != origin))); } } // Assert that each filter already prunes the key. for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } for origin in keys { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&node| node != origin))); } // Assert that prune excludes node from get. let origin = &nodes[3]; entry.prune(&nodes[11], origin); entry.prune(&nodes[14], origin); entry.prune(&nodes[19], origin); assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry.get_nodes(origin, |_| false).eq(keys .into_iter() .filter(|&&node| node != nodes[11] && node != nodes[14]))); // Assert that rotate adds new nodes. entry.rotate(&mut rng, 5, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[11], &nodes[17], &nodes[14], &nodes[5], &nodes[7]]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 6, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [ &nodes[17], &nodes[14], &nodes[5], &nodes[7], &nodes[1], &nodes[13], ]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 4, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[5], &nodes[7], &nodes[1], &nodes[13]]; assert!(entry.0.keys().eq(keys)); } }
random_line_split
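Editor's note: rotate's eviction relies on IndexMap preserving insertion order. A minimal sketch of that behavior, assuming the indexmap crate as a dependency (the sample already imports it): new keys are appended, and shift_remove_index(0) drops the oldest entry while keeping the remaining order intact.

// Sketch of the front-eviction pattern in PushActiveSetEntry::rotate.
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<&str, u32> = IndexMap::new();
    for key in ["a", "b", "c", "d", "e"] {
        map.insert(key, 0);
    }
    let size = 3;
    // Drop the oldest entries while preserving the ordering of the others.
    while map.len() > size {
        map.shift_remove_index(0);
    }
    assert!(map.keys().eq(["c", "d", "e"].iter()));
}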
push_active_set.rs
use { crate::weighted_shuffle::WeightedShuffle, indexmap::IndexMap, rand::Rng, solana_bloom::bloom::{AtomicBloom, Bloom}, solana_sdk::{native_token::LAMPORTS_PER_SOL, pubkey::Pubkey}, std::collections::HashMap, }; const NUM_PUSH_ACTIVE_SET_ENTRIES: usize = 25; // Each entry corresponds to a stake bucket for // min stake of { this node, crds value owner } // The entry represents set of gossip nodes to actively // push to for crds values belonging to the bucket. #[derive(Default)] pub(crate) struct PushActiveSet([PushActiveSetEntry; NUM_PUSH_ACTIVE_SET_ENTRIES]); // Keys are gossip nodes to push messages to. // Values are which origins the node has pruned. #[derive(Default)] struct PushActiveSetEntry(IndexMap</*node:*/ Pubkey, /*origins:*/ AtomicBloom<Pubkey>>); impl PushActiveSet { #[cfg(debug_assertions)] const MIN_NUM_BLOOM_ITEMS: usize = 512; #[cfg(not(debug_assertions))] const MIN_NUM_BLOOM_ITEMS: usize = crate::cluster_info::CRDS_UNIQUE_PUBKEY_CAPACITY; pub(crate) fn get_nodes<'a>( &'a self, pubkey: &Pubkey, // This node. origin: &'a Pubkey, // CRDS value owner. // If true forces gossip push even if the node has pruned the origin. should_force_push: impl FnMut(&Pubkey) -> bool + 'a, stakes: &HashMap<Pubkey, u64>, ) -> impl Iterator<Item = &Pubkey> + 'a { let stake = stakes.get(pubkey).min(stakes.get(origin)); self.get_entry(stake).get_nodes(origin, should_force_push) } // Prunes origins for the given gossip node. // We will stop pushing messages from the specified origins to the node. pub(crate) fn prune( &self, pubkey: &Pubkey, // This node. node: &Pubkey, // Gossip node. origins: &[Pubkey], // CRDS value owners. stakes: &HashMap<Pubkey, u64>, ) { let stake = stakes.get(pubkey); for origin in origins { if origin == pubkey { continue; } let stake = stake.min(stakes.get(origin)); self.get_entry(stake).prune(node, origin) } } pub(crate) fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain in each active-set entry. cluster_size: usize, // Gossip nodes to be sampled for each push active set. nodes: &[Pubkey], stakes: &HashMap<Pubkey, u64>, ) { let num_bloom_filter_items = cluster_size.max(Self::MIN_NUM_BLOOM_ITEMS); // Active set of nodes to push to are sampled from these gossip nodes, // using sampling probabilities obtained from the stake bucket of each // node. let buckets: Vec<_> = nodes .iter() .map(|node| get_stake_bucket(stakes.get(node))) .collect(); // (k, entry) represents push active set where the stake bucket of // min stake of {this node, crds value owner} // is equal to `k`. The `entry` maintains set of gossip nodes to // actively push to for crds values belonging to this bucket. 
for (k, entry) in self.0.iter_mut().enumerate() { let weights: Vec<u64> = buckets .iter() .map(|&bucket| { // bucket <- get_stake_bucket(min stake of { // this node, crds value owner and gossip peer // }) // weight <- (bucket + 1)^2 // min stake of {...} is a proxy for how much we care about // the link, and tries to mirror similar logic on the // receiving end when pruning incoming links: // https://github.com/solana-labs/solana/blob/81394cf92/gossip/src/received_cache.rs#L100-L105 let bucket = bucket.min(k) as u64; bucket.saturating_add(1).saturating_pow(2) }) .collect(); entry.rotate(rng, size, num_bloom_filter_items, nodes, &weights); } } fn get_entry(&self, stake: Option<&u64>) -> &PushActiveSetEntry { &self.0[get_stake_bucket(stake)] } } impl PushActiveSetEntry { const BLOOM_FALSE_RATE: f64 = 0.1; const BLOOM_MAX_BITS: usize = 1024 * 8 * 4; fn get_nodes<'a>( &'a self, origin: &'a Pubkey, // If true forces gossip push even if the node has pruned the origin. mut should_force_push: impl FnMut(&Pubkey) -> bool + 'a, ) -> impl Iterator<Item = &Pubkey> + 'a { self.0 .iter() .filter(move |(node, bloom_filter)| { !bloom_filter.contains(origin) || should_force_push(node) }) .map(|(node, _bloom_filter)| node) } fn prune( &self, node: &Pubkey, // Gossip node. origin: &Pubkey, // CRDS value owner. ) { if let Some(bloom_filter) = self.0.get(node) { bloom_filter.add(origin); } } fn rotate<R: Rng>( &mut self, rng: &mut R, size: usize, // Number of nodes to retain. num_bloom_filter_items: usize, nodes: &[Pubkey], weights: &[u64], ) { debug_assert_eq!(nodes.len(), weights.len()); debug_assert!(weights.iter().all(|&weight| weight != 0u64)); let shuffle = WeightedShuffle::new("rotate-active-set", weights).shuffle(rng); for node in shuffle.map(|k| &nodes[k]) { // We intend to discard the oldest/first entry in the index-map. if self.0.len() > size { break; } if self.0.contains_key(node) { continue; } let bloom = AtomicBloom::from(Bloom::random( num_bloom_filter_items, Self::BLOOM_FALSE_RATE, Self::BLOOM_MAX_BITS, )); bloom.add(node); self.0.insert(*node, bloom); } // Drop the oldest entry while preserving the ordering of others. while self.0.len() > size { self.0.shift_remove_index(0); } } } // Maps stake to bucket index.
fn get_stake_bucket(stake: Option<&u64>) -> usize { let stake = stake.copied().unwrap_or_default() / LAMPORTS_PER_SOL; let bucket = u64::BITS - stake.leading_zeros(); (bucket as usize).min(NUM_PUSH_ACTIVE_SET_ENTRIES - 1) } #[cfg(test)] mod tests { use {super::*, rand::SeedableRng, rand_chacha::ChaChaRng, std::iter::repeat_with}; #[test] fn test_get_stake_bucket() { assert_eq!(get_stake_bucket(None), 0); let buckets = [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5]; for (k, bucket) in buckets.into_iter().enumerate() { let stake = (k as u64) * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } for (stake, bucket) in [ (4_194_303, 22), (4_194_304, 23), (8_388_607, 23), (8_388_608, 24), ] { let stake = stake * LAMPORTS_PER_SOL; assert_eq!(get_stake_bucket(Some(&stake)), bucket); } assert_eq!( get_stake_bucket(Some(&u64::MAX)), NUM_PUSH_ACTIVE_SET_ENTRIES - 1 ); } #[test] fn test_push_active_set() { const CLUSTER_SIZE: usize = 117; const MAX_STAKE: u64 = (1 << 20) * LAMPORTS_PER_SOL; let mut rng = ChaChaRng::from_seed([189u8; 32]); let pubkey = Pubkey::new_unique(); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let stakes = repeat_with(|| rng.gen_range(1..MAX_STAKE)); let mut stakes: HashMap<_, _> = nodes.iter().copied().zip(stakes).collect(); stakes.insert(pubkey, rng.gen_range(1..MAX_STAKE)); let mut active_set = PushActiveSet::default(); assert!(active_set.0.iter().all(|entry| entry.0.is_empty())); active_set.rotate(&mut rng, 5, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 5)); // Assert that for all entries, each filter already prunes the key. for entry in &active_set.0 { for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } } let other = &nodes[5]; let origin = &nodes[17]; assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 5, 18, 16, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.prune(&pubkey, &nodes[5], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[3], &[*origin], &stakes); active_set.prune(&pubkey, &nodes[16], &[*origin], &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([13, 18, 0].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([13, 18, 16, 0].into_iter().map(|k| &nodes[k]))); active_set.rotate(&mut rng, 7, CLUSTER_SIZE, &nodes, &stakes); assert!(active_set.0.iter().all(|entry| entry.0.len() == 7)); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([18, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([18, 16, 0, 7, 15, 11].into_iter().map(|k| &nodes[k]))); let origins = [*origin, *other]; active_set.prune(&pubkey, &nodes[18], &origins, &stakes); active_set.prune(&pubkey, &nodes[0], &origins, &stakes); active_set.prune(&pubkey, &nodes[15], &origins, &stakes); assert!(active_set .get_nodes(&pubkey, origin, |_| false, &stakes) .eq([7, 11].into_iter().map(|k| &nodes[k]))); assert!(active_set .get_nodes(&pubkey, other, |_| false, &stakes) .eq([16, 7, 11].into_iter().map(|k| &nodes[k]))); } #[test] fn test_push_active_set_entry() { const NUM_BLOOM_FILTER_ITEMS: usize = 100; let mut rng = ChaChaRng::from_seed([147u8; 32]); let nodes: Vec<_> = repeat_with(Pubkey::new_unique).take(20).collect(); let weights: Vec<_> = repeat_with(|| 
rng.gen_range(1..1000)).take(20).collect(); let mut entry = PushActiveSetEntry::default(); entry.rotate( &mut rng, 5, // size NUM_BLOOM_FILTER_ITEMS, &nodes, &weights, ); assert_eq!(entry.0.len(), 5); let keys = [&nodes[16], &nodes[11], &nodes[17], &nodes[14], &nodes[5]]; assert!(entry.0.keys().eq(keys)); for origin in &nodes { if !keys.contains(&origin)
else { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&key| key != origin))); } } // Assert that each filter already prunes the key. for (node, filter) in entry.0.iter() { assert!(filter.contains(node)); } for origin in keys { assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry .get_nodes(origin, |_| false) .eq(keys.into_iter().filter(|&node| node != origin))); } // Assert that prune excludes node from get. let origin = &nodes[3]; entry.prune(&nodes[11], origin); entry.prune(&nodes[14], origin); entry.prune(&nodes[19], origin); assert!(entry.get_nodes(origin, |_| true).eq(keys)); assert!(entry.get_nodes(origin, |_| false).eq(keys .into_iter() .filter(|&&node| node != nodes[11] && node != nodes[14]))); // Assert that rotate adds new nodes. entry.rotate(&mut rng, 5, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[11], &nodes[17], &nodes[14], &nodes[5], &nodes[7]]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 6, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [ &nodes[17], &nodes[14], &nodes[5], &nodes[7], &nodes[1], &nodes[13], ]; assert!(entry.0.keys().eq(keys)); entry.rotate(&mut rng, 4, NUM_BLOOM_FILTER_ITEMS, &nodes, &weights); let keys = [&nodes[5], &nodes[7], &nodes[1], &nodes[13]]; assert!(entry.0.keys().eq(keys)); } }
{ assert!(entry.get_nodes(origin, |_| false).eq(keys)); }
conditional_block
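Editor's note: the prune/get_nodes interplay in these samples reduces to one rule: a peer is skipped for an origin once that origin is in the peer's filter, unless the push is forced. A toy model follows, with HashSet standing in for AtomicBloom<Pubkey>; this is a deliberate simplification (a real Bloom filter has false positives and interior mutability, which is why the real prune takes &self and entries only ever add origins, being rebuilt on rotate).

// Toy model of PushActiveSetEntry's prune/get_nodes contract.
use std::collections::{HashMap, HashSet};

struct Entry(HashMap<&'static str, HashSet<&'static str>>);

impl Entry {
    // The real prune takes &self thanks to AtomicBloom; a HashSet needs &mut.
    fn prune(&mut self, node: &str, origin: &'static str) {
        if let Some(filter) = self.0.get_mut(node) {
            filter.insert(origin);
        }
    }

    fn get_nodes<'a>(
        &'a self,
        origin: &'a str,
        force: bool,
    ) -> impl Iterator<Item = &'static str> + 'a {
        self.0
            .iter()
            // Skip a peer whose filter contains the origin, unless forced.
            .filter(move |(_, filter)| !filter.contains(origin) || force)
            .map(|(node, _)| *node)
    }
}

fn main() {
    let mut entry = Entry(
        ["n1", "n2"].into_iter().map(|n| (n, HashSet::new())).collect(),
    );
    entry.prune("n1", "origin");
    assert!(entry.get_nodes("origin", false).eq(["n2"]));
    assert_eq!(entry.get_nodes("origin", true).count(), 2);
}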
mod.rs
mod boundingbox; mod material; mod shape; use crate::config::SimulationConfig; use crate::fields::ScalarField; use crate::greenfunctions::cosinebasis::{CosineBasis, Direction}; use crate::world::boundingbox::BoundingBox; use crate::world::shape::Shape; use nalgebra::*; use pbr::ProgressBar; use rayon::iter::*; use serde::Deserialize; use snafu::Snafu; use std::io::Stdout; use std::sync::{Arc, Mutex}; /// A struct representing the world. #[derive(PartialEq, Clone, Debug, Deserialize)] pub struct World { /// The grid size corresponding to this world. size: Vector3<usize>, /// The list with the different shapes. objects: Vec<Shape>, /// The simulation configuration. simulation_config: SimulationConfig, #[serde(skip)] use_progress_bar: bool, } /// A struct that iterates over the different forces of the objects. pub struct ForceIterator<'a> { i: usize, world: &'a World, } impl<'a> Iterator for ForceIterator<'a> { type Item = Vector3<f32>; fn next(&mut self) -> Option<Vector3<f32>> { if self.i < self.world.objects.len() { let force = self.world.force_on(self.i); self.i += 1; Some(force) } else { None } } } /// These errors can be thrown when validating a world. #[derive(Debug, Snafu)] pub enum WorldError { /// An error indicating that one of the shapes is too close to the edge, causing the bounding /// box to cross the boundary. #[snafu(display("shape {} too close to edge", index))] ShapeTooCloseToEdge { /// The index of the violating object. index: usize, }, /// An error indicating that the bounding boxes of two objects in this world intersect, /// and therefore this world is invalid. #[snafu(display("bounding boxes of shapes {} and {} intersect", index_1, index_2))] ShapesIntersect { /// The index of the first object intersecting with the second. index_1: usize, /// The index of the second object intersecting with the first. index_2: usize, }, } impl World { /// Enable or disable the progress bar for the simulation. pub fn set_progress_bar(&mut self, enable: bool) { self.use_progress_bar = enable; } /// Obtain a force iterator for all objects in this world. pub fn forces(&self) -> ForceIterator<'_> { ForceIterator { i: 0, world: &self } } /// Compute the force on the `i`'th object. pub fn force_on(&self, i: usize) -> Vector3<f32>
start_freq, end_freq, start_force, end_force, start_force.norm(), ) } /// This function validates the geometry of the world. The function should be called because it /// guards against overflows later on in the simulation. /// /// # Errors /// - If any of the shapes is too close to the world border, the simulation can't be run and a /// `WorldError::ShapeTooCloseToEdge` will be returned. To fix this, move the object or increase /// the grid size. /// /// - If any of the objects are too close to each other, their bounding boxes might intersect and /// the results will be invalid. If this is the case, a `WorldError::ShapesIntersect` will be /// returned. The violating shape indexes will be contained within. To fix this, move one or /// both of the objects. pub fn validate(&self) -> Result<(), WorldError> { let bbox_world = BoundingBox::new(0, 0, 0, self.size.x, self.size.y, self.size.z); let expanded_boxes = self .objects .iter() .enumerate() .map(|(index, obj)| { obj.bbox() .expanded(2) .map_err(|_| WorldError::ShapeTooCloseToEdge { index }) }) .collect::<Result<Vec<_>, _>>()?; for (i, bbox_1) in expanded_boxes.iter().enumerate() { // Check for intersection with world if !bbox_1.inside(&bbox_world) { return Err(WorldError::ShapeTooCloseToEdge { index: i }); } // Check for intersection with other objects for (j, bbox_2) in expanded_boxes.iter().enumerate() { if i < j && bbox_1.intersects(&bbox_2) { return Err(WorldError::ShapesIntersect { index_1: i, index_2: j, }); } } } Ok(()) } /// Performs a recursive integration between two frequencies. If the difference between the /// midpoint force and the linear interpolation is too large, both sides of the domain will use /// this function recursively to integrate the force. pub fn integrate_force_between_frequencies( &self, i: usize, start_frequency: f32, end_frequency: f32, start_value: Vector3<f32>, end_value: Vector3<f32>, max: f32, ) -> Vector3<f32> { // Do a recursive integration. The function should be smooth. let middle_frequency = 0.5 * (start_frequency + end_frequency); let middle_value = self.force_on_for_freq(i, middle_frequency); let average = (start_value + end_value) / 2.0; if (average - middle_value).norm() * (end_frequency - start_frequency) < self.simulation_config.frequency_threshold * max { // The change in area from the middle value vs extrapolation is less than the threshold 0.5 * (start_value + 2.0 * middle_value + end_value) * (end_frequency - start_frequency) } else { self.integrate_force_between_frequencies( i, start_frequency, middle_frequency, start_value, middle_value, max, ) + self.integrate_force_between_frequencies( i, middle_frequency, end_frequency, middle_value, end_value, max, ) } } /// Compute the force on object `i` for a certain `frequency`. This method will also subtract /// the error forces due to quantization by subtracting the force due to single objects.
fn force_on_for_freq(&self, i: usize, frequency: f32) -> Vector3<f32> { // Progress bar let bbox = &self.objects[i].bbox(); let dx = bbox.x1 - bbox.x0 + 4; let dy = bbox.y1 - bbox.y0 + 4; let dz = bbox.z1 - bbox.z0 + 4; let count = 2 * (dx * dy + dy * dz + dz * dx) * (1 + self.objects.len()); let progress_bar = if self.use_progress_bar { let progress_bar = Arc::new(Mutex::new(ProgressBar::new(count as u64))); progress_bar.lock().unwrap().format("[=>~]"); progress_bar.lock().unwrap().tick(); Some(progress_bar) } else { None }; let perm_all_geom = &self.permittivity_field_all_geometry(frequency); let mut total_force = self.force_on_for_freq_and_geometry(frequency, perm_all_geom, bbox, &progress_bar); // Discretization gives rise to forces of an object on itself. Removing these gives more // accurate results. for other in &self.objects { let perm = &self.permittivity_field(frequency, &[*other]); total_force -= self.force_on_for_freq_and_geometry(frequency, perm, bbox, &progress_bar); } if let Some(progress_bar) = progress_bar { progress_bar.lock().unwrap().finish_println(""); } println!( "Force for frequency {}: ({}, {}, {})", frequency, total_force.x, total_force.y, total_force.z ); total_force } /// Compute the force on the geometry inside `BoundingBox`, for the given permittivity field /// `perm` and `BoundingBox` `bbox`. fn force_on_for_freq_and_geometry( &self, frequency: f32, perm: &ScalarField, bbox: &BoundingBox, progress_bar: &Option<Arc<Mutex<ProgressBar<Stdout>>>>, ) -> Vector3<f32> { (0..6) .into_par_iter() .map(|face| { (match face { 0 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z0 - 2), frequency, perm, &self.simulation_config, Direction::NegZ, ), 1 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z1 + 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::Z, ), 2 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y0 - 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::NegY, ), 3 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y1 + 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::Y, ), 4 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x0 - 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::NegX, ), 5 => CosineBasis::new( Point3::new(bbox.x1 + 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::X, ), i => panic!("Face index out of bounds: {}", i), }) .with_progress_bar(progress_bar.clone()) .force() }) .sum() } /// Returns a scalar field representing the permittivity of a vector of bounding boxes. fn permittivity_field(&self, freq: f32, objects: &[Shape]) -> ScalarField { let mut permittivity_field = ScalarField::ones(self.size); for shape in objects { shape.draw_permittivity(&mut permittivity_field, freq); } permittivity_field } /// Returns a scalar field representing the permittivity of the entire geometry. fn permittivity_field_all_geometry(&self, freq: f32) -> ScalarField { self.permittivity_field(freq, &self.objects) } }
{ println!("Geometry:"); println!( "\tWorld size: ({}, {}, {})", self.size.x, self.size.y, self.size.z ); for (i, bbox) in self.objects.iter().enumerate() { println!("\t{}: {}", i, bbox); } println!("{}", self.simulation_config); // The maximum frequency is given by (speed of light) / (grid element size) let start_freq = self.simulation_config.frequency_range[0]; let end_freq = self.simulation_config.frequency_range[1]; let start_force = self.force_on_for_freq(i, start_freq); let end_force = self.force_on_for_freq(i, end_freq); self.integrate_force_between_frequencies( i,
identifier_body
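Editor's note: force_on integrates the force over a frequency range with the adaptive rule shown above: accept an interval when the midpoint agrees with the linear interpolation, otherwise split and recurse. The following is a scalar sketch of that control flow under stated assumptions: a hypothetical integrand f, an arbitrary threshold constant, and a plain composite-trapezoid estimate for accepted intervals (the sample instead integrates Vector3 forces and scales its threshold by the force magnitude at the lowest frequency).

// Scalar sketch of the recursive, adaptive integration scheme.
const THRESHOLD: f32 = 1e-3; // arbitrary accuracy knob for this sketch

fn f(x: f32) -> f32 {
    x * x // hypothetical smooth integrand standing in for the force
}

fn integrate(start: f32, end: f32, start_val: f32, end_val: f32) -> f32 {
    let mid = 0.5 * (start + end);
    let mid_val = f(mid);
    let average = 0.5 * (start_val + end_val);
    if (average - mid_val).abs() * (end - start) < THRESHOLD {
        // Midpoint agrees with the chord: accept a composite-trapezoid estimate.
        0.25 * (start_val + 2.0 * mid_val + end_val) * (end - start)
    } else {
        // Otherwise split the interval, reusing the endpoint evaluations.
        integrate(start, mid, start_val, mid_val) + integrate(mid, end, mid_val, end_val)
    }
}

fn main() {
    let (a, b) = (0.0_f32, 1.0_f32);
    // Exact value of the integral of x^2 over [0, 1] is 1/3.
    println!("approximation: {}", integrate(a, b, f(a), f(b)));
}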
mod.rs
mod boundingbox; mod material; mod shape; use crate::config::SimulationConfig; use crate::fields::ScalarField; use crate::greenfunctions::cosinebasis::{CosineBasis, Direction}; use crate::world::boundingbox::BoundingBox; use crate::world::shape::Shape; use nalgebra::*; use pbr::ProgressBar; use rayon::iter::*; use serde::Deserialize; use snafu::Snafu; use std::io::Stdout; use std::sync::{Arc, Mutex}; /// A struct representing the world. #[derive(PartialEq, Clone, Debug, Deserialize)] pub struct World { /// The grid size corresponding to this world. size: Vector3<usize>, /// The list with the different shapes. objects: Vec<Shape>, /// The simulation configuration. simulation_config: SimulationConfig, #[serde(skip)] use_progress_bar: bool, } /// A struct that iterates over the different forces of the objects. pub struct ForceIterator<'a> { i: usize, world: &'a World, } impl<'a> Iterator for ForceIterator<'a> { type Item = Vector3<f32>; fn next(&mut self) -> Option<Vector3<f32>> { if self.i < self.world.objects.len() { let force = self.world.force_on(self.i); self.i += 1; Some(force) } else { None } } } /// These errors can be thrown when validating a world. #[derive(Debug, Snafu)] pub enum WorldError { /// An error indicating that one of the shapes is too close to the edge, causing the bounding /// box to cross the boundary. #[snafu(display("shape {} too close to edge", index))] ShapeTooCloseToEdge { /// The index of the violating object. index: usize, }, /// An error indicating that the bounding boxes of two objects in this world intersect, /// and therefore this world is invalid. #[snafu(display("bounding boxes of shapes {} and {} intersect", index_1, index_2))] ShapesIntersect { /// The index of the first object intersecting with the second. index_1: usize, /// The index of the second object intersecting with the first. index_2: usize, }, } impl World { /// Enable or disable the progress bar for the simulation. pub fn set_progress_bar(&mut self, enable: bool) { self.use_progress_bar = enable; } /// Obtain a force iterator for all objects in this world. pub fn forces(&self) -> ForceIterator<'_> { ForceIterator { i: 0, world: &self } } /// Compute the force on the `i`'th object. pub fn force_on(&self, i: usize) -> Vector3<f32> { println!("Geometry:"); println!( "\tWorld size: ({}, {}, {})", self.size.x, self.size.y, self.size.z ); for (i, bbox) in self.objects.iter().enumerate() { println!("\t{}: {}", i, bbox); } println!("{}", self.simulation_config); // The maximum frequency is given by (speed of light) / (grid element size) let start_freq = self.simulation_config.frequency_range[0]; let end_freq = self.simulation_config.frequency_range[1]; let start_force = self.force_on_for_freq(i, start_freq); let end_force = self.force_on_for_freq(i, end_freq); self.integrate_force_between_frequencies( i, start_freq, end_freq, start_force, end_force, start_force.norm(), ) } /// This function validates the geometry of the world. The function should be called because it /// guards against overflows later on in the simulation. /// /// # Errors /// - If any of the shapes is too close to the world border, the simulation can't be run and a /// `WorldError::ShapeTooCloseToEdge` will be returned. To fix this, move the object or increase /// the grid size. /// /// - If any of the objects are too close to each other, their bounding boxes might intersect and /// the results will be invalid. If this is the case, a `WorldError::ShapesIntersect` will be /// returned.
The violating shape indexes will be contained within. To fix this, move one or /// both of the objects. pub fn validate(&self) -> Result<(), WorldError> { let bbox_world = BoundingBox::new(0, 0, 0, self.size.x, self.size.y, self.size.z); let expanded_boxes = self .objects .iter() .enumerate() .map(|(index, obj)| { obj.bbox() .expanded(2) .map_err(|_| WorldError::ShapeTooCloseToEdge { index }) }) .collect::<Result<Vec<_>, _>>()?; for (i, bbox_1) in expanded_boxes.iter().enumerate() { // Check for intersection with world if !bbox_1.inside(&bbox_world) { return Err(WorldError::ShapeTooCloseToEdge { index: i }); } // Check for intersection with other objects for (j, bbox_2) in expanded_boxes.iter().enumerate() { if i < j && bbox_1.intersects(&bbox_2) { return Err(WorldError::ShapesIntersect { index_1: i, index_2: j, }); } } } Ok(()) } /// Performs a recursive integration between two frequencies. If the difference between the /// midpoint force and the linear interpolation is too large, both sides of the domain will use /// this function recursively to integrate the force. pub fn integrate_force_between_frequencies( &self, i: usize, start_frequency: f32, end_frequency: f32, start_value: Vector3<f32>, end_value: Vector3<f32>, max: f32, ) -> Vector3<f32> { // Do a recursive integration. The function should be smooth. let middle_frequency = 0.5 * (start_frequency + end_frequency); let middle_value = self.force_on_for_freq(i, middle_frequency); let average = (start_value + end_value) / 2.0; if (average - middle_value).norm() * (end_frequency - start_frequency) < self.simulation_config.frequency_threshold * max { // The change in area from the middle value vs extrapolation is less than the threshold 0.5 * (start_value + 2.0 * middle_value + end_value) * (end_frequency - start_frequency) } else { self.integrate_force_between_frequencies( i, start_frequency, middle_frequency, start_value, middle_value, max, ) + self.integrate_force_between_frequencies( i, middle_frequency, end_frequency, middle_value, end_value, max, ) } } /// Compute the force on object `i` for a certain `frequency`. This method will also subtract /// the error forces due to quantization by subtracting the force due to single objects. fn
(&self, i: usize, frequency: f32) -> Vector3<f32> { // Progress bar let bbox = &self.objects[i].bbox(); let dx = bbox.x1 - bbox.x0 + 4; let dy = bbox.y1 - bbox.y0 + 4; let dz = bbox.z1 - bbox.z0 + 4; let count = 2 * (dx * dy + dy * dz + dz * dx) * (1 + self.objects.len()); let progress_bar = if self.use_progress_bar { let progress_bar = Arc::new(Mutex::new(ProgressBar::new(count as u64))); progress_bar.lock().unwrap().format("[=>~]"); progress_bar.lock().unwrap().tick(); Some(progress_bar) } else { None }; let perm_all_geom = &self.permittivity_field_all_geometry(frequency); let mut total_force = self.force_on_for_freq_and_geometry(frequency, perm_all_geom, bbox, &progress_bar); // Discretization gives rise to forces of an object on itself. Removing these gives more // accurate results. for other in &self.objects { let perm = &self.permittivity_field(frequency, &[*other]); total_force -= self.force_on_for_freq_and_geometry(frequency, perm, bbox, &progress_bar); } if let Some(progress_bar) = progress_bar { progress_bar.lock().unwrap().finish_println(""); } println!( "Force for frequency {}: ({}, {}, {})", frequency, total_force.x, total_force.y, total_force.z ); total_force } /// Compute the force on the geometry inside `BoundingBox`, for the given permittivity field /// `perm` and `BoundingBox` `bbox`. fn force_on_for_freq_and_geometry( &self, frequency: f32, perm: &ScalarField, bbox: &BoundingBox, progress_bar: &Option<Arc<Mutex<ProgressBar<Stdout>>>>, ) -> Vector3<f32> { (0..6) .into_par_iter() .map(|face| { (match face { 0 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z0 - 2), frequency, perm, &self.simulation_config, Direction::NegZ, ), 1 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z1 + 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::Z, ), 2 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y0 - 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::NegY, ), 3 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y1 + 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::Y, ), 4 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x0 - 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::NegX, ), 5 => CosineBasis::new( Point3::new(bbox.x1 + 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::X, ), i => panic!("Face index out of bounds: {}", i), }) .with_progress_bar(progress_bar.clone()) .force() }) .sum() } /// Returns a scalar field representing the permittivity of a vector of bounding boxes. fn permittivity_field(&self, freq: f32, objects: &[Shape]) -> ScalarField { let mut permittivity_field = ScalarField::ones(self.size); for shape in objects { shape.draw_permittivity(&mut permittivity_field, freq); } permittivity_field } /// Returns a scalar field representing the permittivity of the entire geometry. fn permittivity_field_all_geometry(&self, freq: f32) -> ScalarField { self.permittivity_field(freq, &self.objects) } }
force_on_for_freq
identifier_name
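Editor's note: the progress-bar total in force_on_for_freq is the number of surface cells of the object's bounding box after expansion by two cells on every side, visited once for the full geometry and once per object (the self-force subtraction passes). A minimal sketch of that count, with a stand-in BBox type and made-up values:

// Sketch of the work estimate used to size the progress bar.
struct BBox { x0: i32, x1: i32, y0: i32, y1: i32, z0: i32, z1: i32 }

fn face_cell_count(bbox: &BBox, num_objects: usize) -> usize {
    // Expanded by 2 cells on each side, as in the sample (+4 per axis).
    let dx = (bbox.x1 - bbox.x0 + 4) as usize;
    let dy = (bbox.y1 - bbox.y0 + 4) as usize;
    let dz = (bbox.z1 - bbox.z0 + 4) as usize;
    // Surface area of the box in cells, times one pass for the full
    // geometry plus one pass per object.
    2 * (dx * dy + dy * dz + dz * dx) * (1 + num_objects)
}

fn main() {
    let bbox = BBox { x0: 10, x1: 20, y0: 10, y1: 20, z0: 10, z1: 20 };
    println!("progress-bar ticks: {}", face_cell_count(&bbox, 2));
}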
mod.rs
mod boundingbox; mod material; mod shape; use crate::config::SimulationConfig; use crate::fields::ScalarField; use crate::greenfunctions::cosinebasis::{CosineBasis, Direction}; use crate::world::boundingbox::BoundingBox; use crate::world::shape::Shape; use nalgebra::*; use pbr::ProgressBar; use rayon::iter::*; use serde::Deserialize; use snafu::Snafu; use std::io::Stdout; use std::sync::{Arc, Mutex}; /// A struct representing the world. #[derive(PartialEq, Clone, Debug, Deserialize)] pub struct World { /// The grid size corresponding to this world. size: Vector3<usize>, /// The list with the different shapes. objects: Vec<Shape>, /// The simulation configuration. simulation_config: SimulationConfig, #[serde(skip)] use_progress_bar: bool, } /// A struct that iterates over the different forces of the objects. pub struct ForceIterator<'a> { i: usize, world: &'a World, } impl<'a> Iterator for ForceIterator<'a> { type Item = Vector3<f32>; fn next(&mut self) -> Option<Vector3<f32>> { if self.i < self.world.objects.len() { let force = self.world.force_on(self.i); self.i += 1; Some(force) } else { None } } } /// These errors can be thrown when validating a world. #[derive(Debug, Snafu)] pub enum WorldError { /// An error indicating that one of the shapes is too close to the edge, causing the bounding /// box to cross the boundary. #[snafu(display("shape {} too close to edge", index))] ShapeTooCloseToEdge { /// The index of the violating object. index: usize, }, /// An error indicating that the bounding boxes of two objects in this world intersect, /// and therefore this world is invalid. #[snafu(display("bounding boxes of shapes {} and {} intersect", index_1, index_2))] ShapesIntersect { /// The index of the first object intersecting with the second. index_1: usize, /// The index of the second object intersecting with the first. index_2: usize, }, } impl World { /// Enable or disable the progress bar for the simulation. pub fn set_progress_bar(&mut self, enable: bool) { self.use_progress_bar = enable; } /// Obtain a force iterator for all objects in this world. pub fn forces(&self) -> ForceIterator<'_> { ForceIterator { i: 0, world: &self } } /// Compute the force on the `i`'th object. pub fn force_on(&self, i: usize) -> Vector3<f32> { println!("Geometry:"); println!( "\tWorld size: ({}, {}, {})", self.size.x, self.size.y, self.size.z ); for (i, bbox) in self.objects.iter().enumerate() { println!("\t{}: {}", i, bbox); } println!("{}", self.simulation_config); // The maximum frequency is given by (speed of light) / (grid element size) let start_freq = self.simulation_config.frequency_range[0]; let end_freq = self.simulation_config.frequency_range[1]; let start_force = self.force_on_for_freq(i, start_freq); let end_force = self.force_on_for_freq(i, end_freq); self.integrate_force_between_frequencies( i, start_freq, end_freq, start_force, end_force, start_force.norm(), ) } /// This function validates the geometry of the world. The function should be called because it /// guards against overflows later on in the simulation. /// /// # Errors /// - If any of the shapes is too close to the world border, the simulation can't be run and a /// `WorldError::ShapeTooCloseToEdge` will be returned. To fix this, move the object or increase /// the grid size. /// /// - If any of the objects are too close to each other, their bounding boxes might intersect and /// the results will be invalid. If this is the case, a `WorldError::ShapesIntersect` will be /// returned.
The violating shape indexes will be contained within. To fix this, move one or /// both of the objects. pub fn validate(&self) -> Result<(), WorldError> { let bbox_world = BoundingBox::new(0, 0, 0, self.size.x, self.size.y, self.size.z); let expanded_boxes = self .objects .iter() .enumerate() .map(|(index, obj)| { obj.bbox() .expanded(2) .map_err(|_| WorldError::ShapeTooCloseToEdge { index }) }) .collect::<Result<Vec<_>, _>>()?; for (i, bbox_1) in expanded_boxes.iter().enumerate() { // Check for intersection with world if !bbox_1.inside(&bbox_world) { return Err(WorldError::ShapeTooCloseToEdge { index: i }); }
// Check for intersection with other objects for (j, bbox_2) in expanded_boxes.iter().enumerate() { if i < j && bbox_1.intersects(&bbox_2) { return Err(WorldError::ShapesIntersect { index_1: i, index_2: j, }); } } } Ok(()) } /// Performs a recursive integration between two frequencies. If the difference between the /// midpoint force and the linear interpolation is too large, both sides of the domain will use /// this function recursively to integrate the force. pub fn integrate_force_between_frequencies( &self, i: usize, start_frequency: f32, end_frequency: f32, start_value: Vector3<f32>, end_value: Vector3<f32>, max: f32, ) -> Vector3<f32> { // Do a recursive integration. The function should be smooth. let middle_frequency = 0.5 * (start_frequency + end_frequency); let middle_value = self.force_on_for_freq(i, middle_frequency); let average = (start_value + end_value) / 2.0; if (average - middle_value).norm() * (end_frequency - start_frequency) < self.simulation_config.frequency_threshold * max { // The change in area from the middle value vs extrapolation is less than the threshold 0.5 * (start_value + 2.0 * middle_value + end_value) * (end_frequency - start_frequency) } else { self.integrate_force_between_frequencies( i, start_frequency, middle_frequency, start_value, middle_value, max, ) + self.integrate_force_between_frequencies( i, middle_frequency, end_frequency, middle_value, end_value, max, ) } } /// Compute the force on object `i` for a certain `frequency`. This method will also subtract /// the error forces due to quantization by subtracting the force due to single objects. fn force_on_for_freq(&self, i: usize, frequency: f32) -> Vector3<f32> { // Progress bar let bbox = &self.objects[i].bbox(); let dx = bbox.x1 - bbox.x0 + 4; let dy = bbox.y1 - bbox.y0 + 4; let dz = bbox.z1 - bbox.z0 + 4; let count = 2 * (dx * dy + dy * dz + dz * dx) * (1 + self.objects.len()); let progress_bar = if self.use_progress_bar { let progress_bar = Arc::new(Mutex::new(ProgressBar::new(count as u64))); progress_bar.lock().unwrap().format("[=>~]"); progress_bar.lock().unwrap().tick(); Some(progress_bar) } else { None }; let perm_all_geom = &self.permittivity_field_all_geometry(frequency); let mut total_force = self.force_on_for_freq_and_geometry(frequency, perm_all_geom, bbox, &progress_bar); // Discretization gives rise to forces of an object on itself. Removing these gives more // accurate results. for other in &self.objects { let perm = &self.permittivity_field(frequency, &[*other]); total_force -= self.force_on_for_freq_and_geometry(frequency, perm, bbox, &progress_bar); } if let Some(progress_bar) = progress_bar { progress_bar.lock().unwrap().finish_println(""); } println!( "Force for frequency {}: ({}, {}, {})", frequency, total_force.x, total_force.y, total_force.z ); total_force } /// Compute the force on the geometry inside `BoundingBox`, for the given permittivity field /// `perm` and `BoundingBox` `bbox`. 
fn force_on_for_freq_and_geometry( &self, frequency: f32, perm: &ScalarField, bbox: &BoundingBox, progress_bar: &Option<Arc<Mutex<ProgressBar<Stdout>>>>, ) -> Vector3<f32> { (0..6) .into_par_iter() .map(|face| { (match face { 0 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z0 - 2), frequency, perm, &self.simulation_config, Direction::NegZ, ), 1 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z1 + 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::Z, ), 2 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y0 - 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::NegY, ), 3 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y1 + 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::Y, ), 4 => CosineBasis::new( Point3::new(bbox.x0 - 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x0 - 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::NegX, ), 5 => CosineBasis::new( Point3::new(bbox.x1 + 2, bbox.y0 - 2, bbox.z0 - 2), Point3::new(bbox.x1 + 2, bbox.y1 + 2, bbox.z1 + 2), frequency, perm, &self.simulation_config, Direction::X, ), i => panic!("Face index out of bounds: {}", i), }) .with_progress_bar(progress_bar.clone()) .force() }) .sum() } /// Returns a scalar field representing the permittivity of a vector of bounding boxes. fn permittivity_field(&self, freq: f32, objects: &[Shape]) -> ScalarField { let mut permittivity_field = ScalarField::ones(self.size); for shape in objects { shape.draw_permittivity(&mut permittivity_field, freq); } permittivity_field } /// Returns a scalar field representing the permittivity of the entire geometry. fn permittivity_field_all_geometry(&self, freq: f32) -> ScalarField { self.permittivity_field(freq, &self.objects) } }
random_line_split
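Editor's note: validate reduces to interval checks on axis-aligned boxes: a box is inside another iff every axis interval is contained, and two boxes intersect iff their intervals overlap on all three axes. A self-contained sketch of those predicates (the BBox here is a stand-in, not the crate's BoundingBox type):

// Minimal sketch of the interval logic behind World::validate.
#[derive(Clone, Copy)]
struct BBox { x0: usize, y0: usize, z0: usize, x1: usize, y1: usize, z1: usize }

impl BBox {
    fn inside(&self, other: &BBox) -> bool {
        other.x0 <= self.x0 && self.x1 <= other.x1
            && other.y0 <= self.y0 && self.y1 <= other.y1
            && other.z0 <= self.z0 && self.z1 <= other.z1
    }
    fn intersects(&self, other: &BBox) -> bool {
        self.x0 < other.x1 && other.x0 < self.x1
            && self.y0 < other.y1 && other.y0 < self.y1
            && self.z0 < other.z1 && other.z0 < self.z1
    }
}

fn main() {
    let world = BBox { x0: 0, y0: 0, z0: 0, x1: 64, y1: 64, z1: 64 };
    let a = BBox { x0: 4, y0: 4, z0: 4, x1: 10, y1: 10, z1: 10 };
    let b = BBox { x0: 9, y0: 4, z0: 4, x1: 14, y1: 10, z1: 10 };
    assert!(a.inside(&world));
    assert!(a.intersects(&b)); // overlapping expanded boxes would fail validate
}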
algorithm.rs
#![allow(unknown_lints)] #![warn(clippy::all)] extern crate ndarray; extern crate petgraph; extern crate rand; extern crate sprs; use crate::protocol_traits::graph::{Graph, GraphObject, Id, Metadata}; use crate::protocol_traits::ledger::LedgerView; use crate::types::walk::{RandomWalk, RandomWalks, SeedSet}; use crate::types::Osrank; use core::iter::Iterator; use fraction::Fraction; use num_traits::Zero; use rand::distributions::WeightedError; use rand::seq::SliceRandom; use rand::{Rng, SeedableRng}; use std::hash::Hash; #[derive(Debug)] pub enum OsrankError {} #[derive(Debug)] pub struct WalkResult<G, I> where I: Eq + Hash, { network_view: G, pub walks: RandomWalks<I>, } fn walks<'a, L, G: 'a, RNG>( starting_nodes: impl Iterator<Item = &'a Id<G::Node>>, network: &G, ledger_view: &L, mut rng: RNG, get_weight: &dyn Fn(&<G::Edge as GraphObject>::Metadata) -> f64, ) -> RandomWalks<Id<G::Node>> where L: LedgerView, G: Graph, Id<G::Node>: Clone + Eq + Hash, RNG: Rng + SeedableRng, { let mut walks = RandomWalks::new(); for i in starting_nodes { for _ in 0..(*ledger_view.get_random_walks_num()) { let mut walk = RandomWalk::new(i.clone()); let mut current_node = i; // TODO distinguish account/project // TODO Should there be a safeguard so this doesn't run forever? while rng.gen::<f64>() < ledger_view.get_damping_factors().project { let neighbors = network.neighbours(&current_node); match neighbors.choose_weighted(&mut rng, |item| { network .lookup_edge_metadata(&item.id) .and_then(|m| Some(get_weight(m))) .unwrap() }) { Ok(next_edge) => { walk.add_next(next_edge.target.clone()); current_node = next_edge.target; } Err(WeightedError::NoItem) => break, Err(error) => panic!("Problem with the neighbors: {:?}", error), } } walks.add_walk(walk); } } walks } // FIXME(adn) It should be possible to make this code parametric over // Dependency<W>, but I ran into a cryptic error about the SampleBorrow // trait not being implemented, and wasn't able to immediately make the code // typecheck. pub fn random_walk<L, G, RNG>( seed_set: Option<SeedSet<Id<G::Node>>>, network: &G, ledger_view: &L, rng: RNG, get_weight: &dyn Fn(&<G::Edge as GraphObject>::Metadata) -> f64, ) -> Result<WalkResult<G, <G::Node as GraphObject>::Id>, OsrankError> where L: LedgerView, G: Graph + Clone, Id<G::Node>: Clone + Eq + Hash, RNG: Rng + SeedableRng, { match seed_set { Some(seeds) => { let walks = walks(seeds.seedset_iter(), network, ledger_view, rng, get_weight); let mut trusted_node_ids: Vec<&Id<G::Node>> = Vec::new(); for node in network.nodes() { if rank_node::<L, G>(&walks, node.id().clone(), ledger_view) > Osrank::zero() { trusted_node_ids.push(&node.id()); } } Ok(WalkResult { network_view: network.subgraph_by_nodes(trusted_node_ids), walks, }) } None => { let whole_network = (*network).clone(); // FIXME, terrible. let all_node_ids = network.nodes().map(|n| n.id()); let res = WalkResult { network_view: whole_network, walks: walks(all_node_ids, network, ledger_view, rng, get_weight), }; Ok(res) } } } /// Naive version of the algorithm that, given a full Network and a precomputed /// set W of random walks, iterates over each edge of the Network and computes /// the osrank.
pub fn osrank_naive<L, G, RNG>( seed_set: Option<SeedSet<Id<G::Node>>>, network: &mut G, ledger_view: &L, initial_seed: <RNG as SeedableRng>::Seed, get_weight: Box<dyn Fn(&<G::Edge as GraphObject>::Metadata) -> f64>, from_osrank: Box<dyn Fn(&G::Node, Osrank) -> Metadata<G::Node>>, ) -> Result<(), OsrankError> where L: LedgerView, G: Graph + Clone, Id<G::Node>: Clone + Eq + Hash, RNG: Rng + SeedableRng, <RNG as SeedableRng>::Seed: Clone, { //NOTE(adn) The fact we are creating a new RNG every time we call // `random_walk` is deliberate and something to think about. We probably // want to "restart" the randomness from the initial seed every call to // `random_walk`, which means this function has to consume the RNG. match seed_set { Some(_) => { // Phase1, rank the network and produce a NetworkView. let phase1 = random_walk( seed_set, &*network, ledger_view, RNG::from_seed(initial_seed.clone()), &get_weight, )?; // Phase2, compute the osrank only on the NetworkView let phase2 = random_walk( None, &phase1.network_view, ledger_view, RNG::from_seed(initial_seed.clone()), &get_weight, )?; rank_network(&phase2.walks, &mut *network, ledger_view, &from_osrank) } None => { // Compute osrank on the full NetworkView let create_walks = random_walk( None, &*network, ledger_view, RNG::from_seed(initial_seed.clone()), &get_weight, )?; rank_network( &create_walks.walks, &mut *network, ledger_view, &from_osrank, ) } } } fn rank_node<L, G>( random_walks: &RandomWalks<Id<G::Node>>,
ledger_view: &L, ) -> Osrank where L: LedgerView, G: Graph, <G::Node as GraphObject>::Id: Eq + Clone + Hash, { let total_walks = random_walks.len(); let node_visits = random_walks.count_visits(&node_id); Fraction::from(1.0 - ledger_view.get_damping_factors().project) * Osrank::new(node_visits as u32, total_walks as u32) } pub fn rank_network<'a, L, G: 'a>( random_walks: &RandomWalks<Id<G::Node>>, network_view: &'a mut G, ledger_view: &L, from_osrank: &dyn Fn(&G::Node, Osrank) -> Metadata<G::Node>, ) -> Result<(), OsrankError> where L: LedgerView, G: Graph, <G::Node as GraphObject>::Id: Eq + Clone + Hash, { for node in network_view.nodes_mut() { let rank = rank_node::<L, G>(&random_walks, node.id().clone(), ledger_view); node.set_metadata(from_osrank(&node, rank)) } Ok(()) } #[cfg(test)] mod tests { extern crate rand; extern crate rand_xorshift; use super::*; use crate::protocol_traits::ledger::MockLedger; use crate::types::network::{Artifact, ArtifactType, DependencyType, Network}; use crate::types::Weight; use num_traits::Zero; use rand_xorshift::XorShiftRng; type MockNetwork = Network<f64>; #[test] fn everything_ok() { // build the example network let mut network = Network::default(); for node in &["p1", "p2", "p3"] { network.add_node( node.to_string(), ArtifactType::Project { osrank: Zero::zero(), }, ) } // Create the seed set from all projects let seed_set = SeedSet::from(vec!["p1".to_string(), "p2".to_string(), "p3".to_string()]); for node in &["a1", "a2", "a3", "isle"] { network.add_node( node.to_string(), ArtifactType::Account { osrank: Zero::zero(), }, ) } let edges = [ ("p1", "a1", Weight::new(3, 7)), ("a1", "p1", Weight::new(1, 1)), ("p1", "p2", Weight::new(4, 7)), ("p2", "a2", Weight::new(1, 1)), ("a2", "p2", Weight::new(1, 3)), ("a2", "p3", Weight::new(2, 3)), ("p3", "a2", Weight::new(11, 28)), ("p3", "a3", Weight::new(1, 28)), ("p3", "p1", Weight::new(2, 7)), ("p3", "p2", Weight::new(2, 7)), ("a3", "p3", Weight::new(1, 1)), ]; for edge in &edges { network.add_edge( &edge.0.to_string(), &edge.1.to_string(), 2, DependencyType::Influence(edge.2.as_f64().unwrap()), ) } let mock_ledger = MockLedger::default(); let get_weight = Box::new(|m: &DependencyType<f64>| *m.get_weight()); let set_osrank = Box::new(|node: &Artifact<String>, rank| match node.get_metadata() { ArtifactType::Project { osrank: _ } => ArtifactType::Project { osrank: rank }, ArtifactType::Account { osrank: _ } => ArtifactType::Account { osrank: rank }, }); assert_eq!(network.edge_count(), 11); // This is the insertion point of the Graph API. If we had a GraphAPI // "handle" in scope here, we could extract the seed from some state // and use it in the algorithm. Faking it for now. let initial_seed = [0; 16]; assert_eq!( osrank_naive::<MockLedger, MockNetwork, XorShiftRng>( Some(seed_set), &mut network, &mock_ledger, initial_seed, get_weight, set_osrank ) .unwrap(), () ); assert_eq!( network.nodes().fold(Vec::new(), |mut ranks, node| { // let bla = *node.get_metadata(); ranks.push(format!("{}", *node)); ranks }), vec![ "id: p1 osrank: 0.1425", "id: p2 osrank: 0.2225", "id: p3 osrank: 0.1575", "id: a1 osrank: 0.08", "id: a2 osrank: 0.23", "id: a3 osrank: 0.055", "id: isle osrank: 0" ] ); } }
node_id: Id<G::Node>,
random_line_split
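Editor's note: rank_node's formula is osrank = (1 - damping) * visits / total_walks, i.e. the node's visit frequency across all random walks, scaled by the walk-termination probability. A plain-f64 sketch of that arithmetic (the sample uses the fraction crate's Fraction-based Osrank type for exactness; the numbers below are hypothetical):

// Sketch of the rank_node formula with f64 in place of Fraction.
fn rank_node(visits: usize, total_walks: usize, damping: f64) -> f64 {
    (1.0 - damping) * visits as f64 / total_walks as f64
}

fn main() {
    // A node visited 170 times across 1000 walks with damping 0.85
    // gets rank 0.15 * 0.17 = 0.0255.
    let rank = rank_node(170, 1000, 0.85);
    assert!((rank - 0.0255).abs() < 1e-12);
}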
algorithm.rs
#![allow(unknown_lints)] #![warn(clippy::all)] extern crate ndarray; extern crate petgraph; extern crate rand; extern crate sprs; use crate::protocol_traits::graph::{Graph, GraphObject, Id, Metadata}; use crate::protocol_traits::ledger::LedgerView; use crate::types::walk::{RandomWalk, RandomWalks, SeedSet}; use crate::types::Osrank; use core::iter::Iterator; use fraction::Fraction; use num_traits::Zero; use rand::distributions::WeightedError; use rand::seq::SliceRandom; use rand::{Rng, SeedableRng}; use std::hash::Hash; #[derive(Debug)] pub enum OsrankError {} #[derive(Debug)] pub struct
<G, I> where I: Eq + Hash, { network_view: G, pub walks: RandomWalks<I>, } fn walks<'a, L, G: 'a, RNG>( starting_nodes: impl Iterator<Item = &'a Id<G::Node>>, network: &G, ledger_view: &L, mut rng: RNG, get_weight: &dyn Fn(&<G::Edge as GraphObject>::Metadata) -> f64, ) -> RandomWalks<Id<G::Node>> where L: LedgerView, G: Graph, Id<G::Node>: Clone + Eq + Hash, RNG: Rng + SeedableRng, { let mut walks = RandomWalks::new(); for i in starting_nodes { for _ in 0..(*ledger_view.get_random_walks_num()) { let mut walk = RandomWalk::new(i.clone()); let mut current_node = i; // TODO distinguish account/project // TODO Should there be a safeguard so this doesn't run forever? while rng.gen::<f64>() < ledger_view.get_damping_factors().project { let neighbors = network.neighbours(&current_node); match neighbors.choose_weighted(&mut rng, |item| { network .lookup_edge_metadata(&item.id) .and_then(|m| Some(get_weight(m))) .unwrap() }) { Ok(next_edge) => { walk.add_next(next_edge.target.clone()); current_node = next_edge.target; } Err(WeightedError::NoItem) => break, Err(error) => panic!("Problem with the neighbors: {:?}", error), } } walks.add_walk(walk); } } walks } // FIXME(adn) It should be possible to make this code parametric over // Dependency<W>, but I ran into a cryptic error about the SampleBorrow // trait not being implemented, and wasn't able to immediately make the code // typecheck. pub fn random_walk<L, G, RNG>( seed_set: Option<SeedSet<Id<G::Node>>>, network: &G, ledger_view: &L, rng: RNG, get_weight: &dyn Fn(&<G::Edge as GraphObject>::Metadata) -> f64, ) -> Result<WalkResult<G, <G::Node as GraphObject>::Id>, OsrankError> where L: LedgerView, G: Graph + Clone, Id<G::Node>: Clone + Eq + Hash, RNG: Rng + SeedableRng, { match seed_set { Some(seeds) => { let walks = walks(seeds.seedset_iter(), network, ledger_view, rng, get_weight); let mut trusted_node_ids: Vec<&Id<G::Node>> = Vec::new(); for node in network.nodes() { if rank_node::<L, G>(&walks, node.id().clone(), ledger_view) > Osrank::zero() { trusted_node_ids.push(&node.id()); } } Ok(WalkResult { network_view: network.subgraph_by_nodes(trusted_node_ids), walks, }) } None => { let whole_network = (*network).clone(); // FIXME, terrible. let all_node_ids = network.nodes().map(|n| n.id()); let res = WalkResult { network_view: whole_network, walks: walks(all_node_ids, network, ledger_view, rng, get_weight), }; Ok(res) } } } /// Naive version of the algorithm that, given a full Network and a precomputed /// set W of random walks, iterates over each edge of the Network and computes /// the osrank. pub fn osrank_naive<L, G, RNG>( seed_set: Option<SeedSet<Id<G::Node>>>, network: &mut G, ledger_view: &L, initial_seed: <RNG as SeedableRng>::Seed, get_weight: Box<dyn Fn(&<G::Edge as GraphObject>::Metadata) -> f64>, from_osrank: Box<dyn Fn(&G::Node, Osrank) -> Metadata<G::Node>>, ) -> Result<(), OsrankError> where L: LedgerView, G: Graph + Clone, Id<G::Node>: Clone + Eq + Hash, RNG: Rng + SeedableRng, <RNG as SeedableRng>::Seed: Clone, { //NOTE(adn) The fact we are creating a new RNG every time we call // `random_walk` is deliberate and something to think about. We probably // want to "restart" the randomness from the initial seed every call to // `random_walk`, which means this function has to consume the RNG. match seed_set { Some(_) => { // Phase1, rank the network and produce a NetworkView. 
let phase1 = random_walk( seed_set, &*network, ledger_view, RNG::from_seed(initial_seed.clone()), &get_weight, )?; // Phase2, compute the osrank only on the NetworkView let phase2 = random_walk( None, &phase1.network_view, ledger_view, RNG::from_seed(initial_seed.clone()), &get_weight, )?; rank_network(&phase2.walks, &mut *network, ledger_view, &from_osrank) } None => { // Compute osrank on the full NetworkView let create_walks = random_walk( None, &*network, ledger_view, RNG::from_seed(initial_seed.clone()), &get_weight, )?; rank_network( &create_walks.walks, &mut *network, ledger_view, &from_osrank, ) } } } fn rank_node<L, G>( random_walks: &RandomWalks<Id<G::Node>>, node_id: Id<G::Node>, ledger_view: &L, ) -> Osrank where L: LedgerView, G: Graph, <G::Node as GraphObject>::Id: Eq + Clone + Hash, { let total_walks = random_walks.len(); let node_visits = random_walks.count_visits(&node_id); Fraction::from(1.0 - ledger_view.get_damping_factors().project) * Osrank::new(node_visits as u32, total_walks as u32) } pub fn rank_network<'a, L, G: 'a>( random_walks: &RandomWalks<Id<G::Node>>, network_view: &'a mut G, ledger_view: &L, from_osrank: &dyn Fn(&G::Node, Osrank) -> Metadata<G::Node>, ) -> Result<(), OsrankError> where L: LedgerView, G: Graph, <G::Node as GraphObject>::Id: Eq + Clone + Hash, { for node in network_view.nodes_mut() { let rank = rank_node::<L, G>(&random_walks, node.id().clone(), ledger_view); node.set_metadata(from_osrank(&node, rank)) } Ok(()) } #[cfg(test)] mod tests { extern crate rand; extern crate rand_xorshift; use super::*; use crate::protocol_traits::ledger::MockLedger; use crate::types::network::{Artifact, ArtifactType, DependencyType, Network}; use crate::types::Weight; use num_traits::Zero; use rand_xorshift::XorShiftRng; type MockNetwork = Network<f64>; #[test] fn everything_ok() { // build the example network let mut network = Network::default(); for node in &["p1", "p2", "p3"] { network.add_node( node.to_string(), ArtifactType::Project { osrank: Zero::zero(), }, ) } // Create the seed set from all projects let seed_set = SeedSet::from(vec!["p1".to_string(), "p2".to_string(), "p3".to_string()]); for node in &["a1", "a2", "a3", "isle"] { network.add_node( node.to_string(), ArtifactType::Account { osrank: Zero::zero(), }, ) } let edges = [ ("p1", "a1", Weight::new(3, 7)), ("a1", "p1", Weight::new(1, 1)), ("p1", "p2", Weight::new(4, 7)), ("p2", "a2", Weight::new(1, 1)), ("a2", "p2", Weight::new(1, 3)), ("a2", "p3", Weight::new(2, 3)), ("p3", "a2", Weight::new(11, 28)), ("p3", "a3", Weight::new(1, 28)), ("p3", "p1", Weight::new(2, 7)), ("p3", "p2", Weight::new(2, 7)), ("a3", "p3", Weight::new(1, 1)), ]; for edge in &edges { network.add_edge( &edge.0.to_string(), &edge.1.to_string(), 2, DependencyType::Influence(edge.2.as_f64().unwrap()), ) } let mock_ledger = MockLedger::default(); let get_weight = Box::new(|m: &DependencyType<f64>| *m.get_weight()); let set_osrank = Box::new(|node: &Artifact<String>, rank| match node.get_metadata() { ArtifactType::Project { osrank: _ } => ArtifactType::Project { osrank: rank }, ArtifactType::Account { osrank: _ } => ArtifactType::Account { osrank: rank }, }); assert_eq!(network.edge_count(), 11); // This is the insertion point of the Graph API. If we had a GraphAPI // "handle" in scope here, we could extract the seed from some state // and use it in the algorithm. Faking it for now. 
let initial_seed = [0; 16]; assert_eq!( osrank_naive::<MockLedger, MockNetwork, XorShiftRng>( Some(seed_set), &mut network, &mock_ledger, initial_seed, get_weight, set_osrank ) .unwrap(), () ); assert_eq!( network.nodes().fold(Vec::new(), |mut ranks, node| { // let bla = *node.get_metadata(); ranks.push(format!("{}", *node)); ranks }), vec![ "id: p1 osrank: 0.1425", "id: p2 osrank: 0.2225", "id: p3 osrank: 0.1575", "id: a1 osrank: 0.08", "id: a2 osrank: 0.23", "id: a3 osrank: 0.055", "id: isle osrank: 0" ] ); } }
WalkResult
identifier_name
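The walks function above implements a damped, weight-biased random walk: with probability equal to the damping factor the walk takes another step, and the next edge is drawn with SliceRandom::choose_weighted. A compressed sketch of that loop, assuming the same rand/rand_xorshift-era APIs the file uses; the fixed two-edge neighbour list and the damping value are made-up stand-ins for the Graph and LedgerView traits:

use rand::seq::SliceRandom;
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;

fn main() {
    // Deterministic RNG, as in the test's `initial_seed = [0; 16]`.
    let mut rng = XorShiftRng::from_seed([0; 16]);
    // Outgoing edges of "p1" with the example network's weights 4/7 and 3/7;
    // kept fixed for brevity, so this only models repeated draws from one node.
    let neighbours = [("p2", 4.0 / 7.0), ("a1", 3.0 / 7.0)];
    let damping = 0.85; // illustrative; the real value comes from the LedgerView
    let mut current = "p1";
    while rng.gen::<f64>() < damping {
        match neighbours.choose_weighted(&mut rng, |&(_, w)| w) {
            Ok(&(next, _)) => current = next,
            Err(_) => break, // no outgoing edges: the walk ends here
        }
    }
    println!("walk stopped at {}", current);
}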
channel.rs
use std::any::Any; use std::collections::VecDeque; use std::fmt; use std::sync::{Arc, Mutex}; use futures::sync::oneshot; use futures::Future; use base::types::{ArcType, Type}; use api::generic::A; use api::{ primitive, AsyncPushable, Function, FunctionRef, FutureResult, Generic, Getable, OpaqueValue, OwnedFunction, Pushable, RuntimeResult, VmType, WithVM, IO, }; use gc::{Gc, GcPtr, Traverseable}; use stack::{StackFrame, State}; use thread::{OwnedContext, ThreadInternal}; use types::VmInt; use value::{Callable, GcStr, Userdata, ValueRepr}; use vm::{RootedThread, Status, Thread}; use {Error, ExternModule, Result as VmResult}; pub struct Sender<T> { // No need to traverse this thread reference as any thread having a reference to this `Sender` // would also directly own a reference to the `Thread` thread: GcPtr<Thread>, queue: Arc<Mutex<VecDeque<T>>>, } impl<T> Userdata for Sender<T> where T: Any + Send + Sync + fmt::Debug + Traverseable, { } impl<T> fmt::Debug for Sender<T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", *self.queue.lock().unwrap()) } } impl<T> Traverseable for Sender<T> { fn traverse(&self, _gc: &mut Gc) { // No need to traverse in Sender as values can only be accessed through Receiver } } impl<T> Sender<T> { fn send(&self, value: T) { self.queue.lock().unwrap().push_back(value); } } impl<T: Traverseable> Traverseable for Receiver<T> { fn traverse(&self, gc: &mut Gc) { self.queue.lock().unwrap().traverse(gc); } } pub struct Receiver<T> { queue: Arc<Mutex<VecDeque<T>>>, } impl<T> Userdata for Receiver<T> where T: Any + Send + Sync + fmt::Debug + Traverseable, { } impl<T> fmt::Debug for Receiver<T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", *self.queue.lock().unwrap()) } } impl<T> Receiver<T> { fn try_recv(&self) -> Result<T, ()> { self.queue.lock().unwrap().pop_front().ok_or(()) } } impl<T: VmType> VmType for Sender<T> where T::Type: Sized, { type Type = Sender<T::Type>; fn make_type(vm: &Thread) -> ArcType { let symbol = vm .global_env() .get_env() .find_type_info("Sender") .unwrap() .name .clone(); Type::app(Type::ident(symbol), collect![T::make_type(vm)]) } } impl<T: VmType> VmType for Receiver<T> where T::Type: Sized, { type Type = Receiver<T::Type>; fn make_type(vm: &Thread) -> ArcType { let symbol = vm .global_env() .get_env() .find_type_info("Receiver") .unwrap() .name .clone(); Type::app(Type::ident(symbol), collect![T::make_type(vm)]) } } field_decl!{ sender, receiver } pub type ChannelRecord<S, R> = record_type!(sender => S, receiver => R); /// FIXME The dummy `a` argument should not be needed to ensure that the channel can only be used /// with a single type fn channel( WithVM { vm, .. 
}: WithVM<Generic<A>>, ) -> ChannelRecord<Sender<Generic<A>>, Receiver<Generic<A>>> { let sender = Sender { thread: unsafe { GcPtr::from_raw(vm) }, queue: Arc::new(Mutex::new(VecDeque::new())), }; let receiver = Receiver { queue: sender.queue.clone(), }; record_no_decl!(sender => sender, receiver => receiver) } fn recv(receiver: &Receiver<Generic<A>>) -> Result<Generic<A>, ()> { receiver.try_recv().map_err(|_| ()) } fn send(sender: &Sender<Generic<A>>, value: Generic<A>) -> Result<(), ()> { unsafe { let value = sender .thread .deep_clone_value(&sender.thread, value.get_value()) .map_err(|_| ())?; Ok(sender.send(Generic::from(value))) } } extern "C" fn resume(vm: &Thread) -> Status { let mut context = vm.context(); let value = StackFrame::current(&mut context.stack)[0].get_repr(); match value { ValueRepr::Thread(child) => { let lock = StackFrame::current(&mut context.stack).into_lock(); drop(context); let result = child.resume(); context = vm.context(); context.stack.release_lock(lock); match result { Ok(child_context) => { // Prevent dead lock if the following status_push call allocates drop(child_context); let value: Result<(), &str> = Ok(()); value.status_push(vm, &mut context) } Err(Error::Dead) => { let value: Result<(), &str> = Err("Attempted to resume a dead thread"); value.status_push(vm, &mut context) } Err(err) => { let fmt = format!("{}", err); let result = unsafe { ValueRepr::String(GcStr::from_utf8_unchecked( context.alloc_ignore_limit(fmt.as_bytes()), )) }; context.stack.push(result); Status::Error } } } _ => unreachable!(), } } extern "C" fn yield_(_vm: &Thread) -> Status { Status::Yield } fn spawn<'vm>( value: WithVM<'vm, Function<&'vm Thread, fn(())>>, ) -> RuntimeResult<RootedThread, Error> { spawn_(value).into() } fn spawn_<'vm>(value: WithVM<'vm, Function<&'vm Thread, fn(())>>) -> VmResult<RootedThread> { let thread = value.vm.new_thread()?; { let mut context = thread.context(); let callable = match value.value.get_variant().0 { ValueRepr::Closure(c) => State::Closure(c), ValueRepr::Function(c) => State::Extern(c), _ => State::Unknown, }; value.value.push(value.vm, &mut context)?; context.stack.push(ValueRepr::Int(0)); StackFrame::current(&mut context.stack).enter_scope(1, callable); } Ok(thread) } type Action = fn(()) -> OpaqueValue<RootedThread, IO<Generic<A>>>; #[cfg(target_arch = "wasm32")] fn spawn_on<'vm>( _thread: RootedThread, _action: WithVM<'vm, FunctionRef<Action>>, ) -> IO<OpaqueValue<&'vm Thread, IO<Generic<A>>>> { IO::Exception("spawn_on requires the `tokio_core` crate".to_string()) } #[cfg(not(target_arch = "wasm32"))] fn spawn_on<'vm>( thread: RootedThread, action: WithVM<'vm, FunctionRef<Action>>, ) -> IO<OpaqueValue<&'vm Thread, IO<Generic<A>>>> { struct SpawnFuture<F>(Mutex<Option<F>>); impl<F> fmt::Debug for SpawnFuture<F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Future") } } impl<F> Userdata for SpawnFuture<F> where F: Send + 'static, { } impl<F> Traverseable for SpawnFuture<F> { fn traverse(&self, _: &mut Gc) {} } impl<F> VmType for SpawnFuture<F> { type Type = Generic<A>; } fn push_future_wrapper<G>(vm: &Thread, context: &mut OwnedContext, _: &G) where G: Future<Item = OpaqueValue<RootedThread, IO<Generic<A>>>, Error = Error> + Send + 'static, { extern "C" fn future_wrapper<F>(vm: &Thread) -> Status where F: Future<Item = OpaqueValue<RootedThread, IO<Generic<A>>>, Error = Error> + Send + 'static, { let mut context = vm.context(); let value = StackFrame::current(&mut context.stack)[0].get_repr(); match value { 
ValueRepr::Userdata(data) => { let data = data.downcast_ref::<SpawnFuture<F>>().unwrap(); let future = data.0.lock().unwrap().take().unwrap(); let lock = StackFrame::current(&mut context.stack).insert_lock(); AsyncPushable::async_status_push( FutureResult::new(future), vm, &mut context, lock, ) } _ => unreachable!(), } } type FutureArg = (); primitive::<fn(FutureArg) -> IO<Generic<A>>>("unknown", future_wrapper::<G>) .push(vm, context) .unwrap(); } use value::PartialApplicationDataDef; let WithVM { vm, value: action } = action; let mut action = OwnedFunction::<Action>::from_value(&thread, action.get_variant()); let future = oneshot::spawn_fn( move || action.call_async(()), &vm.global_env().get_event_loop().expect("event loop"), ); let mut context = vm.context(); push_future_wrapper(vm, &mut context, &future); let callable = match context.stack[context.stack.len() - 1].get_repr() { ValueRepr::Function(ext) => Callable::Extern(ext), _ => unreachable!(), }; SpawnFuture(Mutex::new(Some(future))) .push(vm, &mut context) .unwrap(); let fields = [context.stack.get_values().last().unwrap().clone()]; let def = PartialApplicationDataDef(callable, &fields); let value = ValueRepr::PartialApplication(context.alloc_with(vm, def).unwrap()).into(); context.stack.pop_many(2); // TODO Remove rooting here IO::Value(OpaqueValue::from_value(vm.root_value(value))) } fn new_thread(WithVM { vm, .. }: WithVM<()>) -> IO<RootedThread> { match vm.new_thread() { Ok(thread) => IO::Value(thread), Err(err) => IO::Exception(err.to_string()), } } fn sleep(ms: VmInt) -> IO<()> { use std::time::Duration; ::std::thread::sleep(Duration::from_millis(ms as u64)); IO::Value(()) } fn interrupt(thread: RootedThread) -> IO<()> { thread.interrupt(); IO::Value(()) } mod std { pub use channel; pub mod thread { pub use channel as prim; } } pub fn load_channel<'vm>(vm: &'vm Thread) -> VmResult<ExternModule> { let _ = vm.register_type::<Sender<A>>("Sender", &["a"]); let _ = vm.register_type::<Receiver<A>>("Receiver", &["a"]); ExternModule::new( vm, record!{ type Sender a => Sender<A>, type Receiver a => Receiver<A>, channel => primitive!(1 std::channel::channel), recv => primitive!(1 std::channel::recv), send => primitive!(2 std::channel::send), }, ) } pub fn load_thread<'vm>(vm: &'vm Thread) -> VmResult<ExternModule>
{ ExternModule::new( vm, record!{ resume => primitive::<fn(&'vm Thread) -> Result<(), String>>("std.thread.prim.resume", resume), (yield_ "yield") => primitive::<fn(())>("std.thread.prim.yield", yield_), spawn => primitive!(1 std::thread::prim::spawn), spawn_on => primitive!(2 std::thread::prim::spawn_on), new_thread => primitive!(1 std::thread::prim::new_thread), interrupt => primitive!(1 std::thread::prim::interrupt), sleep => primitive!(1 std::thread::prim::sleep) }, ) }
identifier_body
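Stripped of the VM integration, the channel in the row above is just a shared VecDeque behind an Arc<Mutex<..>>: Sender::send is push_back and Receiver::try_recv is pop_front with () as the error. A dependency-free sketch of those semantics:

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};

fn main() {
    let queue: Arc<Mutex<VecDeque<i32>>> = Arc::new(Mutex::new(VecDeque::new()));
    let sender_side = Arc::clone(&queue); // Sender and Receiver share the queue

    sender_side.lock().unwrap().push_back(42); // what Sender::send does
    let received: Result<i32, ()> = queue.lock().unwrap().pop_front().ok_or(()); // Receiver::try_recv
    assert_eq!(received, Ok(42));
    // A second recv on the now-empty queue fails, matching try_recv's Err(()).
    assert_eq!(queue.lock().unwrap().pop_front().ok_or(()), Err::<i32, ()>(()));
}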
channel.rs
use std::any::Any; use std::collections::VecDeque; use std::fmt; use std::sync::{Arc, Mutex}; use futures::sync::oneshot; use futures::Future; use base::types::{ArcType, Type}; use api::generic::A; use api::{ primitive, AsyncPushable, Function, FunctionRef, FutureResult, Generic, Getable, OpaqueValue, OwnedFunction, Pushable, RuntimeResult, VmType, WithVM, IO, }; use gc::{Gc, GcPtr, Traverseable}; use stack::{StackFrame, State}; use thread::{OwnedContext, ThreadInternal}; use types::VmInt; use value::{Callable, GcStr, Userdata, ValueRepr}; use vm::{RootedThread, Status, Thread}; use {Error, ExternModule, Result as VmResult}; pub struct Sender<T> { // No need to traverse this thread reference as any thread having a reference to this `Sender` // would also directly own a reference to the `Thread` thread: GcPtr<Thread>, queue: Arc<Mutex<VecDeque<T>>>, } impl<T> Userdata for Sender<T> where T: Any + Send + Sync + fmt::Debug + Traverseable, { } impl<T> fmt::Debug for Sender<T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", *self.queue.lock().unwrap()) } } impl<T> Traverseable for Sender<T> { fn traverse(&self, _gc: &mut Gc) { // No need to traverse in Sender as values can only be accessed through Receiver } } impl<T> Sender<T> { fn send(&self, value: T) { self.queue.lock().unwrap().push_back(value); } } impl<T: Traverseable> Traverseable for Receiver<T> { fn traverse(&self, gc: &mut Gc) { self.queue.lock().unwrap().traverse(gc); } } pub struct Receiver<T> { queue: Arc<Mutex<VecDeque<T>>>, } impl<T> Userdata for Receiver<T> where T: Any + Send + Sync + fmt::Debug + Traverseable, { } impl<T> fmt::Debug for Receiver<T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", *self.queue.lock().unwrap()) } } impl<T> Receiver<T> { fn try_recv(&self) -> Result<T, ()> { self.queue.lock().unwrap().pop_front().ok_or(()) } } impl<T: VmType> VmType for Sender<T> where T::Type: Sized, { type Type = Sender<T::Type>; fn make_type(vm: &Thread) -> ArcType { let symbol = vm .global_env() .get_env() .find_type_info("Sender") .unwrap() .name .clone(); Type::app(Type::ident(symbol), collect![T::make_type(vm)]) } } impl<T: VmType> VmType for Receiver<T> where T::Type: Sized, { type Type = Receiver<T::Type>; fn make_type(vm: &Thread) -> ArcType { let symbol = vm .global_env() .get_env() .find_type_info("Receiver") .unwrap() .name .clone(); Type::app(Type::ident(symbol), collect![T::make_type(vm)]) } } field_decl!{ sender, receiver } pub type ChannelRecord<S, R> = record_type!(sender => S, receiver => R); /// FIXME The dummy `a` argument should not be needed to ensure that the channel can only be used /// with a single type fn channel( WithVM { vm, .. 
}: WithVM<Generic<A>>, ) -> ChannelRecord<Sender<Generic<A>>, Receiver<Generic<A>>> { let sender = Sender { thread: unsafe { GcPtr::from_raw(vm) }, queue: Arc::new(Mutex::new(VecDeque::new())), }; let receiver = Receiver { queue: sender.queue.clone(), }; record_no_decl!(sender => sender, receiver => receiver) } fn recv(receiver: &Receiver<Generic<A>>) -> Result<Generic<A>, ()> { receiver.try_recv().map_err(|_| ()) } fn send(sender: &Sender<Generic<A>>, value: Generic<A>) -> Result<(), ()> { unsafe { let value = sender .thread .deep_clone_value(&sender.thread, value.get_value()) .map_err(|_| ())?; Ok(sender.send(Generic::from(value))) } } extern "C" fn resume(vm: &Thread) -> Status { let mut context = vm.context(); let value = StackFrame::current(&mut context.stack)[0].get_repr(); match value { ValueRepr::Thread(child) => { let lock = StackFrame::current(&mut context.stack).into_lock(); drop(context); let result = child.resume(); context = vm.context(); context.stack.release_lock(lock); match result { Ok(child_context) => { // Prevent dead lock if the following status_push call allocates drop(child_context); let value: Result<(), &str> = Ok(()); value.status_push(vm, &mut context) } Err(Error::Dead) => { let value: Result<(), &str> = Err("Attempted to resume a dead thread"); value.status_push(vm, &mut context) } Err(err) => { let fmt = format!("{}", err); let result = unsafe { ValueRepr::String(GcStr::from_utf8_unchecked( context.alloc_ignore_limit(fmt.as_bytes()), )) }; context.stack.push(result); Status::Error } } } _ => unreachable!(), } } extern "C" fn yield_(_vm: &Thread) -> Status { Status::Yield } fn spawn<'vm>( value: WithVM<'vm, Function<&'vm Thread, fn(())>>, ) -> RuntimeResult<RootedThread, Error> { spawn_(value).into() } fn spawn_<'vm>(value: WithVM<'vm, Function<&'vm Thread, fn(())>>) -> VmResult<RootedThread> { let thread = value.vm.new_thread()?; { let mut context = thread.context(); let callable = match value.value.get_variant().0 { ValueRepr::Closure(c) => State::Closure(c), ValueRepr::Function(c) => State::Extern(c), _ => State::Unknown, }; value.value.push(value.vm, &mut context)?; context.stack.push(ValueRepr::Int(0)); StackFrame::current(&mut context.stack).enter_scope(1, callable); } Ok(thread) } type Action = fn(()) -> OpaqueValue<RootedThread, IO<Generic<A>>>; #[cfg(target_arch = "wasm32")] fn spawn_on<'vm>( _thread: RootedThread, _action: WithVM<'vm, FunctionRef<Action>>, ) -> IO<OpaqueValue<&'vm Thread, IO<Generic<A>>>> { IO::Exception("spawn_on requires the `tokio_core` crate".to_string()) }
thread: RootedThread, action: WithVM<'vm, FunctionRef<Action>>, ) -> IO<OpaqueValue<&'vm Thread, IO<Generic<A>>>> { struct SpawnFuture<F>(Mutex<Option<F>>); impl<F> fmt::Debug for SpawnFuture<F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Future") } } impl<F> Userdata for SpawnFuture<F> where F: Send + 'static, { } impl<F> Traverseable for SpawnFuture<F> { fn traverse(&self, _: &mut Gc) {} } impl<F> VmType for SpawnFuture<F> { type Type = Generic<A>; } fn push_future_wrapper<G>(vm: &Thread, context: &mut OwnedContext, _: &G) where G: Future<Item = OpaqueValue<RootedThread, IO<Generic<A>>>, Error = Error> + Send + 'static, { extern "C" fn future_wrapper<F>(vm: &Thread) -> Status where F: Future<Item = OpaqueValue<RootedThread, IO<Generic<A>>>, Error = Error> + Send + 'static, { let mut context = vm.context(); let value = StackFrame::current(&mut context.stack)[0].get_repr(); match value { ValueRepr::Userdata(data) => { let data = data.downcast_ref::<SpawnFuture<F>>().unwrap(); let future = data.0.lock().unwrap().take().unwrap(); let lock = StackFrame::current(&mut context.stack).insert_lock(); AsyncPushable::async_status_push( FutureResult::new(future), vm, &mut context, lock, ) } _ => unreachable!(), } } type FutureArg = (); primitive::<fn(FutureArg) -> IO<Generic<A>>>("unknown", future_wrapper::<G>) .push(vm, context) .unwrap(); } use value::PartialApplicationDataDef; let WithVM { vm, value: action } = action; let mut action = OwnedFunction::<Action>::from_value(&thread, action.get_variant()); let future = oneshot::spawn_fn( move || action.call_async(()), &vm.global_env().get_event_loop().expect("event loop"), ); let mut context = vm.context(); push_future_wrapper(vm, &mut context, &future); let callable = match context.stack[context.stack.len() - 1].get_repr() { ValueRepr::Function(ext) => Callable::Extern(ext), _ => unreachable!(), }; SpawnFuture(Mutex::new(Some(future))) .push(vm, &mut context) .unwrap(); let fields = [context.stack.get_values().last().unwrap().clone()]; let def = PartialApplicationDataDef(callable, &fields); let value = ValueRepr::PartialApplication(context.alloc_with(vm, def).unwrap()).into(); context.stack.pop_many(2); // TODO Remove rooting here IO::Value(OpaqueValue::from_value(vm.root_value(value))) } fn new_thread(WithVM { vm, .. 
}: WithVM<()>) -> IO<RootedThread> { match vm.new_thread() { Ok(thread) => IO::Value(thread), Err(err) => IO::Exception(err.to_string()), } } fn sleep(ms: VmInt) -> IO<()> { use std::time::Duration; ::std::thread::sleep(Duration::from_millis(ms as u64)); IO::Value(()) } fn interrupt(thread: RootedThread) -> IO<()> { thread.interrupt(); IO::Value(()) } mod std { pub use channel; pub mod thread { pub use channel as prim; } } pub fn load_channel<'vm>(vm: &'vm Thread) -> VmResult<ExternModule> { let _ = vm.register_type::<Sender<A>>("Sender", &["a"]); let _ = vm.register_type::<Receiver<A>>("Receiver", &["a"]); ExternModule::new( vm, record!{ type Sender a => Sender<A>, type Receiver a => Receiver<A>, channel => primitive!(1 std::channel::channel), recv => primitive!(1 std::channel::recv), send => primitive!(2 std::channel::send), }, ) } pub fn load_thread<'vm>(vm: &'vm Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record!{ resume => primitive::<fn(&'vm Thread) -> Result<(), String>>("std.thread.prim.resume", resume), (yield_ "yield") => primitive::<fn(())>("std.thread.prim.yield", yield_), spawn => primitive!(1 std::thread::prim::spawn), spawn_on => primitive!(2 std::thread::prim::spawn_on), new_thread => primitive!(1 std::thread::prim::new_thread), interrupt => primitive!(1 std::thread::prim::interrupt), sleep => primitive!(1 std::thread::prim::sleep) }, ) }
#[cfg(not(target_arch = "wasm32"))] fn spawn_on<'vm>(
random_line_split
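The pair of spawn_on definitions split by this row's middle shows a common cfg-gating pattern: two functions with the same signature, of which exactly one is compiled for a given target. A toy reduction of the pattern (signatures simplified to Result<(), String>; only the error string is taken from the source):

// On wasm32 the body is a stub error; elsewhere the real implementation compiles.
#[cfg(target_arch = "wasm32")]
fn spawn_on() -> Result<(), String> {
    Err("spawn_on requires the `tokio_core` crate".to_string())
}

#[cfg(not(target_arch = "wasm32"))]
fn spawn_on() -> Result<(), String> {
    Ok(()) // stand-in for the tokio-backed implementation
}

fn main() {
    // Exactly one definition exists at compile time, so this call always resolves.
    let _ = spawn_on();
}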
channel.rs
use std::any::Any; use std::collections::VecDeque; use std::fmt; use std::sync::{Arc, Mutex}; use futures::sync::oneshot; use futures::Future; use base::types::{ArcType, Type}; use api::generic::A; use api::{ primitive, AsyncPushable, Function, FunctionRef, FutureResult, Generic, Getable, OpaqueValue, OwnedFunction, Pushable, RuntimeResult, VmType, WithVM, IO, }; use gc::{Gc, GcPtr, Traverseable}; use stack::{StackFrame, State}; use thread::{OwnedContext, ThreadInternal}; use types::VmInt; use value::{Callable, GcStr, Userdata, ValueRepr}; use vm::{RootedThread, Status, Thread}; use {Error, ExternModule, Result as VmResult}; pub struct Sender<T> { // No need to traverse this thread reference as any thread having a reference to this `Sender` // would also directly own a reference to the `Thread` thread: GcPtr<Thread>, queue: Arc<Mutex<VecDeque<T>>>, } impl<T> Userdata for Sender<T> where T: Any + Send + Sync + fmt::Debug + Traverseable, { } impl<T> fmt::Debug for Sender<T> where T: fmt::Debug, { fn
(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", *self.queue.lock().unwrap()) } } impl<T> Traverseable for Sender<T> { fn traverse(&self, _gc: &mut Gc) { // No need to traverse in Sender as values can only be accessed through Receiver } } impl<T> Sender<T> { fn send(&self, value: T) { self.queue.lock().unwrap().push_back(value); } } impl<T: Traverseable> Traverseable for Receiver<T> { fn traverse(&self, gc: &mut Gc) { self.queue.lock().unwrap().traverse(gc); } } pub struct Receiver<T> { queue: Arc<Mutex<VecDeque<T>>>, } impl<T> Userdata for Receiver<T> where T: Any + Send + Sync + fmt::Debug + Traverseable, { } impl<T> fmt::Debug for Receiver<T> where T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", *self.queue.lock().unwrap()) } } impl<T> Receiver<T> { fn try_recv(&self) -> Result<T, ()> { self.queue.lock().unwrap().pop_front().ok_or(()) } } impl<T: VmType> VmType for Sender<T> where T::Type: Sized, { type Type = Sender<T::Type>; fn make_type(vm: &Thread) -> ArcType { let symbol = vm .global_env() .get_env() .find_type_info("Sender") .unwrap() .name .clone(); Type::app(Type::ident(symbol), collect![T::make_type(vm)]) } } impl<T: VmType> VmType for Receiver<T> where T::Type: Sized, { type Type = Receiver<T::Type>; fn make_type(vm: &Thread) -> ArcType { let symbol = vm .global_env() .get_env() .find_type_info("Receiver") .unwrap() .name .clone(); Type::app(Type::ident(symbol), collect![T::make_type(vm)]) } } field_decl!{ sender, receiver } pub type ChannelRecord<S, R> = record_type!(sender => S, receiver => R); /// FIXME The dummy `a` argument should not be needed to ensure that the channel can only be used /// with a single type fn channel( WithVM { vm, .. }: WithVM<Generic<A>>, ) -> ChannelRecord<Sender<Generic<A>>, Receiver<Generic<A>>> { let sender = Sender { thread: unsafe { GcPtr::from_raw(vm) }, queue: Arc::new(Mutex::new(VecDeque::new())), }; let receiver = Receiver { queue: sender.queue.clone(), }; record_no_decl!(sender => sender, receiver => receiver) } fn recv(receiver: &Receiver<Generic<A>>) -> Result<Generic<A>, ()> { receiver.try_recv().map_err(|_| ()) } fn send(sender: &Sender<Generic<A>>, value: Generic<A>) -> Result<(), ()> { unsafe { let value = sender .thread .deep_clone_value(&sender.thread, value.get_value()) .map_err(|_| ())?; Ok(sender.send(Generic::from(value))) } } extern "C" fn resume(vm: &Thread) -> Status { let mut context = vm.context(); let value = StackFrame::current(&mut context.stack)[0].get_repr(); match value { ValueRepr::Thread(child) => { let lock = StackFrame::current(&mut context.stack).into_lock(); drop(context); let result = child.resume(); context = vm.context(); context.stack.release_lock(lock); match result { Ok(child_context) => { // Prevent dead lock if the following status_push call allocates drop(child_context); let value: Result<(), &str> = Ok(()); value.status_push(vm, &mut context) } Err(Error::Dead) => { let value: Result<(), &str> = Err("Attempted to resume a dead thread"); value.status_push(vm, &mut context) } Err(err) => { let fmt = format!("{}", err); let result = unsafe { ValueRepr::String(GcStr::from_utf8_unchecked( context.alloc_ignore_limit(fmt.as_bytes()), )) }; context.stack.push(result); Status::Error } } } _ => unreachable!(), } } extern "C" fn yield_(_vm: &Thread) -> Status { Status::Yield } fn spawn<'vm>( value: WithVM<'vm, Function<&'vm Thread, fn(())>>, ) -> RuntimeResult<RootedThread, Error> { spawn_(value).into() } fn spawn_<'vm>(value: WithVM<'vm, Function<&'vm 
Thread, fn(())>>) -> VmResult<RootedThread> { let thread = value.vm.new_thread()?; { let mut context = thread.context(); let callable = match value.value.get_variant().0 { ValueRepr::Closure(c) => State::Closure(c), ValueRepr::Function(c) => State::Extern(c), _ => State::Unknown, }; value.value.push(value.vm, &mut context)?; context.stack.push(ValueRepr::Int(0)); StackFrame::current(&mut context.stack).enter_scope(1, callable); } Ok(thread) } type Action = fn(()) -> OpaqueValue<RootedThread, IO<Generic<A>>>; #[cfg(target_arch = "wasm32")] fn spawn_on<'vm>( _thread: RootedThread, _action: WithVM<'vm, FunctionRef<Action>>, ) -> IO<OpaqueValue<&'vm Thread, IO<Generic<A>>>> { IO::Exception("spawn_on requires the `tokio_core` crate".to_string()) } #[cfg(not(target_arch = "wasm32"))] fn spawn_on<'vm>( thread: RootedThread, action: WithVM<'vm, FunctionRef<Action>>, ) -> IO<OpaqueValue<&'vm Thread, IO<Generic<A>>>> { struct SpawnFuture<F>(Mutex<Option<F>>); impl<F> fmt::Debug for SpawnFuture<F> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Future") } } impl<F> Userdata for SpawnFuture<F> where F: Send + 'static, { } impl<F> Traverseable for SpawnFuture<F> { fn traverse(&self, _: &mut Gc) {} } impl<F> VmType for SpawnFuture<F> { type Type = Generic<A>; } fn push_future_wrapper<G>(vm: &Thread, context: &mut OwnedContext, _: &G) where G: Future<Item = OpaqueValue<RootedThread, IO<Generic<A>>>, Error = Error> + Send + 'static, { extern "C" fn future_wrapper<F>(vm: &Thread) -> Status where F: Future<Item = OpaqueValue<RootedThread, IO<Generic<A>>>, Error = Error> + Send + 'static, { let mut context = vm.context(); let value = StackFrame::current(&mut context.stack)[0].get_repr(); match value { ValueRepr::Userdata(data) => { let data = data.downcast_ref::<SpawnFuture<F>>().unwrap(); let future = data.0.lock().unwrap().take().unwrap(); let lock = StackFrame::current(&mut context.stack).insert_lock(); AsyncPushable::async_status_push( FutureResult::new(future), vm, &mut context, lock, ) } _ => unreachable!(), } } type FutureArg = (); primitive::<fn(FutureArg) -> IO<Generic<A>>>("unknown", future_wrapper::<G>) .push(vm, context) .unwrap(); } use value::PartialApplicationDataDef; let WithVM { vm, value: action } = action; let mut action = OwnedFunction::<Action>::from_value(&thread, action.get_variant()); let future = oneshot::spawn_fn( move || action.call_async(()), &vm.global_env().get_event_loop().expect("event loop"), ); let mut context = vm.context(); push_future_wrapper(vm, &mut context, &future); let callable = match context.stack[context.stack.len() - 1].get_repr() { ValueRepr::Function(ext) => Callable::Extern(ext), _ => unreachable!(), }; SpawnFuture(Mutex::new(Some(future))) .push(vm, &mut context) .unwrap(); let fields = [context.stack.get_values().last().unwrap().clone()]; let def = PartialApplicationDataDef(callable, &fields); let value = ValueRepr::PartialApplication(context.alloc_with(vm, def).unwrap()).into(); context.stack.pop_many(2); // TODO Remove rooting here IO::Value(OpaqueValue::from_value(vm.root_value(value))) } fn new_thread(WithVM { vm, .. 
}: WithVM<()>) -> IO<RootedThread> { match vm.new_thread() { Ok(thread) => IO::Value(thread), Err(err) => IO::Exception(err.to_string()), } } fn sleep(ms: VmInt) -> IO<()> { use std::time::Duration; ::std::thread::sleep(Duration::from_millis(ms as u64)); IO::Value(()) } fn interrupt(thread: RootedThread) -> IO<()> { thread.interrupt(); IO::Value(()) } mod std { pub use channel; pub mod thread { pub use channel as prim; } } pub fn load_channel<'vm>(vm: &'vm Thread) -> VmResult<ExternModule> { let _ = vm.register_type::<Sender<A>>("Sender", &["a"]); let _ = vm.register_type::<Receiver<A>>("Receiver", &["a"]); ExternModule::new( vm, record!{ type Sender a => Sender<A>, type Receiver a => Receiver<A>, channel => primitive!(1 std::channel::channel), recv => primitive!(1 std::channel::recv), send => primitive!(2 std::channel::send), }, ) } pub fn load_thread<'vm>(vm: &'vm Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record!{ resume => primitive::<fn(&'vm Thread) -> Result<(), String>>("std.thread.prim.resume", resume), (yield_ "yield") => primitive::<fn(())>("std.thread.prim.yield", yield_), spawn => primitive!(1 std::thread::prim::spawn), spawn_on => primitive!(2 std::thread::prim::spawn_on), new_thread => primitive!(1 std::thread::prim::new_thread), interrupt => primitive!(1 std::thread::prim::interrupt), sleep => primitive!(1 std::thread::prim::sleep) }, ) }
fmt
identifier_name
gdbstub.rs
use std::io::{self, Read, Write}; use std::ops::{Add, AddAssign}; use std::str; use std::thread; use std::time::Duration; use mio; use mio::tcp::{TcpListener, TcpStream}; use cpu::BreakReason; use dbgcore::{self, ActiveCpu::Arm9}; use hwcore::Message; use msgs; use utils; #[derive(Debug, Error)] pub enum ErrorKind { Hex(::std::num::ParseIntError), Io(io::Error), /// Client should not expect a response NoResponse, /// Could not find next element to parse Parse, } pub type Result<T> = ::std::result::Result<T, ErrorKind>; fn parse_next<T, I: Iterator<Item=T>>(it: &mut I) -> Result<T> { it.next().ok_or(ErrorKind::Parse.into()) } fn parse_next_hex<'a, I: Iterator<Item=&'a str>>(it: &mut I) -> Result<u32> { Ok(utils::from_hex(parse_next(it)?)?) } fn cmd_step(ctx: &mut GdbCtx) -> Result<String> { ctx.dbg.hw().step(); let break_data = BreakData::new(BreakReason::LimitReached, ctx.dbg); let signal = break_data.to_signal(); *ctx.last_halt = break_data; Ok(signal) } fn cmd_continue(ctx: &mut GdbCtx) -> Result<String> { ctx.dbg.resume(); Err(ErrorKind::NoResponse) } struct BreakData { reason: BreakReason, r15: u32, r13: u32 } impl BreakData { fn new(reason: BreakReason, dbg: &mut dbgcore::DbgContext) -> BreakData { let hw = dbg.hw(); BreakData { reason: reason, r15: hw.pause_addr(), r13: hw.read_reg(13), } } fn to_signal(&self) -> String { let reason_str = match self.reason { BreakReason::Breakpoint => format!(";{}:", "swbreak"), _ => String::new(), }; format!("T05{:02X}:{:08X};{:02X}:{:08X}{};", 15, self.r15.swap_bytes(), 13, self.r13.swap_bytes(), reason_str) } } fn handle_gdb_cmd_q(cmd: &str, _ctx: &mut GdbCtx) -> Result<String> { let mut s = cmd.splitn(2, ':'); let ty = parse_next(&mut s)?; let mut out = String::new(); match ty { "fThreadInfo" => out += "m0000000000000001", "sThreadInfo" => out += "l", "C" => out += "QC0000000000000001", "Attached" => out += "1", "Supported" => { out += "PacketSize=400;BreakpointCommands+;swbreak+;vContSupported+"; } _ => warn!("GDB client tried to run unsupported `q` command {}", ty) } Ok(out) } fn handle_gdb_cmd_v(cmd: &str, ctx: &mut GdbCtx) -> Result<String> { let mut s = cmd.splitn(2, |c| c == ',' || c == ':' || c == ';'); let ty = parse_next(&mut s)?; let mut out = String::new(); match ty { "Cont" => { let params = parse_next(&mut s)?; let threads = params.split(';'); for thread in threads { let mut thread_data = thread.split(':'); let action = parse_next(&mut thread_data)?; let thread_name = thread_data.next(); if let Some(name) = thread_name { match (name, utils::from_hex(name)) { | ("-1", _) | (_, Ok(0)) | (_, Ok(1)) => {} | (s, _) => panic!("Attempted to issue command on invalid thread id {}", s) } } match action { "c" => return cmd_continue(ctx), "s" => return cmd_step(ctx), _ => warn!("GDB client tried to run unsupported `vCont` action {}", action) } } } "Cont?" 
=> { let supported = ["c", "s"]; out += "vCont"; for ty in supported.iter() { out += ";"; out += ty; } } _ => warn!("GDB client tried to run unsupported `v` command {}", ty) } Ok(out) } fn handle_gdb_cmd(cmd: &str, ctx: &mut GdbCtx) -> Result<String> { let ty = parse_next(&mut cmd.chars())?; let params = &cmd[1..]; let mut out = String::new(); ctx.dbg.pause(); match ty { 'g' => { let hw = ctx.dbg.hw(); for reg in 0..15 { out += &format!("{:08X}", hw.read_reg(reg).swap_bytes()); } out += &format!("{:08X}", hw.pause_addr().swap_bytes()); for _ in 0..8 { out += "xxxxxxxxxxxxxxxxxxxxxxxx"; // fX registers (12 bytes each) } out += "xxxxxxxx"; // fps register out += &format!("{:08X}", hw.read_cpsr().swap_bytes()); } 'G' => { let mut hw = ctx.dbg.hw(); let mut regs = params; let next_reg = |regstr: &str| -> Result<u32> { let val = utils::from_hex(&regstr[..8])?; Ok(val.swap_bytes()) }; for reg in 0..15 { hw.write_reg(reg, next_reg(regs)?); regs = &regs[8..]; } // register at 15: PC hw.branch_to(next_reg(regs)?); regs = &regs[8..]; // Skip 8 fX registers regs = &regs[8 * 24..]; // Skip fps register regs = &regs[8..]; // register at 25: CPSR hw.write_cpsr(next_reg(regs)?); out += "OK"; } 'H' => { out += "OK"; } 'm' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(','); let addr = parse_next_hex(&mut params)?; let size = parse_next_hex(&mut params)?; let mut buf = [0u8]; for b in 0..size { if let Err(_) = hw.read_mem(addr+b, &mut buf) { out += "00"; } else { out += &format!("{:02X}", buf[0]); } } } 'M' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(|c| c == ',' || c == ':'); let addr = parse_next_hex(&mut params)?; let size = parse_next_hex(&mut params)?; let data = parse_next(&mut params)?; for b in 0..size { let data_byte_range = 2*(b as usize)..2*((b as usize)+1); let byte = utils::from_hex(&data[data_byte_range])? as u8; hw.write_mem(addr+b, &[byte]); } out += "OK"; } 'p' => { let hw = ctx.dbg.hw(); let reg = utils::from_hex(&params)? as usize; let regval = match reg { 0..=14 => hw.read_reg(reg), 15 => hw.pause_addr(), 25 => hw.read_cpsr(), n => { warn!("GDB requested bad register value {}", n); 0 } }; out += &format!("{:08X}", regval.swap_bytes()); } 'q' => { return handle_gdb_cmd_q(params, ctx); } 's' => { return cmd_step(ctx); } 'c' => { return cmd_continue(ctx); } 'v' => { return handle_gdb_cmd_v(params, ctx); } 'z' | 'Z' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(','); let brk_ty = parse_next(&mut params)?; let addr = parse_next_hex(&mut params)?; let _kind = parse_next(&mut params)?; assert!(brk_ty == "0"); if ty == 'Z' { hw.set_breakpoint(addr); } else { hw.del_breakpoint(addr); } out += "OK"; } '?' 
=> { out += &ctx.last_halt.to_signal(); } x => { warn!("GDB client tried to run unsupported command {}", x); } } Ok(out) } #[derive(Clone, Copy)] struct Checksum(pub u32); impl Add<u8> for Checksum { type Output = Checksum; fn add(self, b: u8) -> Checksum { Checksum((self.0 + (b as u32)) % 256) } } impl AddAssign<u8> for Checksum { fn add_assign(&mut self, b: u8) { self.0 = (*self + b).0; } } enum PacketType { Command(String), CtrlC, AckOk, AckErr, EndOfPacket, Malformed, } fn load_packet<I: Iterator<Item = u8>>(it: &mut I) -> PacketType { let mut it = it.skip_while(|b| *b != 0x03 && *b != b'$' && *b != b'-' && *b != b'+'); match it.next() { Some(0x3) => return PacketType::CtrlC, Some(b'$') => {} Some(b'+') => return PacketType::AckOk, Some(b'-') => return PacketType::AckErr, None => return PacketType::EndOfPacket, _ => return PacketType::Malformed } let mut string = String::new(); let mut checksum = Checksum(0); for b in it.by_ref().take_while(|b| *b != b'#') { string.push(b as char); checksum += b; } if let (Some(top), Some(bot)) = (it.next(), it.next()) { let packet_checksum = str::from_utf8(&[top, bot]).ok() .and_then(|s| utils::from_hex(s).ok()); if Some(checksum.0) == packet_checksum { return PacketType::Command(string) } } return PacketType::Malformed } fn write_gdb_packet(data: &str, stream: &mut TcpStream) -> Result<()> { let checksum = data.bytes().fold(Checksum(0), |checksum, b| checksum + b); trace!("Replying with GDB packet: ${}#{:02X}", data, checksum.0); write!(stream, "${}#{:02X}", data, checksum.0)?; stream.flush()?; Ok(()) } fn handle_gdb_packet(data: &[u8], stream: &mut TcpStream, ctx: &mut GdbCtx) -> Result<()> { trace!("Receiving GDB packet: {}", str::from_utf8(data).unwrap()); let mut it = data.iter().cloned(); loop { match load_packet(&mut it) { PacketType::Command(cmd) => { stream.write(b"+")?; stream.flush()?; match handle_gdb_cmd(&cmd, ctx) { Ok(out) => write_gdb_packet(&out, stream)?, Err(e) => { if let ErrorKind::NoResponse = e {} else { return Err(e) } } } } PacketType::CtrlC => { ctx.dbg.pause(); trace!("Received GDB packet with CTRL-C signal!"); } PacketType::AckOk => {}, PacketType::AckErr => error!("GDB client replied with error packet!"), PacketType::EndOfPacket => { return Ok(()) } PacketType::Malformed => { trace!("Received malformed data {:?}", data); stream.write(b"-")?; stream.flush()?; return Ok(()) } } } } struct GdbCtx<'a, 'b: 'a> { dbg: &'a mut dbgcore::DbgContext<'b>, last_halt: &'a mut BreakData, } const TOKEN_LISTENER: mio::Token = mio::Token(1024); const TOKEN_CLIENT: mio::Token = mio::Token(1025); pub struct GdbStub { debugger: dbgcore::DbgCore, gdb_thread: Option<thread::JoinHandle<msgs::Client<Message>>> } impl GdbStub { pub fn new(msg_client: msgs::Client<Message>, debugger: dbgcore::DbgCore) -> GdbStub { let mut stub = GdbStub { debugger: debugger, gdb_thread: None }; stub.start(msg_client); stub } pub fn start(&mut self, msg_client: msgs::Client<Message>) { let mut debugger = self.debugger.clone(); self.gdb_thread = Some(thread::Builder::new().name("GDBStub".to_owned()).spawn(move || { use mio::Events; let poll = mio::Poll::new() .expect("Could not create mio polling instance!"); let listener = TcpListener::bind(&"127.0.0.1:4567".parse().unwrap()) .expect("Could not bind TcpListener to port!"); poll.register(&listener, TOKEN_LISTENER, mio::Ready::readable(), mio::PollOpt::edge()) .expect("Could not register TcpListener to mio!");
poll: poll, socket: None, }; let mut events = Events::with_capacity(1024); info!("Starting GDB stub on port 4567..."); let mut last_halt = BreakData::new(BreakReason::Trapped, &mut debugger.ctx(Arm9)); 't: loop { connection.poll.poll(&mut events, Some(Duration::from_millis(100))) .expect("Could not poll for network events!"); let mut ctx = GdbCtx { dbg: &mut debugger.ctx(Arm9), last_halt: &mut last_halt }; for event in &events { handle_event(&event, &mut connection, |buf, stream| { handle_gdb_packet(buf, stream, &mut ctx).unwrap(); }); } for msg in msg_client.try_iter() { match msg { Message::Quit => break 't, Message::Arm9Halted(reason) => { if let Some(ref mut stream) = connection.socket { let break_data = BreakData::new(reason, ctx.dbg); write_gdb_packet(&break_data.to_signal(), stream).unwrap(); } } _ => {} } } } msg_client }).unwrap()) } pub fn wait(&mut self) { if let Some(t) = self.gdb_thread.take() { t.join().unwrap(); } } } struct Connection<'a> { listener: &'a TcpListener, poll: mio::Poll, socket: Option<TcpStream>, } fn handle_event<F>(event: &mio::Event, connection: &mut Connection, mut client_responder: F) where F: FnMut(&[u8], &mut TcpStream) { let mut buf = [0u8; 1024]; match event.token() { TOKEN_LISTENER => { match connection.listener.accept() { Ok((socket, _)) => { info!("GDB stub accepting connection"); connection.poll.register(&socket, TOKEN_CLIENT, mio::Ready::readable(), mio::PollOpt::edge()) .expect("Could not register TCP client to mio!"); connection.socket = Some(socket); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { return; // Socket is not ready anymore, stop accepting } e => panic!("GDB stub IO error! {:?}", e) } } TOKEN_CLIENT => for _ in 0..128 { match connection.socket.as_mut().unwrap().read(&mut buf) { Ok(0) => { connection.socket = None; break; } Ok(l) => client_responder(&buf[..l], connection.socket.as_mut().unwrap()), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { continue; // Socket is not ready anymore, stop reading } e => panic!("GDB stub IO error! {:?}", e), // Unexpected error } }, _ => unimplemented!() } }
let mut connection = Connection { listener: &listener,
random_line_split
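write_gdb_packet and the Checksum newtype above together implement GDB remote-protocol framing: a packet goes over the wire as "$<payload>#<checksum>", where the checksum is the payload's byte sum modulo 256 printed as two uppercase hex digits. A dependency-free sketch of that framing (the frame helper is illustrative):

fn frame(payload: &str) -> String {
    // Same arithmetic as the Checksum newtype: byte sum modulo 256.
    let checksum = payload.bytes().fold(0u32, |acc, b| (acc + u32::from(b)) % 256);
    format!("${}#{:02X}", payload, checksum)
}

fn main() {
    // 'O' (0x4F) + 'K' (0x4B) = 0x9A, so an OK reply is framed as "$OK#9A".
    assert_eq!(frame("OK"), "$OK#9A");
}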
gdbstub.rs
use std::io::{self, Read, Write}; use std::ops::{Add, AddAssign}; use std::str; use std::thread; use std::time::Duration; use mio; use mio::tcp::{TcpListener, TcpStream}; use cpu::BreakReason; use dbgcore::{self, ActiveCpu::Arm9}; use hwcore::Message; use msgs; use utils; #[derive(Debug, Error)] pub enum ErrorKind { Hex(::std::num::ParseIntError), Io(io::Error), /// Client should not expect a response NoResponse, /// Could not find next element to parse Parse, } pub type Result<T> = ::std::result::Result<T, ErrorKind>; fn parse_next<T, I: Iterator<Item=T>>(it: &mut I) -> Result<T> { it.next().ok_or(ErrorKind::Parse.into()) } fn parse_next_hex<'a, I: Iterator<Item=&'a str>>(it: &mut I) -> Result<u32> { Ok(utils::from_hex(parse_next(it)?)?) } fn cmd_step(ctx: &mut GdbCtx) -> Result<String> { ctx.dbg.hw().step(); let break_data = BreakData::new(BreakReason::LimitReached, ctx.dbg); let signal = break_data.to_signal(); *ctx.last_halt = break_data; Ok(signal) } fn cmd_continue(ctx: &mut GdbCtx) -> Result<String> { ctx.dbg.resume(); Err(ErrorKind::NoResponse) } struct BreakData { reason: BreakReason, r15: u32, r13: u32 } impl BreakData { fn new(reason: BreakReason, dbg: &mut dbgcore::DbgContext) -> BreakData { let hw = dbg.hw(); BreakData { reason: reason, r15: hw.pause_addr(), r13: hw.read_reg(13), } } fn to_signal(&self) -> String { let reason_str = match self.reason { BreakReason::Breakpoint => format!(";{}:", "swbreak"), _ => String::new(), }; format!("T05{:02X}:{:08X};{:02X}:{:08X}{};", 15, self.r15.swap_bytes(), 13, self.r13.swap_bytes(), reason_str) } } fn handle_gdb_cmd_q(cmd: &str, _ctx: &mut GdbCtx) -> Result<String> { let mut s = cmd.splitn(2, ':'); let ty = parse_next(&mut s)?; let mut out = String::new(); match ty { "fThreadInfo" => out += "m0000000000000001", "sThreadInfo" => out += "l", "C" => out += "QC0000000000000001", "Attached" => out += "1", "Supported" => { out += "PacketSize=400;BreakpointCommands+;swbreak+;vContSupported+"; } _ => warn!("GDB client tried to run unsupported `q` command {}", ty) } Ok(out) } fn handle_gdb_cmd_v(cmd: &str, ctx: &mut GdbCtx) -> Result<String> { let mut s = cmd.splitn(2, |c| c == ',' || c == ':' || c == ';'); let ty = parse_next(&mut s)?; let mut out = String::new(); match ty { "Cont" => { let params = parse_next(&mut s)?; let threads = params.split(';'); for thread in threads { let mut thread_data = thread.split(':'); let action = parse_next(&mut thread_data)?; let thread_name = thread_data.next(); if let Some(name) = thread_name { match (name, utils::from_hex(name)) { | ("-1", _) | (_, Ok(0)) | (_, Ok(1)) => {} | (s, _) => panic!("Attempted to issue command on invalid thread id {}", s) } } match action { "c" => return cmd_continue(ctx), "s" => return cmd_step(ctx), _ => warn!("GDB client tried to run unsupported `vCont` action {}", action) } } } "Cont?" 
=> { let supported = ["c", "s"]; out += "vCont"; for ty in supported.iter() { out += ";"; out += ty; } } _ => warn!("GDB client tried to run unsupported `v` command {}", ty) } Ok(out) } fn handle_gdb_cmd(cmd: &str, ctx: &mut GdbCtx) -> Result<String> { let ty = parse_next(&mut cmd.chars())?; let params = &cmd[1..]; let mut out = String::new(); ctx.dbg.pause(); match ty { 'g' => { let hw = ctx.dbg.hw(); for reg in 0..15 { out += &format!("{:08X}", hw.read_reg(reg).swap_bytes()); } out += &format!("{:08X}", hw.pause_addr().swap_bytes()); for _ in 0..8 { out += "xxxxxxxxxxxxxxxxxxxxxxxx"; // fX registers (12 bytes each) } out += "xxxxxxxx"; // fps register out += &format!("{:08X}", hw.read_cpsr().swap_bytes()); } 'G' => { let mut hw = ctx.dbg.hw(); let mut regs = params; let next_reg = |regstr: &str| -> Result<u32> { let val = utils::from_hex(&regstr[..8])?; Ok(val.swap_bytes()) }; for reg in 0..15 { hw.write_reg(reg, next_reg(regs)?); regs = &regs[8..]; } // register at 15: PC hw.branch_to(next_reg(regs)?); regs = &regs[8..]; // Skip 8 fX registers regs = &regs[8 * 24..]; // Skip fps register regs = &regs[8..]; // register at 25: CPSR hw.write_cpsr(next_reg(regs)?); out += "OK"; } 'H' => { out += "OK"; } 'm' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(','); let addr = parse_next_hex(&mut params)?; let size = parse_next_hex(&mut params)?; let mut buf = [0u8]; for b in 0..size { if let Err(_) = hw.read_mem(addr+b, &mut buf) { out += "00"; } else { out += &format!("{:02X}", buf[0]); } } } 'M' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(|c| c == ',' || c == ':'); let addr = parse_next_hex(&mut params)?; let size = parse_next_hex(&mut params)?; let data = parse_next(&mut params)?; for b in 0..size { let data_byte_range = 2*(b as usize)..2*((b as usize)+1); let byte = utils::from_hex(&data[data_byte_range])? as u8; hw.write_mem(addr+b, &[byte]); } out += "OK"; } 'p' => { let hw = ctx.dbg.hw(); let reg = utils::from_hex(&params)? as usize; let regval = match reg { 0..=14 => hw.read_reg(reg), 15 => hw.pause_addr(), 25 => hw.read_cpsr(), n => { warn!("GDB requested bad register value {}", n); 0 } }; out += &format!("{:08X}", regval.swap_bytes()); } 'q' => { return handle_gdb_cmd_q(params, ctx); } 's' => { return cmd_step(ctx); } 'c' => { return cmd_continue(ctx); } 'v' => { return handle_gdb_cmd_v(params, ctx); } 'z' | 'Z' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(','); let brk_ty = parse_next(&mut params)?; let addr = parse_next_hex(&mut params)?; let _kind = parse_next(&mut params)?; assert!(brk_ty == "0"); if ty == 'Z' { hw.set_breakpoint(addr); } else { hw.del_breakpoint(addr); } out += "OK"; } '?' 
=> { out += &ctx.last_halt.to_signal(); } x => { warn!("GDB client tried to run unsupported command {}", x); } } Ok(out) } #[derive(Clone, Copy)] struct Checksum(pub u32); impl Add<u8> for Checksum { type Output = Checksum; fn add(self, b: u8) -> Checksum { Checksum((self.0 + (b as u32)) % 256) } } impl AddAssign<u8> for Checksum { fn add_assign(&mut self, b: u8) { self.0 = (*self + b).0; } } enum PacketType { Command(String), CtrlC, AckOk, AckErr, EndOfPacket, Malformed, } fn load_packet<I: Iterator<Item = u8>>(it: &mut I) -> PacketType { let mut it = it.skip_while(|b| *b != 0x03 && *b != b'$' && *b != b'-' && *b != b'+'); match it.next() { Some(0x3) => return PacketType::CtrlC, Some(b'$') => {} Some(b'+') => return PacketType::AckOk, Some(b'-') => return PacketType::AckErr, None => return PacketType::EndOfPacket, _ => return PacketType::Malformed } let mut string = String::new(); let mut checksum = Checksum(0); for b in it.by_ref().take_while(|b| *b != b'#') { string.push(b as char); checksum += b; } if let (Some(top), Some(bot)) = (it.next(), it.next()) { let packet_checksum = str::from_utf8(&[top, bot]).ok() .and_then(|s| utils::from_hex(s).ok()); if Some(checksum.0) == packet_checksum { return PacketType::Command(string) } } return PacketType::Malformed } fn write_gdb_packet(data: &str, stream: &mut TcpStream) -> Result<()> { let checksum = data.bytes().fold(Checksum(0), |checksum, b| checksum + b); trace!("Replying with GDB packet: ${}#{:02X}", data, checksum.0); write!(stream, "${}#{:02X}", data, checksum.0)?; stream.flush()?; Ok(()) } fn handle_gdb_packet(data: &[u8], stream: &mut TcpStream, ctx: &mut GdbCtx) -> Result<()> { trace!("Receiving GDB packet: {}", str::from_utf8(data).unwrap()); let mut it = data.iter().cloned(); loop { match load_packet(&mut it) { PacketType::Command(cmd) => { stream.write(b"+")?; stream.flush()?; match handle_gdb_cmd(&cmd, ctx) { Ok(out) => write_gdb_packet(&out, stream)?, Err(e) => { if let ErrorKind::NoResponse = e {} else { return Err(e) } } } } PacketType::CtrlC => { ctx.dbg.pause(); trace!("Received GDB packet with CTRL-C signal!"); } PacketType::AckOk => {}, PacketType::AckErr => error!("GDB client replied with error packet!"), PacketType::EndOfPacket => { return Ok(()) } PacketType::Malformed => { trace!("Received malformed data {:?}", data); stream.write(b"-")?; stream.flush()?; return Ok(()) } } } } struct
<'a, 'b: 'a> { dbg: &'a mut dbgcore::DbgContext<'b>, last_halt: &'a mut BreakData, } const TOKEN_LISTENER: mio::Token = mio::Token(1024); const TOKEN_CLIENT: mio::Token = mio::Token(1025); pub struct GdbStub { debugger: dbgcore::DbgCore, gdb_thread: Option<thread::JoinHandle<msgs::Client<Message>>> } impl GdbStub { pub fn new(msg_client: msgs::Client<Message>, debugger: dbgcore::DbgCore) -> GdbStub { let mut stub = GdbStub { debugger: debugger, gdb_thread: None }; stub.start(msg_client); stub } pub fn start(&mut self, msg_client: msgs::Client<Message>) { let mut debugger = self.debugger.clone(); self.gdb_thread = Some(thread::Builder::new().name("GDBStub".to_owned()).spawn(move || { use mio::Events; let poll = mio::Poll::new() .expect("Could not create mio polling instance!"); let listener = TcpListener::bind(&"127.0.0.1:4567".parse().unwrap()) .expect("Could not bind TcpListener to port!"); poll.register(&listener, TOKEN_LISTENER, mio::Ready::readable(), mio::PollOpt::edge()) .expect("Could not register TcpListener to mio!"); let mut connection = Connection { listener: &listener, poll: poll, socket: None, }; let mut events = Events::with_capacity(1024); info!("Starting GDB stub on port 4567..."); let mut last_halt = BreakData::new(BreakReason::Trapped, &mut debugger.ctx(Arm9)); 't: loop { connection.poll.poll(&mut events, Some(Duration::from_millis(100))) .expect("Could not poll for network events!"); let mut ctx = GdbCtx { dbg: &mut debugger.ctx(Arm9), last_halt: &mut last_halt }; for event in &events { handle_event(&event, &mut connection, |buf, stream| { handle_gdb_packet(buf, stream, &mut ctx).unwrap(); }); } for msg in msg_client.try_iter() { match msg { Message::Quit => break 't, Message::Arm9Halted(reason) => { if let Some(ref mut stream) = connection.socket { let break_data = BreakData::new(reason, ctx.dbg); write_gdb_packet(&break_data.to_signal(), stream).unwrap(); } } _ => {} } } } msg_client }).unwrap()) } pub fn wait(&mut self) { if let Some(t) = self.gdb_thread.take() { t.join().unwrap(); } } } struct Connection<'a> { listener: &'a TcpListener, poll: mio::Poll, socket: Option<TcpStream>, } fn handle_event<F>(event: &mio::Event, connection: &mut Connection, mut client_responder: F) where F: FnMut(&[u8], &mut TcpStream) { let mut buf = [0u8; 1024]; match event.token() { TOKEN_LISTENER => { match connection.listener.accept() { Ok((socket, _)) => { info!("GDB stub accepting connection"); connection.poll.register(&socket, TOKEN_CLIENT, mio::Ready::readable(), mio::PollOpt::edge()) .expect("Could not register TCP client to mio!"); connection.socket = Some(socket); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { return; // Socket is not ready anymore, stop accepting } e => panic!("GDB stub IO error! {:?}", e) } } TOKEN_CLIENT => for _ in 0..128 { match connection.socket.as_mut().unwrap().read(&mut buf) { Ok(0) => { connection.socket = None; break; } Ok(l) => client_responder(&buf[..l], connection.socket.as_mut().unwrap()), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { continue; // Socket is not ready anymore, stop reading } e => panic!("GDB stub IO error! {:?}", e), // Unexpected error } }, _ => unimplemented!() } }
GdbCtx
identifier_name
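The row above centers on the GDB remote serial protocol framing implemented by `Checksum`, `load_packet`, and `write_gdb_packet`: a payload between `$` and `#`, followed by the payload's byte sum modulo 256 as two hex digits. A minimal, self-contained sketch of that framing rule (the function names here are illustrative, not from the file):

```rust
// Frame a GDB remote-serial-protocol packet: "$<payload>#<checksum>", where
// the checksum is the wrapping (mod-256) sum of the payload bytes, in hex.
fn rsp_checksum(payload: &str) -> u8 {
    payload.bytes().fold(0u8, |sum, b| sum.wrapping_add(b))
}

fn frame_packet(payload: &str) -> String {
    format!("${}#{:02X}", payload, rsp_checksum(payload))
}

fn main() {
    // 'O' (0x4F) + 'K' (0x4B) = 0x9A, so "OK" frames as "$OK#9A".
    assert_eq!(frame_packet("OK"), "$OK#9A");
}
```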
gdbstub.rs
use std::io::{self, Read, Write}; use std::ops::{Add, AddAssign}; use std::str; use std::thread; use std::time::Duration; use mio; use mio::tcp::{TcpListener, TcpStream}; use cpu::BreakReason; use dbgcore::{self, ActiveCpu::Arm9}; use hwcore::Message; use msgs; use utils; #[derive(Debug, Error)] pub enum ErrorKind { Hex(::std::num::ParseIntError), Io(io::Error), /// Client should not expect a response NoResponse, /// Could not find next element to parse Parse, } pub type Result<T> = ::std::result::Result<T, ErrorKind>; fn parse_next<T, I: Iterator<Item=T>>(it: &mut I) -> Result<T> { it.next().ok_or(ErrorKind::Parse.into()) } fn parse_next_hex<'a, I: Iterator<Item=&'a str>>(it: &mut I) -> Result<u32> { Ok(utils::from_hex(parse_next(it)?)?) } fn cmd_step(ctx: &mut GdbCtx) -> Result<String> { ctx.dbg.hw().step(); let break_data = BreakData::new(BreakReason::LimitReached, ctx.dbg); let signal = break_data.to_signal(); *ctx.last_halt = break_data; Ok(signal) } fn cmd_continue(ctx: &mut GdbCtx) -> Result<String> { ctx.dbg.resume(); Err(ErrorKind::NoResponse) } struct BreakData { reason: BreakReason, r15: u32, r13: u32 } impl BreakData { fn new(reason: BreakReason, dbg: &mut dbgcore::DbgContext) -> BreakData { let hw = dbg.hw(); BreakData { reason: reason, r15: hw.pause_addr(), r13: hw.read_reg(13), } } fn to_signal(&self) -> String
} fn handle_gdb_cmd_q(cmd: &str, _ctx: &mut GdbCtx) -> Result<String> { let mut s = cmd.splitn(2, ':'); let ty = parse_next(&mut s)?; let mut out = String::new(); match ty { "fThreadInfo" => out += "m0000000000000001", "sThreadInfo" => out += "l", "C" => out += "QC0000000000000001", "Attached" => out += "1", "Supported" => { out += "PacketSize=400;BreakpointCommands+;swbreak+;vContSupported+"; } _ => warn!("GDB client tried to run unsupported `q` command {}", ty) } Ok(out) } fn handle_gdb_cmd_v(cmd: &str, ctx: &mut GdbCtx) -> Result<String> { let mut s = cmd.splitn(2, |c| c == ',' || c == ':' || c == ';'); let ty = parse_next(&mut s)?; let mut out = String::new(); match ty { "Cont" => { let params = parse_next(&mut s)?; let threads = params.split(';'); for thread in threads { let mut thread_data = thread.split(':'); let action = parse_next(&mut thread_data)?; let thread_name = thread_data.next(); if let Some(name) = thread_name { match (name, utils::from_hex(name)) { | ("-1", _) | (_, Ok(0)) | (_, Ok(1)) => {} | (s, _) => panic!("Attempted to issue command on invalid thread id {}", s) } } match action { "c" => return cmd_continue(ctx), "s" => return cmd_step(ctx), _ => warn!("GDB client tried to run unsupported `vCont` action {}", action) } } } "Cont?" => { let supported = ["c", "s"]; out += "vCont"; for ty in supported.iter() { out += ";"; out += ty; } } _ => warn!("GDB client tried to run unsupported `v` command {}", ty) } Ok(out) } fn handle_gdb_cmd(cmd: &str, ctx: &mut GdbCtx) -> Result<String> { let ty = parse_next(&mut cmd.chars())?; let params = &cmd[1..]; let mut out = String::new(); ctx.dbg.pause(); match ty { 'g' => { let hw = ctx.dbg.hw(); for reg in 0..15 { out += &format!("{:08X}", hw.read_reg(reg).swap_bytes()); } out += &format!("{:08X}", hw.pause_addr().swap_bytes()); for _ in 0..8 { out += "xxxxxxxxxxxxxxxxxxxxxxxx"; // fX registers (12 bytes each) } out += "xxxxxxxx"; // fps register out += &format!("{:08X}", hw.read_cpsr().swap_bytes()); } 'G' => { let mut hw = ctx.dbg.hw(); let mut regs = params; let next_reg = |regstr: &str| -> Result<u32> { let val = utils::from_hex(&regstr[..8])?; Ok(val.swap_bytes()) }; for reg in 0..15 { hw.write_reg(reg, next_reg(regs)?); regs = &regs[8..]; } // register at 15: PC hw.branch_to(next_reg(regs)?); regs = &regs[8..]; // Skip 8 fX registers regs = &regs[8 * 24..]; // Skip fps register regs = &regs[8..]; // register at 25: CPSR hw.write_cpsr(next_reg(regs)?); out += "OK"; } 'H' => { out += "OK"; } 'm' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(','); let addr = parse_next_hex(&mut params)?; let size = parse_next_hex(&mut params)?; let mut buf = [0u8]; for b in 0..size { if let Err(_) = hw.read_mem(addr+b, &mut buf) { out += "00"; } else { out += &format!("{:02X}", buf[0]); } } } 'M' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(|c| c == ',' || c == ':'); let addr = parse_next_hex(&mut params)?; let size = parse_next_hex(&mut params)?; let data = parse_next(&mut params)?; for b in 0..size { let data_byte_range = 2*(b as usize)..2*((b as usize)+1); let byte = utils::from_hex(&data[data_byte_range])? as u8; hw.write_mem(addr+b, &[byte]); } out += "OK"; } 'p' => { let hw = ctx.dbg.hw(); let reg = utils::from_hex(&params)? 
as usize; let regval = match reg { 0..=14 => hw.read_reg(reg), 15 => hw.pause_addr(), 25 => hw.read_cpsr(), n => { warn!("GDB requested bad register value {}", n); 0 } }; out += &format!("{:08X}", regval.swap_bytes()); } 'q' => { return handle_gdb_cmd_q(params, ctx); } 's' => { return cmd_step(ctx); } 'c' => { return cmd_continue(ctx); } 'v' => { return handle_gdb_cmd_v(params, ctx); } 'z' | 'Z' => { let mut hw = ctx.dbg.hw(); let mut params = params.split(','); let brk_ty = parse_next(&mut params)?; let addr = parse_next_hex(&mut params)?; let _kind = parse_next(&mut params)?; assert!(brk_ty == "0"); if ty == 'Z' { hw.set_breakpoint(addr); } else { hw.del_breakpoint(addr); } out += "OK"; } '?' => { out += &ctx.last_halt.to_signal(); } x => { warn!("GDB client tried to run unsupported command {}", x); } } Ok(out) } #[derive(Clone, Copy)] struct Checksum(pub u32); impl Add<u8> for Checksum { type Output = Checksum; fn add(self, b: u8) -> Checksum { Checksum((self.0 + (b as u32)) % 256) } } impl AddAssign<u8> for Checksum { fn add_assign(&mut self, b: u8) { self.0 = (*self + b).0; } } enum PacketType { Command(String), CtrlC, AckOk, AckErr, EndOfPacket, Malformed, } fn load_packet<I: Iterator<Item = u8>>(it: &mut I) -> PacketType { let mut it = it.skip_while(|b| *b != 0x03 && *b != b'$' && *b != b'-' && *b != b'+'); match it.next() { Some(0x3) => return PacketType::CtrlC, Some(b'$') => {} Some(b'+') => return PacketType::AckOk, Some(b'-') => return PacketType::AckErr, None => return PacketType::EndOfPacket, _ => return PacketType::Malformed } let mut string = String::new(); let mut checksum = Checksum(0); for b in it.by_ref().take_while(|b| *b != b'#') { string.push(b as char); checksum += b; } if let (Some(top), Some(bot)) = (it.next(), it.next()) { let packet_checksum = str::from_utf8(&[top, bot]).ok() .and_then(|s| utils::from_hex(s).ok()); if Some(checksum.0) == packet_checksum { return PacketType::Command(string) } } return PacketType::Malformed } fn write_gdb_packet(data: &str, stream: &mut TcpStream) -> Result<()> { let checksum = data.bytes().fold(Checksum(0), |checksum, b| checksum + b); trace!("Replying with GDB packet: ${}#{:02X}", data, checksum.0); write!(stream, "${}#{:02X}", data, checksum.0)?; stream.flush()?; Ok(()) } fn handle_gdb_packet(data: &[u8], stream: &mut TcpStream, ctx: &mut GdbCtx) -> Result<()> { trace!("Receiving GDB packet: {}", str::from_utf8(data).unwrap()); let mut it = data.iter().cloned(); loop { match load_packet(&mut it) { PacketType::Command(cmd) => { stream.write(b"+")?; stream.flush()?; match handle_gdb_cmd(&cmd, ctx) { Ok(out) => write_gdb_packet(&out, stream)?, Err(e) => { if let ErrorKind::NoResponse = e {} else { return Err(e) } } } } PacketType::CtrlC => { ctx.dbg.pause(); trace!("Received GDB packet with CTRL-C signal!"); } PacketType::AckOk => {}, PacketType::AckErr => error!("GDB client replied with error packet!"), PacketType::EndOfPacket => { return Ok(()) } PacketType::Malformed => { trace!("Received malformed data {:?}", data); stream.write(b"-")?; stream.flush()?; return Ok(()) } } } } struct GdbCtx<'a, 'b: 'a> { dbg: &'a mut dbgcore::DbgContext<'b>, last_halt: &'a mut BreakData, } const TOKEN_LISTENER: mio::Token = mio::Token(1024); const TOKEN_CLIENT: mio::Token = mio::Token(1025); pub struct GdbStub { debugger: dbgcore::DbgCore, gdb_thread: Option<thread::JoinHandle<msgs::Client<Message>>> } impl GdbStub { pub fn new(msg_client: msgs::Client<Message>, debugger: dbgcore::DbgCore) -> GdbStub { let mut stub = GdbStub { debugger: debugger,
gdb_thread: None }; stub.start(msg_client); stub } pub fn start(&mut self, msg_client: msgs::Client<Message>) { let mut debugger = self.debugger.clone(); self.gdb_thread = Some(thread::Builder::new().name("GDBStub".to_owned()).spawn(move || { use mio::Events; let poll = mio::Poll::new() .expect("Could not create mio polling instance!"); let listener = TcpListener::bind(&"127.0.0.1:4567".parse().unwrap()) .expect("Could not bind TcpListener to port!"); poll.register(&listener, TOKEN_LISTENER, mio::Ready::readable(), mio::PollOpt::edge()) .expect("Could not register TcpListener to mio!"); let mut connection = Connection { listener: &listener, poll: poll, socket: None, }; let mut events = Events::with_capacity(1024); info!("Starting GDB stub on port 4567..."); let mut last_halt = BreakData::new(BreakReason::Trapped, &mut debugger.ctx(Arm9)); 't: loop { connection.poll.poll(&mut events, Some(Duration::from_millis(100))) .expect("Could not poll for network events!"); let mut ctx = GdbCtx { dbg: &mut debugger.ctx(Arm9), last_halt: &mut last_halt }; for event in &events { handle_event(&event, &mut connection, |buf, stream| { handle_gdb_packet(buf, stream, &mut ctx).unwrap(); }); } for msg in msg_client.try_iter() { match msg { Message::Quit => break 't, Message::Arm9Halted(reason) => { if let Some(ref mut stream) = connection.socket { let break_data = BreakData::new(reason, ctx.dbg); write_gdb_packet(&break_data.to_signal(), stream).unwrap(); } } _ => {} } } } msg_client }).unwrap()) } pub fn wait(&mut self) { if let Some(t) = self.gdb_thread.take() { t.join().unwrap(); } } } struct Connection<'a> { listener: &'a TcpListener, poll: mio::Poll, socket: Option<TcpStream>, } fn handle_event<F>(event: &mio::Event, connection: &mut Connection, mut client_responder: F) where F: FnMut(&[u8], &mut TcpStream) { let mut buf = [0u8; 1024]; match event.token() { TOKEN_LISTENER => { match connection.listener.accept() { Ok((socket, _)) => { info!("GDB stub accepting connection"); connection.poll.register(&socket, TOKEN_CLIENT, mio::Ready::readable(), mio::PollOpt::edge()) .expect("Could not register TCP client to mio!"); connection.socket = Some(socket); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { return; // Socket is not ready anymore, stop accepting } e => panic!("GDB stub IO error! {:?}", e) } } TOKEN_CLIENT => for _ in 0..128 { match connection.socket.as_mut().unwrap().read(&mut buf) { Ok(0) => { connection.socket = None; break; } Ok(l) => client_responder(&buf[..l], connection.socket.as_mut().unwrap()), Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { continue; // Socket is not ready anymore, stop reading } e => panic!("GDB stub IO error! {:?}", e), // Unexpected error } }, _ => unimplemented!() } }
{ let reason_str = match self.reason { BreakReason::Breakpoint => format!(";{}:", "swbreak"), _ => String::new(), }; format!("T05{:02X}:{:08X};{:02X}:{:08X}{};", 15, self.r15.swap_bytes(), 13, self.r13.swap_bytes(), reason_str) }
identifier_body
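The filled-in `to_signal` body above assembles a GDB `T` stop reply: signal number 05 (SIGTRAP) followed by `register:value;` pairs whose values are byte-swapped into wire order. A standalone sketch of the same packet shape, using the register numbers (15 = PC, 13 = SP) shown in the body:

```rust
// Build a "T05..." stop reply like the `to_signal` body above. `swbreak`
// appends the ";swbreak:" stop-reason suffix used for software breakpoints.
fn stop_reply(pc: u32, sp: u32, swbreak: bool) -> String {
    let reason = if swbreak { ";swbreak:" } else { "" };
    format!("T05{:02X}:{:08X};{:02X}:{:08X}{};",
            15, pc.swap_bytes(), // r15: program counter
            13, sp.swap_bytes(), // r13: stack pointer
            reason)
}

fn main() {
    // PC 0x08000100 byte-swaps to 0x00010008 on the wire.
    println!("{}", stop_reply(0x0800_0100, 0x0300_7F00, true));
}
```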
spinning_square.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use anyhow::{Context as _, Error}; use carnelian::{ app::{Config, ViewCreationParameters}, color::Color, derive_handle_message_with_default, drawing::{load_font, path_for_rectangle, path_for_rounded_rectangle, FontFace}, input::{self}, render::{BlendMode, Context as RenderContext, Fill, FillRule, Layer, Path, Style}, scene::{ facets::{Facet, FacetId, TextFacetOptions}, scene::{Scene, SceneBuilder, SceneOrder}, LayerGroup, }, App, AppAssistant, AppAssistantPtr, AppSender, AssistantCreatorFunc, Coord, LocalBoxFuture, MessageTarget, Point, Rect, Size, ViewAssistant, ViewAssistantContext, ViewAssistantPtr, ViewKey, }; use euclid::{point2, size2, vec2, Angle, Transform2D}; use fidl::prelude::*; use fidl_test_placeholders::{EchoMarker, EchoRequest, EchoRequestStream}; use fuchsia_async as fasync; use fuchsia_zircon::Time; use futures::prelude::*; use std::{f32::consts::PI, path::PathBuf}; struct SpinningSquareAppAssistant { app_sender: AppSender, } impl SpinningSquareAppAssistant { fn new(app_sender: AppSender) -> Self { Self { app_sender } } } impl AppAssistant for SpinningSquareAppAssistant { fn setup(&mut self) -> Result<(), Error> { Ok(()) } fn create_view_assistant_with_parameters( &mut self, params: ViewCreationParameters, ) -> Result<ViewAssistantPtr, Error> { let additional = params.options.is_some(); let direction = params .options .and_then(|options| options.downcast_ref::<Direction>().map(|direction| *direction)) .unwrap_or(Direction::CounterClockwise); SpinningSquareViewAssistant::new( params.view_key, direction, self.app_sender.clone(), additional, ) } /// Return the list of names of services this app wants to provide fn outgoing_services_names(&self) -> Vec<&'static str> { [EchoMarker::PROTOCOL_NAME].to_vec() } /// Handle a request to connect to a service provided by this app fn handle_service_connection_request( &mut self, _service_name: &str, channel: fasync::Channel, ) -> Result<(), Error> { Self::create_echo_server(channel, false); Ok(()) } fn filter_config(&mut self, config: &mut Config) { config.display_resource_release_delay = std::time::Duration::new(0, 0); } } impl SpinningSquareAppAssistant { fn create_echo_server(channel: fasync::Channel, quiet: bool) { fasync::Task::local( async move { let mut stream = EchoRequestStream::from_channel(channel); while let Some(EchoRequest::EchoString { value, responder }) = stream.try_next().await.context("error running echo server")? 
{ if !quiet { println!("Spinning Square received echo request for string {:?}", value); } responder .send(value.as_ref().map(|s| &**s)) .context("error sending response")?; if !quiet { println!("echo response sent successfully"); } } Ok(()) } .unwrap_or_else(|e: anyhow::Error| eprintln!("{:?}", e)), ) .detach(); } } struct SceneDetails { scene: Scene, square: FacetId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Direction { Clockwise, CounterClockwise, } impl Direction { pub fn toggle(self) -> Self { match self { Self::Clockwise => Self::CounterClockwise, Self::CounterClockwise => Self::Clockwise, } } } #[derive(Debug)] pub struct ToggleRoundedMessage {} #[derive(Debug)] pub struct ToggleDirectionMessage {} struct SpinningSquareFacet { direction: Direction, square_color: Color, rounded: bool, start: Time, square_path: Option<Path>, size: Size, } impl SpinningSquareFacet { fn new(square_color: Color, start: Time, size: Size, direction: Direction) -> Self { Self { direction, square_color, rounded: false, start, square_path: None, size } } fn clone_square_path(&self) -> Path { self.square_path.as_ref().expect("square_path").clone() } fn handle_toggle_rounded_message(&mut self, _msg: &ToggleRoundedMessage) { self.rounded = !self.rounded; self.square_path = None; } fn handle_toggle_direction_message(&mut self, _msg: &ToggleDirectionMessage) { self.direction = self.direction.toggle(); } fn handle_other_message(&mut self, _msg: &carnelian::Message) { println!("handle_other_message"); } } impl Facet for SpinningSquareFacet { fn update_layers( &mut self, size: Size, layer_group: &mut dyn LayerGroup, render_context: &mut RenderContext, view_context: &ViewAssistantContext, ) -> Result<(), Error> { const SPEED: f32 = 0.25; const SECONDS_PER_NANOSECOND: f32 = 1e-9; const SQUARE_PATH_SIZE: Coord = 1.0; const SQUARE_PATH_SIZE_2: Coord = SQUARE_PATH_SIZE / 2.0; const CORNER_RADIUS: Coord = SQUARE_PATH_SIZE / 4.0; let center_x = size.width * 0.5; let center_y = size.height * 0.5; self.size = size; let square_size = size.width.min(size.height) * 0.6; let presentation_time = view_context.presentation_time; let t = ((presentation_time.into_nanos() - self.start.into_nanos()) as f32 * SECONDS_PER_NANOSECOND * SPEED) % 1.0; let angle = t * PI * 2.0 * if self.direction == Direction::CounterClockwise { -1.0 } else { 1.0 }; if self.square_path.is_none() { let top_left = point2(-SQUARE_PATH_SIZE_2, -SQUARE_PATH_SIZE_2); let square = Rect::new(top_left, size2(SQUARE_PATH_SIZE, SQUARE_PATH_SIZE)); let square_path = if self.rounded { path_for_rounded_rectangle(&square, CORNER_RADIUS, render_context) } else { path_for_rectangle(&square, render_context) }; self.square_path.replace(square_path); } let transformation = Transform2D::rotation(Angle::radians(angle)) .then_scale(square_size, square_size) .then_translate(vec2(center_x, center_y)); let mut raster_builder = render_context.raster_builder().expect("raster_builder"); raster_builder.add(&self.clone_square_path(), Some(&transformation)); let square_raster = raster_builder.build(); layer_group.insert( SceneOrder::default(), Layer { raster: square_raster, clip: None, style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(self.square_color), blend_mode: BlendMode::Over, }, }, ); Ok(()) } derive_handle_message_with_default!(handle_other_message, ToggleRoundedMessage => handle_toggle_rounded_message, ToggleDirectionMessage => handle_toggle_direction_message ); fn calculate_size(&self, _available: Size) -> Size { self.size } } struct SpinningSquareViewAssistant
{ direction: Direction, view_key: ViewKey, background_color: Color, square_color: Color, start: Time, app_sender: AppSender, scene_details: Option<SceneDetails>, face: FontFace, additional: bool, } impl SpinningSquareViewAssistant { fn new( view_key: ViewKey, direction: Direction, app_sender: AppSender, additional: bool, ) -> Result<ViewAssistantPtr, Error> { let square_color = Color { r: 0xbb, g: 0x00, b: 0xff, a: 0xbb }; let background_color = Color { r: 0x3f, g: 0x8a, b: 0x99, a: 0xff }; let start = Time::get_monotonic(); let face = load_font(PathBuf::from("/pkg/data/fonts/RobotoSlab-Regular.ttf"))?; Ok(Box::new(SpinningSquareViewAssistant { direction, view_key, background_color, square_color, start, scene_details: None, app_sender, face, additional, })) } fn ensure_scene_built(&mut self, size: Size) { if self.scene_details.is_none() { let min_dimension = size.width.min(size.height); let font_size = (min_dimension / 5.0).ceil().min(64.0); let mut builder = SceneBuilder::new().background_color(self.background_color).animated(true); let mut square = None; builder.group().stack().center().contents(|builder| { if self.additional { let key_text = format!("{}", self.view_key); let _ = builder.text( self.face.clone(), &key_text, font_size, Point::zero(), TextFacetOptions::default(), ); } let square_facet = SpinningSquareFacet::new(self.square_color, self.start, size, self.direction); square = Some(builder.facet(Box::new(square_facet))); const STRIPE_COUNT: usize = 5; let stripe_height = size.height / (STRIPE_COUNT * 2 + 1) as f32; const STRIPE_WIDTH_RATIO: f32 = 0.8; let stripe_size = size2(size.width * STRIPE_WIDTH_RATIO, stripe_height); builder.group().column().max_size().space_evenly().contents(|builder| { for _ in 0..STRIPE_COUNT { builder.rectangle(stripe_size, Color::white()); } }); }); let square = square.expect("square"); let scene = builder.build(); self.scene_details = Some(SceneDetails { scene, square }); } } fn toggle_rounded(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { // since we have the scene, we could call send_message directly, // but this lets us demonstrate facet-targeted messages. 
self.app_sender.queue_message( MessageTarget::Facet(self.view_key, scene_details.square), Box::new(ToggleRoundedMessage {}), ); self.app_sender.request_render(self.view_key); } } fn move_backward(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { scene_details .scene .move_facet_backward(scene_details.square) .unwrap_or_else(|e| println!("error in move_facet_backward: {}", e)); self.app_sender.request_render(self.view_key); } } fn move_forward(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { scene_details .scene .move_facet_forward(scene_details.square) .unwrap_or_else(|e| println!("error in move_facet_forward: {}", e)); self.app_sender.request_render(self.view_key); } } fn toggle_direction(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { self.app_sender.queue_message( MessageTarget::Facet(self.view_key, scene_details.square), Box::new(ToggleDirectionMessage {}), ); self.app_sender.request_render(self.view_key); } } fn make_new_view(&mut self) { let direction = self.direction.toggle(); self.app_sender.create_additional_view(Some(Box::new(direction))); } fn close_additional_view(&mut self) { if self.additional { self.app_sender.close_additional_view(self.view_key); } else { println!("Cannot close initial window"); } } } impl ViewAssistant for SpinningSquareViewAssistant { fn resize(&mut self, new_size: &Size) -> Result<(), Error> { self.scene_details = None; self.ensure_scene_built(*new_size); Ok(()) } fn get_scene(&mut self, size: Size) -> Option<&mut Scene> { self.ensure_scene_built(size); Some(&mut self.scene_details.as_mut().unwrap().scene) } fn handle_keyboard_event( &mut self, _context: &mut ViewAssistantContext, _event: &input::Event, keyboard_event: &input::keyboard::Event, ) -> Result<(), Error> { const SPACE: u32 = ' ' as u32; const B: u32 = 'b' as u32; const F: u32 = 'f' as u32; const D: u32 = 'd' as u32; const V: u32 = 'v' as u32; const C: u32 = 'c' as u32; if let Some(code_point) = keyboard_event.code_point
Ok(()) } } fn make_app_assistant_fut( app_sender: &AppSender, ) -> LocalBoxFuture<'_, Result<AppAssistantPtr, Error>> { let f = async move { let assistant = Box::new(SpinningSquareAppAssistant::new(app_sender.clone())); Ok::<AppAssistantPtr, Error>(assistant) }; Box::pin(f) } fn make_app_assistant() -> AssistantCreatorFunc { Box::new(make_app_assistant_fut) } fn main() -> Result<(), Error> { fuchsia_trace_provider::trace_provider_create_with_fdio(); App::run(make_app_assistant()) }
{ if keyboard_event.phase == input::keyboard::Phase::Pressed || keyboard_event.phase == input::keyboard::Phase::Repeat { match code_point { SPACE => self.toggle_rounded(), B => self.move_backward(), F => self.move_forward(), D => self.toggle_direction(), V => self.make_new_view(), C => self.close_additional_view(), _ => println!("code_point = {}", code_point), } } }
conditional_block
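The conditional block completed above gates key handling on the press/repeat phase and then dispatches on the key's code point. A stripped-down sketch of that dispatch pattern, with placeholder actions standing in for the view methods:

```rust
// Character literals cast to u32 make readable match arms for code points.
const SPACE: u32 = ' ' as u32;
const B: u32 = 'b' as u32;

fn dispatch(code_point: u32) {
    match code_point {
        SPACE => println!("toggle rounded corners"),
        B => println!("move facet backward"),
        _ => println!("code_point = {}", code_point), // catch-all, as above
    }
}

fn main() {
    dispatch(' ' as u32); // hits the SPACE arm
    dispatch('x' as u32); // falls through to the catch-all
}
```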
spinning_square.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use anyhow::{Context as _, Error}; use carnelian::{ app::{Config, ViewCreationParameters}, color::Color, derive_handle_message_with_default, drawing::{load_font, path_for_rectangle, path_for_rounded_rectangle, FontFace}, input::{self}, render::{BlendMode, Context as RenderContext, Fill, FillRule, Layer, Path, Style}, scene::{ facets::{Facet, FacetId, TextFacetOptions}, scene::{Scene, SceneBuilder, SceneOrder}, LayerGroup, }, App, AppAssistant, AppAssistantPtr, AppSender, AssistantCreatorFunc, Coord, LocalBoxFuture, MessageTarget, Point, Rect, Size, ViewAssistant, ViewAssistantContext, ViewAssistantPtr, ViewKey, }; use euclid::{point2, size2, vec2, Angle, Transform2D}; use fidl::prelude::*; use fidl_test_placeholders::{EchoMarker, EchoRequest, EchoRequestStream}; use fuchsia_async as fasync; use fuchsia_zircon::Time; use futures::prelude::*; use std::{f32::consts::PI, path::PathBuf}; struct SpinningSquareAppAssistant { app_sender: AppSender, } impl SpinningSquareAppAssistant { fn new(app_sender: AppSender) -> Self { Self { app_sender } } } impl AppAssistant for SpinningSquareAppAssistant { fn setup(&mut self) -> Result<(), Error> { Ok(()) } fn create_view_assistant_with_parameters( &mut self, params: ViewCreationParameters, ) -> Result<ViewAssistantPtr, Error> { let additional = params.options.is_some(); let direction = params .options .and_then(|options| options.downcast_ref::<Direction>().map(|direction| *direction)) .unwrap_or(Direction::CounterClockwise); SpinningSquareViewAssistant::new( params.view_key, direction, self.app_sender.clone(), additional, ) } /// Return the list of names of services this app wants to provide fn outgoing_services_names(&self) -> Vec<&'static str> { [EchoMarker::PROTOCOL_NAME].to_vec() } /// Handle a request to connect to a service provided by this app fn handle_service_connection_request( &mut self, _service_name: &str, channel: fasync::Channel, ) -> Result<(), Error> { Self::create_echo_server(channel, false); Ok(()) } fn filter_config(&mut self, config: &mut Config) { config.display_resource_release_delay = std::time::Duration::new(0, 0); } } impl SpinningSquareAppAssistant { fn create_echo_server(channel: fasync::Channel, quiet: bool) { fasync::Task::local( async move { let mut stream = EchoRequestStream::from_channel(channel); while let Some(EchoRequest::EchoString { value, responder }) = stream.try_next().await.context("error running echo server")? { if !quiet { println!("Spinning Square received echo request for string {:?}", value); } responder .send(value.as_ref().map(|s| &**s)) .context("error sending response")?; if !quiet { println!("echo response sent successfully"); } } Ok(()) } .unwrap_or_else(|e: anyhow::Error| eprintln!("{:?}", e)), ) .detach(); } } struct SceneDetails { scene: Scene, square: FacetId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Direction { Clockwise, CounterClockwise, } impl Direction { pub fn toggle(self) -> Self {
} #[derive(Debug)] pub struct ToggleRoundedMessage {} #[derive(Debug)] pub struct ToggleDirectionMessage {} struct SpinningSquareFacet { direction: Direction, square_color: Color, rounded: bool, start: Time, square_path: Option<Path>, size: Size, } impl SpinningSquareFacet { fn new(square_color: Color, start: Time, size: Size, direction: Direction) -> Self { Self { direction, square_color, rounded: false, start, square_path: None, size } } fn clone_square_path(&self) -> Path { self.square_path.as_ref().expect("square_path").clone() } fn handle_toggle_rounded_message(&mut self, _msg: &ToggleRoundedMessage) { self.rounded = !self.rounded; self.square_path = None; } fn handle_toggle_direction_message(&mut self, _msg: &ToggleDirectionMessage) { self.direction = self.direction.toggle(); } fn handle_other_message(&mut self, _msg: &carnelian::Message) { println!("handle_other_message"); } } impl Facet for SpinningSquareFacet { fn update_layers( &mut self, size: Size, layer_group: &mut dyn LayerGroup, render_context: &mut RenderContext, view_context: &ViewAssistantContext, ) -> Result<(), Error> { const SPEED: f32 = 0.25; const SECONDS_PER_NANOSECOND: f32 = 1e-9; const SQUARE_PATH_SIZE: Coord = 1.0; const SQUARE_PATH_SIZE_2: Coord = SQUARE_PATH_SIZE / 2.0; const CORNER_RADIUS: Coord = SQUARE_PATH_SIZE / 4.0; let center_x = size.width * 0.5; let center_y = size.height * 0.5; self.size = size; let square_size = size.width.min(size.height) * 0.6; let presentation_time = view_context.presentation_time; let t = ((presentation_time.into_nanos() - self.start.into_nanos()) as f32 * SECONDS_PER_NANOSECOND * SPEED) % 1.0; let angle = t * PI * 2.0 * if self.direction == Direction::CounterClockwise { -1.0 } else { 1.0 }; if self.square_path.is_none() { let top_left = point2(-SQUARE_PATH_SIZE_2, -SQUARE_PATH_SIZE_2); let square = Rect::new(top_left, size2(SQUARE_PATH_SIZE, SQUARE_PATH_SIZE)); let square_path = if self.rounded { path_for_rounded_rectangle(&square, CORNER_RADIUS, render_context) } else { path_for_rectangle(&square, render_context) }; self.square_path.replace(square_path); } let transformation = Transform2D::rotation(Angle::radians(angle)) .then_scale(square_size, square_size) .then_translate(vec2(center_x, center_y)); let mut raster_builder = render_context.raster_builder().expect("raster_builder"); raster_builder.add(&self.clone_square_path(), Some(&transformation)); let square_raster = raster_builder.build(); layer_group.insert( SceneOrder::default(), Layer { raster: square_raster, clip: None, style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(self.square_color), blend_mode: BlendMode::Over, }, }, ); Ok(()) } derive_handle_message_with_default!(handle_other_message, ToggleRoundedMessage => handle_toggle_rounded_message, ToggleDirectionMessage => handle_toggle_direction_message ); fn calculate_size(&self, _available: Size) -> Size { self.size } } struct SpinningSquareViewAssistant { direction: Direction, view_key: ViewKey, background_color: Color, square_color: Color, start: Time, app_sender: AppSender, scene_details: Option<SceneDetails>, face: FontFace, additional: bool, } impl SpinningSquareViewAssistant { fn new( view_key: ViewKey, direction: Direction, app_sender: AppSender, additional: bool, ) -> Result<ViewAssistantPtr, Error> { let square_color = Color { r: 0xbb, g: 0x00, b: 0xff, a: 0xbb }; let background_color = Color { r: 0x3f, g: 0x8a, b: 0x99, a: 0xff }; let start = Time::get_monotonic(); let face = load_font(PathBuf::from("/pkg/data/fonts/RobotoSlab-Regular.ttf"))?;
Ok(Box::new(SpinningSquareViewAssistant { direction, view_key, background_color, square_color, start, scene_details: None, app_sender, face, additional, })) } fn ensure_scene_built(&mut self, size: Size) { if self.scene_details.is_none() { let min_dimension = size.width.min(size.height); let font_size = (min_dimension / 5.0).ceil().min(64.0); let mut builder = SceneBuilder::new().background_color(self.background_color).animated(true); let mut square = None; builder.group().stack().center().contents(|builder| { if self.additional { let key_text = format!("{}", self.view_key); let _ = builder.text( self.face.clone(), &key_text, font_size, Point::zero(), TextFacetOptions::default(), ); } let square_facet = SpinningSquareFacet::new(self.square_color, self.start, size, self.direction); square = Some(builder.facet(Box::new(square_facet))); const STRIPE_COUNT: usize = 5; let stripe_height = size.height / (STRIPE_COUNT * 2 + 1) as f32; const STRIPE_WIDTH_RATIO: f32 = 0.8; let stripe_size = size2(size.width * STRIPE_WIDTH_RATIO, stripe_height); builder.group().column().max_size().space_evenly().contents(|builder| { for _ in 0..STRIPE_COUNT { builder.rectangle(stripe_size, Color::white()); } }); }); let square = square.expect("square"); let scene = builder.build(); self.scene_details = Some(SceneDetails { scene, square }); } } fn toggle_rounded(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { // since we have the scene, we could call send_message directly, // but this lets us demonstrate facet-targeted messages. self.app_sender.queue_message( MessageTarget::Facet(self.view_key, scene_details.square), Box::new(ToggleRoundedMessage {}), ); self.app_sender.request_render(self.view_key); } } fn move_backward(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { scene_details .scene .move_facet_backward(scene_details.square) .unwrap_or_else(|e| println!("error in move_facet_backward: {}", e)); self.app_sender.request_render(self.view_key); } } fn move_forward(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { scene_details .scene .move_facet_forward(scene_details.square) .unwrap_or_else(|e| println!("error in move_facet_forward: {}", e)); self.app_sender.request_render(self.view_key); } } fn toggle_direction(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { self.app_sender.queue_message( MessageTarget::Facet(self.view_key, scene_details.square), Box::new(ToggleDirectionMessage {}), ); self.app_sender.request_render(self.view_key); } } fn make_new_view(&mut self) { let direction = self.direction.toggle(); self.app_sender.create_additional_view(Some(Box::new(direction))); } fn close_additional_view(&mut self) { if self.additional { self.app_sender.close_additional_view(self.view_key); } else { println!("Cannot close initial window"); } } } impl ViewAssistant for SpinningSquareViewAssistant { fn resize(&mut self, new_size: &Size) -> Result<(), Error> { self.scene_details = None; self.ensure_scene_built(*new_size); Ok(()) } fn get_scene(&mut self, size: Size) -> Option<&mut Scene> { self.ensure_scene_built(size); Some(&mut self.scene_details.as_mut().unwrap().scene) } fn handle_keyboard_event( &mut self, _context: &mut ViewAssistantContext, _event: &input::Event, keyboard_event: &input::keyboard::Event, ) -> Result<(), Error> { const SPACE: u32 = ' ' as u32; const B: u32 = 'b' as u32; const F: u32 = 'f' as u32; const D: u32 = 'd' as u32; const V: u32 = 'v' as u32; const C: u32 = 'c' as u32; if let Some(code_point) =
keyboard_event.code_point { if keyboard_event.phase == input::keyboard::Phase::Pressed || keyboard_event.phase == input::keyboard::Phase::Repeat { match code_point { SPACE => self.toggle_rounded(), B => self.move_backward(), F => self.move_forward(), D => self.toggle_direction(), V => self.make_new_view(), C => self.close_additional_view(), _ => println!("code_point = {}", code_point), } } } Ok(()) } } fn make_app_assistant_fut( app_sender: &AppSender, ) -> LocalBoxFuture<'_, Result<AppAssistantPtr, Error>> { let f = async move { let assistant = Box::new(SpinningSquareAppAssistant::new(app_sender.clone())); Ok::<AppAssistantPtr, Error>(assistant) }; Box::pin(f) } fn make_app_assistant() -> AssistantCreatorFunc { Box::new(make_app_assistant_fut) } fn main() -> Result<(), Error> { fuchsia_trace_provider::trace_provider_create_with_fdio(); App::run(make_app_assistant()) }
match self { Self::Clockwise => Self::CounterClockwise, Self::CounterClockwise => Self::Clockwise, } }
random_line_split
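The lines filled in above are the body of `Direction::toggle`, a self-inverse map between the two variants. A standalone version with a round-trip check:

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Direction { Clockwise, CounterClockwise }

impl Direction {
    // Flip to the other variant; applying toggle twice is the identity.
    fn toggle(self) -> Self {
        match self {
            Self::Clockwise => Self::CounterClockwise,
            Self::CounterClockwise => Self::Clockwise,
        }
    }
}

fn main() {
    let d = Direction::Clockwise;
    assert_eq!(d.toggle(), Direction::CounterClockwise);
    assert_eq!(d.toggle().toggle(), d);
}
```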
spinning_square.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use anyhow::{Context as _, Error}; use carnelian::{ app::{Config, ViewCreationParameters}, color::Color, derive_handle_message_with_default, drawing::{load_font, path_for_rectangle, path_for_rounded_rectangle, FontFace}, input::{self}, render::{BlendMode, Context as RenderContext, Fill, FillRule, Layer, Path, Style}, scene::{ facets::{Facet, FacetId, TextFacetOptions}, scene::{Scene, SceneBuilder, SceneOrder}, LayerGroup, }, App, AppAssistant, AppAssistantPtr, AppSender, AssistantCreatorFunc, Coord, LocalBoxFuture, MessageTarget, Point, Rect, Size, ViewAssistant, ViewAssistantContext, ViewAssistantPtr, ViewKey, }; use euclid::{point2, size2, vec2, Angle, Transform2D}; use fidl::prelude::*; use fidl_test_placeholders::{EchoMarker, EchoRequest, EchoRequestStream}; use fuchsia_async as fasync; use fuchsia_zircon::Time; use futures::prelude::*; use std::{f32::consts::PI, path::PathBuf}; struct SpinningSquareAppAssistant { app_sender: AppSender, } impl SpinningSquareAppAssistant { fn new(app_sender: AppSender) -> Self { Self { app_sender } } } impl AppAssistant for SpinningSquareAppAssistant { fn setup(&mut self) -> Result<(), Error> { Ok(()) } fn create_view_assistant_with_parameters( &mut self, params: ViewCreationParameters, ) -> Result<ViewAssistantPtr, Error> { let additional = params.options.is_some(); let direction = params .options .and_then(|options| options.downcast_ref::<Direction>().map(|direction| *direction)) .unwrap_or(Direction::CounterClockwise); SpinningSquareViewAssistant::new( params.view_key, direction, self.app_sender.clone(), additional, ) } /// Return the list of names of services this app wants to provide fn outgoing_services_names(&self) -> Vec<&'static str> { [EchoMarker::PROTOCOL_NAME].to_vec() } /// Handle a request to connect to a service provided by this app fn handle_service_connection_request( &mut self, _service_name: &str, channel: fasync::Channel, ) -> Result<(), Error> { Self::create_echo_server(channel, false); Ok(()) } fn filter_config(&mut self, config: &mut Config) { config.display_resource_release_delay = std::time::Duration::new(0, 0); } } impl SpinningSquareAppAssistant { fn create_echo_server(channel: fasync::Channel, quiet: bool) { fasync::Task::local( async move { let mut stream = EchoRequestStream::from_channel(channel); while let Some(EchoRequest::EchoString { value, responder }) = stream.try_next().await.context("error running echo server")? 
{ if !quiet { println!("Spinning Square received echo request for string {:?}", value); } responder .send(value.as_ref().map(|s| &**s)) .context("error sending response")?; if !quiet { println!("echo response sent successfully"); } } Ok(()) } .unwrap_or_else(|e: anyhow::Error| eprintln!("{:?}", e)), ) .detach(); } } struct SceneDetails { scene: Scene, square: FacetId, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Direction { Clockwise, CounterClockwise, } impl Direction { pub fn toggle(self) -> Self { match self { Self::Clockwise => Self::CounterClockwise, Self::CounterClockwise => Self::Clockwise, } } } #[derive(Debug)] pub struct ToggleRoundedMessage {} #[derive(Debug)] pub struct ToggleDirectionMessage {} struct SpinningSquareFacet { direction: Direction, square_color: Color, rounded: bool, start: Time, square_path: Option<Path>, size: Size, } impl SpinningSquareFacet { fn new(square_color: Color, start: Time, size: Size, direction: Direction) -> Self { Self { direction, square_color, rounded: false, start, square_path: None, size } } fn clone_square_path(&self) -> Path { self.square_path.as_ref().expect("square_path").clone() } fn handle_toggle_rounded_message(&mut self, _msg: &ToggleRoundedMessage) { self.rounded = !self.rounded; self.square_path = None; } fn handle_toggle_direction_message(&mut self, _msg: &ToggleDirectionMessage) { self.direction = self.direction.toggle(); } fn handle_other_message(&mut self, _msg: &carnelian::Message) { println!("handle_other_message"); } } impl Facet for SpinningSquareFacet { fn update_layers( &mut self, size: Size, layer_group: &mut dyn LayerGroup, render_context: &mut RenderContext, view_context: &ViewAssistantContext, ) -> Result<(), Error> { const SPEED: f32 = 0.25; const SECONDS_PER_NANOSECOND: f32 = 1e-9; const SQUARE_PATH_SIZE: Coord = 1.0; const SQUARE_PATH_SIZE_2: Coord = SQUARE_PATH_SIZE / 2.0; const CORNER_RADIUS: Coord = SQUARE_PATH_SIZE / 4.0; let center_x = size.width * 0.5; let center_y = size.height * 0.5; self.size = size; let square_size = size.width.min(size.height) * 0.6; let presentation_time = view_context.presentation_time; let t = ((presentation_time.into_nanos() - self.start.into_nanos()) as f32 * SECONDS_PER_NANOSECOND * SPEED) % 1.0; let angle = t * PI * 2.0 * if self.direction == Direction::CounterClockwise { -1.0 } else { 1.0 }; if self.square_path.is_none() { let top_left = point2(-SQUARE_PATH_SIZE_2, -SQUARE_PATH_SIZE_2); let square = Rect::new(top_left, size2(SQUARE_PATH_SIZE, SQUARE_PATH_SIZE)); let square_path = if self.rounded { path_for_rounded_rectangle(&square, CORNER_RADIUS, render_context) } else { path_for_rectangle(&square, render_context) }; self.square_path.replace(square_path); } let transformation = Transform2D::rotation(Angle::radians(angle)) .then_scale(square_size, square_size) .then_translate(vec2(center_x, center_y)); let mut raster_builder = render_context.raster_builder().expect("raster_builder"); raster_builder.add(&self.clone_square_path(), Some(&transformation)); let square_raster = raster_builder.build(); layer_group.insert( SceneOrder::default(), Layer { raster: square_raster, clip: None, style: Style { fill_rule: FillRule::NonZero, fill: Fill::Solid(self.square_color), blend_mode: BlendMode::Over, }, }, ); Ok(()) } derive_handle_message_with_default!(handle_other_message, ToggleRoundedMessage => handle_toggle_rounded_message, ToggleDirectionMessage => handle_toggle_direction_message ); fn calculate_size(&self, _available: Size) -> Size { self.size } } struct SpinningSquareViewAssistant
{ direction: Direction, view_key: ViewKey, background_color: Color, square_color: Color, start: Time, app_sender: AppSender, scene_details: Option<SceneDetails>, face: FontFace, additional: bool, } impl SpinningSquareViewAssistant { fn new( view_key: ViewKey, direction: Direction, app_sender: AppSender, additional: bool, ) -> Result<ViewAssistantPtr, Error> { let square_color = Color { r: 0xbb, g: 0x00, b: 0xff, a: 0xbb }; let background_color = Color { r: 0x3f, g: 0x8a, b: 0x99, a: 0xff }; let start = Time::get_monotonic(); let face = load_font(PathBuf::from("/pkg/data/fonts/RobotoSlab-Regular.ttf"))?; Ok(Box::new(SpinningSquareViewAssistant { direction, view_key, background_color, square_color, start, scene_details: None, app_sender, face, additional, })) } fn ensure_scene_built(&mut self, size: Size) { if self.scene_details.is_none() { let min_dimension = size.width.min(size.height); let font_size = (min_dimension / 5.0).ceil().min(64.0); let mut builder = SceneBuilder::new().background_color(self.background_color).animated(true); let mut square = None; builder.group().stack().center().contents(|builder| { if self.additional { let key_text = format!("{}", self.view_key); let _ = builder.text( self.face.clone(), &key_text, font_size, Point::zero(), TextFacetOptions::default(), ); } let square_facet = SpinningSquareFacet::new(self.square_color, self.start, size, self.direction); square = Some(builder.facet(Box::new(square_facet))); const STRIPE_COUNT: usize = 5; let stripe_height = size.height / (STRIPE_COUNT * 2 + 1) as f32; const STRIPE_WIDTH_RATIO: f32 = 0.8; let stripe_size = size2(size.width * STRIPE_WIDTH_RATIO, stripe_height); builder.group().column().max_size().space_evenly().contents(|builder| { for _ in 0..STRIPE_COUNT { builder.rectangle(stripe_size, Color::white()); } }); }); let square = square.expect("square"); let scene = builder.build(); self.scene_details = Some(SceneDetails { scene, square }); } } fn toggle_rounded(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { // since we have the scene, we could call send_message directly, // but this lets us demonstrate facet-targeted messages. self.app_sender.queue_message( MessageTarget::Facet(self.view_key, scene_details.square), Box::new(ToggleRoundedMessage {}), ); self.app_sender.request_render(self.view_key); } } fn move_backward(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { scene_details .scene .move_facet_backward(scene_details.square) .unwrap_or_else(|e| println!("error in move_facet_backward: {}", e)); self.app_sender.request_render(self.view_key); } } fn move_forward(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { scene_details .scene .move_facet_forward(scene_details.square) .unwrap_or_else(|e| println!("error in move_facet_forward: {}", e)); self.app_sender.request_render(self.view_key); } } fn toggle_direction(&mut self) { if let Some(scene_details) = self.scene_details.as_mut() { self.app_sender.queue_message( MessageTarget::Facet(self.view_key, scene_details.square), Box::new(ToggleDirectionMessage {}), ); self.app_sender.request_render(self.view_key); } } fn make_new_view(&mut self) { let direction = self.direction.toggle(); self.app_sender.create_additional_view(Some(Box::new(direction))); } fn close_additional_view(&mut self) { if self.additional { self.app_sender.close_additional_view(self.view_key); } else { println!("Cannot close initial window"); } } } impl ViewAssistant for SpinningSquareViewAssistant { fn
(&mut self, new_size: &Size) -> Result<(), Error> { self.scene_details = None; self.ensure_scene_built(*new_size); Ok(()) } fn get_scene(&mut self, size: Size) -> Option<&mut Scene> { self.ensure_scene_built(size); Some(&mut self.scene_details.as_mut().unwrap().scene) } fn handle_keyboard_event( &mut self, _context: &mut ViewAssistantContext, _event: &input::Event, keyboard_event: &input::keyboard::Event, ) -> Result<(), Error> { const SPACE: u32 = ' ' as u32; const B: u32 = 'b' as u32; const F: u32 = 'f' as u32; const D: u32 = 'd' as u32; const V: u32 = 'v' as u32; const C: u32 = 'c' as u32; if let Some(code_point) = keyboard_event.code_point { if keyboard_event.phase == input::keyboard::Phase::Pressed || keyboard_event.phase == input::keyboard::Phase::Repeat { match code_point { SPACE => self.toggle_rounded(), B => self.move_backward(), F => self.move_forward(), D => self.toggle_direction(), V => self.make_new_view(), C => self.close_additional_view(), _ => println!("code_point = {}", code_point), } } } Ok(()) } } fn make_app_assistant_fut( app_sender: &AppSender, ) -> LocalBoxFuture<'_, Result<AppAssistantPtr, Error>> { let f = async move { let assistant = Box::new(SpinningSquareAppAssistant::new(app_sender.clone())); Ok::<AppAssistantPtr, Error>(assistant) }; Box::pin(f) } fn make_app_assistant() -> AssistantCreatorFunc { Box::new(make_app_assistant_fut) } fn main() -> Result<(), Error> { fuchsia_trace_provider::trace_provider_create_with_fdio(); App::run(make_app_assistant()) }
resize
identifier_name
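The identifier filled in above is `resize`, which drops the cached `SceneDetails` and lazily rebuilds the scene at the new size. A minimal sketch of that invalidate-then-rebuild pattern; `Scene` and `View` here are stand-in types, not the carnelian ones:

```rust
struct Scene { width: f32, height: f32 }

struct View { scene: Option<Scene> }

impl View {
    // Build the scene only if no cached one exists (mirrors ensure_scene_built).
    fn ensure_scene_built(&mut self, width: f32, height: f32) {
        if self.scene.is_none() {
            self.scene = Some(Scene { width, height });
        }
    }

    // On resize: invalidate the cache, then rebuild at the new size.
    fn resize(&mut self, width: f32, height: f32) {
        self.scene = None;
        self.ensure_scene_built(width, height);
    }
}

fn main() {
    let mut v = View { scene: None };
    v.resize(640.0, 480.0);
    assert_eq!(v.scene.as_ref().map(|s| s.width), Some(640.0));
}
```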
opt.rs
//! CLI argument handling use anyhow::Result; use cargo::core::resolver::CliFeatures; use cargo::ops::Packages; use std::fmt; use std::path::PathBuf; use structopt::StructOpt; #[derive(StructOpt)] #[structopt(bin_name = "cargo")] pub(crate) enum Cli { /// Profile a binary with Xcode Instruments. /// /// By default, cargo-instruments will build your main binary. #[structopt( name = "instruments", after_help = "EXAMPLE:\n cargo instruments -t time Profile main binary with the (recommended) Time Profiler." )] Instruments(AppConfig), } #[derive(Debug, StructOpt)] #[structopt(setting = structopt::clap::AppSettings::TrailingVarArg)] pub(crate) struct AppConfig { /// List available templates #[structopt(short = "l", long)] pub(crate) list_templates: bool, /// Specify the instruments template to run /// /// To see available templates, pass `--list-templates`. #[structopt( short = "t", long = "template", value_name = "TEMPLATE", required_unless = "list-templates" )] pub(crate) template_name: Option<String>, /// Specify package for example/bin/bench /// /// For package that has only one bin, it's the same as `--bin PACKAGE_NAME` #[structopt(short = "p", long, value_name = "NAME")] package: Option<String>, /// Example binary to run #[structopt(long, group = "target", value_name = "NAME")] example: Option<String>, /// Binary to run #[structopt(long, group = "target", value_name = "NAME")] bin: Option<String>, /// Benchmark target to run #[structopt(long, group = "target", value_name = "NAME")] bench: Option<String>, /// Pass --release to cargo #[structopt(long, conflicts_with = "profile")] release: bool, /// Pass --profile NAME to cargo #[structopt(long, value_name = "NAME")] profile: Option<String>, /// Output .trace file to the given path /// /// Defaults to `target/instruments/{name}_{template-name}_{date}.trace`. /// /// If the file already exists, a new Run will be added. #[structopt(short = "o", long = "output", value_name = "PATH", parse(from_os_str))] pub(crate) trace_filepath: Option<PathBuf>, /// Limit recording time to the specified value (in milliseconds) /// /// The program will be terminated after this limit is exceeded. #[structopt(long, value_name = "MILLIS")] pub(crate) time_limit: Option<usize>, /// Open the generated .trace file after profiling /// /// The trace file will open in Xcode Instruments. #[structopt(long, hidden = true)] pub(crate) open: bool, /// Do not open the generated trace file in Instruments.app. #[structopt(long)] pub(crate) no_open: bool, /// Features to pass to cargo. #[structopt(long, value_name = "CARGO-FEATURES")] pub(crate) features: Option<String>, /// Path to Cargo.toml #[structopt(long, value_name = "PATH")] pub(crate) manifest_path: Option<PathBuf>, /// Activate all features for the selected target. #[structopt(long, display_order = 1001)] pub(crate) all_features: bool, /// Do not activate the default features for the selected target #[structopt(long, display_order = 1001)] pub(crate) no_default_features: bool, /// Arguments passed to the target binary. /// /// To pass flags, precede child args with `--`, /// e.g. `cargo instruments -- -t test1.txt --slow-mode`. #[structopt(value_name = "ARGS")] pub(crate) target_args: Vec<String>, } /// Represents the kind of target to profile.
#[derive(Debug, PartialEq)] pub(crate) enum Target { Main, Example(String), Bin(String), Bench(String), } /// The package in which to look for the specified target (example/bin/bench) #[derive(Clone, Debug, PartialEq)] pub(crate) enum Package { Default, Package(String), } impl From<Package> for Packages { fn from(p: Package) -> Self { match p { Package::Default => Packages::Default, Package::Package(s) => Packages::Packages(vec![s]), } } } impl fmt::Display for Package { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Package::Default => { write!(f, "Default: search all packages for example/bin/bench") } Package::Package(s) => write!(f, "{}", s), } } } impl fmt::Display for Target { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Target::Main => write!(f, "src/main.rs"), Target::Example(bin) => write!(f, "examples/{}.rs", bin), Target::Bin(bin) => write!(f, "bin/{}.rs", bin), Target::Bench(bench) => write!(f, "bench {}", bench), } } } /// Cargo-specific options pub(crate) struct CargoOpts { pub(crate) package: Package, pub(crate) target: Target, pub(crate) profile: String, pub(crate) features: CliFeatures, } impl AppConfig { pub(crate) fn to_cargo_opts(&self) -> Result<CargoOpts> { let package = self.get_package(); let target = self.get_target(); let features = self.features.clone().map(|s| vec![s]).unwrap_or_default(); let features = CliFeatures::from_command_line( &features, self.all_features, !self.no_default_features, )?; let profile = self .profile .clone() .unwrap_or_else(|| (if self.release { "release" } else { "dev" }).to_owned()); Ok(CargoOpts { package, target, profile, features }) } fn
(&self) -> Package { if let Some(ref package) = self.package { Package::Package(package.clone()) } else { Package::Default } } // valid target: --example, --bin, --bench fn get_target(&self) -> Target { if let Some(ref example) = self.example { Target::Example(example.clone()) } else if let Some(ref bin) = self.bin { Target::Bin(bin.clone()) } else if let Some(ref bench) = self.bench { Target::Bench(bench.clone()) } else { Target::Main } } } #[cfg(test)] mod tests { use super::*; #[test] fn defaults() { let opts = AppConfig::from_iter(&["instruments", "-t", "template"]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(!opts.release); assert!(opts.trace_filepath.is_none()); assert!(opts.package.is_none()); assert!(opts.manifest_path.is_none()); } #[test] fn package_is_given() { let opts = AppConfig::from_iter(&["instruments", "--package", "foo", "--template", "alloc"]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(opts.bench.is_none()); assert_eq!(opts.package.unwrap().as_str(), "foo"); let opts = AppConfig::from_iter(&[ "instruments", "--package", "foo", "--template", "alloc", "--bin", "bin_arg", ]); assert!(opts.example.is_none()); assert!(opts.bench.is_none()); assert_eq!(opts.bin.unwrap().as_str(), "bin_arg"); assert_eq!(opts.package.unwrap().as_str(), "foo"); } #[test] #[should_panic(expected = "cannot be used with one or more of the other")] fn group_is_exclusive() { let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--bin", "bin_arg"]); assert!(opts.example.is_none()); assert_eq!(opts.bin.unwrap().as_str(), "bin_arg"); let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--example", "example_binary"]); assert!(opts.bin.is_none()); assert_eq!(opts.example.unwrap().as_str(), "example_binary"); let _opts = AppConfig::from_iter_safe(&[ "instruments", "-t", "time", "--bin", "thing", "--example", "other", ]) .unwrap(); } #[test] fn limit_millis() { let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--time-limit", "42000"]); assert_eq!(opts.time_limit, Some(42000)); let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--time-limit", "808"]); assert_eq!(opts.time_limit, Some(808)); let opts = AppConfig::from_iter(&["instruments", "-t", "time"]); assert_eq!(opts.time_limit, None); } #[test] fn features() { let opts = &[ "instruments", "--template", "time", "--example", "hello", "--features", "svg im", "--", "hi", ]; let opts = AppConfig::from_iter(opts); assert_eq!(opts.template_name, Some("time".into())); assert_eq!(opts.example, Some("hello".to_string())); assert_eq!(opts.features, Some("svg im".to_string())); let features: Vec<_> = opts .to_cargo_opts() .unwrap() .features .features .iter() .map(|feat| feat.to_string()) .collect(); assert_eq!(features, vec!["im", "svg"]); } #[test] fn var_args() { let opts = AppConfig::from_iter(&[ "instruments", "-t", "alloc", "--time-limit", "808", "--", "hi", "-h", "--bin", ]); assert_eq!(opts.template_name, Some("alloc".into())); assert_eq!(opts.time_limit, Some(808)); assert_eq!(opts.target_args, vec!["hi", "-h", "--bin"]); } #[test] fn manifest_path() { let opts = AppConfig::from_iter(&[ "instruments", "--manifest-path", "/path/to/Cargo.toml", "--template", "alloc", ]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(opts.bench.is_none()); assert!(opts.package.is_none()); assert_eq!(opts.manifest_path.unwrap(), PathBuf::from("/path/to/Cargo.toml")); } }
get_package
identifier_name
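The accessor named above, `get_package`, lifts the optional `--package NAME` flag into the two-variant `Package` enum so later code never branches on a bare `Option`. A self-contained sketch of that mapping:

```rust
#[derive(Clone, Debug, PartialEq)]
enum Package {
    Default,          // search all packages for the target
    Package(String),  // restrict the search to one named package
}

fn get_package(flag: &Option<String>) -> Package {
    match flag {
        Some(name) => Package::Package(name.clone()),
        None => Package::Default,
    }
}

fn main() {
    assert_eq!(get_package(&None), Package::Default);
    assert_eq!(get_package(&Some("foo".into())), Package::Package("foo".into()));
}
```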
opt.rs
//! CLI argument handling use anyhow::Result; use cargo::core::resolver::CliFeatures; use cargo::ops::Packages; use std::fmt; use std::path::PathBuf; use structopt::StructOpt; #[derive(StructOpt)] #[structopt(bin_name = "cargo")] pub(crate) enum Cli { /// Profile a binary with Xcode Instruments. /// /// By default, cargo-instruments will build your main binary. #[structopt( name = "instruments", after_help = "EXAMPLE:\n cargo instruments -t time Profile main binary with the (recommended) Time Profiler." )] Instruments(AppConfig), } #[derive(Debug, StructOpt)] #[structopt(setting = structopt::clap::AppSettings::TrailingVarArg)] pub(crate) struct AppConfig { /// List available templates #[structopt(short = "l", long)] pub(crate) list_templates: bool, /// Specify the instruments template to run /// /// To see available templates, pass `--list-templates`. #[structopt( short = "t", long = "template", value_name = "TEMPLATE", required_unless = "list-templates" )] pub(crate) template_name: Option<String>, /// Specify package for example/bin/bench /// /// For package that has only one bin, it's the same as `--bin PACKAGE_NAME` #[structopt(short = "p", long, value_name = "NAME")] package: Option<String>, /// Example binary to run #[structopt(long, group = "target", value_name = "NAME")] example: Option<String>, /// Binary to run #[structopt(long, group = "target", value_name = "NAME")] bin: Option<String>, /// Benchmark target to run #[structopt(long, group = "target", value_name = "NAME")] bench: Option<String>, /// Pass --release to cargo #[structopt(long, conflicts_with = "profile")] release: bool, /// Pass --profile NAME to cargo #[structopt(long, value_name = "NAME")] profile: Option<String>, /// Output .trace file to the given path /// /// Defaults to `target/instruments/{name}_{template-name}_{date}.trace`. /// /// If the file already exists, a new Run will be added. #[structopt(short = "o", long = "output", value_name = "PATH", parse(from_os_str))] pub(crate) trace_filepath: Option<PathBuf>, /// Limit recording time to the specified value (in milliseconds) /// /// The program will be terminated after this limit is exceeded. #[structopt(long, value_name = "MILLIS")] pub(crate) time_limit: Option<usize>, /// Open the generated .trace file after profiling /// /// The trace file will open in Xcode Instruments. #[structopt(long, hidden = true)] pub(crate) open: bool, /// Do not open the generated trace file in Instruments.app. #[structopt(long)] pub(crate) no_open: bool, /// Features to pass to cargo. #[structopt(long, value_name = "CARGO-FEATURES")] pub(crate) features: Option<String>, /// Path to Cargo.toml #[structopt(long, value_name = "PATH")] pub(crate) manifest_path: Option<PathBuf>, /// Activate all features for the selected target. #[structopt(long, display_order = 1001)] pub(crate) all_features: bool, /// Do not activate the default features for the selected target #[structopt(long, display_order = 1001)] pub(crate) no_default_features: bool, /// Arguments passed to the target binary. /// /// To pass flags, precede child args with `--`, /// e.g. `cargo instruments -- -t test1.txt --slow-mode`. #[structopt(value_name = "ARGS")] pub(crate) target_args: Vec<String>, } /// Represents the kind of target to profile.
#[derive(Debug, PartialEq)] pub(crate) enum Target { Main, Example(String), Bin(String), Bench(String), } /// The package in which to look for the specified target (example/bin/bench) #[derive(Clone, Debug, PartialEq)] pub(crate) enum Package { Default, Package(String), } impl From<Package> for Packages { fn from(p: Package) -> Self
} impl fmt::Display for Package { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Package::Default => { write!(f, "Default: search all packages for example/bin/bench") } Package::Package(s) => write!(f, "{}", s), } } } impl fmt::Display for Target { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Target::Main => write!(f, "src/main.rs"), Target::Example(bin) => write!(f, "examples/{}.rs", bin), Target::Bin(bin) => write!(f, "bin/{}.rs", bin), Target::Bench(bench) => write!(f, "bench {}", bench), } } } /// Cargo-specific options pub(crate) struct CargoOpts { pub(crate) package: Package, pub(crate) target: Target, pub(crate) profile: String, pub(crate) features: CliFeatures, } impl AppConfig { pub(crate) fn to_cargo_opts(&self) -> Result<CargoOpts> { let package = self.get_package(); let target = self.get_target(); let features = self.features.clone().map(|s| vec![s]).unwrap_or_default(); let features = CliFeatures::from_command_line( &features, self.all_features, !self.no_default_features, )?; let profile = self .profile .clone() .unwrap_or_else(|| (if self.release { "release" } else { "dev" }).to_owned()); Ok(CargoOpts { package, target, profile, features }) } fn get_package(&self) -> Package { if let Some(ref package) = self.package { Package::Package(package.clone()) } else { Package::Default } } // valid target: --example, --bin, --bench fn get_target(&self) -> Target { if let Some(ref example) = self.example { Target::Example(example.clone()) } else if let Some(ref bin) = self.bin { Target::Bin(bin.clone()) } else if let Some(ref bench) = self.bench { Target::Bench(bench.clone()) } else { Target::Main } } } #[cfg(test)] mod tests { use super::*; #[test] fn defaults() { let opts = AppConfig::from_iter(&["instruments", "-t", "template"]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(!opts.release); assert!(opts.trace_filepath.is_none()); assert!(opts.package.is_none()); assert!(opts.manifest_path.is_none()); } #[test] fn package_is_given() { let opts = AppConfig::from_iter(&["instruments", "--package", "foo", "--template", "alloc"]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(opts.bench.is_none()); assert_eq!(opts.package.unwrap().as_str(), "foo"); let opts = AppConfig::from_iter(&[ "instruments", "--package", "foo", "--template", "alloc", "--bin", "bin_arg", ]); assert!(opts.example.is_none()); assert!(opts.bench.is_none()); assert_eq!(opts.bin.unwrap().as_str(), "bin_arg"); assert_eq!(opts.package.unwrap().as_str(), "foo"); } #[test] #[should_panic(expected = "cannot be used with one or more of the other")] fn group_is_exclusive() { let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--bin", "bin_arg"]); assert!(opts.example.is_none()); assert_eq!(opts.bin.unwrap().as_str(), "bin_arg"); let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--example", "example_binary"]); assert!(opts.bin.is_none()); assert_eq!(opts.example.unwrap().as_str(), "example_binary"); let _opts = AppConfig::from_iter_safe(&[ "instruments", "-t", "time", "--bin", "thing", "--example", "other", ]) .unwrap(); } #[test] fn limit_millis() { let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--time-limit", "42000"]); assert_eq!(opts.time_limit, Some(42000)); let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--time-limit", "808"]); assert_eq!(opts.time_limit, Some(808)); let opts = AppConfig::from_iter(&["instruments", "-t", "time"]); assert_eq!(opts.time_limit, None); } #[test] 
fn features() { let opts = &[ "instruments", "--template", "time", "--example", "hello", "--features", "svg im", "--", "hi", ]; let opts = AppConfig::from_iter(opts); assert_eq!(opts.template_name, Some("time".into())); assert_eq!(opts.example, Some("hello".to_string())); assert_eq!(opts.features, Some("svg im".to_string())); let features: Vec<_> = opts .to_cargo_opts() .unwrap() .features .features .iter() .map(|feat| feat.to_string()) .collect(); assert_eq!(features, vec!["im", "svg"]); } #[test] fn var_args() { let opts = AppConfig::from_iter(&[ "instruments", "-t", "alloc", "--time-limit", "808", "--", "hi", "-h", "--bin", ]); assert_eq!(opts.template_name, Some("alloc".into())); assert_eq!(opts.time_limit, Some(808)); assert_eq!(opts.target_args, vec!["hi", "-h", "--bin"]); } #[test] fn manifest_path() { let opts = AppConfig::from_iter(&[ "instruments", "--manifest-path", "/path/to/Cargo.toml", "--template", "alloc", ]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(opts.bench.is_none()); assert!(opts.package.is_none()); assert_eq!(opts.manifest_path.unwrap(), PathBuf::from("/path/to/Cargo.toml")); } }
{ match p { Package::Default => Packages::Default, Package::Package(s) => Packages::Packages(vec![s]), } }
identifier_body
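The `identifier_body` middle above supplies the body of `From<Package> for Packages`. A minimal, standalone sketch of the same conversion pattern, using simplified stand-in enums rather than the real cargo types, showing why defining `From` is worthwhile: call sites get `.into()` for free.

#[derive(Debug)]
enum Package {
    Default,
    Package(String),
}

#[derive(Debug)]
enum Packages {
    Default,
    Packages(Vec<String>),
}

impl From<Package> for Packages {
    fn from(p: Package) -> Self {
        match p {
            Package::Default => Packages::Default,
            Package::Package(s) => Packages::Packages(vec![s]),
        }
    }
}

fn main() {
    // The blanket `Into` impl that comes with `From` does the conversion.
    let explicit: Packages = Package::Package("foo".to_string()).into();
    let fallback: Packages = Package::Default.into();
    println!("{:?} / {:?}", explicit, fallback); // Packages(["foo"]) / Default
}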
opt.rs
//! CLI argument handling use anyhow::Result; use cargo::core::resolver::CliFeatures; use cargo::ops::Packages; use std::fmt; use std::path::PathBuf; use structopt::StructOpt; #[derive(StructOpt)] #[structopt(bin_name = "cargo")] pub(crate) enum Cli { /// Profile a binary with Xcode Instruments. /// /// By default, cargo-instruments will build your main binary. #[structopt( name = "instruments", after_help = "EXAMPLE:\n cargo instruments -t time Profile main binary with the (recommended) Time Profiler."
#[derive(Debug, StructOpt)] #[structopt(setting = structopt::clap::AppSettings::TrailingVarArg)] pub(crate) struct AppConfig { /// List available templates #[structopt(short = "l", long)] pub(crate) list_templates: bool, /// Specify the instruments template to run /// /// To see available templates, pass `--list-templates`. #[structopt( short = "t", long = "template", value_name = "TEMPLATE", required_unless = "list-templates" )] pub(crate) template_name: Option<String>, /// Specify package for example/bin/bench /// /// For a package that has only one bin, this is the same as `--bin PACKAGE_NAME` #[structopt(short = "p", long, value_name = "NAME")] package: Option<String>, /// Example binary to run #[structopt(long, group = "target", value_name = "NAME")] example: Option<String>, /// Binary to run #[structopt(long, group = "target", value_name = "NAME")] bin: Option<String>, /// Benchmark target to run #[structopt(long, group = "target", value_name = "NAME")] bench: Option<String>, /// Pass --release to cargo #[structopt(long, conflicts_with = "profile")] release: bool, /// Pass --profile NAME to cargo #[structopt(long, value_name = "NAME")] profile: Option<String>, /// Output .trace file to the given path /// /// Defaults to `target/instruments/{name}_{template-name}_{date}.trace`. /// /// If the file already exists, a new Run will be added. #[structopt(short = "o", long = "output", value_name = "PATH", parse(from_os_str))] pub(crate) trace_filepath: Option<PathBuf>, /// Limit recording time to the specified value (in milliseconds) /// /// The program will be terminated after this limit is exceeded. #[structopt(long, value_name = "MILLIS")] pub(crate) time_limit: Option<usize>, /// Open the generated .trace file after profiling /// /// The trace file will open in Xcode Instruments. #[structopt(long, hidden = true)] pub(crate) open: bool, /// Do not open the generated trace file in Instruments.app. #[structopt(long)] pub(crate) no_open: bool, /// Features to pass to cargo. #[structopt(long, value_name = "CARGO-FEATURES")] pub(crate) features: Option<String>, /// Path to Cargo.toml #[structopt(long, value_name = "PATH")] pub(crate) manifest_path: Option<PathBuf>, /// Activate all features for the selected target. #[structopt(long, display_order = 1001)] pub(crate) all_features: bool, /// Do not activate the default features for the selected target #[structopt(long, display_order = 1001)] pub(crate) no_default_features: bool, /// Arguments passed to the target binary. /// /// To pass flags, precede child args with `--`, /// e.g. `cargo instruments -- -t test1.txt --slow-mode`. #[structopt(value_name = "ARGS")] pub(crate) target_args: Vec<String>, } /// Represents the kind of target to profile. 
#[derive(Debug, PartialEq)] pub(crate) enum Target { Main, Example(String), Bin(String), Bench(String), } /// The package in which to look for the specified target (example/bin/bench) #[derive(Clone, Debug, PartialEq)] pub(crate) enum Package { Default, Package(String), } impl From<Package> for Packages { fn from(p: Package) -> Self { match p { Package::Default => Packages::Default, Package::Package(s) => Packages::Packages(vec![s]), } } } impl fmt::Display for Package { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Package::Default => { write!(f, "Default: search all packages for example/bin/bench") } Package::Package(s) => write!(f, "{}", s), } } } impl fmt::Display for Target { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Target::Main => write!(f, "src/main.rs"), Target::Example(bin) => write!(f, "examples/{}.rs", bin), Target::Bin(bin) => write!(f, "bin/{}.rs", bin), Target::Bench(bench) => write!(f, "bench {}", bench), } } } /// Cargo-specific options pub(crate) struct CargoOpts { pub(crate) package: Package, pub(crate) target: Target, pub(crate) profile: String, pub(crate) features: CliFeatures, } impl AppConfig { pub(crate) fn to_cargo_opts(&self) -> Result<CargoOpts> { let package = self.get_package(); let target = self.get_target(); let features = self.features.clone().map(|s| vec![s]).unwrap_or_default(); let features = CliFeatures::from_command_line( &features, self.all_features, !self.no_default_features, )?; let profile = self .profile .clone() .unwrap_or_else(|| (if self.release { "release" } else { "dev" }).to_owned()); Ok(CargoOpts { package, target, profile, features }) } fn get_package(&self) -> Package { if let Some(ref package) = self.package { Package::Package(package.clone()) } else { Package::Default } } // valid target: --example, --bin, --bench fn get_target(&self) -> Target { if let Some(ref example) = self.example { Target::Example(example.clone()) } else if let Some(ref bin) = self.bin { Target::Bin(bin.clone()) } else if let Some(ref bench) = self.bench { Target::Bench(bench.clone()) } else { Target::Main } } } #[cfg(test)] mod tests { use super::*; #[test] fn defaults() { let opts = AppConfig::from_iter(&["instruments", "-t", "template"]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(!opts.release); assert!(opts.trace_filepath.is_none()); assert!(opts.package.is_none()); assert!(opts.manifest_path.is_none()); } #[test] fn package_is_given() { let opts = AppConfig::from_iter(&["instruments", "--package", "foo", "--template", "alloc"]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(opts.bench.is_none()); assert_eq!(opts.package.unwrap().as_str(), "foo"); let opts = AppConfig::from_iter(&[ "instruments", "--package", "foo", "--template", "alloc", "--bin", "bin_arg", ]); assert!(opts.example.is_none()); assert!(opts.bench.is_none()); assert_eq!(opts.bin.unwrap().as_str(), "bin_arg"); assert_eq!(opts.package.unwrap().as_str(), "foo"); } #[test] #[should_panic(expected = "cannot be used with one or more of the other")] fn group_is_exclusive() { let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--bin", "bin_arg"]); assert!(opts.example.is_none()); assert_eq!(opts.bin.unwrap().as_str(), "bin_arg"); let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--example", "example_binary"]); assert!(opts.bin.is_none()); assert_eq!(opts.example.unwrap().as_str(), "example_binary"); let _opts = AppConfig::from_iter_safe(&[ "instruments", "-t", "time", "--bin", 
"thing", "--example", "other", ]) .unwrap(); } #[test] fn limit_millis() { let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--time-limit", "42000"]); assert_eq!(opts.time_limit, Some(42000)); let opts = AppConfig::from_iter(&["instruments", "-t", "time", "--time-limit", "808"]); assert_eq!(opts.time_limit, Some(808)); let opts = AppConfig::from_iter(&["instruments", "-t", "time"]); assert_eq!(opts.time_limit, None); } #[test] fn features() { let opts = &[ "instruments", "--template", "time", "--example", "hello", "--features", "svg im", "--", "hi", ]; let opts = AppConfig::from_iter(opts); assert_eq!(opts.template_name, Some("time".into())); assert_eq!(opts.example, Some("hello".to_string())); assert_eq!(opts.features, Some("svg im".to_string())); let features: Vec<_> = opts .to_cargo_opts() .unwrap() .features .features .iter() .map(|feat| feat.to_string()) .collect(); assert_eq!(features, vec!["im", "svg"]); } #[test] fn var_args() { let opts = AppConfig::from_iter(&[ "instruments", "-t", "alloc", "--time-limit", "808", "--", "hi", "-h", "--bin", ]); assert_eq!(opts.template_name, Some("alloc".into())); assert_eq!(opts.time_limit, Some(808)); assert_eq!(opts.target_args, vec!["hi", "-h", "--bin"]); } #[test] fn manifest_path() { let opts = AppConfig::from_iter(&[ "instruments", "--manifest-path", "/path/to/Cargo.toml", "--template", "alloc", ]); assert!(opts.example.is_none()); assert!(opts.bin.is_none()); assert!(opts.bench.is_none()); assert!(opts.package.is_none()); assert_eq!(opts.manifest_path.unwrap(), PathBuf::from("/path/to/Cargo.toml")); } }
)] Instruments(AppConfig), }
random_line_split
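This record splits inside the `#[structopt(...)]` attribute on the `Instruments` variant. A standalone sketch, assuming structopt 0.3 as used above (the `Demo` struct is invented for illustration), of the two clap features `AppConfig` relies on: an argument group making `--example`/`--bin` mutually exclusive, and `TrailingVarArg` collecting everything after `--` for the child process.

use structopt::StructOpt;

#[derive(Debug, StructOpt)]
#[structopt(setting = structopt::clap::AppSettings::TrailingVarArg)]
struct Demo {
    /// Example target (conflicts with --bin via the shared group)
    #[structopt(long, group = "target")]
    example: Option<String>,
    /// Binary target (conflicts with --example via the shared group)
    #[structopt(long, group = "target")]
    bin: Option<String>,
    /// Everything after `--` is passed through untouched
    #[structopt(value_name = "ARGS")]
    target_args: Vec<String>,
}

fn main() {
    // Naming both group members at once is rejected by clap.
    assert!(Demo::from_iter_safe(&["demo", "--bin", "a", "--example", "b"]).is_err());
    // Trailing args after `--` survive verbatim.
    let d = Demo::from_iter(&["demo", "--bin", "a", "--", "-v", "--flag"]);
    assert_eq!(d.target_args, vec!["-v", "--flag"]);
}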
init.rs
use anyhow::{bail, Context, Result}; use nix::{ fcntl, sched, sys, unistd::{Gid, Uid}, }; use oci_spec::Spec; use std::{env, os::unix::io::AsRawFd}; use std::{fs, io::Write, path::Path, path::PathBuf}; use crate::{ capabilities, namespaces::Namespaces, notify_socket::NotifyListener, process::child, rootfs, rootless::Rootless, stdio::FileDescriptor, syscall::{linux::LinuxSyscall, Syscall}, tty, utils, }; // Make sure a given path is on procfs. This is to avoid the security risk of // the /proc path being mounted over. Ref: CVE-2019-16884 fn ensure_procfs(path: &Path) -> Result<()> { let procfs_fd = fs::File::open(path)?; let fstat_info = sys::statfs::fstatfs(&procfs_fd.as_raw_fd())?; if fstat_info.filesystem_type() != sys::statfs::PROC_SUPER_MAGIC { bail!(format!("{:?} is not on the procfs", path)); } Ok(()) } // Get a list of open fds for the calling process. fn get_open_fds() -> Result<Vec<i32>> { const PROCFS_FD_PATH: &str = "/proc/self/fd"; ensure_procfs(Path::new(PROCFS_FD_PATH)) .with_context(|| format!("{} is not the actual procfs", PROCFS_FD_PATH))?; let fds: Vec<i32> = fs::read_dir(PROCFS_FD_PATH)? .filter_map(|entry| match entry { Ok(entry) => Some(entry.path()), Err(_) => None, }) .filter_map(|path| path.file_name().map(|file_name| file_name.to_owned())) .filter_map(|file_name| file_name.to_str().map(String::from)) .filter_map(|file_name| -> Option<i32> { // Convert the file name from string into i32. Since we are looking // at /proc/<pid>/fd, anything that's not a number (i32) can be // ignored. We are only interested in opened fds. match file_name.parse() { Ok(fd) => Some(fd), Err(_) => None, } }) .collect(); Ok(fds) } // Clean up any extra file descriptors, so the new container process will not // leak a file descriptor from before execve gets executed. The first 3 fds will // stay open: stdin, stdout, and stderr. We further preserve the next // "preserve_fds" number of fds. The rest get the CLOEXEC flag set, so they // will be closed after execve into the container payload. We can't close the // fds immediately since we at least still need the pipe used to wait on // starting the container. fn cleanup_file_descriptors(preserve_fds: i32) -> Result<()> { let open_fds = get_open_fds().with_context(|| "Failed to obtain opened fds")?; // Include stdin, stdout, and stderr for fd 0, 1, and 2 respectively. let min_fd = preserve_fds + 3; let to_be_cleaned_up_fds: Vec<i32> = open_fds .iter() .filter_map(|&fd| if fd >= min_fd { Some(fd) } else { None }) .collect(); to_be_cleaned_up_fds.iter().for_each(|&fd| { // Intentionally ignore errors here -- the cases where this might fail // are basically file descriptors that have already been closed. let _ = fcntl::fcntl(fd, fcntl::F_SETFD(fcntl::FdFlag::FD_CLOEXEC)); }); Ok(()) } pub struct ContainerInitArgs { /// Flag indicating if an init or a tenant container should be created pub init: bool, /// Interface to operating system primitives pub syscall: LinuxSyscall, /// OCI compliant runtime spec pub spec: Spec, /// Root filesystem of the container pub rootfs: PathBuf, /// Socket to communicate the file descriptor of the ptty pub console_socket: Option<FileDescriptor>, /// Options for rootless containers pub rootless: Option<Rootless>, /// Path to the Unix Domain Socket to communicate container start pub notify_path: PathBuf, /// File descriptors preserved/passed to the container init process. 
pub preserve_fds: i32, /// Pipe used to communicate with the child process pub child: child::ChildProcess, } pub fn container_init(args: ContainerInitArgs) -> Result<()> { let command = &args.syscall; let spec = &args.spec; let linux = &spec.linux.as_ref().context("no linux in spec")?; let namespaces: Namespaces = linux.namespaces.clone().into(); // need to create the notify socket before we pivot root, since the unix // domain socket used here is outside of the container's rootfs let mut notify_socket: NotifyListener = NotifyListener::new(&args.notify_path)?; let proc = &spec.process.as_ref().context("no process in spec")?; let mut envs: Vec<String> = proc.env.clone(); let rootfs = &args.rootfs; let mut child = args.child; // If an out-of-memory score adjustment is set in the specification, set the // score value for the current process. Check // https://dev.to/rrampage/surviving-the-linux-oom-killer-2ki9 for some more // information if let Some(ref resource) = linux.resources { if let Some(oom_score_adj) = resource.oom_score_adj { let mut f = fs::File::create("/proc/self/oom_score_adj")?; f.write_all(oom_score_adj.to_string().as_bytes())?; } } // If a new user is specified in the specification, this will be true and a // new namespace will be created; check // https://man7.org/linux/man-pages/man7/user_namespaces.7.html for more // information if args.rootless.is_some() { // child needs to be dumpable, otherwise the non-root parent is not // allowed to write the uid/gid maps prctl::set_dumpable(true).unwrap(); child.request_identifier_mapping()?; child.wait_for_mapping_ack()?; prctl::set_dumpable(false).unwrap(); } // set limits and namespaces for the process for rlimit in proc.rlimits.iter() { command.set_rlimit(rlimit).context("failed to set rlimit")?; } command .set_id(Uid::from_raw(0), Gid::from_raw(0)) .context("failed to become root")?; // set up tty if specified if let Some(csocketfd) = args.console_socket { tty::setup_console(&csocketfd)?; } // join existing namespaces namespaces.apply_setns()?; command.set_hostname(spec.hostname.as_ref().context("no hostname in spec")?)?; if proc.no_new_privileges { let _ = prctl::set_no_new_privileges(true); } if args.init
command.set_id(Uid::from_raw(proc.user.uid), Gid::from_raw(proc.user.gid))?; capabilities::reset_effective(command)?; if let Some(caps) = &proc.capabilities { capabilities::drop_privileges(caps, command)?; } // Take care of LISTEN_FDS used for systemd socket activation. If the value is // not 0, then we have to preserve those fds as well, and set up the correct // environment variables. let preserve_fds: i32 = match env::var("LISTEN_FDS") { Ok(listen_fds_str) => { let listen_fds = match listen_fds_str.parse::<i32>() { Ok(v) => v, Err(error) => { log::warn!( "LISTEN_FDS entered is not a fd. Ignore the value. {:?}", error ); 0 } }; // The LISTEN_FDS will have to be passed to the container init process. // The LISTEN_PID will be set to PID 1. Based on the spec, if // LISTEN_FDS is 0, the variable should be unset, so we just ignore // it here if it is 0. if listen_fds > 0 { envs.append(&mut vec![ format!("LISTEN_FDS={}", listen_fds), "LISTEN_PID=1".to_string(), ]); } args.preserve_fds + listen_fds } Err(env::VarError::NotPresent) => args.preserve_fds, Err(env::VarError::NotUnicode(value)) => { log::warn!( "LISTEN_FDS entered is malformed: {:?}. Ignore the value.", &value ); args.preserve_fds } }; // clean up and handle preserved fds. cleanup_file_descriptors(preserve_fds).with_context(|| "Failed to clean up extra fds")?; // notify the parent that the init process is ready to execute the payload. child.notify_parent()?; // listen on the notify socket for the container start command notify_socket.wait_for_container_start()?; let args: &Vec<String> = &proc.args; utils::do_exec(&args[0], args, &envs)?; // After do_exec is called, the process is replaced with the container // payload through execvp, so it should never reach here. unreachable!(); } #[cfg(test)] mod tests { use super::*; use anyhow::{bail, Result}; use nix::{fcntl, sys, unistd}; use std::fs; #[test] fn test_get_open_fds() -> Result<()> { let file = fs::File::open("/dev/null")?; let fd = file.as_raw_fd(); let open_fds = super::get_open_fds()?; if !open_fds.iter().any(|&v| v == fd) { bail!("Failed to find the opened dev null fds: {:?}", open_fds); } // explicitly close the file before the test case returns. drop(file); // The stdio fds should also be contained in the list of opened fds. if !vec![0, 1, 2] .iter() .all(|&stdio_fd| open_fds.iter().any(|&open_fd| open_fd == stdio_fd)) { bail!("Failed to find the stdio fds: {:?}", open_fds); } Ok(()) } #[test] fn test_cleanup_file_descriptors() -> Result<()> { // Open a fd without the CLOEXEC flag. Rust automatically adds the flag, // so we use fcntl::open here for more control. let fd = fcntl::open("/dev/null", fcntl::OFlag::O_RDWR, sys::stat::Mode::empty())?; cleanup_file_descriptors(fd - 1).with_context(|| "Failed to clean up the fds")?; let fd_flag = fcntl::fcntl(fd, fcntl::F_GETFD)?; if (fd_flag & fcntl::FdFlag::FD_CLOEXEC.bits()) != 0 { bail!("CLOEXEC flag is not set correctly"); } unistd::close(fd)?; Ok(()) } }
{ rootfs::prepare_rootfs( spec, rootfs, namespaces .clone_flags .contains(sched::CloneFlags::CLONE_NEWUSER), ) .with_context(|| "Failed to prepare rootfs")?; // change the root of filesystem of the process to the rootfs command .pivot_rootfs(rootfs) .with_context(|| format!("Failed to pivot root to {:?}", rootfs))?; }
conditional_block
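The `conditional_block` middle above is the init-only rootfs preparation; just before it, `container_init` writes the OOM score adjustment straight into procfs. A standalone sketch of that single step (the helper name `set_oom_score_adj` is made up here; the valid range is -1000 to 1000 per proc(5)):

use std::{fs, io::Write};

fn set_oom_score_adj(score: i32) -> std::io::Result<()> {
    // The kernel reads the adjustment from this file; -1000 exempts the
    // process from the OOM killer, 1000 makes it the preferred victim.
    // Raising the score is unprivileged; lowering it below the current
    // value needs CAP_SYS_RESOURCE.
    let mut f = fs::File::create("/proc/self/oom_score_adj")?;
    f.write_all(score.to_string().as_bytes())
}

fn main() -> std::io::Result<()> {
    set_oom_score_adj(100)
}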
init.rs
use anyhow::{bail, Context, Result}; use nix::{ fcntl, sched, sys, unistd::{Gid, Uid}, }; use oci_spec::Spec; use std::{env, os::unix::io::AsRawFd}; use std::{fs, io::Write, path::Path, path::PathBuf}; use crate::{ capabilities, namespaces::Namespaces, notify_socket::NotifyListener, process::child, rootfs, rootless::Rootless, stdio::FileDescriptor, syscall::{linux::LinuxSyscall, Syscall}, tty, utils, }; // Make sure a given path is on procfs. This is to avoid the security risk of // the /proc path being mounted over. Ref: CVE-2019-16884 fn ensure_procfs(path: &Path) -> Result<()> { let procfs_fd = fs::File::open(path)?; let fstat_info = sys::statfs::fstatfs(&procfs_fd.as_raw_fd())?; if fstat_info.filesystem_type() != sys::statfs::PROC_SUPER_MAGIC { bail!(format!("{:?} is not on the procfs", path)); } Ok(()) } // Get a list of open fds for the calling process. fn get_open_fds() -> Result<Vec<i32>> { const PROCFS_FD_PATH: &str = "/proc/self/fd"; ensure_procfs(Path::new(PROCFS_FD_PATH)) .with_context(|| format!("{} is not the actual procfs", PROCFS_FD_PATH))?; let fds: Vec<i32> = fs::read_dir(PROCFS_FD_PATH)? .filter_map(|entry| match entry { Ok(entry) => Some(entry.path()), Err(_) => None, }) .filter_map(|path| path.file_name().map(|file_name| file_name.to_owned())) .filter_map(|file_name| file_name.to_str().map(String::from)) .filter_map(|file_name| -> Option<i32> { // Convert the file name from string into i32. Since we are looking // at /proc/<pid>/fd, anything that's not a number (i32) can be // ignored. We are only interested in opened fds. match file_name.parse() { Ok(fd) => Some(fd), Err(_) => None, } }) .collect(); Ok(fds) } // Clean up any extra file descriptors, so the new container process will not // leak a file descriptor from before execve gets executed. The first 3 fds will // stay open: stdin, stdout, and stderr. We further preserve the next // "preserve_fds" number of fds. The rest get the CLOEXEC flag set, so they // will be closed after execve into the container payload. We can't close the // fds immediately since we at least still need the pipe used to wait on // starting the container. fn cleanup_file_descriptors(preserve_fds: i32) -> Result<()> { let open_fds = get_open_fds().with_context(|| "Failed to obtain opened fds")?; // Include stdin, stdout, and stderr for fd 0, 1, and 2 respectively. let min_fd = preserve_fds + 3; let to_be_cleaned_up_fds: Vec<i32> = open_fds .iter() .filter_map(|&fd| if fd >= min_fd { Some(fd) } else { None }) .collect(); to_be_cleaned_up_fds.iter().for_each(|&fd| { // Intentionally ignore errors here -- the cases where this might fail // are basically file descriptors that have already been closed. let _ = fcntl::fcntl(fd, fcntl::F_SETFD(fcntl::FdFlag::FD_CLOEXEC)); }); Ok(()) } pub struct ContainerInitArgs { /// Flag indicating if an init or a tenant container should be created pub init: bool, /// Interface to operating system primitives pub syscall: LinuxSyscall, /// OCI compliant runtime spec pub spec: Spec, /// Root filesystem of the container pub rootfs: PathBuf, /// Socket to communicate the file descriptor of the ptty pub console_socket: Option<FileDescriptor>, /// Options for rootless containers pub rootless: Option<Rootless>, /// Path to the Unix Domain Socket to communicate container start pub notify_path: PathBuf, /// File descriptors preserved/passed to the container init process. 
pub preserve_fds: i32, /// Pipe used to communicate with the child process pub child: child::ChildProcess, } pub fn container_init(args: ContainerInitArgs) -> Result<()> { let command = &args.syscall; let spec = &args.spec; let linux = &spec.linux.as_ref().context("no linux in spec")?; let namespaces: Namespaces = linux.namespaces.clone().into(); // need to create the notify socket before we pivot root, since the unix // domain socket used here is outside of the container's rootfs let mut notify_socket: NotifyListener = NotifyListener::new(&args.notify_path)?; let proc = &spec.process.as_ref().context("no process in spec")?; let mut envs: Vec<String> = proc.env.clone(); let rootfs = &args.rootfs; let mut child = args.child; // If an out-of-memory score adjustment is set in the specification, set the // score value for the current process. Check // https://dev.to/rrampage/surviving-the-linux-oom-killer-2ki9 for some more // information if let Some(ref resource) = linux.resources { if let Some(oom_score_adj) = resource.oom_score_adj { let mut f = fs::File::create("/proc/self/oom_score_adj")?; f.write_all(oom_score_adj.to_string().as_bytes())?; } } // If a new user is specified in the specification, this will be true and a // new namespace will be created; check // https://man7.org/linux/man-pages/man7/user_namespaces.7.html for more // information if args.rootless.is_some() { // child needs to be dumpable, otherwise the non-root parent is not // allowed to write the uid/gid maps prctl::set_dumpable(true).unwrap(); child.request_identifier_mapping()?; child.wait_for_mapping_ack()?; prctl::set_dumpable(false).unwrap(); } // set limits and namespaces for the process for rlimit in proc.rlimits.iter() { command.set_rlimit(rlimit).context("failed to set rlimit")?; } command .set_id(Uid::from_raw(0), Gid::from_raw(0)) .context("failed to become root")?; // set up tty if specified if let Some(csocketfd) = args.console_socket { tty::setup_console(&csocketfd)?; } // join existing namespaces namespaces.apply_setns()?; command.set_hostname(spec.hostname.as_ref().context("no hostname in spec")?)?; if proc.no_new_privileges { let _ = prctl::set_no_new_privileges(true); } if args.init { rootfs::prepare_rootfs( spec, rootfs, namespaces .clone_flags .contains(sched::CloneFlags::CLONE_NEWUSER), ) .with_context(|| "Failed to prepare rootfs")?; // change the root of filesystem of the process to the rootfs command .pivot_rootfs(rootfs) .with_context(|| format!("Failed to pivot root to {:?}", rootfs))?; } command.set_id(Uid::from_raw(proc.user.uid), Gid::from_raw(proc.user.gid))?; capabilities::reset_effective(command)?; if let Some(caps) = &proc.capabilities { capabilities::drop_privileges(caps, command)?; } // Take care of LISTEN_FDS used for systemd socket activation. If the value is // not 0, then we have to preserve those fds as well, and set up the correct // environment variables. let preserve_fds: i32 = match env::var("LISTEN_FDS") { Ok(listen_fds_str) => { let listen_fds = match listen_fds_str.parse::<i32>() { Ok(v) => v, Err(error) => { log::warn!( "LISTEN_FDS entered is not a fd. Ignore the value. {:?}", error ); 0 } }; // The LISTEN_FDS will have to be passed to the container init process. // The LISTEN_PID will be set to PID 1. Based on the spec, if // LISTEN_FDS is 0, the variable should be unset, so we just ignore // it here if it is 0. 
if listen_fds > 0 { envs.append(&mut vec![ format!("LISTEN_FDS={}", listen_fds), "LISTEN_PID=1".to_string(), ]); } args.preserve_fds + listen_fds } Err(env::VarError::NotPresent) => args.preserve_fds, Err(env::VarError::NotUnicode(value)) => {
"LISTEN_FDS entered is malformed: {:?}. Ignore the value.", &value ); args.preserve_fds } }; // clean up and handle perserved fds. cleanup_file_descriptors(preserve_fds).with_context(|| "Failed to clean up extra fds")?; // notify parents that the init process is ready to execute the payload. child.notify_parent()?; // listing on the notify socket for container start command notify_socket.wait_for_container_start()?; let args: &Vec<String> = &proc.args; utils::do_exec(&args[0], args, &envs)?; // After do_exec is called, the process is replaced with the container // payload through execvp, so it should never reach here. unreachable!(); } #[cfg(test)] mod tests { use super::*; use anyhow::{bail, Result}; use nix::{fcntl, sys, unistd}; use std::fs; #[test] fn test_get_open_fds() -> Result<()> { let file = fs::File::open("/dev/null")?; let fd = file.as_raw_fd(); let open_fds = super::get_open_fds()?; if!open_fds.iter().any(|&v| v == fd) { bail!("Failed to find the opened dev null fds: {:?}", open_fds); } // explicitly close the file before the test case returns. drop(file); // The stdio fds should also be contained in the list of opened fds. if!vec![0, 1, 2] .iter() .all(|&stdio_fd| open_fds.iter().any(|&open_fd| open_fd == stdio_fd)) { bail!("Failed to find the stdio fds: {:?}", open_fds); } Ok(()) } #[test] fn test_cleanup_file_descriptors() -> Result<()> { // Open a fd without the CLOEXEC flag. Rust automatically adds the flag, // so we use fcntl::open here for more control. let fd = fcntl::open("/dev/null", fcntl::OFlag::O_RDWR, sys::stat::Mode::empty())?; cleanup_file_descriptors(fd - 1).with_context(|| "Failed to clean up the fds")?; let fd_flag = fcntl::fcntl(fd, fcntl::F_GETFD)?; if (fd_flag & fcntl::FdFlag::FD_CLOEXEC.bits())!= 0 { bail!("CLOEXEC flag is not set correctly"); } unistd::close(fd)?; Ok(()) } }
log::warn!(
random_line_split
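The LISTEN_FDS handling this record splits through follows the systemd socket-activation convention. A standalone sketch of just the fd-numbering side of that protocol (helper and constant names are mine; the start-at-3 rule is from sd_listen_fds(3)):

use std::env;

// systemd passes activated sockets starting at fd 3.
const SD_LISTEN_FDS_START: i32 = 3;

fn inherited_fds() -> Vec<i32> {
    match env::var("LISTEN_FDS").ok().and_then(|v| v.parse::<i32>().ok()) {
        Some(n) if n > 0 => (SD_LISTEN_FDS_START..SD_LISTEN_FDS_START + n).collect(),
        _ => Vec::new(),
    }
}

fn main() {
    // Prints [] unless launched under a socket-activated unit.
    println!("inherited sockets: {:?}", inherited_fds());
}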
init.rs
use anyhow::{bail, Context, Result}; use nix::{ fcntl, sched, sys, unistd::{Gid, Uid}, }; use oci_spec::Spec; use std::{env, os::unix::io::AsRawFd}; use std::{fs, io::Write, path::Path, path::PathBuf}; use crate::{ capabilities, namespaces::Namespaces, notify_socket::NotifyListener, process::child, rootfs, rootless::Rootless, stdio::FileDescriptor, syscall::{linux::LinuxSyscall, Syscall}, tty, utils, }; // Make sure a given path is on procfs. This is to avoid the security risk of // the /proc path being mounted over. Ref: CVE-2019-16884 fn ensure_procfs(path: &Path) -> Result<()> { let procfs_fd = fs::File::open(path)?; let fstat_info = sys::statfs::fstatfs(&procfs_fd.as_raw_fd())?; if fstat_info.filesystem_type() != sys::statfs::PROC_SUPER_MAGIC { bail!(format!("{:?} is not on the procfs", path)); } Ok(()) } // Get a list of open fds for the calling process. fn get_open_fds() -> Result<Vec<i32>> { const PROCFS_FD_PATH: &str = "/proc/self/fd"; ensure_procfs(Path::new(PROCFS_FD_PATH)) .with_context(|| format!("{} is not the actual procfs", PROCFS_FD_PATH))?; let fds: Vec<i32> = fs::read_dir(PROCFS_FD_PATH)? .filter_map(|entry| match entry { Ok(entry) => Some(entry.path()), Err(_) => None, }) .filter_map(|path| path.file_name().map(|file_name| file_name.to_owned())) .filter_map(|file_name| file_name.to_str().map(String::from)) .filter_map(|file_name| -> Option<i32> { // Convert the file name from string into i32. Since we are looking // at /proc/<pid>/fd, anything that's not a number (i32) can be // ignored. We are only interested in opened fds. match file_name.parse() { Ok(fd) => Some(fd), Err(_) => None, } }) .collect(); Ok(fds) } // Clean up any extra file descriptors, so the new container process will not // leak a file descriptor from before execve gets executed. The first 3 fds will // stay open: stdin, stdout, and stderr. We further preserve the next // "preserve_fds" number of fds. The rest get the CLOEXEC flag set, so they // will be closed after execve into the container payload. We can't close the // fds immediately since we at least still need the pipe used to wait on // starting the container. fn cleanup_file_descriptors(preserve_fds: i32) -> Result<()> { let open_fds = get_open_fds().with_context(|| "Failed to obtain opened fds")?; // Include stdin, stdout, and stderr for fd 0, 1, and 2 respectively. let min_fd = preserve_fds + 3; let to_be_cleaned_up_fds: Vec<i32> = open_fds .iter() .filter_map(|&fd| if fd >= min_fd { Some(fd) } else { None }) .collect(); to_be_cleaned_up_fds.iter().for_each(|&fd| { // Intentionally ignore errors here -- the cases where this might fail // are basically file descriptors that have already been closed. let _ = fcntl::fcntl(fd, fcntl::F_SETFD(fcntl::FdFlag::FD_CLOEXEC)); }); Ok(()) } pub struct ContainerInitArgs { /// Flag indicating if an init or a tenant container should be created pub init: bool, /// Interface to operating system primitives pub syscall: LinuxSyscall, /// OCI compliant runtime spec pub spec: Spec, /// Root filesystem of the container pub rootfs: PathBuf, /// Socket to communicate the file descriptor of the ptty pub console_socket: Option<FileDescriptor>, /// Options for rootless containers pub rootless: Option<Rootless>, /// Path to the Unix Domain Socket to communicate container start pub notify_path: PathBuf, /// File descriptors preserved/passed to the container init process. 
pub preserve_fds: i32, /// Pipe used to communicate with the child process pub child: child::ChildProcess, } pub fn container_init(args: ContainerInitArgs) -> Result<()> { let command = &args.syscall; let spec = &args.spec; let linux = &spec.linux.as_ref().context("no linux in spec")?; let namespaces: Namespaces = linux.namespaces.clone().into(); // need to create the notify socket before we pivot root, since the unix // domain socket used here is outside of the container's rootfs let mut notify_socket: NotifyListener = NotifyListener::new(&args.notify_path)?; let proc = &spec.process.as_ref().context("no process in spec")?; let mut envs: Vec<String> = proc.env.clone(); let rootfs = &args.rootfs; let mut child = args.child; // If an out-of-memory score adjustment is set in the specification, set the // score value for the current process. Check // https://dev.to/rrampage/surviving-the-linux-oom-killer-2ki9 for some more // information if let Some(ref resource) = linux.resources { if let Some(oom_score_adj) = resource.oom_score_adj { let mut f = fs::File::create("/proc/self/oom_score_adj")?; f.write_all(oom_score_adj.to_string().as_bytes())?; } } // If a new user is specified in the specification, this will be true and a // new namespace will be created; check // https://man7.org/linux/man-pages/man7/user_namespaces.7.html for more // information if args.rootless.is_some() { // child needs to be dumpable, otherwise the non-root parent is not // allowed to write the uid/gid maps prctl::set_dumpable(true).unwrap(); child.request_identifier_mapping()?; child.wait_for_mapping_ack()?; prctl::set_dumpable(false).unwrap(); } // set limits and namespaces for the process for rlimit in proc.rlimits.iter() { command.set_rlimit(rlimit).context("failed to set rlimit")?; } command .set_id(Uid::from_raw(0), Gid::from_raw(0)) .context("failed to become root")?; // set up tty if specified if let Some(csocketfd) = args.console_socket { tty::setup_console(&csocketfd)?; } // join existing namespaces namespaces.apply_setns()?; command.set_hostname(spec.hostname.as_ref().context("no hostname in spec")?)?; if proc.no_new_privileges { let _ = prctl::set_no_new_privileges(true); } if args.init { rootfs::prepare_rootfs( spec, rootfs, namespaces .clone_flags .contains(sched::CloneFlags::CLONE_NEWUSER), ) .with_context(|| "Failed to prepare rootfs")?; // change the root of filesystem of the process to the rootfs command .pivot_rootfs(rootfs) .with_context(|| format!("Failed to pivot root to {:?}", rootfs))?; } command.set_id(Uid::from_raw(proc.user.uid), Gid::from_raw(proc.user.gid))?; capabilities::reset_effective(command)?; if let Some(caps) = &proc.capabilities { capabilities::drop_privileges(caps, command)?; } // Take care of LISTEN_FDS used for systemd socket activation. If the value is // not 0, then we have to preserve those fds as well, and set up the correct // environment variables. let preserve_fds: i32 = match env::var("LISTEN_FDS") { Ok(listen_fds_str) => { let listen_fds = match listen_fds_str.parse::<i32>() { Ok(v) => v, Err(error) => { log::warn!( "LISTEN_FDS entered is not a fd. Ignore the value. {:?}", error ); 0 } }; // The LISTEN_FDS will have to be passed to the container init process. // The LISTEN_PID will be set to PID 1. Based on the spec, if // LISTEN_FDS is 0, the variable should be unset, so we just ignore // it here if it is 0. 
if listen_fds > 0 { envs.append(&mut vec![ format!("LISTEN_FDS={}", listen_fds), "LISTEN_PID=1".to_string(), ]); } args.preserve_fds + listen_fds } Err(env::VarError::NotPresent) => args.preserve_fds, Err(env::VarError::NotUnicode(value)) => { log::warn!( "LISTEN_FDS entered is malformed: {:?}. Ignore the value.", &value ); args.preserve_fds } }; // clean up and handle preserved fds. cleanup_file_descriptors(preserve_fds).with_context(|| "Failed to clean up extra fds")?; // notify the parent that the init process is ready to execute the payload. child.notify_parent()?; // listen on the notify socket for the container start command notify_socket.wait_for_container_start()?; let args: &Vec<String> = &proc.args; utils::do_exec(&args[0], args, &envs)?; // After do_exec is called, the process is replaced with the container // payload through execvp, so it should never reach here. unreachable!(); } #[cfg(test)] mod tests { use super::*; use anyhow::{bail, Result}; use nix::{fcntl, sys, unistd}; use std::fs; #[test] fn test_get_open_fds() -> Result<()> { let file = fs::File::open("/dev/null")?; let fd = file.as_raw_fd(); let open_fds = super::get_open_fds()?; if !open_fds.iter().any(|&v| v == fd) { bail!("Failed to find the opened dev null fds: {:?}", open_fds); } // explicitly close the file before the test case returns. drop(file); // The stdio fds should also be contained in the list of opened fds. if !vec![0, 1, 2] .iter() .all(|&stdio_fd| open_fds.iter().any(|&open_fd| open_fd == stdio_fd)) { bail!("Failed to find the stdio fds: {:?}", open_fds); } Ok(()) } #[test] fn
() -> Result<()> { // Open a fd without the CLOEXEC flag. Rust automatically adds the flag, // so we use fcntl::open here for more control. let fd = fcntl::open("/dev/null", fcntl::OFlag::O_RDWR, sys::stat::Mode::empty())?; cleanup_file_descriptors(fd - 1).with_context(|| "Failed to clean up the fds")?; let fd_flag = fcntl::fcntl(fd, fcntl::F_GETFD)?; if (fd_flag & fcntl::FdFlag::FD_CLOEXEC.bits()) != 0 { bail!("CLOEXEC flag is not set correctly"); } unistd::close(fd)?; Ok(()) } }
test_cleanup_file_descriptors
identifier_name
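The `identifier_name` middle above is the test name `test_cleanup_file_descriptors`. A standalone sketch, assuming the same nix crate APIs the file already uses, of the F_SETFD/F_GETFD round-trip that test relies on:

use nix::fcntl::{self, FdFlag, OFlag};
use nix::sys::stat::Mode;
use nix::unistd;

fn main() -> nix::Result<()> {
    // Open without CLOEXEC, set it manually, then read it back.
    let fd = fcntl::open("/dev/null", OFlag::O_RDWR, Mode::empty())?;
    fcntl::fcntl(fd, fcntl::F_SETFD(FdFlag::FD_CLOEXEC))?;
    let flags = fcntl::fcntl(fd, fcntl::F_GETFD)?;
    assert!((flags & FdFlag::FD_CLOEXEC.bits()) != 0);
    unistd::close(fd)?;
    Ok(())
}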
init.rs
use anyhow::{bail, Context, Result}; use nix::{ fcntl, sched, sys, unistd::{Gid, Uid}, }; use oci_spec::Spec; use std::{env, os::unix::io::AsRawFd}; use std::{fs, io::Write, path::Path, path::PathBuf}; use crate::{ capabilities, namespaces::Namespaces, notify_socket::NotifyListener, process::child, rootfs, rootless::Rootless, stdio::FileDescriptor, syscall::{linux::LinuxSyscall, Syscall}, tty, utils, }; // Make sure a given path is on procfs. This is to avoid the security risk of // the /proc path being mounted over. Ref: CVE-2019-16884 fn ensure_procfs(path: &Path) -> Result<()> { let procfs_fd = fs::File::open(path)?; let fstat_info = sys::statfs::fstatfs(&procfs_fd.as_raw_fd())?; if fstat_info.filesystem_type() != sys::statfs::PROC_SUPER_MAGIC { bail!(format!("{:?} is not on the procfs", path)); } Ok(()) } // Get a list of open fds for the calling process. fn get_open_fds() -> Result<Vec<i32>>
}) .collect(); Ok(fds) } // Clean up any extra file descriptors, so the new container process will not // leak a file descriptor from before execve gets executed. The first 3 fds will // stay open: stdin, stdout, and stderr. We further preserve the next // "preserve_fds" number of fds. The rest get the CLOEXEC flag set, so they // will be closed after execve into the container payload. We can't close the // fds immediately since we at least still need the pipe used to wait on // starting the container. fn cleanup_file_descriptors(preserve_fds: i32) -> Result<()> { let open_fds = get_open_fds().with_context(|| "Failed to obtain opened fds")?; // Include stdin, stdout, and stderr for fd 0, 1, and 2 respectively. let min_fd = preserve_fds + 3; let to_be_cleaned_up_fds: Vec<i32> = open_fds .iter() .filter_map(|&fd| if fd >= min_fd { Some(fd) } else { None }) .collect(); to_be_cleaned_up_fds.iter().for_each(|&fd| { // Intentionally ignore errors here -- the cases where this might fail // are basically file descriptors that have already been closed. let _ = fcntl::fcntl(fd, fcntl::F_SETFD(fcntl::FdFlag::FD_CLOEXEC)); }); Ok(()) } pub struct ContainerInitArgs { /// Flag indicating if an init or a tenant container should be created pub init: bool, /// Interface to operating system primitives pub syscall: LinuxSyscall, /// OCI compliant runtime spec pub spec: Spec, /// Root filesystem of the container pub rootfs: PathBuf, /// Socket to communicate the file descriptor of the ptty pub console_socket: Option<FileDescriptor>, /// Options for rootless containers pub rootless: Option<Rootless>, /// Path to the Unix Domain Socket to communicate container start pub notify_path: PathBuf, /// File descriptors preserved/passed to the container init process. pub preserve_fds: i32, /// Pipe used to communicate with the child process pub child: child::ChildProcess, } pub fn container_init(args: ContainerInitArgs) -> Result<()> { let command = &args.syscall; let spec = &args.spec; let linux = &spec.linux.as_ref().context("no linux in spec")?; let namespaces: Namespaces = linux.namespaces.clone().into(); // need to create the notify socket before we pivot root, since the unix // domain socket used here is outside of the container's rootfs let mut notify_socket: NotifyListener = NotifyListener::new(&args.notify_path)?; let proc = &spec.process.as_ref().context("no process in spec")?; let mut envs: Vec<String> = proc.env.clone(); let rootfs = &args.rootfs; let mut child = args.child; // If an out-of-memory score adjustment is set in the specification, 
set the // score value for the current process. Check // https://dev.to/rrampage/surviving-the-linux-oom-killer-2ki9 for some more // information if let Some(ref resource) = linux.resources { if let Some(oom_score_adj) = resource.oom_score_adj { let mut f = fs::File::create("/proc/self/oom_score_adj")?; f.write_all(oom_score_adj.to_string().as_bytes())?; } } // If a new user is specified in the specification, this will be true and a // new namespace will be created; check // https://man7.org/linux/man-pages/man7/user_namespaces.7.html for more // information if args.rootless.is_some() { // child needs to be dumpable, otherwise the non-root parent is not // allowed to write the uid/gid maps prctl::set_dumpable(true).unwrap(); child.request_identifier_mapping()?; child.wait_for_mapping_ack()?; prctl::set_dumpable(false).unwrap(); } // set limits and namespaces for the process for rlimit in proc.rlimits.iter() { command.set_rlimit(rlimit).context("failed to set rlimit")?; } command .set_id(Uid::from_raw(0), Gid::from_raw(0)) .context("failed to become root")?; // set up tty if specified if let Some(csocketfd) = args.console_socket { tty::setup_console(&csocketfd)?; } // join existing namespaces namespaces.apply_setns()?; command.set_hostname(spec.hostname.as_ref().context("no hostname in spec")?)?; if proc.no_new_privileges { let _ = prctl::set_no_new_privileges(true); } if args.init { rootfs::prepare_rootfs( spec, rootfs, namespaces .clone_flags .contains(sched::CloneFlags::CLONE_NEWUSER), ) .with_context(|| "Failed to prepare rootfs")?; // change the root of filesystem of the process to the rootfs command .pivot_rootfs(rootfs) .with_context(|| format!("Failed to pivot root to {:?}", rootfs))?; } command.set_id(Uid::from_raw(proc.user.uid), Gid::from_raw(proc.user.gid))?; capabilities::reset_effective(command)?; if let Some(caps) = &proc.capabilities { capabilities::drop_privileges(caps, command)?; } // Take care of LISTEN_FDS used for systemd socket activation. If the value is // not 0, then we have to preserve those fds as well, and set up the correct // environment variables. let preserve_fds: i32 = match env::var("LISTEN_FDS") { Ok(listen_fds_str) => { let listen_fds = match listen_fds_str.parse::<i32>() { Ok(v) => v, Err(error) => { log::warn!( "LISTEN_FDS entered is not a fd. Ignore the value. {:?}", error ); 0 } }; // The LISTEN_FDS will have to be passed to the container init process. // The LISTEN_PID will be set to PID 1. Based on the spec, if // LISTEN_FDS is 0, the variable should be unset, so we just ignore // it here if it is 0. if listen_fds > 0 { envs.append(&mut vec![ format!("LISTEN_FDS={}", listen_fds), "LISTEN_PID=1".to_string(), ]); } args.preserve_fds + listen_fds } Err(env::VarError::NotPresent) => args.preserve_fds, Err(env::VarError::NotUnicode(value)) => { log::warn!( "LISTEN_FDS entered is malformed: {:?}. Ignore the value.", &value ); args.preserve_fds } }; // clean up and handle preserved fds. cleanup_file_descriptors(preserve_fds).with_context(|| "Failed to clean up extra fds")?; // notify the parent that the init process is ready to execute the payload. child.notify_parent()?; // listen on the notify socket for the container start command notify_socket.wait_for_container_start()?; let args: &Vec<String> = &proc.args; utils::do_exec(&args[0], args, &envs)?; // After do_exec is called, the process is replaced with the container // payload through execvp, so it should never reach here. 
unreachable!(); } #[cfg(test)] mod tests { use super::*; use anyhow::{bail, Result}; use nix::{fcntl, sys, unistd}; use std::fs; #[test] fn test_get_open_fds() -> Result<()> { let file = fs::File::open("/dev/null")?; let fd = file.as_raw_fd(); let open_fds = super::get_open_fds()?; if !open_fds.iter().any(|&v| v == fd) { bail!("Failed to find the opened dev null fds: {:?}", open_fds); } // explicitly close the file before the test case returns. drop(file); // The stdio fds should also be contained in the list of opened fds. if !vec![0, 1, 2] .iter() .all(|&stdio_fd| open_fds.iter().any(|&open_fd| open_fd == stdio_fd)) { bail!("Failed to find the stdio fds: {:?}", open_fds); } Ok(()) } #[test] fn test_cleanup_file_descriptors() -> Result<()> { // Open a fd without the CLOEXEC flag. Rust automatically adds the flag, // so we use fcntl::open here for more control. let fd = fcntl::open("/dev/null", fcntl::OFlag::O_RDWR, sys::stat::Mode::empty())?; cleanup_file_descriptors(fd - 1).with_context(|| "Failed to clean up the fds")?; let fd_flag = fcntl::fcntl(fd, fcntl::F_GETFD)?; if (fd_flag & fcntl::FdFlag::FD_CLOEXEC.bits()) != 0 { bail!("CLOEXEC flag is not set correctly"); } unistd::close(fd)?; Ok(()) } }
{ const PROCFS_FD_PATH: &str = "/proc/self/fd"; ensure_procfs(Path::new(PROCFS_FD_PATH)) .with_context(|| format!("{} is not the actual procfs", PROCFS_FD_PATH))?; let fds: Vec<i32> = fs::read_dir(PROCFS_FD_PATH)? .filter_map(|entry| match entry { Ok(entry) => Some(entry.path()), Err(_) => None, }) .filter_map(|path| path.file_name().map(|file_name| file_name.to_owned())) .filter_map(|file_name| file_name.to_str().map(String::from)) .filter_map(|file_name| -> Option<i32> { // Convert the file name from string into i32. Since we are looking // at /proc/<pid>/fd, anything that's not a number (i32) can be // ignored. We are only interested in opened fds. match file_name.parse() { Ok(fd) => Some(fd), Err(_) => None, }
identifier_body
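The `identifier_body` middle above fills in `get_open_fds`. The same /proc/self/fd enumeration trick works with nothing but the standard library; a minimal Linux-only sketch (my `open_fds` helper, not the crate's):

use std::fs;

fn open_fds() -> std::io::Result<Vec<i32>> {
    // Every entry name in /proc/self/fd is an fd currently open in this
    // process (including the directory fd that read_dir itself holds).
    Ok(fs::read_dir("/proc/self/fd")?
        .filter_map(|entry| entry.ok())
        .filter_map(|entry| entry.file_name().into_string().ok())
        .filter_map(|name| name.parse().ok())
        .collect())
}

fn main() -> std::io::Result<()> {
    println!("open fds: {:?}", open_fds()?);
    Ok(())
}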
lib.rs
// Summary // - Patterns are used everywhere // - Make sure to cover every possible case // // Pattern matching may land in Python around 3.10, too // (Rust's subprocess was influenced by Python, among other things; the languages seem to inspire each other) // // // This chapter reads like a reference, listing the various patterns and how to match on them // First, a light review of chapter 6, plus a little extra enum Coin { Penny, Nickel, Dime, Quarter, } pub fn simple_matching() { let coins = vec![Coin::Penny, Coin::Nickel, Coin::Dime, Coin::Quarter]; for coin in coins.iter() { println!( "matched coin is {}", match coin { Coin::Penny => { println!("Lucky penny!"); 1 } Coin::Nickel => 5, Coin::Dime => 10, Coin::Quarter => 25, } ); } // Lucky penny! // matched coin is 1 // matched coin is 5 // matched coin is 10 // matched coin is 25 } pub fn all_patterns_must_be_covered() { let a = 3; let b = match a { 1 => 1, 2 => 2, 3 => 3, _ => 0, // the rest must be covered by the _ pattern // otherwise you get the compile error below // patterns `std::i32::MIN..=0i32` and `4i32..=std::i32::MAX` not covered }; println!("matched value is: {}", b); } // It doesn't shine in this example, but if-let notation is in the toolbox too // use it where expressing things with match would be verbose pub fn if_let_notation() { let a = 3; if let 1 = a { println!("matched value is: 1"); } else if let 2 = a { println!("matched value is: 2"); } else if let 3 = a { println!("matched value is: 3"); } else { println!("matched value is: others"); } } pub fn all_expressions_must_return_the_same_typed_value() { let a = 3; println!( "matched value is: {}", match a { 1 => "one", 2 => "two", // 3 => 3, // expected '&str', found integer // arms cannot be written to return different types // which type is expected depends on the type returned by the first arm _ => "others", } ); // matched value is: others } // From here, the chapter 18 material in earnest // A pattern where if and if-let are mixed together // A program that wants to change the background color depending on a favorite color and an age // It is convoluted, so I would rather not use this style much pub fn mixed_if_statements() { let favorite_color: Option<&str> = None; let is_tuesday = false; let age: Result<u8, _> = "34".parse(); if let Some(color) = favorite_color { println!("Using your favorite color, {}, as the background", color); } else if is_tuesday { println!("Tuesday is green day"); } else if let Ok(age) = age { if age > 30 { println!("Using purple as the background color"); } else { println!("Using orange as the background color"); } } else { println!("Using blue as the background color"); } // Using purple as the background color } // Patterns in while, too // An example that pops as long as something is there pub fn pop_as_long_as_vector_has_values() { let mut stack = Vec::new(); stack.push(1); stack.push(2); stack.push(3); while let Some(top) = stack.pop() { println!("vector has {} on the top", top); } // vector has 3 on the top // vector has 2 on the top // vector has 1 on the top } // The idea of patterns is used in for loops as well // in `for x in y`, x is the pattern // here we show destructuring into a tuple pub fn for_statement_can_use_patterns() { let v = vec!['a', 'b', 'c']; for (i, v) in v.iter().enumerate() { println!("{} is at index {}", v, i); } // a is at index 0 // b is at index 1 // c is at index 2 } // In fact, the everyday let statement uses the idea of patterns too pub fn let_statement_uses_pattern() { // let PATTERN = EXPRESSION; // a pattern meaning that whatever matches here is bound to the variable _x // it says anything will do let _x = 5; // mismatched type // expected a tuple with 3 elements, found one with 2 elements // let (_x, _y) = (1, 2, 3); // this way we can bind (match) only _x and _y let (_x, _y, _) = (1, 2, 3); } // A hard part: refutability // looking up the Japanese term for 'refutation' in the Eijiro dictionary redirects to 'counterargument' // patterns come in two forms, refutable and irrefutable #[allow(irrefutable_let_patterns)] pub fn various_refutables() { // refutable // a pattern that can fail to match for some possible value let some_option_value = Some(3); // without control flow that handles the non-matching case, this is a compile error // handle the None possibility as below if let Some(x) = some_option_value { println!("some option value is: {}", x); } // irrefutable // matches anything at all // this produces a warning: // irrefutable if-let pattern if 
let _x = 5 { println!("x matches 5"); } // this one is refutable // no warning here // 5 is not necessarily equal to the value bound to x let x = 5; if let 5 = x { println!("5 matches x"); } } // A match expression with an arm that introduces a new y #[allow(unreachable_patterns)] pub fn match_arm_begins_new_scope() { let x = Some(5); let y = 10; match x { Some(5) => println!("Got 50"), // a new variable y appears // it is distinct from the y declared earlier // this arm matches whatever value Some(x) holds, so this branch runs // to compare with the outer y, you need a match guard condition Some(y) => println!("Matched, y = {:?}", y), // a match guard condition // it is not a pattern, so it introduces no new variables Some(n) if n == y => println!("Matched, n = {:?}", n), _ => println!("Default case, x = {:?}", x), } } // Matching multiple patterns pub fn multiple_match() { let x = 1; match x { 1 | 2 => println!("one or two"), 3 => println!("three"), _ => println!("anything"), } } // Matching a range of values with ..= pub fn range_match() { let x = 5; match x { 1..=5 => println!("one through five"), _ => println!("something else"), } // one through five let x = 'k'; match x { 'a'..='j' => println!("early ASCII letter"), 'k'..='z' => println!("late ASCII letter"), _ => println!("something else"), } // late ASCII letter } struct Point2D { x: i32, y: i32, } pub fn decomposition_and_matching_of_struct_may_be_tricky() { let p = Point2D { x: 0, y: 7 }; // extract p's fields as the variables x and y let Point2D { x, y } = p; println!("x of Point is: {}", x); // 0 println!("y of Point is: {}", y); // 7 // use a match expression to branch on the fields match p { // on the x axis? Point2D { x, y: 0 } => println!("On the x axis at: {}", x), // on the y axis? Point2D { x: 0, y } => println!("On the y axis at: {}", y), // 7 // neither? Point2D { x, y } => println!("On neither axis: ({}, {})", x, y), } } enum Color { Rgb(i32, i32, i32), Hsv(i32, i32, i32), } enum Message { Quit, Move { x: i32, y: i32 }, Write(String), ChangeColor(Color), } pub fn destructure_enum() { let msgs = vec![ Message::Quit, Message::Move { x: 3, y: 4 }, Message::Write("Hello!".to_string()), Message::ChangeColor(Color::Rgb(0, 255, 128)), Message::ChangeColor(Color::Hsv(0, 0, 0)), ]; for msg in msgs.iter() { match msg { Message::Quit => { println!("The Quit variant has no data to destructure."); } Message::Move { x, y } => { println!("Move in the x direction {} and in the y direction {}", x, y); } Message::Write(text) => { println!("Text message: {}", text); } Message::ChangeColor(Color::Rgb(r, g, b)) => { println!("Change the color to red {}, green {}, blue {}", r, g, b); } Message::ChangeColor(Color::Hsv(h, s, v)) => { println!("Change the color to h {}, s {}, v {}", h, s, v); } } } } pub fn destructure_nested_structures() { let ((feet, inches), Point2D { x, y }) = ((3, 10), Point2D { x: 3, y: -10 }); let vs = [("feet", feet), ("inches", inches), ("p.x", x), ("p.y", y)]; for v in vs.iter() { println!("{:?}: {}", v.0, v.1); } } pub fn wild_card_in_the_nest() { // ignore whatever is inside the Some (ignore it as long as it is Some) let mut setting_value = Some(5); let new_setting_value = Some(10); match (setting_value, new_setting_value) { (Some(_), Some(_)) => { println!("Can't overwrite an existing customized value"); } _ => { setting_value = new_setting_value; } } println!("setting_value: {:?}", setting_value); } // the _x form still takes ownership pub fn difference_between_unused_value_and_wild_card() { let s = Some(String::from("Hello?")); if let Some(_) = s { println!("found a string"); } println!("the string: {:?}", s); // the value inside the Some has a Drop trait; it gets moved if let Some(_s) = s { println!("found a string"); } // no longer usable // println!("the string: {:?}", s); } #[allow(dead_code)] struct Point3D<T: std::fmt::Display> { x: T, y: T, z: T, } // Ignore part of a structure #[allow(unreachable_patterns)] pub fn 
ignore_a_range_of_structure() { let p = Point3D::<f64> { x: 0.3, y: 6.45, z: -23.0, }; match p { // a way of saying we only care about x // even so, it is as if we said anything goes // nothing is left uncovered, so it compiles Point3D { x, .. } => { println!("x is {}", x); } // incidentally, writing an arm with seemingly similar coverage also seems fine // if this one is written first, this one runs Point3D { x, y, .. } => { println!("x, y is {}, {}", x, y); } } } pub fn ignore_a_range_of_tuple_and_list() { let numbers = (2, 4, 8, 16, 32); match numbers { // .. is the notation for eliding the rest // you cannot write something like the following, where it is unclear which part is the rest // (.., second, ..) => { // println!("second is...: {}", second); // } // do this instead (_, second, ..) => { println!("second is...: {}", second); } } } enum Msg { Hello { id: i32 }, } // @ bindings // test whether a value matches a pattern while creating a variable that holds it #[allow(unreachable_patterns)] pub fn at_binding() { let msg = Msg::Hello { id: 5 }; match msg { // constrain the value, creating a variable guaranteed to hold a value in that range Msg::Hello { id: id_val @ 3..=7 } => { println!("Found an id in range: {}", id_val); } // no variable introduced Msg::Hello { id: 5 } => { println!("Found an id: 5"); } Msg::Hello { id } => { println!("Found some other id: {}", id); } } }
identifier_name
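The row above centers on how a match arm's pattern can shadow an outer variable, while a match guard compares against it. A minimal, self-contained sketch of that distinction (the function and values are hypothetical, not taken from the dataset row):

fn describe(x: Option<i32>, y: i32) -> String {
    match x {
        // The guard `n == y` is an expression, so `y` here is the caller's `y`.
        Some(n) if n == y => format!("matched outer y = {}", y),
        // This `y` is a fresh binding that shadows the outer `y` inside the arm.
        Some(y) => format!("shadowed y = {}", y),
        None => "no value".to_string(),
    }
}

fn main() {
    assert_eq!(describe(Some(10), 10), "matched outer y = 10");
    assert_eq!(describe(Some(5), 10), "shadowed y = 5");
}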
lib.rs
// Summary
// - Patterns are used all over the place
// - Make sure every possible case is covered
//
// Something similar may land in Python around 3.10 as well
// (Rust's subprocess story was influenced by Python; the two seem to inspire each other)
//
//
// This chapter reads like a reference, enumerating the various patterns and how to match them

// First, a light review of chapter 6, plus a little extra
enum Coin {
    Penny,
    Nickel,
    Dime,
    Quarter,
}

pub fn simple_matching() {
    let coins = vec![Coin::Penny, Coin::Nickel, Coin::Dime, Coin::Quarter];
    for coin in coins.iter() {
        println!(
            "matched coin is {}",
            match coin {
                Coin::Penny => {
                    println!("Lucky penny!");
                    1
                }
                Coin::Nickel => 5,
                Coin::Dime => 10,
                Coin::Quarter => 25,
            }
        );
    }
    // Lucky penny!
    // matched coin is 1
    // matched coin is 5
    // matched coin is 10
    // matched coin is 25
}

pub fn all_patterns_must_be_covered() {
    let a = 3;
    let b = match a {
        1 => 1,
        2 => 2,
        3 => 3,
        _ => 0,
        // Must be covered with the _ pattern
        // Otherwise you get the compile error below:
        // patterns `std::i32::MIN..=0i32` and `4i32..=std::i32::MAX` not covered
    };
    println!("matched value is: {}", b);
}

// if-let does not shine in this example, but it is in the toolbox too
// Use it when expressing the same thing with match would be verbose
pub fn if_let_notation() {
    let a = 3;
    if let 1 = a {
        println!("matched value is: 1");
    } else if let 2 = a {
        println!("matched value is: 2");
    } else if let 3 = a {
        println!("matched value is: 3");
    } else {
        println!("matched value is: others");
    }
}

pub fn all_expressions_must_return_the_same_typed_value() {
    let a = 3;
    println!(
        "matched value is: {}",
        match a {
            1 => "one",
            2 => "two",
            // 3 => 3,
            // expected '&str', found integer
            // Arms cannot return values of different types
            // Which type is expected depends on the type returned by the first arm
            _ => "others",
        }
    );
    // matched value is: others
}

// From here on, the real content of chapter 18
// A flow where if and if-let are mixed together
// A program that decides the background color from a favorite color and an age
// It felt convoluted; I would rather not use this style much
pub fn mixed_if_statements() {
    let favorite_color: Option<&str> = None;
    let is_tuesday = false;
    let age: Result<u8, _> = "34".parse();

    if let Some(color) = favorite_color {
        println!("Using your favorite color, {}, as the background", color);
    } else if is_tuesday {
        println!("Tuesday is green day");
    } else if let Ok(age) = age {
        if age > 30 {
            println!("Using purple as the background color");
        } else {
            println!("Using orange as the background color");
        }
    } else {
        println!("Using blue as the background color");
    }
    // Using purple as the background color
}

// while can take patterns too
// An example that pops as long as something is there
pub fn pop
for (i, v) in v.iter().enumerate() {
        println!("{} is at index {}", v, i);
    }
    // a is at index 0
    // b is at index 1
    // c is at index 2
}

// The everyday let statement actually uses patterns too
pub fn let_statement_uses_pattern() {
    // let PATTERN = EXPRESSION;
    // A pattern meaning: bind whatever matches here to the variable _x
    // It says anything goes
    let _x = 5;

    // mismatched type
    // expected a tuple with 3 elements, found one with 2 elements
    // let (_x, _y) = (1, 2, 3);

    // This way _x and _y alone can be bound (the pattern matches)
    let (_x, _y, _) = (1, 2, 3);
}

// The hard part: refutability
// Searching Eijiro for "refutation" (ronbaku) redirects you to "rebuttal" (hanron)
// Patterns come in two forms: refutable and irrefutable
#[allow(irrefutable_let_patterns)]
pub fn various_refutables() {
    // Refutable patterns
    // For a value with several possibilities, the pattern can fail to match
    let some_option_value = Some(3);
    // Not writing control flow that accounts for a failed match is a compile error
    // Handle the possibility of None, as below
    if let Some(x) = some_option_value {
        println!("some option value is: {}", x);
    }

    // Irrefutable patterns
    // They match absolutely anything
    // A warning is emitted:
    // irrefutable if-let pattern
    if let _x = 5 {
        println!("x matches 5");
    }

    // This one is refutable
    // No warning is emitted
    // 5 is not necessarily equal to the value bound to x
    let x = 5;
    if let 5 = x {
        println!("5 matches x");
    }
}

// A match expression with an arm that introduces a new y
#[allow(unreachable_patterns)]
pub fn match_arm_begins_new_scope() {
    let x = Some(5);
    let y = 10;

    match x {
        Some(5) => println!("Got 50"),
        // A new variable y appears here
        // It is distinct from the y declared earlier
        // This arm matches whatever value Some(x) holds, so this branch runs
        // To compare against the outer y, you have to use a match guard
        Some(y) => println!("Matched, y = {:?}", y),
        // Match guard
        // A guard is not a pattern, so it introduces no new variable
        Some(n) if n == y => println!("Matched, n = {:?}", n),
        _ => println!("Default case, x = {:?}", x),
    }
}

// Matching multiple patterns at once
pub fn multiple_match() {
    let x = 1;
    match x {
        1 | 2 => println!("one or two"),
        3 => println!("three"),
        _ => println!("anything"),
    }
}

// ..= matches an inclusive range of values
pub fn range_match() {
    let x = 5;
    match x {
        1..=5 => println!("one through five"),
        _ => println!("something else"),
    }
    // one through five

    let x = 'k';
    match x {
        'a'..='j' => println!("early ASCII letter"),
        'k'..='z' => println!("late ASCII letter"),
        _ => println!("something else"),
    }
    // late ASCII letter
}

struct Point2D {
    x: i32,
    y: i32,
}

pub fn decomposition_and_matching_of_struct_may_be_tricky() {
    let p = Point2D { x: 0, y: 7 };
    // Pull p's fields out into the variables x and y
    let Point2D { x, y } = p;
    println!("x of Point is: {}", x); // 0
    println!("y of Point is: {}", y); // 7

    // Branch on the fields with a match expression
    match p {
        // On the x axis?
        Point2D { x, y: 0 } => println!("On the x axis at: {}", x),
        // On the y axis?
        Point2D { x: 0, y } => println!("On the y axis at: {}", y), // 7
        // Anywhere else?
        Point2D { x, y } => println!("On neither axis: ({}, {})", x, y),
    }
}

enum Color {
    Rgb(i32, i32, i32),
    Hsv(i32, i32, i32),
}

enum Message {
    Quit,
    Move { x: i32, y: i32 },
    Write(String),
    ChangeColor(Color),
}

pub fn destructure_enum() {
    let msgs = vec![
        Message::Quit,
        Message::Move { x: 3, y: 4 },
        Message::Write("Hello!".to_string()),
        Message::ChangeColor(Color::Rgb(0, 255, 128)),
        Message::ChangeColor(Color::Hsv(0, 0, 0)),
    ];

    for msg in msgs.iter() {
        match msg {
            Message::Quit => {
                println!("The Quit variant has no data to destructure.");
            }
            Message::Move { x, y } => {
                println!("Move in the x direction {} and in the y direction {}", x, y);
            }
            Message::Write(text) => {
                println!("Text message: {}", text);
            }
            Message::ChangeColor(Color::Rgb(r, g, b)) => {
                println!("Change the color to red {}, green {}, blue {}", r, g, b);
            }
            Message::ChangeColor(Color::Hsv(h, s, v)) => {
                println!("Change the color to h {}, s {}, v {}", h, s, v);
            }
        }
    }
}

pub fn destructure_nested_structures() {
    let ((feet, inches), Point2D { x, y }) = ((3, 10), Point2D { x: 3, y: -10 });
    let vs = [("feet", feet),
("inches", inches), ("p.x", x), ("p.y", y)]; for v in vs.iter() { println!("{:?}: {}", v.0, v.1); } } pub fn wild_card_in_the_nest() { // Someの中身がなんであれ無視(Someであれば無視) let mut setting_value = Some(5); let new_setting_value = Some(10); match (setting_value, new_setting_value) { (Some(_), Some(_)) => { println!("Can't overwrite an existing costomized value"); } _ => { setting_value = new_setting_value; } } println!("setting_value: {:?}", setting_value); } // _x記法は所有権を奪う pub fn difference_between_unused_value_and_wild_card() { let s = Some(String::from("Hello?")); if let Some(_) = s { println!("found a string"); } println!("the string: {:?}", s); // Someの中身はDropトレイトあり.Moveされる if let Some(_s) = s { println!("found a string"); } // 使用不可 // println!("the string: {:?}", s); } #[allow(dead_code)] struct Point3D<T: std::fmt::Display> { x: T, y: T, z: T, } // ある範囲の部分を無視する #[allow(unreachable_patterns)] pub fn ignore_a_range_of_structure() { let p = Point3D::<f64> { x: 0.3, y: 6.45, z: -23.0, }; match p { // xのみ興味がある書き方 // それでも,なんでもよいと言っているようなもの // カバー漏れはないためコンパイル可能 Point3D { x,.. } => { println!("x is {}", x); } // ちなみに,同じような守備範囲に思えるものを書いてもよさげ // こちらを先にかくと,こちらが実行される Point3D { x, y,.. } => { println!("x, y is {}, {}", x, y); } } } pub fn ignore_a_range_of_tuple_and_list() { let numbers = (2, 4, 8, 16, 32); match numbers { //..は残りを省略するための書き方 // 下のような,どこまでが残りなのかわからないような書き方はできない // (.., second,..) => { // println!("second is...: {}", second); // } // こうしよう (_, second,..) => { println!("second is...: {}", second); } } } enum Msg { Hello { id: i32 }, } // @バインディング // 値を保持する変数の生成と同時にパターンに一致するか調べる #[allow(unreachable_patterns)] pub fn at_binding() { let msg = Msg::Hello { id: 5 }; match msg { // 値を制限し,その範囲の値が含まれていることが保証される変数を生成 Msg::Hello { id: id_val @ 3..=7 } => { println!("Found an id in range: {}", id_val); } // 変数の導入なし Msg::Hello { id: 5 } => { println!("Found an id: 5"); } Msg::Hello { id } => { println!("Found some other id: {}", id); } } }
_as_long_as_vector_has_values() {
    let mut stack = Vec::new();
    stack.push(1);
    stack.push(2);
    stack.push(3);
    while let Some(top) = stack.pop() {
        println!("vector has {} on the top", top);
    }
    // vector has 3 on the top
    // vector has 2 on the top
    // vector has 1 on the top
}

// for loops use patterns as well
// In `for x in y`, x is the pattern
// Here we show destructuring into a tuple
pub fn for_statement_can_use_patterns() {
    let v = vec!['a', 'b', 'c'];
identifier_body
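Each record in this dump is one fill-in-the-middle split of a source file: a prefix, the removed middle span, a suffix, and a fim_type describing how the split was chosen. A hypothetical sketch of how such a row reassembles (the struct and field names are assumed for illustration, not part of the dump format):

struct FimRow {
    prefix: String,
    middle: String,
    suffix: String,
}

fn reconstruct(row: &FimRow) -> String {
    // The original file is always prefix, then middle, then suffix.
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    // Mirrors the identifier split in the row above.
    let row = FimRow {
        prefix: "pub fn pop".to_string(),
        middle: "_as_long_as_vector_has_values".to_string(),
        suffix: "() {}\n".to_string(),
    };
    assert_eq!(reconstruct(&row), "pub fn pop_as_long_as_vector_has_values() {}\n");
}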
lib.rs
// Summary
// - Patterns are used all over the place
// - Make sure every possible case is covered
//
// Something similar may land in Python around 3.10 as well
// (Rust's subprocess story was influenced by Python; the two seem to inspire each other)
//
//
// This chapter reads like a reference, enumerating the various patterns and how to match them

// First, a light review of chapter 6, plus a little extra
enum Coin {
    Penny,
    Nickel,
    Dime,
    Quarter,
}

pub fn simple_matching() {
    let coins = vec![Coin::Penny, Coin::Nickel, Coin::Dime, Coin::Quarter];
    for coin in coins.iter() {
        println!(
            "matched coin is {}",
            match coin {
                Coin::Penny => {
                    println!("Lucky penny!");
                    1
                }
                Coin::Nickel => 5,
                Coin::Dime => 10,
                Coin::Quarter => 25,
            }
        );
    }
    // Lucky penny!
    // matched coin is 1
    // matched coin is 5
    // matched coin is 10
    // matched coin is 25
}

pub fn all_patterns_must_be_covered() {
    let a = 3;
    let b = match a {
        1 => 1,
        2 => 2,
        3 => 3,
        _ => 0,
        // Must be covered with the _ pattern
        // Otherwise you get the compile error below:
        // patterns `std::i32::MIN..=0i32` and `4i32..=std::i32::MAX` not covered
    };
    println!("matched value is: {}", b);
}

// if-let does not shine in this example, but it is in the toolbox too
// Use it when expressing the same thing with match would be verbose
pub fn if_let_notation() {
    let a = 3;
    if let 1 = a {
        println!("matched value is: 1");
    } else if let 2 = a {
        println!("matched value is: 2");
    } else if let 3 = a {
        println!("matched value is: 3");
    } else {
        println!("matched value is: others");
    }
}

pub fn all_expressions_must_return_the_same_typed_value() {
    let a = 3;
    println!(
        "matched value is: {}",
        match a {
            1 => "one",
            2 => "two",
            // 3 => 3,
            // expected '&str', found integer
            // Arms cannot return values of different types
            // Which type is expected depends on the type returned by the first arm
            _ => "others",
        }
    );
    // matched value is: others
}

// From here on, the real content of chapter 18
// A flow where if and if-let are mixed together
// A program that decides the background color from a favorite color and an age
// It felt convoluted; I would rather not use this style much
pub fn mixed_if_statements() {
    let favorite_color: Option<&str> = None;
    let is_tuesday = false;
    let age: Result<u8, _> = "34".parse();

    if let Some(color) = favorite_color {
        println!("Using your favorite color, {}, as the background", color);
    } else if is_tuesday {
        println!("Tuesday is green day");
    } else if let Ok(age) = age {
        if age > 30 {
            println!("Using purple as the background color");
        } else {
            println!("Using orange as the background color");
        }
    } else {
        println!("Using blue as the background color");
    }
    // Using purple as the background color
}

// while can take patterns too
// An example that pops as long as something is there
pub fn pop_as_long_as_vector_has_values() {
    let mut stack = Vec::new();
    stack.push(1);
    stack.push(2);
    stack.push(3);
    while let Some(top) = stack.pop() {
        println!("vector has {} on the top", top);
    }
    // vector has 3 on the top
    // vector has 2 on the top
    // vector has 1 on the top
}

// for loops use patterns as well
// In `for x in y`, x is the pattern
// Here we show destructuring into a tuple
pub fn for_statement_can_use_patterns() {
    let v = vec!['a', 'b', 'c'];
    for (i, v) in v.iter().enumerate() {
        println!("{} is at index {}", v, i);
    }
    // a is at index 0
    // b is at index 1
    // c is at index 2
}

// The everyday let statement actually uses patterns too
pub fn let_statement_uses_pattern() {
    // let PATTERN = EXPRESSION;
    // A pattern meaning: bind whatever matches here to the variable _x
    // It says anything goes
    let _x = 5;

    // mismatched type
    // expected a tuple with 3 elements, found one with 2 elements
    // let (_x, _y) = (1, 2, 3);

    // This way _x and _y alone can be bound (the pattern matches)
    let (_x, _y, _) = (1, 2, 3);
}

// The hard part: refutability
// Searching Eijiro for "refutation" (ronbaku) redirects you to "rebuttal" (hanron)
// Patterns come in two forms: refutable and irrefutable
#[allow(irrefutable_let_patterns)]
pub fn various_refutables() {
    // Refutable patterns
    // For a value with several possibilities, the pattern can fail to match
    let some_option_value = Some(3);
    // Not writing control flow that accounts for a failed match is a compile error
    // Handle the possibility of None, as below
    if let Some(x) = some_option_value {
        println!("some option value is: {}", x);
    }

    // Irrefutable patterns
    // They match absolutely anything
    // A warning is emitted:
    // irrefutable if-let pattern
    if
let _x = 5 {
        println!("x matches 5");
    }

    // This one is refutable
    // No warning is emitted
    // 5 is not necessarily equal to the value bound to x
    let x = 5;
    if let 5 = x {
        println!("5 matches x");
    }
}

// A match expression with an arm that introduces a new y
#[allow(unreachable_patterns)]
pub fn match_arm_begins_new_scope() {
    let x = Some(5);
    let y = 10;

    match x {
        Some(5) => println!("Got 50"),
        // A new variable y appears here
        // It is distinct from the y declared earlier
        // This arm matches whatever value Some(x) holds, so this branch runs
        // To compare against the outer y, you have to use a match guard
        Some(y) => println!("Matched, y = {:?}", y),
        // Match guard
        // A guard is not a pattern, so it introduces no new variable
        Some(n) if n == y => println!("Matched, n = {:?}", n),
        _ => println!("Default case, x = {:?}", x),
    }
}

// Matching multiple patterns at once
pub fn multiple_match() {
    let x = 1;
    match x {
        1 | 2 => println!("one or two"),
        3 => println!("three"),
        _ => println!("anything"),
    }
}

// ..= matches an inclusive range of values
pub fn range_match() {
    let x = 5;
    match x {
        1..=5 => println!("one through five"),
        _ => println!("something else"),
    }
    // one through five

    let x = 'k';
    match x {
        'a'..='j' => println!("early ASCII letter"),
        'k'..='z' => println!("late ASCII letter"),
        _ => println!("something else"),
    }
    // late ASCII letter
}

struct Point2D {
    x: i32,
    y: i32,
}

pub fn decomposition_and_matching_of_struct_may_be_tricky() {
    let p = Point2D { x: 0, y: 7 };
    // Pull p's fields out into the variables x and y
    let Point2D { x, y } = p;
    println!("x of Point is: {}", x); // 0
    println!("y of Point is: {}", y); // 7

    // Branch on the fields with a match expression
    match p {
        // On the x axis?
        Point2D { x, y: 0 } => println!("On the x axis at: {}", x),
        // On the y axis?
        Point2D { x: 0, y } => println!("On the y axis at: {}", y), // 7
        // Anywhere else?
        Point2D { x, y } => println!("On neither axis: ({}, {})", x, y),
    }
}

enum Color {
    Rgb(i32, i32, i32),
    Hsv(i32, i32, i32),
}

enum Message {
    Quit,
    Move { x: i32, y: i32 },
    Write(String),
    ChangeColor(Color),
}

pub fn destructure_enum() {
Message::Quit,
        Message::Move { x: 3, y: 4 },
        Message::Write("Hello!".to_string()),
        Message::ChangeColor(Color::Rgb(0, 255, 128)),
        Message::ChangeColor(Color::Hsv(0, 0, 0)),
    ];

    for msg in msgs.iter() {
        match msg {
            Message::Quit => {
                println!("The Quit variant has no data to destructure.");
            }
            Message::Move { x, y } => {
                println!("Move in the x direction {} and in the y direction {}", x, y);
            }
            Message::Write(text) => {
                println!("Text message: {}", text);
            }
            Message::ChangeColor(Color::Rgb(r, g, b)) => {
                println!("Change the color to red {}, green {}, blue {}", r, g, b);
            }
            Message::ChangeColor(Color::Hsv(h, s, v)) => {
                println!("Change the color to h {}, s {}, v {}", h, s, v);
            }
        }
    }
}

pub fn destructure_nested_structures() {
    let ((feet, inches), Point2D { x, y }) = ((3, 10), Point2D { x: 3, y: -10 });
    let vs = [("feet", feet), ("inches", inches), ("p.x", x), ("p.y", y)];
    for v in vs.iter() {
        println!("{:?}: {}", v.0, v.1);
    }
}

pub fn wild_card_in_the_nest() {
    // Ignore whatever is inside the Some (ignore as long as it is Some)
    let mut setting_value = Some(5);
    let new_setting_value = Some(10);
    match (setting_value, new_setting_value) {
        (Some(_), Some(_)) => {
            println!("Can't overwrite an existing customized value");
        }
        _ => {
            setting_value = new_setting_value;
        }
    }
    println!("setting_value: {:?}", setting_value);
}

// The _x form still takes ownership
pub fn difference_between_unused_value_and_wild_card() {
    let s = Some(String::from("Hello?"));
    if let Some(_) = s {
        println!("found a string");
    }
    println!("the string: {:?}", s);

    // The String inside the Some implements Drop, so it gets moved here
    if let Some(_s) = s {
        println!("found a string");
    }
    // No longer usable:
    // println!("the string: {:?}", s);
}

#[allow(dead_code)]
struct Point3D<T: std::fmt::Display> {
    x: T,
    y: T,
    z: T,
}

// Ignoring a range of a value's parts
#[allow(unreachable_patterns)]
pub fn ignore_a_range_of_structure() {
    let p = Point3D::<f64> {
        x: 0.3,
        y: 6.45,
        z: -23.0,
    };
    match p {
        // A way of writing that we only care about x
        // Even so, it effectively says anything goes
        // Nothing is left uncovered, so this compiles
        Point3D { x, .. } => {
            println!("x is {}", x);
        }
        // Incidentally, writing an arm with seemingly the same coverage is also fine
        // If this arm were written first, it would be the one that runs
        Point3D { x, y, .. } => {
            println!("x, y is {}, {}", x, y);
        }
    }
}

pub fn ignore_a_range_of_tuple_and_list() {
    let numbers = (2, 4, 8, 16, 32);
    match numbers {
        // .. elides the rest of the values
        // You cannot write it ambiguously, where it is unclear which part is "the rest":
        // (.., second, ..) => {
        //     println!("second is...: {}", second);
        // }
        // Do this instead
        (_, second, ..) => {
            println!("second is...: {}", second);
        }
    }
}

enum Msg {
    Hello { id: i32 },
}

// @ bindings
// Create a variable holding the value while testing it against a pattern at the same time
#[allow(unreachable_patterns)]
pub fn at_binding() {
    let msg = Msg::Hello { id: 5 };
    match msg {
        // Constrain the value, creating a variable guaranteed to hold a value in that range
        Msg::Hello { id: id_val @ 3..=7 } => {
            println!("Found an id in range: {}", id_val);
        }
        // No variable introduced
        Msg::Hello { id: 5 } => {
            println!("Found an id: 5");
        }
        Msg::Hello { id } => {
            println!("Found some other id: {}", id);
        }
    }
}
let msgs = vec![
random_line_split
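The refutability discussion in the entry above can be condensed: `let` demands an irrefutable pattern, so a refutable `Some(x)` must go through `if let`, or through `let ... else` on newer toolchains (stable since Rust 1.65). A short sketch:

fn main() {
    let some_option_value: Option<i32> = Some(3);

    // let Some(x) = some_option_value;
    // error[E0005]: refutable pattern in local binding

    // if let handles the refuted (None) case by skipping the block.
    if let Some(x) = some_option_value {
        println!("got {}", x);
    }

    // let-else diverges on the refuted case instead of nesting.
    let Some(x) = some_option_value else {
        return;
    };
    println!("still got {}", x);
}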
tools.rs
//! Download management for external tools and applications. Locate and automatically download
//! applications (if needed) to use them in the build pipeline.

use std::path::{Path, PathBuf};

use anyhow::{anyhow, bail, ensure, Context, Result};
use async_compression::tokio::bufread::GzipDecoder;
use directories_next::ProjectDirs;
use futures::prelude::*;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeekExt, AsyncWriteExt, BufReader, SeekFrom};
use tokio::process::Command;
use tokio_tar::{Archive, Entry};

use crate::common::is_executable;

/// The application to locate and eventually download when calling [`get`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Application {
    /// wasm-bindgen for generating the JS bindings.
    WasmBindgen,
    /// wasm-opt to improve performance and size of the output file further.
    WasmOpt,
}

impl Application {
    /// Base name of the executable without extension.
    pub(crate) fn name(&self) -> &str {
        match self {
            Self::WasmBindgen => "wasm-bindgen",
            Self::WasmOpt => "wasm-opt",
        }
    }

    /// Path of the executable within the downloaded archive.
    fn path(&self) -> &str {
        if cfg!(windows) {
            match self {
                Self::WasmBindgen => "wasm-bindgen.exe",
                Self::WasmOpt => "bin/wasm-opt.exe",
            }
        } else {
            match self {
                Self::WasmBindgen => "wasm-bindgen",
                Self::WasmOpt => "bin/wasm-opt",
            }
        }
    }

    /// Additional files included in the archive that are required to run the main binary.
    fn extra_paths(&self) -> &[&str] {
        if cfg!(target_os = "macos") && *self == Self::WasmOpt {
            &["lib/libbinaryen.dylib"]
        } else {
            &[]
        }
    }

    /// Default version to use if not set by the user.
    fn default_version(&self) -> &str {
        match self {
            Self::WasmBindgen => "0.2.74",
            Self::WasmOpt => "version_101",
        }
    }

    /// Target for the current OS as part of the download URL. Can fail as there might be no release
    /// for the current platform.
    fn target(&self) -> Result<&str> {
        Ok(match self {
            Self::WasmBindgen => {
                if cfg!(target_os = "windows") {
                    "pc-windows-msvc"
                } else if cfg!(target_os = "macos") {
                    "apple-darwin"
                } else if cfg!(target_os = "linux") {
                    "unknown-linux-musl"
                } else {
                    bail!("unsupported OS")
                }
            }
            Self::WasmOpt => {
                if cfg!(target_os = "windows") {
                    "windows"
                } else if cfg!(target_os = "macos") {
                    "macos"
                } else if cfg!(target_os = "linux") {
                    "linux"
                } else {
                    bail!("unsupported OS")
                }
            }
        })
    }

    /// Direct URL to the release of an application for download.
    fn url(&self, version: &str) -> Result<String> {
        Ok(match self {
            Self::WasmBindgen => format!(
                "https://github.com/rustwasm/wasm-bindgen/releases/download/{version}/wasm-bindgen-{version}-x86_64-{target}.tar.gz",
                version = version,
                target = self.target()?
            ),
            Self::WasmOpt => format!(
                "https://github.com/WebAssembly/binaryen/releases/download/{version}/binaryen-{version}-x86_64-{target}.tar.gz",
                version = version,
                target = self.target()?,
            ),
        })
    }

    /// The CLI subcommand, flag or option used to check the application's version.
    fn version_test(&self) -> &'static str {
        match self {
            Application::WasmBindgen => "--version",
            Application::WasmOpt => "--version",
        }
    }

    /// Format the output of version checking the app.
    fn format_version_output(&self, text: &str) -> Result<String> {
.nth(1)
                .with_context(|| format!("missing or malformed version output: {}", text))?
                .to_owned(),
            Application::WasmOpt => format!(
                "version_{}",
                text.split(' ')
                    .nth(2)
                    .with_context(|| format!("missing or malformed version output: {}", text))?
            ),
        };
        Ok(formatted_version)
    }
}

/// Locate the given application and download it if missing.
#[tracing::instrument(level = "trace")]
pub async fn get(app: Application, version: Option<&str>) -> Result<PathBuf> {
    let version = version.unwrap_or_else(|| app.default_version());

    if let Some(path) = find_system(app, version).await {
        tracing::info!(app = app.name(), version = version, "using system installed binary");
        return Ok(path);
    }

    let cache_dir = cache_dir().await?;
    let app_dir = cache_dir.join(format!("{}-{}", app.name(), version));
    let bin_path = app_dir.join(app.path());
    if !is_executable(&bin_path).await? {
        let path = download(app, version)
            .await
            .context("failed downloading release archive")?;

        let mut file = File::open(&path).await.context("failed opening downloaded file")?;
        install(app, &mut file, &app_dir).await?;
        tokio::fs::remove_file(path)
            .await
            .context("failed deleting temporary archive")?;
    }

    Ok(bin_path)
}

/// Try to find a system-wide installation of the application and ensure it matches the needed
/// release version.
#[tracing::instrument(level = "trace")]
async fn find_system(app: Application, version: &str) -> Option<PathBuf> {
    let result = || async {
        let path = which::which(app.name())?;
        let output = Command::new(&path).arg(app.version_test()).output().await?;
        ensure!(
            output.status.success(),
            "running command `{} {}` failed",
            path.display(),
            app.version_test()
        );
        let text = String::from_utf8_lossy(&output.stdout);
        let system_version = app.format_version_output(&text)?;
        Ok((path, system_version))
    };

    match result().await {
        Ok((path, system_version)) => (system_version == version).then(|| path),
        Err(e) => {
            tracing::debug!("system version not found for {}: {}", app.name(), e);
            None
        }
    }
}

/// Download a file from its remote location in the given version, extract it and make it ready for
/// execution at the given location.
#[tracing::instrument(level = "trace")]
async fn download(app: Application, version: &str) -> Result<PathBuf> {
    tracing::info!(version = version, "downloading {}", app.name());

    let cache_dir = cache_dir().await.context("failed getting the cache directory")?;
    let temp_out = cache_dir.join(format!("{}-{}.tmp", app.name(), version));
    let mut file = File::create(&temp_out)
        .await
        .context("failed creating temporary output file")?;

    let resp = reqwest::get(app.url(version)?)
        .await
        .context("error sending HTTP request")?;
    ensure!(
        resp.status().is_success(),
        "error downloading archive file: {:?}\n{}",
        resp.status(),
        app.url(version)?
    );

    let mut res_bytes = resp.bytes_stream();
    while let Some(chunk_res) = res_bytes.next().await {
        let chunk = chunk_res.context("error reading chunk from download")?;
        let _res = file.write(chunk.as_ref()).await;
    }

    Ok(temp_out)
}

/// Install an application from a downloaded archive, locating and copying it to the given target
/// location.
#[tracing::instrument(level = "trace")] async fn install(app: Application, archive_file: &mut File, target: &Path) -> Result<()> { tracing::info!("installing {}", app.name()); let mut archive = Archive::new(GzipDecoder::new(BufReader::new(archive_file))); let mut file = extract_file(&mut archive, target, Path::new(app.path())).await?; set_executable_flag(&mut file).await?; for path in app.extra_paths() { // Archive must be opened for each entry as tar files don't allow jumping forth and back. let mut archive_file = archive .into_inner() .map_err(|_| anyhow!("error seeking app archive"))? .into_inner(); archive_file .seek(SeekFrom::Start(0)) .await .context("error seeking to beginning of archive")?; archive = Archive::new(GzipDecoder::new(archive_file)); extract_file(&mut archive, target, Path::new(path)).await?; } Ok(()) } /// Extract a single file from the given archive and put it into the target location. async fn extract_file<R>(archive: &mut Archive<R>, target: &Path, file: &Path) -> Result<File> where R: AsyncRead + Unpin + Send + Sync, { let mut tar_file = find_tar_entry(archive, file).await?.context("file not found in archive")?; let out = target.join(file); if let Some(parent) = out.parent() { tokio::fs::create_dir_all(parent) .await .context("failed creating output directory")?; } let mut out = File::create(target.join(file)) .await .context("failed creating output file")?; tokio::io::copy(&mut tar_file, &mut out) .await .context("failed copying over final output file from archive")?; Ok(out) } /// Locate the cache dir for trunk and make sure it exists. pub async fn cache_dir() -> Result<PathBuf> { let path = ProjectDirs::from("dev", "trunkrs", "trunk") .context("failed finding project directory")? .cache_dir() .to_owned(); tokio::fs::create_dir_all(&path) .await .context("failed creating cache directory")?; Ok(path) } /// Set the executable flag for a file. Only has an effect on UNIX platforms. async fn set_executable_flag(file: &mut File) -> Result<()> { #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mut perms = file.metadata().await.context("failed getting metadata")?.permissions(); perms.set_mode(perms.mode() | 0o100); file.set_permissions(perms) .await .context("failed setting the executable flag")?; } Ok(()) } /// Find an entry in a TAR archive by name and open it for reading. The first part of the path is /// dropped as that's usually the folder name it was created from. 
async fn find_tar_entry<R>(archive: &mut Archive<R>, path: impl AsRef<Path>) -> Result<Option<Entry<Archive<R>>>>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    let mut entries = archive.entries().context("failed getting archive entries")?;
    while let Some(entry) = entries.next().await {
        let entry = entry.context("error while getting archive entry")?;
        let name = entry.path().context("invalid entry path")?;

        let mut name = name.components();
        name.next();

        if name.as_path() == path.as_ref() {
            return Ok(Some(entry));
        }
    }

    Ok(None)
}

#[cfg(test)]
mod tests {
    use super::*;

    use anyhow::{ensure, Context, Result};

    #[tokio::test]
    async fn download_and_install_binaries() -> Result<()> {
        let dir = tempfile::tempdir().context("error creating temporary dir")?;

        for &app in &[Application::WasmBindgen, Application::WasmOpt] {
            let path = download(app, app.default_version())
                .await
                .context("error downloading app")?;
            let mut file = File::open(&path).await.context("error opening file")?;
            install(app, &mut file, dir.path()).await.context("error installing app")?;
            std::fs::remove_file(path).context("error during cleanup")?;
        }

        Ok(())
    }

    macro_rules! table_test_format_version {
        ($name:ident, $app:expr, $input:literal, $expect:literal) => {
            #[test]
            fn $name() -> Result<()> {
                let app = $app;
                let output = app
                    .format_version_output($input)
                    .context("unexpected version formatting error")?;
                ensure!(output == $expect, "version check output does not match: {} != {}", $expect, output);
                Ok(())
            }
        };
    }

    table_test_format_version!(
        wasm_opt_from_source,
        Application::WasmOpt,
        "wasm-opt version 101 (version_101)",
        "version_101"
    );
    table_test_format_version!(wasm_opt_pre_compiled, Application::WasmOpt, "wasm-opt version 101", "version_101");
    table_test_format_version!(wasm_bindgen_from_source, Application::WasmBindgen, "wasm-bindgen 0.2.75", "0.2.75");
    table_test_format_version!(
        wasm_bindgen_pre_compiled,
        Application::WasmBindgen,
        "wasm-bindgen 0.2.74 (27c7a4d06)",
        "0.2.74"
    );
}
let text = text.trim(); let formatted_version = match self { Application::WasmBindgen => text .split(' ')
random_line_split
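The middle and suffix above recover a tool's version by position after splitting its `--version` output on spaces. A standalone sketch of the same parsing idea (simplified to plain functions returning Option, not the crate's actual API):

fn wasm_bindgen_version(output: &str) -> Option<String> {
    // "wasm-bindgen 0.2.74 (27c7a4d06)" -> token 1 is the version.
    output.trim().split(' ').nth(1).map(str::to_owned)
}

fn wasm_opt_version(output: &str) -> Option<String> {
    // "wasm-opt version 101" -> token 2, prefixed to match binaryen release tags.
    output.trim().split(' ').nth(2).map(|v| format!("version_{}", v))
}

fn main() {
    assert_eq!(wasm_bindgen_version("wasm-bindgen 0.2.74 (27c7a4d06)").as_deref(), Some("0.2.74"));
    assert_eq!(wasm_opt_version("wasm-opt version 101").as_deref(), Some("version_101"));
}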
tools.rs
//! Download management for external tools and applications. Locate and automatically download
//! applications (if needed) to use them in the build pipeline.

use std::path::{Path, PathBuf};

use anyhow::{anyhow, bail, ensure, Context, Result};
use async_compression::tokio::bufread::GzipDecoder;
use directories_next::ProjectDirs;
use futures::prelude::*;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeekExt, AsyncWriteExt, BufReader, SeekFrom};
use tokio::process::Command;
use tokio_tar::{Archive, Entry};

use crate::common::is_executable;

/// The application to locate and eventually download when calling [`get`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Application {
    /// wasm-bindgen for generating the JS bindings.
    WasmBindgen,
    /// wasm-opt to improve performance and size of the output file further.
    WasmOpt,
}

impl Application {
    /// Base name of the executable without extension.
    pub(crate) fn name(&self) -> &str {
        match self {
            Self::WasmBindgen => "wasm-bindgen",
            Self::WasmOpt => "wasm-opt",
        }
    }

    /// Path of the executable within the downloaded archive.
    fn path(&self) -> &str {
        if cfg!(windows) {
            match self {
                Self::WasmBindgen => "wasm-bindgen.exe",
                Self::WasmOpt => "bin/wasm-opt.exe",
            }
        } else {
            match self {
                Self::WasmBindgen => "wasm-bindgen",
                Self::WasmOpt => "bin/wasm-opt",
            }
        }
    }

    /// Additional files included in the archive that are required to run the main binary.
    fn extra_paths(&self) -> &[&str] {
        if cfg!(target_os = "macos") && *self == Self::WasmOpt {
            &["lib/libbinaryen.dylib"]
        } else {
            &[]
        }
    }

    /// Default version to use if not set by the user.
    fn default_version(&self) -> &str {
        match self {
            Self::WasmBindgen => "0.2.74",
            Self::WasmOpt => "version_101",
        }
    }

    /// Target for the current OS as part of the download URL. Can fail as there might be no release
    /// for the current platform.
    fn target(&self) -> Result<&str> {
        Ok(match self {
            Self::WasmBindgen => {
                if cfg!(target_os = "windows") {
                    "pc-windows-msvc"
                } else if cfg!(target_os = "macos") {
                    "apple-darwin"
                } else if cfg!(target_os = "linux") {
                    "unknown-linux-musl"
                } else {
                    bail!("unsupported OS")
                }
            }
            Self::WasmOpt => {
                if cfg!(target_os = "windows") {
                    "windows"
                } else if cfg!(target_os = "macos") {
                    "macos"
                } else if cfg!(target_os = "linux") {
                    "linux"
                } else {
                    bail!("unsupported OS")
                }
            }
        })
    }

    /// Direct URL to the release of an application for download.
    fn
(&self, version: &str) -> Result<String> {
        Ok(match self {
            Self::WasmBindgen => format!(
                "https://github.com/rustwasm/wasm-bindgen/releases/download/{version}/wasm-bindgen-{version}-x86_64-{target}.tar.gz",
                version = version,
                target = self.target()?
            ),
            Self::WasmOpt => format!(
                "https://github.com/WebAssembly/binaryen/releases/download/{version}/binaryen-{version}-x86_64-{target}.tar.gz",
                version = version,
                target = self.target()?,
            ),
        })
    }

    /// The CLI subcommand, flag or option used to check the application's version.
    fn version_test(&self) -> &'static str {
        match self {
            Application::WasmBindgen => "--version",
            Application::WasmOpt => "--version",
        }
    }

    /// Format the output of version checking the app.
    fn format_version_output(&self, text: &str) -> Result<String> {
        let text = text.trim();
        let formatted_version = match self {
            Application::WasmBindgen => text
                .split(' ')
                .nth(1)
                .with_context(|| format!("missing or malformed version output: {}", text))?
                .to_owned(),
            Application::WasmOpt => format!(
                "version_{}",
                text.split(' ')
                    .nth(2)
                    .with_context(|| format!("missing or malformed version output: {}", text))?
            ),
        };
        Ok(formatted_version)
    }
}

/// Locate the given application and download it if missing.
#[tracing::instrument(level = "trace")]
pub async fn get(app: Application, version: Option<&str>) -> Result<PathBuf> {
    let version = version.unwrap_or_else(|| app.default_version());

    if let Some(path) = find_system(app, version).await {
        tracing::info!(app = app.name(), version = version, "using system installed binary");
        return Ok(path);
    }

    let cache_dir = cache_dir().await?;
    let app_dir = cache_dir.join(format!("{}-{}", app.name(), version));
    let bin_path = app_dir.join(app.path());
    if !is_executable(&bin_path).await? {
        let path = download(app, version)
            .await
            .context("failed downloading release archive")?;

        let mut file = File::open(&path).await.context("failed opening downloaded file")?;
        install(app, &mut file, &app_dir).await?;
        tokio::fs::remove_file(path)
            .await
            .context("failed deleting temporary archive")?;
    }

    Ok(bin_path)
}

/// Try to find a system-wide installation of the application and ensure it matches the needed
/// release version.
#[tracing::instrument(level = "trace")]
async fn find_system(app: Application, version: &str) -> Option<PathBuf> {
    let result = || async {
        let path = which::which(app.name())?;
        let output = Command::new(&path).arg(app.version_test()).output().await?;
        ensure!(
            output.status.success(),
            "running command `{} {}` failed",
            path.display(),
            app.version_test()
        );
        let text = String::from_utf8_lossy(&output.stdout);
        let system_version = app.format_version_output(&text)?;
        Ok((path, system_version))
    };

    match result().await {
        Ok((path, system_version)) => (system_version == version).then(|| path),
        Err(e) => {
            tracing::debug!("system version not found for {}: {}", app.name(), e);
            None
        }
    }
}

/// Download a file from its remote location in the given version, extract it and make it ready for
/// execution at the given location.
#[tracing::instrument(level = "trace")]
async fn download(app: Application, version: &str) -> Result<PathBuf> {
    tracing::info!(version = version, "downloading {}", app.name());

    let cache_dir = cache_dir().await.context("failed getting the cache directory")?;
    let temp_out = cache_dir.join(format!("{}-{}.tmp", app.name(), version));
    let mut file = File::create(&temp_out)
        .await
        .context("failed creating temporary output file")?;

    let resp = reqwest::get(app.url(version)?)
.await .context("error sending HTTP request")?; ensure!( resp.status().is_success(), "error downloading archive file: {:?}\n{}", resp.status(), app.url(version)? ); let mut res_bytes = resp.bytes_stream(); while let Some(chunk_res) = res_bytes.next().await { let chunk = chunk_res.context("error reading chunk from download")?; let _res = file.write(chunk.as_ref()).await; } Ok(temp_out) } /// Install an application from a downloaded archive locating and copying it to the given target /// location. #[tracing::instrument(level = "trace")] async fn install(app: Application, archive_file: &mut File, target: &Path) -> Result<()> { tracing::info!("installing {}", app.name()); let mut archive = Archive::new(GzipDecoder::new(BufReader::new(archive_file))); let mut file = extract_file(&mut archive, target, Path::new(app.path())).await?; set_executable_flag(&mut file).await?; for path in app.extra_paths() { // Archive must be opened for each entry as tar files don't allow jumping forth and back. let mut archive_file = archive .into_inner() .map_err(|_| anyhow!("error seeking app archive"))? .into_inner(); archive_file .seek(SeekFrom::Start(0)) .await .context("error seeking to beginning of archive")?; archive = Archive::new(GzipDecoder::new(archive_file)); extract_file(&mut archive, target, Path::new(path)).await?; } Ok(()) } /// Extract a single file from the given archive and put it into the target location. async fn extract_file<R>(archive: &mut Archive<R>, target: &Path, file: &Path) -> Result<File> where R: AsyncRead + Unpin + Send + Sync, { let mut tar_file = find_tar_entry(archive, file).await?.context("file not found in archive")?; let out = target.join(file); if let Some(parent) = out.parent() { tokio::fs::create_dir_all(parent) .await .context("failed creating output directory")?; } let mut out = File::create(target.join(file)) .await .context("failed creating output file")?; tokio::io::copy(&mut tar_file, &mut out) .await .context("failed copying over final output file from archive")?; Ok(out) } /// Locate the cache dir for trunk and make sure it exists. pub async fn cache_dir() -> Result<PathBuf> { let path = ProjectDirs::from("dev", "trunkrs", "trunk") .context("failed finding project directory")? .cache_dir() .to_owned(); tokio::fs::create_dir_all(&path) .await .context("failed creating cache directory")?; Ok(path) } /// Set the executable flag for a file. Only has an effect on UNIX platforms. async fn set_executable_flag(file: &mut File) -> Result<()> { #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mut perms = file.metadata().await.context("failed getting metadata")?.permissions(); perms.set_mode(perms.mode() | 0o100); file.set_permissions(perms) .await .context("failed setting the executable flag")?; } Ok(()) } /// Find an entry in a TAR archive by name and open it for reading. The first part of the path is /// dropped as that's usually the folder name it was created from. 
async fn find_tar_entry<R>(archive: &mut Archive<R>, path: impl AsRef<Path>) -> Result<Option<Entry<Archive<R>>>>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    let mut entries = archive.entries().context("failed getting archive entries")?;
    while let Some(entry) = entries.next().await {
        let entry = entry.context("error while getting archive entry")?;
        let name = entry.path().context("invalid entry path")?;

        let mut name = name.components();
        name.next();

        if name.as_path() == path.as_ref() {
            return Ok(Some(entry));
        }
    }

    Ok(None)
}

#[cfg(test)]
mod tests {
    use super::*;

    use anyhow::{ensure, Context, Result};

    #[tokio::test]
    async fn download_and_install_binaries() -> Result<()> {
        let dir = tempfile::tempdir().context("error creating temporary dir")?;

        for &app in &[Application::WasmBindgen, Application::WasmOpt] {
            let path = download(app, app.default_version())
                .await
                .context("error downloading app")?;
            let mut file = File::open(&path).await.context("error opening file")?;
            install(app, &mut file, dir.path()).await.context("error installing app")?;
            std::fs::remove_file(path).context("error during cleanup")?;
        }

        Ok(())
    }

    macro_rules! table_test_format_version {
        ($name:ident, $app:expr, $input:literal, $expect:literal) => {
            #[test]
            fn $name() -> Result<()> {
                let app = $app;
                let output = app
                    .format_version_output($input)
                    .context("unexpected version formatting error")?;
                ensure!(output == $expect, "version check output does not match: {} != {}", $expect, output);
                Ok(())
            }
        };
    }

    table_test_format_version!(
        wasm_opt_from_source,
        Application::WasmOpt,
        "wasm-opt version 101 (version_101)",
        "version_101"
    );
    table_test_format_version!(wasm_opt_pre_compiled, Application::WasmOpt, "wasm-opt version 101", "version_101");
    table_test_format_version!(wasm_bindgen_from_source, Application::WasmBindgen, "wasm-bindgen 0.2.75", "0.2.75");
    table_test_format_version!(
        wasm_bindgen_pre_compiled,
        Application::WasmBindgen,
        "wasm-bindgen 0.2.74 (27c7a4d06)",
        "0.2.74"
    );
}
url
identifier_name
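The `url` identifier filled in above belongs to the method that interpolates a per-OS target string into a GitHub release URL. A reduced sketch of that cfg!-based selection, with a placeholder host and tool name (the real URLs are in the row above):

fn release_target() -> Result<&'static str, String> {
    if cfg!(target_os = "windows") {
        Ok("pc-windows-msvc")
    } else if cfg!(target_os = "macos") {
        Ok("apple-darwin")
    } else if cfg!(target_os = "linux") {
        Ok("unknown-linux-musl")
    } else {
        Err("unsupported OS".to_string())
    }
}

fn release_url(version: &str) -> Result<String, String> {
    // "example.com" and "some-tool" are placeholders, not real release locations.
    Ok(format!(
        "https://example.com/releases/{v}/some-tool-{v}-x86_64-{t}.tar.gz",
        v = version,
        t = release_target()?
    ))
}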
tools.rs
//! Download management for external tools and applications. Locate and automatically download //! applications (if needed) to use them in the build pipeline. use std::path::{Path, PathBuf}; use anyhow::{anyhow, bail, ensure, Context, Result}; use async_compression::tokio::bufread::GzipDecoder; use directories_next::ProjectDirs; use futures::prelude::*; use tokio::fs::File; use tokio::io::{AsyncRead, AsyncSeekExt, AsyncWriteExt, BufReader, SeekFrom}; use tokio::process::Command; use tokio_tar::{Archive, Entry}; use crate::common::is_executable; /// The application to locate and eventually download when calling [`get`]. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum Application { /// wasm-bindgen for generating the JS bindings. WasmBindgen, /// wasm-opt to improve performance and size of the output file further. WasmOpt, } impl Application { /// Base name of the executable without extension. pub(crate) fn name(&self) -> &str
/// Path of the executable within the downloaded archive.
    fn path(&self) -> &str {
        if cfg!(windows) {
            match self {
                Self::WasmBindgen => "wasm-bindgen.exe",
                Self::WasmOpt => "bin/wasm-opt.exe",
            }
        } else {
            match self {
                Self::WasmBindgen => "wasm-bindgen",
                Self::WasmOpt => "bin/wasm-opt",
            }
        }
    }

    /// Additional files included in the archive that are required to run the main binary.
    fn extra_paths(&self) -> &[&str] {
        if cfg!(target_os = "macos") && *self == Self::WasmOpt {
            &["lib/libbinaryen.dylib"]
        } else {
            &[]
        }
    }

    /// Default version to use if not set by the user.
    fn default_version(&self) -> &str {
        match self {
            Self::WasmBindgen => "0.2.74",
            Self::WasmOpt => "version_101",
        }
    }

    /// Target for the current OS as part of the download URL. Can fail as there might be no release
    /// for the current platform.
    fn target(&self) -> Result<&str> {
        Ok(match self {
            Self::WasmBindgen => {
                if cfg!(target_os = "windows") {
                    "pc-windows-msvc"
                } else if cfg!(target_os = "macos") {
                    "apple-darwin"
                } else if cfg!(target_os = "linux") {
                    "unknown-linux-musl"
                } else {
                    bail!("unsupported OS")
                }
            }
            Self::WasmOpt => {
                if cfg!(target_os = "windows") {
                    "windows"
                } else if cfg!(target_os = "macos") {
                    "macos"
                } else if cfg!(target_os = "linux") {
                    "linux"
                } else {
                    bail!("unsupported OS")
                }
            }
        })
    }

    /// Direct URL to the release of an application for download.
    fn url(&self, version: &str) -> Result<String> {
        Ok(match self {
            Self::WasmBindgen => format!(
                "https://github.com/rustwasm/wasm-bindgen/releases/download/{version}/wasm-bindgen-{version}-x86_64-{target}.tar.gz",
                version = version,
                target = self.target()?
            ),
            Self::WasmOpt => format!(
                "https://github.com/WebAssembly/binaryen/releases/download/{version}/binaryen-{version}-x86_64-{target}.tar.gz",
                version = version,
                target = self.target()?,
            ),
        })
    }

    /// The CLI subcommand, flag or option used to check the application's version.
    fn version_test(&self) -> &'static str {
        match self {
            Application::WasmBindgen => "--version",
            Application::WasmOpt => "--version",
        }
    }

    /// Format the output of version checking the app.
    fn format_version_output(&self, text: &str) -> Result<String> {
        let text = text.trim();
        let formatted_version = match self {
            Application::WasmBindgen => text
                .split(' ')
                .nth(1)
                .with_context(|| format!("missing or malformed version output: {}", text))?
                .to_owned(),
            Application::WasmOpt => format!(
                "version_{}",
                text.split(' ')
                    .nth(2)
                    .with_context(|| format!("missing or malformed version output: {}", text))?
            ),
        };
        Ok(formatted_version)
    }
}

/// Locate the given application and download it if missing.
#[tracing::instrument(level = "trace")]
pub async fn get(app: Application, version: Option<&str>) -> Result<PathBuf> {
    let version = version.unwrap_or_else(|| app.default_version());

    if let Some(path) = find_system(app, version).await {
        tracing::info!(app = app.name(), version = version, "using system installed binary");
        return Ok(path);
    }

    let cache_dir = cache_dir().await?;
    let app_dir = cache_dir.join(format!("{}-{}", app.name(), version));
    let bin_path = app_dir.join(app.path());
    if !is_executable(&bin_path).await? {
        let path = download(app, version)
            .await
            .context("failed downloading release archive")?;

        let mut file = File::open(&path).await.context("failed opening downloaded file")?;
        install(app, &mut file, &app_dir).await?;
        tokio::fs::remove_file(path)
            .await
            .context("failed deleting temporary archive")?;
    }

    Ok(bin_path)
}

/// Try to find a system-wide installation of the application and ensure it matches the needed
/// release version.
#[tracing::instrument(level = "trace")] async fn find_system(app: Application, version: &str) -> Option<PathBuf> { let result = || async { let path = which::which(app.name())?; let output = Command::new(&path).arg(app.version_test()).output().await?; ensure!( output.status.success(), "running command `{} {}` failed", path.display(), app.version_test() ); let text = String::from_utf8_lossy(&output.stdout); let system_version = app.format_version_output(&text)?; Ok((path, system_version)) }; match result().await { Ok((path, system_version)) => (system_version == version).then(|| path), Err(e) => { tracing::debug!("system version not found for {}: {}", app.name(), e); None } } } /// Download a file from its remote location in the given version, extract it and make it ready for /// execution at the given location. #[tracing::instrument(level = "trace")] async fn download(app: Application, version: &str) -> Result<PathBuf> { tracing::info!(version = version, "downloading {}", app.name()); let cache_dir = cache_dir().await.context("failed getting the cache directory")?; let temp_out = cache_dir.join(format!("{}-{}.tmp", app.name(), version)); let mut file = File::create(&temp_out) .await .context("failed creating temporary output file")?; let resp = reqwest::get(app.url(version)?) .await .context("error sending HTTP request")?; ensure!( resp.status().is_success(), "error downloading archive file: {:?}\n{}", resp.status(), app.url(version)? ); let mut res_bytes = resp.bytes_stream(); while let Some(chunk_res) = res_bytes.next().await { let chunk = chunk_res.context("error reading chunk from download")?; let _res = file.write(chunk.as_ref()).await; } Ok(temp_out) } /// Install an application from a downloaded archive locating and copying it to the given target /// location. #[tracing::instrument(level = "trace")] async fn install(app: Application, archive_file: &mut File, target: &Path) -> Result<()> { tracing::info!("installing {}", app.name()); let mut archive = Archive::new(GzipDecoder::new(BufReader::new(archive_file))); let mut file = extract_file(&mut archive, target, Path::new(app.path())).await?; set_executable_flag(&mut file).await?; for path in app.extra_paths() { // Archive must be opened for each entry as tar files don't allow jumping forth and back. let mut archive_file = archive .into_inner() .map_err(|_| anyhow!("error seeking app archive"))? .into_inner(); archive_file .seek(SeekFrom::Start(0)) .await .context("error seeking to beginning of archive")?; archive = Archive::new(GzipDecoder::new(archive_file)); extract_file(&mut archive, target, Path::new(path)).await?; } Ok(()) } /// Extract a single file from the given archive and put it into the target location. async fn extract_file<R>(archive: &mut Archive<R>, target: &Path, file: &Path) -> Result<File> where R: AsyncRead + Unpin + Send + Sync, { let mut tar_file = find_tar_entry(archive, file).await?.context("file not found in archive")?; let out = target.join(file); if let Some(parent) = out.parent() { tokio::fs::create_dir_all(parent) .await .context("failed creating output directory")?; } let mut out = File::create(target.join(file)) .await .context("failed creating output file")?; tokio::io::copy(&mut tar_file, &mut out) .await .context("failed copying over final output file from archive")?; Ok(out) } /// Locate the cache dir for trunk and make sure it exists. pub async fn cache_dir() -> Result<PathBuf> { let path = ProjectDirs::from("dev", "trunkrs", "trunk") .context("failed finding project directory")? 
.cache_dir()
        .to_owned();

    tokio::fs::create_dir_all(&path)
        .await
        .context("failed creating cache directory")?;

    Ok(path)
}

/// Set the executable flag for a file. Only has an effect on UNIX platforms.
async fn set_executable_flag(file: &mut File) -> Result<()> {
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;

        let mut perms = file.metadata().await.context("failed getting metadata")?.permissions();
        perms.set_mode(perms.mode() | 0o100);
        file.set_permissions(perms)
            .await
            .context("failed setting the executable flag")?;
    }

    Ok(())
}

/// Find an entry in a TAR archive by name and open it for reading. The first part of the path is
/// dropped as that's usually the folder name it was created from.
async fn find_tar_entry<R>(archive: &mut Archive<R>, path: impl AsRef<Path>) -> Result<Option<Entry<Archive<R>>>>
where
    R: AsyncRead + Unpin + Send + Sync,
{
    let mut entries = archive.entries().context("failed getting archive entries")?;
    while let Some(entry) = entries.next().await {
        let entry = entry.context("error while getting archive entry")?;
        let name = entry.path().context("invalid entry path")?;

        let mut name = name.components();
        name.next();

        if name.as_path() == path.as_ref() {
            return Ok(Some(entry));
        }
    }

    Ok(None)
}

#[cfg(test)]
mod tests {
    use super::*;

    use anyhow::{ensure, Context, Result};

    #[tokio::test]
    async fn download_and_install_binaries() -> Result<()> {
        let dir = tempfile::tempdir().context("error creating temporary dir")?;

        for &app in &[Application::WasmBindgen, Application::WasmOpt] {
            let path = download(app, app.default_version())
                .await
                .context("error downloading app")?;
            let mut file = File::open(&path).await.context("error opening file")?;
            install(app, &mut file, dir.path()).await.context("error installing app")?;
            std::fs::remove_file(path).context("error during cleanup")?;
        }

        Ok(())
    }

    macro_rules! table_test_format_version {
        ($name:ident, $app:expr, $input:literal, $expect:literal) => {
            #[test]
            fn $name() -> Result<()> {
                let app = $app;
                let output = app
                    .format_version_output($input)
                    .context("unexpected version formatting error")?;
                ensure!(output == $expect, "version check output does not match: {} != {}", $expect, output);
                Ok(())
            }
        };
    }

    table_test_format_version!(
        wasm_opt_from_source,
        Application::WasmOpt,
        "wasm-opt version 101 (version_101)",
        "version_101"
    );
    table_test_format_version!(wasm_opt_pre_compiled, Application::WasmOpt, "wasm-opt version 101", "version_101");
    table_test_format_version!(wasm_bindgen_from_source, Application::WasmBindgen, "wasm-bindgen 0.2.75", "0.2.75");
    table_test_format_version!(
        wasm_bindgen_pre_compiled,
        Application::WasmBindgen,
        "wasm-bindgen 0.2.74 (27c7a4d06)",
        "0.2.74"
    );
}
{ match self { Self::WasmBindgen => "wasm-bindgen", Self::WasmOpt => "wasm-opt", } }
identifier_body
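Alongside `name`, the suffix above carries `set_executable_flag`, which ORs the owner-execute bit into the file mode on Unix. The same idea with blocking std I/O, as a tokio-free sketch (the function name is made up for illustration):

fn make_executable(path: &std::path::Path) -> std::io::Result<()> {
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut perms = std::fs::metadata(path)?.permissions();
        perms.set_mode(perms.mode() | 0o100); // 0o100 is the owner-execute bit (u+x)
        std::fs::set_permissions(path, perms)?;
    }
    #[cfg(not(unix))]
    let _ = path; // no-op elsewhere, matching the original's behavior
    Ok(())
}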
mod.rs
//! This module handles connections to the Content Manager Server.
//! First you connect to the IP using a TCP socket,
//! then read from and write to it.
//!
//! Packets are sent in the following format: packet_len + packet_magic + data
//! packet length: u32
//! packet magic: VT01
//!
//! Apparently, bytes received are in little endian

use std::error::Error;

use async_trait::async_trait;
use bytes::BytesMut;
use futures::{SinkExt, StreamExt};
use steam_crypto::SessionKeys;
use steam_language_gen::generated::enums::EMsg;
use steam_language_gen::SerializableBytes;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use tokio_util::codec::{FramedRead, FramedWrite};

use crate::connection::encryption::handle_encryption_negotiation;
use crate::errors::ConnectionError;
use crate::messages::codec::PacketMessageCodec;
use crate::messages::message::ClientMessage;
use crate::{errors::PacketError, messages::packet::PacketMessage};
use atomic::{Atomic, Ordering};

pub(crate) mod encryption;

const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#;

/// This should be an abstraction over low-level socket handlers and is not to be used directly.
/// [SteamClient] is used for binding and connecting.
#[derive(Debug)]
pub(crate) struct SteamConnection<S> {
    /// Stream of data to Steam Content server. May be TCP or Websocket.
    stream: S,
    /// Address to which the connection is bound.
    endpoint: String,
    /// Current encryption state
    state: Atomic<EncryptionState>,
    /// Populated after the initial handshake with Steam
    session_keys: Option<SessionKeys>,
}

impl<S> SteamConnection<S> {
    pub fn change_encryption_state(&self, new_state: EncryptionState) {
        self.state.swap(new_state, Ordering::AcqRel);
    }
}

#[async_trait]
trait Connection<S> {
    async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>;
    async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>;
    async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>;
}

pub(crate) type PacketTx = UnboundedSender<PacketMessage>;
pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>;
pub(crate) type DynBytes = Box<dyn SerializableBytes>;
pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>;

#[cfg(not(feature = "websockets"))]
impl SteamConnection<TcpStream> {
    async fn main_loop(mut self) -> Result<(), ConnectionError> {
        let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) =
            mpsc::unbounded_channel();

        let connection_state = &mut self.state;

        let (stream_rx, stream_tx) = self.stream.into_split();
        let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default());
        let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default());

        tokio::spawn(async move {
            if let Some(mes) = receiver.recv().await {
                let message: Vec<u8> = mes.to_bytes();
                framed_write.send(message).await.unwrap();
            }
        });

        while let Some(packet_message) = framed_read.next().await {
            let packet_message = packet_message.unwrap();

            match packet_message.emsg() {
                EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult =>
_ => {
                    unimplemented!()
                }
            };
        }
        Ok(())
    }
}

#[cfg(not(feature = "websockets"))]
#[async_trait]
impl Connection<TcpStream> for SteamConnection<TcpStream> {
    /// Opens a TCP stream to the specified IP
    async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> {
        trace!("Connecting to ip: {}", &ip_addr);
        let stream = TcpStream::connect(ip_addr).await?;

        Ok(SteamConnection {
            stream,
            endpoint: ip_addr.to_string(),
            state: Atomic::new(EncryptionState::Disconnected),
            session_keys: None,
        })
    }

    #[inline]
    async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> {
        let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default());
        Ok(framed_stream.next().await.unwrap().unwrap())
    }

    #[inline]
    async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
        let mut output_buffer = BytesMut::with_capacity(1024);
        trace!("payload size: {} ", data.len());

        output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes());
        output_buffer.extend_from_slice(PACKET_MAGIC_BYTES);
        output_buffer.extend_from_slice(data);
        let output_buffer = output_buffer.freeze();

        trace!("Writing {} bytes of data to stream..", output_buffer.len());
        trace!("Payload bytes: {:?}", output_buffer);
        let write_result = self.stream.write(&output_buffer).await?;
        trace!("write result: {}", write_result);
        Ok(())
    }
}

#[cfg(feature = "websockets")]
mod connection_method {
    use tokio_tls::TlsStream;
    use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream};

    use super::*;

    type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>;

    #[async_trait]
    impl Connection<Ws> for SteamConnection<Ws> {
        async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> {
            let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url);
            debug!("Connecting to addr: {}", formatted_ws_url);

            let (stream, _) = connect_async(&formatted_ws_url).await?;

            Ok(SteamConnection {
                stream,
                endpoint: formatted_ws_url,
                state: Atomic::new(EncryptionState::Disconnected),
                session_keys: None,
            })
        }

        #[inline]
        async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> {
            let mut data_len: [u8; 4] = [0; 4];
            self.stream.get_mut().read_exact(&mut data_len).await?;

            let mut packet_magic: [u8; 4] = [0; 4];
            self.stream.get_mut().read_exact(&mut packet_magic).await?;
            if packet_magic != PACKET_MAGIC_BYTES {
                log::error!("Could not find magic packet on read.");
            }

            let mut incoming_data = BytesMut::with_capacity(1024);
            self.stream.get_mut().read_buf(&mut incoming_data).await?;

            // sanity check
            debug!("data length: {}", u32::from_le_bytes(data_len));
            trace!("data: {:?}", incoming_data);
            Ok(incoming_data.to_vec())
        }

        #[inline]
        async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> {
            unimplemented!()
        }
    }
}

#[derive(Debug, Copy, Clone)]
/// Represents the current state of encryption of the connection.
/// Steam is always encrypted, except while the connection is starting up.
pub(crate) enum EncryptionState {
    /// After the initial connection is established, Steam requests to encrypt messages
    /// through a [EMsg::ChannelEncryptRequest]
    Connected,
    /// We are challenged after Steam returns a [EMsg::ChannelEncryptResult].
    ///
    /// After checking the result for a positive outcome, we should be `Encrypted`, else we get disconnected,
    /// and try again.
    Challenged,
    /// We are encrypted and there is nothing left to do.
    Encrypted,
    /// State only after logOff or if encryption fails.
Disconnected,
}

#[cfg(test)]
mod tests {
    use env_logger::Builder;
    use log::LevelFilter;
    use steam_language_gen::generated::enums::EMsg;
    use steam_language_gen::SerializableBytes;

    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;
    use crate::connection::encryption::handle_encrypt_request;
    use crate::content_manager::dump_tcp_servers;

    fn init() {
        let _ = Builder::from_default_env()
            .filter_module("steam_api", LevelFilter::Trace)
            .is_test(true)
            .try_init();
    }

    #[tokio::test]
    #[cfg(not(feature = "websockets"))]
    async fn connect_to_web_server() {
        init();
        let dumped_cm_servers = dump_tcp_servers().await.unwrap();
        let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await;
        assert!(steam_connection.is_ok());
    }

    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    #[cfg(not(feature = "websockets"))]
    async fn main_loop() {
        let dumped_cm_servers = dump_tcp_servers().await.unwrap();
        let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
        steam_connection.main_loop().await.unwrap()
    }

    #[tokio::test]
    #[cfg(not(feature = "websockets"))]
    async fn test_spawn() {
        let dumped_cm_servers = dump_tcp_servers().await.unwrap();
        let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
        let packet_message = steam_connection.read_packets().await.unwrap();
        assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest);

        let answer = handle_encrypt_request(packet_message).to_bytes();
        steam_connection.write_packets(&answer).await.unwrap();
        let data = steam_connection.read_packets().await.unwrap();
        assert_eq!(data.emsg(), EMsg::ChannelEncryptResult);
        // steam_connection.main_loop().await.unwrap()
    }

    // #[tokio::test()]
    // #[cfg(not(feature = "websockets"))]
    // async fn answer_encrypt_request() {
    //     init();
    //     let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await;
    //     let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers();
    //
    //     let mut steam_connection: SteamConnection<TcpStream> =
    //         SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap();
    //     let data = steam_connection.read_packets().await.unwrap();
    //     let message = EMsg::from_raw_message(&data);
    //
    //     assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest);
    //
    //     // let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data));
    //     steam_connection.write_packets(answer.as_slice()).await.unwrap();
    //     let data = steam_connection.read_packets().await.unwrap();
    //     let message = EMsg::from_raw_message(&data).unwrap();
    //     assert_eq!(message, EMsg::ChannelEncryptResult);
    // }

    #[tokio::test(flavor = "multi_thread")]
    #[cfg(feature = "websockets")]
    async fn connect_to_ws_server() {
        init();
        let get_results = CmServerSvList::fetch_servers("1").await;
        let fetched_servers = get_results.unwrap().dump_ws_servers();
        let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await;
        assert!(steam_connection.is_ok())
    }
}
{ handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap(); }
conditional_block
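The `write_packets` implementation above frames every message as a little-endian `u32` payload length, followed by the 4-byte `VT01` magic, followed by the payload. A minimal standalone sketch of that framing, using only std (`build_frame` and `parse_frame` are illustrative names, not part of the crate):

use std::convert::TryInto;

const PACKET_MAGIC_BYTES: &[u8] = b"VT01";

/// Builds one frame: packet_len (u32, little endian) + packet magic + data.
/// Note that the length field counts only the payload, not the magic bytes.
fn build_frame(data: &[u8]) -> Vec<u8> {
    let mut frame = Vec::with_capacity(8 + data.len());
    frame.extend_from_slice(&(data.len() as u32).to_le_bytes());
    frame.extend_from_slice(PACKET_MAGIC_BYTES);
    frame.extend_from_slice(data);
    frame
}

/// Inverse of `build_frame`; returns the payload slice on success.
fn parse_frame(frame: &[u8]) -> Option<&[u8]> {
    let len = u32::from_le_bytes(frame.get(..4)?.try_into().ok()?) as usize;
    if frame.get(4..8)? != PACKET_MAGIC_BYTES {
        return None; // magic mismatch
    }
    frame.get(8..8 + len)
}

fn main() {
    let frame = build_frame(b"hello");
    assert_eq!(parse_frame(&frame), Some(&b"hello"[..]));
}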
mod.rs
//! This module handles connections to the Content Manager server. //! First you connect to the IP using a TCP socket, //! then read from and write to it. //! //! Packets are sent in the following format: packet_len + packet_magic + data //! packet length: u32 //! packet magic: VT01 //! //! Apparently, bytes received are in little-endian order. use std::error::Error; use async_trait::async_trait; use bytes::BytesMut; use futures::{SinkExt, StreamExt}; use steam_crypto::SessionKeys; use steam_language_gen::generated::enums::EMsg; use steam_language_gen::SerializableBytes; use tokio::io::AsyncWriteExt; use tokio::net::TcpStream; use tokio::sync::mpsc; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tokio_util::codec::{FramedRead, FramedWrite}; use crate::connection::encryption::handle_encryption_negotiation; use crate::errors::ConnectionError; use crate::messages::codec::PacketMessageCodec; use crate::messages::message::ClientMessage; use crate::{errors::PacketError, messages::packet::PacketMessage}; use atomic::{Atomic, Ordering}; pub(crate) mod encryption; const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#; /// This should be an abstraction over low-level socket handlers and is not to be used directly. /// [SteamClient] is used for binding and connecting. #[derive(Debug)] pub(crate) struct SteamConnection<S> { /// Stream of data to the Steam Content server. May be TCP or WebSocket. stream: S, /// Address to which the connection is bound. endpoint: String, /// Current encryption state state: Atomic<EncryptionState>, /// Populated after the initial handshake with Steam session_keys: Option<SessionKeys>, } impl<S> SteamConnection<S> { pub fn change_encryption_state(&self, new_state: EncryptionState) { self.state.swap(new_state, Ordering::AcqRel); } } #[async_trait] trait Connection<S> { async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>; async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>; async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>; } pub(crate) type PacketTx = UnboundedSender<PacketMessage>; pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>; pub(crate) type DynBytes = Box<dyn SerializableBytes>; pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>; #[cfg(not(feature = "websockets"))] impl SteamConnection<TcpStream> { async fn
(mut self) -> Result<(), ConnectionError> { let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) = mpsc::unbounded_channel(); let connection_state = &mut self.state; let (stream_rx, stream_tx) = self.stream.into_split(); let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default()); let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default()); tokio::spawn(async move { if let Some(mes) = receiver.recv().await { let message: Vec<u8> = mes.to_bytes(); framed_write.send(message).await.unwrap(); } }); while let Some(packet_message) = framed_read.next().await { let packet_message = packet_message.unwrap(); match packet_message.emsg() { EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => { handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap(); } _ => { unimplemented!() } }; } Ok(()) } } #[cfg(not(feature = "websockets"))] #[async_trait] impl Connection<TcpStream> for SteamConnection<TcpStream> { /// Opens a TCP stream to the specified IP async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> { trace!("Connecting to ip: {}", &ip_addr); let stream = TcpStream::connect(ip_addr).await?; Ok(SteamConnection { stream, endpoint: ip_addr.to_string(), state: Atomic::new(EncryptionState::Disconnected), session_keys: None, }) } #[inline] async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> { let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default()); Ok(framed_stream.next().await.unwrap().unwrap()) } #[inline] async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> { let mut output_buffer = BytesMut::with_capacity(1024); trace!("payload size: {} ", data.len()); output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes()); output_buffer.extend_from_slice(PACKET_MAGIC_BYTES); output_buffer.extend_from_slice(data); let output_buffer = output_buffer.freeze(); trace!("Writing {} bytes of data to stream..", output_buffer.len()); trace!("Payload bytes: {:?}", output_buffer); let write_result = self.stream.write(&output_buffer).await?; trace!("write result: {}", write_result); Ok(()) } } #[cfg(feature = "websockets")] mod connection_method { use tokio_tls::TlsStream; use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream}; use super::*; type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>; #[async_trait] impl Connection<Ws> for SteamConnection<Ws> { async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> { let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url); debug!("Connecting to addr: {}", formatted_ws_url); let (stream, _) = connect_async(&formatted_ws_url).await?; Ok(SteamConnection { stream, endpoint: formatted_ws_url, state: EncryptionState::Disconnected, }) } #[inline] async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> { let mut data_len: [u8; 4] = [0; 4]; self.stream.get_mut().read_exact(&mut data_len).await?; let mut packet_magic: [u8; 4] = [0; 4]; self.stream.get_mut().read_exact(&mut packet_magic).await?; if packet_magic != PACKET_MAGIC_BYTES { log::error!("Could not find packet magic on read."); } let mut incoming_data = BytesMut::with_capacity(1024); self.stream.get_mut().read_buf(&mut incoming_data).await?; // sanity check debug!("data length: {}", u32::from_le_bytes(data_len)); trace!("data: {:?}", incoming_data); Ok(incoming_data.to_vec()) } #[inline] async fn
write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> { unimplemented!() } } } #[derive(Debug, Copy, Clone)] /// Represents the current encryption state of the connection. /// Steam traffic is always encrypted, except while the connection is being established. pub(crate) enum EncryptionState { /// After the initial connection is established, Steam requests to encrypt messages /// through a [EMsg::ChannelEncryptRequest] Connected, /// We are challenged after Steam returns a [EMsg::ChannelEncryptResult]. /// /// After checking the result for a positive outcome, we should be `Encrypted`; otherwise we get disconnected /// and try again. Challenged, /// We are encrypted and there is nothing left to do. Encrypted, /// State only after logOff or if encryption fails. Disconnected, } #[cfg(test)] mod tests { use env_logger::Builder; use log::LevelFilter; use steam_language_gen::generated::enums::EMsg; use steam_language_gen::SerializableBytes; // Note this useful idiom: importing names from outer (for mod tests) scope. use super::*; use crate::connection::encryption::handle_encrypt_request; use crate::content_manager::dump_tcp_servers; fn init() { let _ = Builder::from_default_env() .filter_module("steam_api", LevelFilter::Trace) .is_test(true) .try_init(); } #[tokio::test] #[cfg(not(feature = "websockets"))] async fn connect_to_web_server() { init(); let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await; assert!(steam_connection.is_ok()); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[cfg(not(feature = "websockets"))] async fn main_loop() { let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); steam_connection.main_loop().await.unwrap() } #[tokio::test] #[cfg(not(feature = "websockets"))] async fn test_spawn() { let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let packet_message = steam_connection.read_packets().await.unwrap(); assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest); let answer = handle_encrypt_request(packet_message).to_bytes(); steam_connection.write_packets(&answer).await.unwrap(); let data = steam_connection.read_packets().await.unwrap(); assert_eq!(data.emsg(), EMsg::ChannelEncryptResult); // steam_connection.main_loop().await.unwrap() } // #[tokio::test()] // #[cfg(not(feature = "websockets"))] // async fn answer_encrypt_request() { // init(); // // let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await; // let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers(); // // let mut steam_connection: SteamConnection<TcpStream> = // SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data = // steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data); // // assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest); // // // let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data)); // steam_connection.write_packets(answer.as_slice()).await.unwrap(); // let data = steam_connection.read_packets().await.unwrap(); // let message = EMsg::from_raw_message(&data).unwrap(); // assert_eq!(message, EMsg::ChannelEncryptResult); // } #[tokio::test(threaded_scheduler)] #[cfg(feature = "websockets")] async fn connect_to_ws_server() { init(); let get_results =
CmServerSvList::fetch_servers("1").await; let fetched_servers = get_results.unwrap().dump_ws_servers(); let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await; assert!(steam_connection.is_ok()) } }
main_loop
identifier_name
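Across these records, `EncryptionState` is driven as a small state machine while the channel is negotiated. A sketch of the transitions its doc comments describe, using the same `atomic` crate as the `SteamConnection` struct above (the `advance` helper and its signature are illustrative, not crate API):

use atomic::{Atomic, Ordering};

#[derive(Debug, Copy, Clone)]
enum EncryptionState { Connected, Challenged, Encrypted, Disconnected }

/// Happy path: Disconnected -> Connected (socket up) -> Challenged (request answered)
/// -> Encrypted (positive EncryptResult); any failure falls back to Disconnected.
fn advance(state: &Atomic<EncryptionState>, encrypt_result_ok: bool) {
    let next = match state.load(Ordering::Acquire) {
        EncryptionState::Disconnected => EncryptionState::Connected,
        EncryptionState::Connected => EncryptionState::Challenged,
        EncryptionState::Challenged if encrypt_result_ok => EncryptionState::Encrypted,
        _ => EncryptionState::Disconnected,
    };
    state.swap(next, Ordering::AcqRel);
}

fn main() {
    let state = Atomic::new(EncryptionState::Disconnected);
    advance(&state, false); // socket connected
    advance(&state, false); // challenge answered
    advance(&state, true); // positive EncryptResult
    assert!(matches!(state.load(Ordering::Acquire), EncryptionState::Encrypted));
}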
mod.rs
//! This module handles connections to the Content Manager server. //! First you connect to the IP using a TCP socket, //! then read from and write to it. //! //! Packets are sent in the following format: packet_len + packet_magic + data //! packet length: u32 //! packet magic: VT01 //! //! Apparently, bytes received are in little-endian order. use std::error::Error; use async_trait::async_trait; use bytes::BytesMut; use futures::{SinkExt, StreamExt}; use steam_crypto::SessionKeys; use steam_language_gen::generated::enums::EMsg; use steam_language_gen::SerializableBytes; use tokio::io::AsyncWriteExt; use tokio::net::TcpStream; use tokio::sync::mpsc; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tokio_util::codec::{FramedRead, FramedWrite}; use crate::connection::encryption::handle_encryption_negotiation; use crate::errors::ConnectionError; use crate::messages::codec::PacketMessageCodec; use crate::messages::message::ClientMessage; use crate::{errors::PacketError, messages::packet::PacketMessage}; use atomic::{Atomic, Ordering}; pub(crate) mod encryption; const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#; /// This should be an abstraction over low-level socket handlers and is not to be used directly. /// [SteamClient] is used for binding and connecting. #[derive(Debug)] pub(crate) struct SteamConnection<S> { /// Stream of data to the Steam Content server. May be TCP or WebSocket. stream: S, /// Address to which the connection is bound. endpoint: String, /// Current encryption state state: Atomic<EncryptionState>, /// Populated after the initial handshake with Steam session_keys: Option<SessionKeys>, } impl<S> SteamConnection<S> { pub fn change_encryption_state(&self, new_state: EncryptionState) { self.state.swap(new_state, Ordering::AcqRel); } } #[async_trait] trait Connection<S> { async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>; async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>; async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>; } pub(crate) type PacketTx = UnboundedSender<PacketMessage>; pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>; pub(crate) type DynBytes = Box<dyn SerializableBytes>; pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>; #[cfg(not(feature = "websockets"))] impl SteamConnection<TcpStream> { async fn main_loop(mut self) -> Result<(), ConnectionError> { let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) = mpsc::unbounded_channel(); let connection_state = &mut self.state; let (stream_rx, stream_tx) = self.stream.into_split(); let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default()); let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default()); tokio::spawn(async move { if let Some(mes) = receiver.recv().await { let message: Vec<u8> = mes.to_bytes(); framed_write.send(message).await.unwrap(); } }); while let Some(packet_message) = framed_read.next().await { let packet_message = packet_message.unwrap(); match packet_message.emsg() { EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => { handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap(); } _ => { unimplemented!() } }; } Ok(()) } } #[cfg(not(feature = "websockets"))] #[async_trait] impl Connection<TcpStream> for SteamConnection<TcpStream> { /// Opens a TCP stream to the specified IP async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>,
Box<dyn Error>> { trace!("Connecting to ip: {}", &ip_addr); let stream = TcpStream::connect(ip_addr).await?; Ok(SteamConnection { stream, endpoint: ip_addr.to_string(), state: Atomic::new(EncryptionState::Disconnected), session_keys: None, }) } #[inline] async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> { let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default()); Ok(framed_stream.next().await.unwrap().unwrap())
#[inline] async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> { let mut output_buffer = BytesMut::with_capacity(1024); trace!("payload size: {} ", data.len()); output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes()); output_buffer.extend_from_slice(PACKET_MAGIC_BYTES); output_buffer.extend_from_slice(data); let output_buffer = output_buffer.freeze(); trace!("Writing {} bytes of data to stream..", output_buffer.len()); trace!("Payload bytes: {:?}", output_buffer); let write_result = self.stream.write(&output_buffer).await?; trace!("write result: {}", write_result); Ok(()) } } #[cfg(feature = "websockets")] mod connection_method { use tokio_tls::TlsStream; use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream}; use super::*; type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>; #[async_trait] impl Connection<Ws> for SteamConnection<Ws> { async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> { let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url); debug!("Connecting to addr: {}", formatted_ws_url); let (stream, _) = connect_async(&formatted_ws_url).await?; Ok(SteamConnection { stream, endpoint: formatted_ws_url, state: EncryptionState::Disconnected, }) } #[inline] async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> { let mut data_len: [u8; 4] = [0; 4]; self.stream.get_mut().read_exact(&mut data_len).await?; let mut packet_magic: [u8; 4] = [0; 4]; self.stream.get_mut().read_exact(&mut packet_magic).await?; if packet_magic != PACKET_MAGIC_BYTES { log::error!("Could not find packet magic on read."); } let mut incoming_data = BytesMut::with_capacity(1024); self.stream.get_mut().read_buf(&mut incoming_data).await?; // sanity check debug!("data length: {}", u32::from_le_bytes(data_len)); trace!("data: {:?}", incoming_data); Ok(incoming_data.to_vec()) } #[inline] async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> { unimplemented!() } } } #[derive(Debug, Copy, Clone)] /// Represents the current encryption state of the connection. /// Steam traffic is always encrypted, except while the connection is being established. pub(crate) enum EncryptionState { /// After the initial connection is established, Steam requests to encrypt messages /// through a [EMsg::ChannelEncryptRequest] Connected, /// We are challenged after Steam returns a [EMsg::ChannelEncryptResult]. /// /// After checking the result for a positive outcome, we should be `Encrypted`; otherwise we get disconnected /// and try again. Challenged, /// We are encrypted and there is nothing left to do. Encrypted, /// State only after logOff or if encryption fails. Disconnected, } #[cfg(test)] mod tests { use env_logger::Builder; use log::LevelFilter; use steam_language_gen::generated::enums::EMsg; use steam_language_gen::SerializableBytes; // Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*; use crate::connection::encryption::handle_encrypt_request; use crate::content_manager::dump_tcp_servers; fn init() { let _ = Builder::from_default_env() .filter_module("steam_api", LevelFilter::Trace) .is_test(true) .try_init(); } #[tokio::test] #[cfg(not(feature = "websockets"))] async fn connect_to_web_server() { init(); let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await; assert!(steam_connection.is_ok()); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[cfg(not(feature = "websockets"))] async fn main_loop() { let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); steam_connection.main_loop().await.unwrap() } #[tokio::test] #[cfg(not(feature = "websockets"))] async fn test_spawn() { let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let packet_message = steam_connection.read_packets().await.unwrap(); assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest); let answer = handle_encrypt_request(packet_message).to_bytes(); steam_connection.write_packets(&answer).await.unwrap(); let data = steam_connection.read_packets().await.unwrap(); assert_eq!(data.emsg(), EMsg::ChannelEncryptResult); // steam_connection.main_loop().await.unwrap() } // #[tokio::test()] // #[cfg(not(feature = "websockets"))] // async fn answer_encrypt_request() { // init(); // // let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await; // let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers(); // // let mut steam_connection: SteamConnection<TcpStream> = // SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data = // steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data); // // assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest); // // // let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data)); // steam_connection.write_packets(answer.as_slice()).await.unwrap(); // let data = steam_connection.read_packets().await.unwrap(); // let message = EMsg::from_raw_message(&data).unwrap(); // assert_eq!(message, EMsg::ChannelEncryptResult); // } #[tokio::test(threaded_scheduler)] #[cfg(feature = "websockets")] async fn connect_to_ws_server() { init(); let get_results = CmServerSvList::fetch_servers("1").await; let fetched_servers = get_results.unwrap().dump_ws_servers(); let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await; assert!(steam_connection.is_ok()) } }
}
random_line_split
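One caveat in the websocket `read_packets` shown in this record: it parses a length header, but then fills a fixed 1024-capacity buffer with a single `read_buf` call, which may return fewer bytes than the header announced. A sketch of a length-honoring read under the same `len + magic + payload` framing, generic over any tokio `AsyncRead` (`read_frame` is an illustrative name, not the crate's API):

use std::convert::TryInto;
use std::io::{Error, ErrorKind};
use tokio::io::{AsyncRead, AsyncReadExt};

const PACKET_MAGIC_BYTES: &[u8] = b"VT01";

/// Reads exactly one frame, retrying short reads until the announced length arrives.
async fn read_frame<R: AsyncRead + Unpin>(reader: &mut R) -> std::io::Result<Vec<u8>> {
    let mut header = [0u8; 8];
    reader.read_exact(&mut header).await?;
    let len = u32::from_le_bytes(header[..4].try_into().unwrap()) as usize;
    if &header[4..8] != PACKET_MAGIC_BYTES {
        return Err(Error::new(ErrorKind::InvalidData, "packet magic mismatch"));
    }
    let mut payload = vec![0u8; len];
    reader.read_exact(&mut payload).await?; // loops internally over short reads
    Ok(payload)
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    use tokio::io::AsyncWriteExt;
    let (mut writer, mut reader) = tokio::io::duplex(64);
    writer.write_all(b"\x05\x00\x00\x00VT01hello").await?;
    assert_eq!(read_frame(&mut reader).await?, b"hello");
    Ok(())
}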
mod.rs
//! This module handles connections to the Content Manager server. //! First you connect to the IP using a TCP socket, //! then read from and write to it. //! //! Packets are sent in the following format: packet_len + packet_magic + data //! packet length: u32 //! packet magic: VT01 //! //! Apparently, bytes received are in little-endian order. use std::error::Error; use async_trait::async_trait; use bytes::BytesMut; use futures::{SinkExt, StreamExt}; use steam_crypto::SessionKeys; use steam_language_gen::generated::enums::EMsg; use steam_language_gen::SerializableBytes; use tokio::io::AsyncWriteExt; use tokio::net::TcpStream; use tokio::sync::mpsc; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tokio_util::codec::{FramedRead, FramedWrite}; use crate::connection::encryption::handle_encryption_negotiation; use crate::errors::ConnectionError; use crate::messages::codec::PacketMessageCodec; use crate::messages::message::ClientMessage; use crate::{errors::PacketError, messages::packet::PacketMessage}; use atomic::{Atomic, Ordering}; pub(crate) mod encryption; const PACKET_MAGIC_BYTES: &[u8] = br#"VT01"#; /// This should be an abstraction over low-level socket handlers and is not to be used directly. /// [SteamClient] is used for binding and connecting. #[derive(Debug)] pub(crate) struct SteamConnection<S> { /// Stream of data to the Steam Content server. May be TCP or WebSocket. stream: S, /// Address to which the connection is bound. endpoint: String, /// Current encryption state state: Atomic<EncryptionState>, /// Populated after the initial handshake with Steam session_keys: Option<SessionKeys>, } impl<S> SteamConnection<S> { pub fn change_encryption_state(&self, new_state: EncryptionState) { self.state.swap(new_state, Ordering::AcqRel); } } #[async_trait] trait Connection<S> { async fn new_connection(ip_addr: &str) -> Result<SteamConnection<S>, Box<dyn Error>>; async fn read_packets(&mut self) -> Result<PacketMessage, PacketError>; async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>>; } pub(crate) type PacketTx = UnboundedSender<PacketMessage>; pub(crate) type MessageTx<T> = UnboundedSender<ClientMessage<T>>; pub(crate) type DynBytes = Box<dyn SerializableBytes>; pub(crate) type BytesTx = UnboundedSender<Box<dyn SerializableBytes + 'static>>; #[cfg(not(feature = "websockets"))] impl SteamConnection<TcpStream> { async fn main_loop(mut self) -> Result<(), ConnectionError>
match packet_message.emsg() { EMsg::ChannelEncryptRequest | EMsg::ChannelEncryptResponse | EMsg::ChannelEncryptResult => { handle_encryption_negotiation(sender.clone(), connection_state, packet_message).unwrap(); } _ => { unimplemented!() } }; } Ok(()) } } #[cfg(not(feature = "websockets"))] #[async_trait] impl Connection<TcpStream> for SteamConnection<TcpStream> { /// Opens a TCP stream to the specified IP async fn new_connection(ip_addr: &str) -> Result<SteamConnection<TcpStream>, Box<dyn Error>> { trace!("Connecting to ip: {}", &ip_addr); let stream = TcpStream::connect(ip_addr).await?; Ok(SteamConnection { stream, endpoint: ip_addr.to_string(), state: Atomic::new(EncryptionState::Disconnected), session_keys: None, }) } #[inline] async fn read_packets(&mut self) -> Result<PacketMessage, PacketError> { let mut framed_stream = FramedRead::new(&mut self.stream, PacketMessageCodec::default()); Ok(framed_stream.next().await.unwrap().unwrap()) } #[inline] async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> { let mut output_buffer = BytesMut::with_capacity(1024); trace!("payload size: {} ", data.len()); output_buffer.extend_from_slice(&(data.len() as u32).to_le_bytes()); output_buffer.extend_from_slice(PACKET_MAGIC_BYTES); output_buffer.extend_from_slice(data); let output_buffer = output_buffer.freeze(); trace!("Writing {} bytes of data to stream..", output_buffer.len()); trace!("Payload bytes: {:?}", output_buffer); let write_result = self.stream.write(&output_buffer).await?; trace!("write result: {}", write_result); Ok(()) } } #[cfg(feature = "websockets")] mod connection_method { use tokio_tls::TlsStream; use tokio_tungstenite::{connect_async, stream::Stream, WebSocketStream}; use super::*; type Ws = WebSocketStream<Stream<TcpStream, TlsStream<TcpStream>>>; #[async_trait] impl Connection<Ws> for SteamConnection<Ws> { async fn new_connection(ws_url: &str) -> Result<SteamConnection<Ws>, Box<dyn Error>> { let formatted_ws_url = format!("wss://{}/cmsocket/", ws_url); debug!("Connecting to addr: {}", formatted_ws_url); let (stream, _) = connect_async(&formatted_ws_url).await?; Ok(SteamConnection { stream, endpoint: formatted_ws_url, state: EncryptionState::Disconnected, }) } #[inline] async fn read_packets(&mut self) -> Result<Vec<u8>, Box<dyn Error>> { let mut data_len: [u8; 4] = [0; 4]; self.stream.get_mut().read_exact(&mut data_len).await?; let mut packet_magic: [u8; 4] = [0; 4]; self.stream.get_mut().read_exact(&mut packet_magic).await?; if packet_magic != PACKET_MAGIC_BYTES { log::error!("Could not find packet magic on read."); } let mut incoming_data = BytesMut::with_capacity(1024); self.stream.get_mut().read_buf(&mut incoming_data).await?; // sanity check debug!("data length: {}", u32::from_le_bytes(data_len)); trace!("data: {:?}", incoming_data); Ok(incoming_data.to_vec()) } #[inline] async fn write_packets(&mut self, data: &[u8]) -> Result<(), Box<dyn Error>> { unimplemented!() } } } #[derive(Debug, Copy, Clone)] /// Represents the current encryption state of the connection. /// Steam traffic is always encrypted, except while the connection is being established. pub(crate) enum EncryptionState { /// After the initial connection is established, Steam requests to encrypt messages /// through a [EMsg::ChannelEncryptRequest] Connected, /// We are challenged after Steam returns a [EMsg::ChannelEncryptResult]. /// /// After checking the result for a positive outcome, we should be `Encrypted`; otherwise we get disconnected /// and try again.
Challenged, /// We are encrypted and there is nothing left to do. Encrypted, /// State only after logOff or if encryption fails. Disconnected, } #[cfg(test)] mod tests { use env_logger::Builder; use log::LevelFilter; use steam_language_gen::generated::enums::EMsg; use steam_language_gen::SerializableBytes; // Note this useful idiom: importing names from outer (for mod tests) scope. use super::*; use crate::connection::encryption::handle_encrypt_request; use crate::content_manager::dump_tcp_servers; fn init() { let _ = Builder::from_default_env() .filter_module("steam_api", LevelFilter::Trace) .is_test(true) .try_init(); } #[tokio::test] #[cfg(not(feature = "websockets"))] async fn connect_to_web_server() { init(); let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await; assert!(steam_connection.is_ok()); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[cfg(not(feature = "websockets"))] async fn main_loop() { let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); steam_connection.main_loop().await.unwrap() } #[tokio::test] #[cfg(not(feature = "websockets"))] async fn test_spawn() { let dumped_cm_servers = dump_tcp_servers().await.unwrap(); let mut steam_connection = SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let packet_message = steam_connection.read_packets().await.unwrap(); assert_eq!(packet_message.emsg(), EMsg::ChannelEncryptRequest); let answer = handle_encrypt_request(packet_message).to_bytes(); steam_connection.write_packets(&answer).await.unwrap(); let data = steam_connection.read_packets().await.unwrap(); assert_eq!(data.emsg(), EMsg::ChannelEncryptResult); // steam_connection.main_loop().await.unwrap() } // #[tokio::test()] // #[cfg(not(feature = "websockets"))] // async fn answer_encrypt_request() { // init(); // // let cm_servers = CmServerSvList::fetch_servers(env!("STEAM_API")).await; // let dumped_cm_servers = cm_servers.unwrap().dump_tcp_servers(); // // let mut steam_connection: SteamConnection<TcpStream> = // SteamConnection::new_connection(&dumped_cm_servers[0]).await.unwrap(); let data = // steam_connection.read_packets().await.unwrap(); let message = EMsg::from_raw_message(&data); // // assert_eq!(message.unwrap(), EMsg::ChannelEncryptRequest); // // // let answer = handle_encrypt_request(PacketMessage::from_rawdata(&data)); // steam_connection.write_packets(answer.as_slice()).await.unwrap(); // let data = steam_connection.read_packets().await.unwrap(); // let message = EMsg::from_raw_message(&data).unwrap(); // assert_eq!(message, EMsg::ChannelEncryptResult); // } #[tokio::test(threaded_scheduler)] #[cfg(feature = "websockets")] async fn connect_to_ws_server() { init(); let get_results = CmServerSvList::fetch_servers("1").await; let fetched_servers = get_results.unwrap().dump_ws_servers(); let steam_connection = SteamConnection::new_connection(&fetched_servers[0]).await; assert!(steam_connection.is_ok()) } }
{ let (sender, mut receiver): (UnboundedSender<DynBytes>, UnboundedReceiver<DynBytes>) = mpsc::unbounded_channel(); let connection_state = &mut self.state; let (stream_rx, stream_tx) = self.stream.into_split(); let mut framed_read = FramedRead::new(stream_rx, PacketMessageCodec::default()); let mut framed_write = FramedWrite::new(stream_tx, PacketMessageCodec::default()); tokio::spawn(async move { if let Some(mes) = receiver.recv().await { let message: Vec<u8> = mes.to_bytes(); framed_write.send(message).await.unwrap(); } }); while let Some(packet_message) = framed_read.next().await { let packet_message = packet_message.unwrap();
identifier_body
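One detail worth flagging in the `main_loop` body that forms this record's middle: the spawned writer task uses `if let Some(mes) = receiver.recv().await`, so it forwards exactly one message and then exits, after which further sends on the channel fail. A reduced sketch of the same split-stream pattern with a writer that stays alive (illustrative names, assuming tokio; the `FramedWrite` codec is elided in favor of raw writes):

use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio::sync::mpsc::{self, UnboundedSender};

/// Spawns a writer task that forwards every queued buffer to `write_half`
/// and returns the sender side for producers to clone.
fn spawn_writer<W>(mut write_half: W) -> UnboundedSender<Vec<u8>>
where
    W: AsyncWrite + Unpin + Send + 'static,
{
    let (tx, mut rx) = mpsc::unbounded_channel::<Vec<u8>>();
    tokio::spawn(async move {
        // `while let`, not `if let`: keep draining until all senders drop.
        while let Some(bytes) = rx.recv().await {
            if write_half.write_all(&bytes).await.is_err() {
                break; // peer gone; stop the writer task
            }
        }
    });
    tx
}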
lib.rs
// The MIT License (MIT) // Copyright (c) 2018 Matrix.Zhang <[email protected]> // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. //! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'. //! //! ## Basic usage //! //! ```rust //!use dayu::Dayu; //!use serde_json::json; //! //!let dayu = Dayu::new() //! .set_access_key("access_key") //! .set_access_secret("access_secret") //! .set_sign_name("阿里云测试短信"); //!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap(); //! ``` use std::{ collections::BTreeMap, convert::AsRef, fmt::{self, Display, Formatter}, }; use chrono::{NaiveDate, Utc}; use futures_util::TryFutureExt; use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; use reqwest::Client; use serde::{Deserialize, Serialize}; use serde_json::Value; use textnonce::TextNonce; use thiserror::Error; use url::Url; static MAX_PAGE_SIZE: u8 = 50; static REQUEST_FORMAT: &str = "JSON"; static SIGN_METHOD: &str = "HMAC-SHA1"; static SIGNATURE_VERSION: &str = "1.0"; static VERSION: &str = "2017-05-25"; #[derive(Debug, Error)] pub enum DayuError { #[error("config of '{0}' absence")] ConfigAbsence(&'static str), #[error("dayu response error: {0}")] Dayu(DayuFailResponse), #[error("openssl error: {0}")] Openssl(#[from] openssl::error::ErrorStack), #[error("page size '{0}' too large, max is 50")] PageTooLarge(u8), #[error("reqwest error: {0}")] Reqwest(#[from] reqwest::Error), #[error("serde_json error: {0}")] SerdeJson(#[from] serde_json::error::Error), #[error("std io error: {0}")] Stdio(#[from] std::io::Error), #[error("textnonce error: {0}")] TextNonce(String), #[error("url parse error: {0}")] UrlParse(#[from] url::ParseError), } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuSendRespon
z_id: String, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryDetail { pub phone_num: String, pub send_date: String, pub send_status: u8, pub receive_date: String, pub template_code: String, pub content: String, pub err_code: String, } #[derive(Debug, Deserialize)] pub struct DayuQueryDetails { #[serde(rename = "SmsSendDetailDTO")] pub inner: Vec<DayuQueryDetail>, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryResponse { pub total_count: i32, pub total_page: Option<u8>, #[serde(rename = "SmsSendDetailDTOs")] pub details: Option<DayuQueryDetails>, } #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] pub struct DayuFailResponse { pub code: String, pub message: String, pub request_id: String, } impl Display for DayuFailResponse { fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!(f, "{}", serde_json::to_string_pretty(self).unwrap()) } } #[derive(Debug, Deserialize)] #[serde(untagged)] pub enum DayuResponse { Send(DayuSendResponse), Query(DayuQueryResponse), Fail(DayuFailResponse), } #[derive(Default, Clone)] pub struct Dayu { client: Client, access_key: String, access_secret: String, sign_name: String, } fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> { if dayu.access_key.is_empty() { return Err(DayuError::ConfigAbsence("access_key")); } if dayu.access_secret.is_empty() { return Err(DayuError::ConfigAbsence("access_secret")); } if dayu.sign_name.is_empty() { return Err(DayuError::ConfigAbsence("sign_name")); } let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(); TextNonce::sized(32) .map_err(DayuError::TextNonce) .map(|v| v.to_string()) .and_then(|text_nonce| { let mut map = BTreeMap::new(); map.insert("Format", REQUEST_FORMAT); map.insert("AccessKeyId", &dayu.access_key); map.insert("SignatureMethod", SIGN_METHOD); map.insert("SignatureNonce", &text_nonce); map.insert("SignatureVersion", SIGNATURE_VERSION); map.insert("Timestamp", &timestamp); map.insert("Action", action); map.insert("SignName", &dayu.sign_name); map.insert("Version", VERSION); for &(name, value) in params { if !value.is_empty() { map.insert(name, value); } } let mut forms = map .into_iter() .map(|(key, value)| (key, urlencoding::encode(value).into_owned())) .collect::<Vec<(&str, String)>>(); let mut wait_sign = String::from("GET&%2F&"); wait_sign.push_str( &forms .iter() .fold(vec![], |mut wait_sign, &(key, ref value)| { wait_sign .push(urlencoding::encode(&format!("{}={}", key, value)).into_owned()); wait_sign }) .join(&urlencoding::encode("&")), ); PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes()) .and_then(|pkey| { Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| { signer .update(wait_sign.as_bytes()) .and_then(|_| signer.sign_to_vec()) }) }) .map_err(Into::into) .map(|ref signature| { forms.push(( "Signature", urlencoding::encode(&base64::encode(signature)).into_owned(), )) }) .and_then(|_| { Url::parse("https://dysmsapi.aliyuncs.com") .map_err(Into::into) .map(|mut url| { url.set_query(Some( &forms .into_iter() .map(|(key, value)| format!("{}={}", key, value)) .collect::<Vec<String>>() .join("&"), )); url }) }) }) } macro_rules!
do_request { ($dayu:expr, $action:expr, $params:expr, $type:tt) => {{ let url = make_url($dayu, $action, $params)?; $dayu .client .get(url) .send() .and_then(|response| response.json::<DayuResponse>()) .await .map_err(Into::into) .and_then(|json_response| match json_response { DayuResponse::$type(v) => Ok(v), DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)), _ => unreachable!(), }) }}; } impl Dayu { /// construct new dayu sdk instance pub fn new() -> Self { Self::default() } /// set dayu sdk's access key pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self { self.access_key = access_key.into(); self } /// set dayu sdk's access secret pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self { self.access_secret = access_secret.into(); self } /// set dayu sdk's sign name pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self { self.sign_name = sign_name.into(); self } /// start send sms /// phones: support multi phone number /// template_code: SMS TEMPLATE CODE /// template_param: SMS TEMPLATE PARAMS as JSON pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>( &self, phones: &[P], template_code: T, template_param: Option<&Value>, ) -> Result<DayuSendResponse, DayuError> { let phone_numbers = phones .iter() .map(AsRef::as_ref) .collect::<Vec<&str>>() .join(","); let template_param = template_param .map(|v| serde_json::to_string(v).unwrap()) .unwrap_or_else(String::new); do_request!( self, "SendSms", &[ ("TemplateCode", template_code.as_ref()), ("PhoneNumbers", &phone_numbers), ("TemplateParam", &template_param), ], Send ) } /// query sms send detail pub async fn sms_query( &self, phone_number: &str, biz_id: Option<&str>, send_date: NaiveDate, current_page: u8, page_size: u8, ) -> Result<DayuQueryResponse, DayuError> { if page_size > MAX_PAGE_SIZE { return Err(DayuError::PageTooLarge(page_size)); } let send_date = send_date.format("%Y%m%d").to_string(); let page_size = page_size.to_string(); let current_page = current_page.to_string(); do_request!( self, "QuerySendDetails", &[ ("PhoneNumber", phone_number), ("BizId", biz_id.unwrap_or("")), ("SendDate", &send_date), ("PageSize", &page_size), ("CurrentPage", &current_page), ], Query ) } }
se { pub bi
identifier_name
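`make_url` in the record above implements Aliyun's POP-style request signing: parameters are sorted via the `BTreeMap`, percent-encoded, folded into a canonical `GET&%2F&...` string, and signed with HMAC-SHA1 keyed by `access_secret + "&"`, with the base64 result appended as the `Signature` parameter. A condensed sketch of just that signing step, using the same crates as the code above (`canonical_string` and `sign` are illustrative names):

use std::collections::BTreeMap;

use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer};

/// Mirrors `make_url`: each sorted `key=value` pair is encoded once,
/// then the whole pair is encoded again into the string-to-sign.
fn canonical_string(params: &BTreeMap<&str, String>) -> String {
    let query = params
        .iter()
        .map(|(key, value)| format!("{}={}", key, urlencoding::encode(value)))
        .map(|pair| urlencoding::encode(&pair).into_owned())
        .collect::<Vec<_>>()
        .join(&urlencoding::encode("&"));
    format!("GET&%2F&{}", query)
}

/// HMAC-SHA1 over the canonical string, keyed by `secret + "&"`.
fn sign(secret: &str, string_to_sign: &str) -> Result<String, openssl::error::ErrorStack> {
    let key = PKey::hmac(format!("{}&", secret).as_bytes())?;
    let mut signer = Signer::new(MessageDigest::sha1(), &key)?;
    signer.update(string_to_sign.as_bytes())?;
    Ok(base64::encode(signer.sign_to_vec()?))
}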
lib.rs
// The MIT License (MIT) // Copyright (c) 2018 Matrix.Zhang <[email protected]> // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. //! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'. //! //! ## Basic usage //! //! ```rust //!use dayu::Dayu; //!use serde_json::json; //! //!let dayu = Dayu::new() //! .set_access_key("access_key") //! .set_access_secret("access_secret") //! .set_sign_name("阿里云测试短信"); //!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap(); //! ``` use std::{ collections::BTreeMap, convert::AsRef, fmt::{self, Display, Formatter}, }; use chrono::{NaiveDate, Utc}; use futures_util::TryFutureExt; use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; use reqwest::Client; use serde::{Deserialize, Serialize}; use serde_json::Value; use textnonce::TextNonce; use thiserror::Error; use url::Url; static MAX_PAGE_SIZE: u8 = 50; static REQUEST_FORMAT: &str = "JSON"; static SIGN_METHOD: &str = "HMAC-SHA1"; static SIGNATURE_VERSION: &str = "1.0"; static VERSION: &str = "2017-05-25"; #[derive(Debug, Error)] pub enum DayuError { #[error("config of '{0}' absence")] ConfigAbsence(&'static str), #[error("dayu response error: {0}")] Dayu(DayuFailResponse), #[error("openssl error: {0}")] Openssl(#[from] openssl::error::ErrorStack), #[error("page size '{0}' too large, max is 50")] PageTooLarge(u8), #[error("reqwest error: {0}")] Reqwest(#[from] reqwest::Error), #[error("serde_json error: {0}")] SerdeJson(#[from] serde_json::error::Error), #[error("std io error: {0}")] Stdio(#[from] std::io::Error), #[error("textnonce error: {0}")] TextNonce(String), #[error("url parse error: {0}")] UrlParse(#[from] url::ParseError), } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuSendResponse { pub biz_id: String, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryDetail { pub phone_num: String, pub send_date: String, pub send_status: u8, pub receive_date: String, pub template_code: String, pub content: String, pub err_code: String, } #[derive(Debug, Deserialize)] pub struct DayuQueryDetails { #[serde(rename = "SmsSendDetailDTO")] pub inner: Vec<DayuQueryDetail>, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryResponse { pub total_count: i32, pub total_page: Option<u8>, #[serde(rename = "SmsSendDetailDTOs")] pub details: Option<DayuQueryDetails>, }
#[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] pub struct DayuFailResponse { pub code: String, pub message: String, pub request_id: String, } impl Display for DayuFailResponse { fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!(f, "{}", serde_json::to_string_pretty(self).unwrap()) } } #[derive(Debug, Deserialize)] #[serde(untagged)] pub enum DayuResponse { Send(DayuSendResponse), Query(DayuQueryResponse), Fail(DayuFailResponse), } #[derive(Default, Clone)] pub struct Dayu { client: Client, access_key: String, access_secret: String, sign_name: String, } fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> { if dayu.access_key.is_empty() { return Err(DayuError::ConfigAbsence("access_key")); } if dayu.access_secret.is_empty() { return Err(DayuError::ConfigAbsence("access_secret")); } if dayu.sign_name.is_empty() { return Err(DayuError::ConfigAbsence("sign_name")); } let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(); TextNonce::sized(32) .map_err(DayuError::TextNonce) .map(|v| v.to_string()) .and_then(|text_nonce| { let mut map = BTreeMap::new(); map.insert("Format", REQUEST_FORMAT); map.insert("AccessKeyId", &dayu.access_key); map.insert("SignatureMethod", SIGN_METHOD); map.insert("SignatureNonce", &text_nonce); map.insert("SignatureVersion", SIGNATURE_VERSION); map.insert("Timestamp", &timestamp); map.insert("Action", action); map.insert("SignName", &dayu.sign_name); map.insert("Version", VERSION); for &(name, value) in params { if !value.is_empty() { map.insert(name, value); } } let mut forms = map .into_iter() .map(|(key, value)| (key, urlencoding::encode(value).into_owned())) .collect::<Vec<(&str, String)>>(); let mut wait_sign = String::from("GET&%2F&"); wait_sign.push_str( &forms .iter() .fold(vec![], |mut wait_sign, &(key, ref value)| { wait_sign .push(urlencoding::encode(&format!("{}={}", key, value)).into_owned()); wait_sign }) .join(&urlencoding::encode("&")), ); PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes()) .and_then(|pkey| { Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| { signer .update(wait_sign.as_bytes()) .and_then(|_| signer.sign_to_vec()) }) }) .map_err(Into::into) .map(|ref signature| { forms.push(( "Signature", urlencoding::encode(&base64::encode(signature)).into_owned(), )) }) .and_then(|_| { Url::parse("https://dysmsapi.aliyuncs.com") .map_err(Into::into) .map(|mut url| { url.set_query(Some( &forms .into_iter() .map(|(key, value)| format!("{}={}", key, value)) .collect::<Vec<String>>() .join("&"), )); url }) }) }) } macro_rules!
do_request { ($dayu:expr, $action:expr, $params:expr, $type:tt) => {{ let url = make_url($dayu, $action, $params)?; $dayu .client .get(url) .send() .and_then(|response| response.json::<DayuResponse>()) .await .map_err(Into::into) .and_then(|json_response| match json_response { DayuResponse::$type(v) => Ok(v), DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)), _ => unreachable!(), }) }}; } impl Dayu { /// construct new dayu sdk instance pub fn new() -> Self { Self::default() } /// set dayu sdk's access key pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self { self.access_key = access_key.into(); self } /// set dayu sdk's access secret pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self { self.access_secret = access_secret.into(); self } /// set dayu sdk's sign name pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self { self.sign_name = sign_name.into(); self } /// start send sms /// phones: support multi phone number /// template_code: SMS TEMPLATE CODE /// template_param: SMS TEMPLATE PARAMS as JSON pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>( &self, phones: &[P], template_code: T, template_param: Option<&Value>, ) -> Result<DayuSendResponse, DayuError> { let phone_numbers = phones .iter() .map(AsRef::as_ref) .collect::<Vec<&str>>() .join(","); let template_param = template_param .map(|v| serde_json::to_string(v).unwrap()) .unwrap_or_else(String::new); do_request!( self, "SendSms", &[ ("TemplateCode", template_code.as_ref()), ("PhoneNumbers", &phone_numbers), ("TemplateParam", &template_param), ], Send ) } /// query sms send detail pub async fn sms_query( &self, phone_number: &str, biz_id: Option<&str>, send_date: NaiveDate, current_page: u8, page_size: u8, ) -> Result<DayuQueryResponse, DayuError> { if page_size > MAX_PAGE_SIZE { return Err(DayuError::PageTooLarge(page_size)); } let send_date = send_date.format("%Y%m%d").to_string(); let page_size = page_size.to_string(); let current_page = current_page.to_string(); do_request!( self, "QuerySendDetails", &[ ("PhoneNumber", phone_number), ("BizId", biz_id.unwrap_or("")), ("SendDate", &send_date), ("PageSize", &page_size), ("CurrentPage", &current_page), ], Query ) } }
random_line_split
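Complementing the crate-level `sms_send` example, a usage sketch for the query side shown above (inside any async context; the credentials and phone number are placeholders):

use chrono::NaiveDate;
use dayu::Dayu;

async fn query_example() -> Result<(), Box<dyn std::error::Error>> {
    let dayu = Dayu::new()
        .set_access_key("access_key")
        .set_access_secret("access_secret")
        .set_sign_name("阿里云测试短信");
    // Fetch page 1 with 10 records; page_size must stay within MAX_PAGE_SIZE (50).
    let date = NaiveDate::from_ymd_opt(2018, 1, 1).expect("valid date");
    let response = dayu.sms_query("138XXXXXXXX", None, date, 1, 10).await?;
    println!("total sent: {}", response.total_count);
    Ok(())
}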
lib.rs
// The MIT License (MIT) // Copyright (c) 2018 Matrix.Zhang <[email protected]> // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. //! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'. //! //! ## Basic usage //! //! ```rust //!use dayu::Dayu; //!use serde_json::json; //! //!let dayu = Dayu::new() //! .set_access_key("access_key") //! .set_access_secret("access_secret") //! .set_sign_name("阿里云测试短信"); //!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap(); //! ``` use std::{ collections::BTreeMap, convert::AsRef, fmt::{self, Display, Formatter}, }; use chrono::{NaiveDate, Utc}; use futures_util::TryFutureExt; use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; use reqwest::Client; use serde::{Deserialize, Serialize}; use serde_json::Value; use textnonce::TextNonce; use thiserror::Error; use url::Url; static MAX_PAGE_SIZE: u8 = 50; static REQUEST_FORMAT: &str = "JSON"; static SIGN_METHOD: &str = "HMAC-SHA1"; static SIGNATURE_VERSION: &str = "1.0"; static VERSION: &str = "2017-05-25"; #[derive(Debug, Error)] pub enum DayuError { #[error("config of '{0}' absence")] ConfigAbsence(&'static str), #[error("dayu response error: {0}")] Dayu(DayuFailResponse), #[error("openssl error: {0}")] Openssl(#[from] openssl::error::ErrorStack), #[error("page size '{0}' too large, max is 50")] PageTooLarge(u8), #[error("reqwest error: {0}")] Reqwest(#[from] reqwest::Error), #[error("serde_json error: {0}")] SerdeJson(#[from] serde_json::error::Error), #[error("std io error: {0}")] Stdio(#[from] std::io::Error), #[error("textnonce error: {0}")] TextNonce(String), #[error("url parse error: {0}")] UrlParse(#[from] url::ParseError), } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuSendResponse { pub biz_id: String, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryDetail { pub phone_num: String, pub send_date: String, pub send_status: u8, pub receive_date: String, pub template_code: String, pub content: String, pub err_code: String, } #[derive(Debug, Deserialize)] pub struct DayuQueryDetails { #[serde(rename = "SmsSendDetailDTO")] pub inner: Vec<DayuQueryDetail>, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryResponse { pub total_count: i32, pub total_page: Option<u8>, #[serde(rename = "SmsSendDetailDTOs")] pub details: Option<DayuQueryDetails>, } #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] pub struct 
DayuFailResponse { pub code: String, pub message: String, pub request_id: String, } impl Display for DayuFailResponse { fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!(f, "{}", serde_json::to_string_pretty(self).unwrap()) } } #[derive(Debug, Deserialize)] #[serde(untagged)] pub enum DayuResponse { Send(DayuSendResponse), Query(DayuQueryResponse), Fail(DayuFailResponse), } #[derive(Default, Clone)] pub struct Dayu { client: Client, access_key: String, access_secret: String, sign_name: String, } fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> { if dayu.access_key.is_empty() { return Err(DayuError::ConfigAbsence("access_key")); } if dayu.access_secret.is_empty() { return Err(DayuError::ConfigAbsence("access_secret")); } if dayu.sign_name.is_empty() { return Err(DayuError::ConfigAbsence("sign_name")); } let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(); TextNonce::sized(32) .map_err(DayuError::TextNonce) .map(|v| v.to_string()) .and_then(|text_nonce| { let mut map = BTreeMap::new(); map.insert("Format", REQUEST_FORMAT); map.insert("AccessKeyId", &dayu.access_key); map.insert("SignatureMethod", SIGN_METHOD); map.insert("SignatureNonce", &text_nonce); map.insert("SignatureVersion", SIGNATURE_VERSION); map.insert("Timestamp", &timestamp); map.insert("Action", action); map.insert("SignName", &dayu.sign_name); map.insert("Version", VERSION); for &(name, value) in params { if !value.is_empty() { map.insert(name, value); } } let mut forms = map .into_iter() .map(|(key, value)| (key, urlencoding::encode(value).into_owned())) .collect::<Vec<(&str, String)>>(); let mut wait_sign = String::from("GET&%2F&"); wait_sign.push_str( &forms .iter() .fold(vec![], |mut wait_sign, &(key, ref value)| { wait_sign .push(urlencoding::encode(&format!("{}={}", key, value)).into_owned()); wait_sign }) .join(&urlencoding::encode("&")), ); PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes()) .and_then(|pkey| { Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| { signer .update(wait_sign.as_bytes()) .and_then(|_| signer.sign_to_vec()) }) }) .map_err(Into::into) .map(|ref signature| { forms.push(( "Signature", urlencoding::encode(&base64::encode(signature)).into_owned(), )) }) .and_then(|_| { Url::parse("https://dysmsapi.aliyuncs.com") .map_err(Into::into) .map(|mut url| { url.set_query(Some( &forms .into_iter() .map(|(key, value)| format!("{}={}", key, value)) .collect::<Vec<String>>() .join("&"), )); url }) }) }) } macro_rules!
do_request { ($dayu:expr, $action:expr, $params:expr, $type:tt) => {{ let url = make_url($dayu, $action, $params)?; $dayu .client .get(url) .send() .and_then(|response| response.json::<DayuResponse>()) .await .map_err(Into::into) .and_then(|json_response| match json_response { DayuResponse::$type(v) => Ok(v), DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)), _ => unreachable!(), }) }}; } impl Dayu { /// construct new dayu sdk instance pub fn new() -> Self { Self::default() } /// set dayu sdk's access key pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self { self.access_key = access_key.into(); self } /// set dayu sdk's access secret pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self { self.access_secret = access_secret.into(); self } /// set dayu sdk's sign name pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self { self.sign_name = sign_name.into(); self } /// start send sms /// phones: support multi phone number /// template_code: SMS TEMPLATE CODE /// template_param: SMS TEMPLATE PARAMS as JSON pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>( &self, phones: &[P], template_code: T, template_param: Option<&Value>, ) -> Result<DayuSendResponse, DayuError> { let phone_numbers = phones .iter() .map(AsRef::as_ref) .collect::<Vec<&str>>() .join(","); let template_param = template_param .map(|v| serde_json::to_string(v).unwrap()) .unwrap_or_else(String::new); do_request!( self, "SendSms", &[ ("TemplateCode", template_code.as_ref()), ("PhoneNumbers", &phone_numbers), ("TemplateParam", &template_param), ], Send ) } /// query sms send detail pub async fn sms_query( &self, phone_number: &str, biz_id: Option<&str>, send_date: NaiveDate, current_page: u8, page_size: u8, ) -> Result<DayuQueryResponse, DayuError> { if
} }
page_size > MAX_PAGE_SIZE { return Err(DayuError::PageTooLarge(page_size)); } let send_date = send_date.format("%Y%m%d").to_string(); let page_size = page_size.to_string(); let current_page = current_page.to_string(); do_request!( self, "QuerySendDetails", &[ ("PhoneNumber", phone_number), ("BizId", biz_id.unwrap_or("")), ("SendDate", &send_date), ("PageSize", &page_size), ("CurrentPage", &current_page), ], Query )
identifier_body
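A design note on the `DayuResponse` enum above: with `#[serde(untagged)]`, serde tries the variants in declaration order, so a reply is matched as `Send`, then `Query`, then `Fail`, based purely on which fields deserialize. A self-contained sketch of that behavior with minimal stand-in shapes (field names simplified from the PascalCase originals):

use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
struct Sent { biz_id: String }

#[derive(Debug, Deserialize, PartialEq)]
struct Failed { code: String, message: String }

/// First variant whose fields all deserialize wins; unknown fields are ignored.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(untagged)]
enum Response {
    Send(Sent),
    Fail(Failed),
}

fn main() {
    let ok: Response = serde_json::from_str(r#"{"biz_id":"900123"}"#).unwrap();
    assert_eq!(ok, Response::Send(Sent { biz_id: "900123".into() }));

    let err: Response = serde_json::from_str(
        r#"{"code":"isv.MOBILE_NUMBER_ILLEGAL","message":"bad phone","request_id":"x"}"#,
    )
    .unwrap();
    assert!(matches!(err, Response::Fail(_)));
}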
lib.rs
// The MIT License (MIT) // Copyright (c) 2018 Matrix.Zhang <[email protected]> // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. //! This library supports Alibaba's Dayu SMS SDK version of '2017-05-25'. //! //! ## Basic usage //! //! ```rust //!use dayu::Dayu; //!use serde_json::json; //! //!let dayu = Dayu::new() //! .set_access_key("access_key") //! .set_access_secret("access_secret") //! .set_sign_name("阿里云测试短信"); //!dayu.sms_send(&["138XXXXXXXX"], "SMS_123456", Some(&json!({"customer": "Rust"}))).await.unwrap(); //! ``` use std::{ collections::BTreeMap, convert::AsRef, fmt::{self, Display, Formatter}, }; use chrono::{NaiveDate, Utc}; use futures_util::TryFutureExt; use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer}; use reqwest::Client; use serde::{Deserialize, Serialize}; use serde_json::Value; use textnonce::TextNonce; use thiserror::Error; use url::Url; static MAX_PAGE_SIZE: u8 = 50; static REQUEST_FORMAT: &str = "JSON"; static SIGN_METHOD: &str = "HMAC-SHA1"; static SIGNATURE_VERSION: &str = "1.0"; static VERSION: &str = "2017-05-25"; #[derive(Debug, Error)] pub enum DayuError { #[error("config of '{0}' absence")] ConfigAbsence(&'static str), #[error("dayu response error: {0}")] Dayu(DayuFailResponse), #[error("openssl error: {0}")] Openssl(#[from] openssl::error::ErrorStack), #[error("page size '{0}' too large, max is 50")] PageTooLarge(u8), #[error("reqwest error: {0}")] Reqwest(#[from] reqwest::Error), #[error("serde_json error: {0}")] SerdeJson(#[from] serde_json::error::Error), #[error("std io error: {0}")] Stdio(#[from] std::io::Error), #[error("textnonce error: {0}")] TextNonce(String), #[error("url parse error: {0}")] UrlParse(#[from] url::ParseError), } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuSendResponse { pub biz_id: String, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryDetail { pub phone_num: String, pub send_date: String, pub send_status: u8, pub receive_date: String, pub template_code: String, pub content: String, pub err_code: String, } #[derive(Debug, Deserialize)] pub struct DayuQueryDetails { #[serde(rename = "SmsSendDetailDTO")] pub inner: Vec<DayuQueryDetail>, } #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] pub struct DayuQueryResponse { pub total_count: i32, pub total_page: Option<u8>, #[serde(rename = "SmsSendDetailDTOs")] pub details: Option<DayuQueryDetails>, } #[derive(Debug, Deserialize, Serialize)] #[serde(rename_all = "PascalCase")] pub struct 
DayuFailResponse { pub code: String, pub message: String, pub request_id: String, } impl Display for DayuFailResponse { fn fmt(&self, f: &mut Formatter) -> fmt::Result { write!(f, "{}", serde_json::to_string_pretty(self).unwrap()) } } #[derive(Debug, Deserialize)] #[serde(untagged)] pub enum DayuResponse { Send(DayuSendResponse), Query(DayuQueryResponse), Fail(DayuFailResponse), } #[derive(Default, Clone)] pub struct Dayu { client: Client, access_key: String, access_secret: String, sign_name: String, } fn make_url(dayu: &Dayu, action: &str, params: &[(&str, &str)]) -> Result<Url, DayuError> { if dayu.access_key.is_empty() { return Err(DayuError::ConfigAbsence("access_key")); } if dayu.access_secret.is_empty() { ret
u.sign_name.is_empty() { return Err(DayuError::ConfigAbsence("sign_name")); } let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%SZ").to_string(); TextNonce::sized(32) .map_err(DayuError::TextNonce) .map(|v| v.to_string()) .and_then(|text_nonce| { let mut map = BTreeMap::new(); map.insert("Format", REQUEST_FORMAT); map.insert("AccessKeyId", &dayu.access_key); map.insert("SignatureMethod", SIGN_METHOD); map.insert("SignatureNonce", &text_nonce); map.insert("SignatureVersion", SIGNATURE_VERSION); map.insert("Timestamp", &timestamp); map.insert("Action", action); map.insert("SignName", &dayu.sign_name); map.insert("Version", VERSION); for &(name, value) in params { if!value.is_empty() { map.insert(name, value); } } let mut forms = map .into_iter() .map(|(key, value)| (key, urlencoding::encode(value).into_owned())) .collect::<Vec<(&str, String)>>(); let mut wait_sign = String::from("GET&%2F&"); wait_sign.push_str( &forms .iter() .fold(vec![], |mut wait_sign, &(key, ref value)| { wait_sign .push(urlencoding::encode(&format!("{}={}", key, value)).into_owned()); wait_sign }) .join(&urlencoding::encode("&")), ); PKey::hmac(format!("{}&", &dayu.access_secret).as_bytes()) .and_then(|pkey| { Signer::new(MessageDigest::sha1(), &pkey).and_then(|mut signer| { signer .update(wait_sign.as_bytes()) .and_then(|_| signer.sign_to_vec()) }) }) .map_err(Into::into) .map(|ref signature| { forms.push(( "Signature", urlencoding::encode(&base64::encode(signature)).into_owned(), )) }) .and_then(|_| { Url::parse("https://dysmsapi.aliyuncs.com") .map_err(Into::into) .map(|mut url| { url.set_query(Some( &forms .into_iter() .map(|(key, value)| format!("{}={}", key, value)) .collect::<Vec<String>>() .join("&"), )); url }) }) }) } macro_rules! do_request { ($dayu:expr, $action:expr, $params:expr, $type:tt) => {{ let url = make_url($dayu, $action, $params)?; $dayu .client .get(url) .send() .and_then(|response| response.json::<DayuResponse>()) .await .map_err(Into::into) .and_then(|json_response| match json_response { DayuResponse::$type(v) => Ok(v), DayuResponse::Fail(fail) => Err(DayuError::Dayu(fail)), _ => unreachable!(), }) }}; } impl Dayu { /// construct new dayu sdk instance pub fn new() -> Self { Self::default() } /// set dayu sdk's access key pub fn set_access_key(mut self, access_key: impl Into<String>) -> Self { self.access_key = access_key.into(); self } /// set dayu sdk's access secret pub fn set_access_secret(mut self, access_secret: impl Into<String>) -> Self { self.access_secret = access_secret.into(); self } /// set dayu sdk's sign name pub fn set_sign_name(mut self, sign_name: impl Into<String>) -> Self { self.sign_name = sign_name.into(); self } /// start send sms /// phones: support multi phone number /// template_code: SMS TEMPLATE CODE /// template_param: SMS TEMPLATE PARAMS as JSON pub async fn sms_send<P: AsRef<str>, T: AsRef<str>>( &self, phones: &[P], template_code: T, template_param: Option<&Value>, ) -> Result<DayuSendResponse, DayuError> { let phone_numbers = phones .iter() .map(AsRef::as_ref) .collect::<Vec<&str>>() .join(","); let template_param = template_param .map(|v| serde_json::to_string(v).unwrap()) .unwrap_or_else(String::new); do_request!( self, "SendSms", &[ ("TemplateCode", template_code.as_ref()), ("PhoneNumbers", &phone_numbers), ("TemplateParam", &template_param), ], Send ) } /// query sms send detail pub async fn sms_query( &self, phone_number: &str, biz_id: Option<&str>, send_date: NaiveDate, current_page: u8, page_size: u8, ) -> Result<DayuQueryResponse, DayuError> { if 
page_size > MAX_PAGE_SIZE { return Err(DayuError::PageTooLarge(page_size)); } let send_date = send_date.format("%Y%m%d").to_string(); let page_size = page_size.to_string(); let current_page = current_page.to_string(); do_request!( self, "QuerySendDetails", &[ ("PhoneNumber", phone_number), ("BizId", biz_id.unwrap_or("")), ("SendDate", &send_date), ("PageSize", &page_size), ("CurrentPage", &current_page), ], Query ) } }
urn Err(DayuError::ConfigAbsence("access_secret")); } if day
conditional_block
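The record above centers on `make_url`, which implements the Aliyun POP signature scheme: sort the parameters, percent-encode them into a canonical `GET&%2F&...` string, sign that with HMAC-SHA1 keyed on the access secret plus a trailing `&`, and base64 the digest. Below is a minimal standalone sketch of just the signing step, using the same `openssl` and `base64` crates as the sample; `sign_query` and its arguments are illustrative names, not part of the crate.

```rust
use openssl::{hash::MessageDigest, pkey::PKey, sign::Signer};

// HMAC-SHA1 over an already-canonicalized query string, base64-encoded.
// Note the trailing '&' appended to the secret, exactly as in `make_url`.
fn sign_query(canonical: &str, access_secret: &str) -> Result<String, openssl::error::ErrorStack> {
    let key = PKey::hmac(format!("{}&", access_secret).as_bytes())?;
    let mut signer = Signer::new(MessageDigest::sha1(), &key)?;
    signer.update(canonical.as_bytes())?;
    Ok(base64::encode(signer.sign_to_vec()?))
}
```

Percent-encoding happens twice in the scheme (once per key/value pair, once over the joined pairs), which is why `make_url` encodes the already-encoded `key=value` strings before joining them with an encoded `&`.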
main.rs
#![allow(dead_code)] #![allow(unused_variables)] use std::collections::{HashMap, HashSet}; use std::io::stdin; use std::mem; mod pm; // const MEANING_OF_LIFE: u16 = 456; // no fixed address fn main() { // primitive_types(); // operators(); // scope_and_shadowing(); // println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE) // if_statement(); // while_and_loop(); // match_statement(); // for_loop(); // combination_lock(); // structures(); // enums(); // unions(); // process_value(); // option_t() // array(); // slices(); // tuples(); // pm::pattern_matching(); // generics(); // vectors(); // hashmaps(); // hashsets(); // functions(); // methods(); closures(); // h_o_functions(); } fn h_o_functions() { } fn closures() { let sh = say_hello; sh(); let plus_one = |x:i32| -> i32 {x+1}; let a = 6; println!("{} +1 = {}", a, plus_one(a)); let plus_two = |x:isize| { let mut z = x; z+=2; z }; println!("{} +2 = {}", 3, plus_two(3)); } fn say_hello() {println!("Hello")} fn methods() { struct Point { x: f64, y: f64 } struct Line { start: Point, end: Point, } impl Line { fn len(&self) -> f64 { let dx = self.start.x - self.end.x; let dy = self.start.y - self.end.y; (dx*dx + dy*dy).sqrt() } } let p = Point {x: 3.0, y: 4.0}; let p2 = Point {x: 5.0, y: 10.0}; let myline = Line { start: p, end: p2}; println!("length = {}", myline.len()) } fn functions() { print_value(33); let mut z = 1; increase1(&mut z); println!("z is {}", z); let a = 3; let b = 5; let p = product(a, b); } fn product(x: i32, y: i32) -> i32 { // return x*y; x * y } fn increase1(x: &mut i32) { *x += 1; } fn print_value(x: i32) { println!("x is {}", x) } fn hashsets() { let mut greeks = HashSet::new(); greeks.insert("alfa"); greeks.insert("delta"); greeks.insert("gamma"); greeks.insert("delta"); println!("{:?}", greeks); let added_delta = greeks.insert("delta"); if added_delta { println!("We added delta! hooray!") } let added_vega = greeks.insert("vega"); if added_vega { println!("We added vega! 
hooray!") } if!greeks.contains("kappa") { println!("We don't have kappa") } } fn hashmaps() { let mut shapes = HashMap::new(); shapes.insert(String::from("triangle"), 3); shapes.insert(String::from("square"), 4); println!("hashmaps: {:?}", shapes); println!("a square has {} sides", shapes["square"]); shapes.insert("square".into(), 5); println!("{:?}", shapes); for (key, value) in &shapes { println!("key: {}, value: {}", key, value); } shapes.entry("circle".into()).or_insert(1); { let actual = shapes.entry("circle".into()).or_insert(2); *actual = 0; } println!("{:?}", shapes); let _1_5: HashSet<_> = (1..=5).collect(); let _6_10: HashSet<_> = (6..=10).collect(); let _1_10: HashSet<_> = (1..=10).collect(); let _2_8: HashSet<_> = (2..=8).collect(); //subset } fn vectors() { let mut a = Vec::new(); a.push(1); a.push(2); a.push(3); println!("a = {:?}", a); a.push(44); println!("a = {:?}", a); //usize isize let idx: usize = 2; println!("a[2] = {}", a[idx]); match a.get(6) { Some(x) => println!("a[6] = {}", x), None => println!("error, no such element") } for x in &a { println!("{}", x) } a.push(77); println!("{:?}", a); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); while let Some(x) = a.pop() { println!("{}", x) } } fn generics() { struct Point<T, V> { x: T, y: V, } struct Point1<T> { x: T, y: T, } struct Line<T> { start: Point1<T>, end: Point1<T>, } let a: Point<u16, i32> = Point { x: 0, y: 0 }; let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 }; let c: Point<i32, f64> = Point { x: 3, y: 5.0 }; let d: Point<i32, f64> = Point { x: 1, y: 4.5 }; let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 }; let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 }; let myline = Line { start: x, end: y }; } fn tuples() { let x = 3; let y = 4; let sp = sum_and_product(x, y); let (sum, product) = sp; println!("sp = {:?}", (sum, product)); println!("{0} + {1} = {2}", x, y, sum); println!("{0} + {1} = {2}", x, y, product); let sp2 = sum_and_product(4, 7); let combined = (sp, sp2); println!("{1:?}, {2:?}, {0:?} ", combined.0, combined.1, combined); println!("{1:?}, {2:?}, {0:?} ", combined.0, (combined.0).0, (combined .0).1); let ((c, d), (e, f)) = combined; println!("{},{},{},{}", c, d, e, f); let foo = (true, 42.0, -1i8); println!("{:?}", foo); let meaning = 42; println!("{:?}", meaning); } fn sum_and_product(x: i32, y: i32) -> (i32, i32) { (x + y, x * y) } fn use_slices(slice: &mut [i32]) { println!("first elem = {}, len = {}", slice[0], slice.len()); slice[0] = 4444; } fn slices() { let mut data = [1, 2, 3, 4, 5]; use_slices(&mut data[1..4]); use_slices(&mut data); println!("{:?}", data) } fn array() { let mut a: [i32; 5] = [1, 2, 3, 4, 5, ]; println!("a has {} elements, first is {}", a.len(), a[0]); a[0] = 321; println!("a has {} elements, first is {}", a.len(), a[0]); println!("{:?}", a); if a == [321, 2, 3, 4, 5] { println!("match"); } let b = [1u64; 10]; for i in 0..b.len() { println!("{}", b[i]) }; println!("b took up {} bytes", mem::size_of_val(&b)); println!("b {:?}", b); let mtx: [[f64; 3]; 2] = [ [0.1, 0.2, 0.3], [0.4, 0.5, 0.6] ]; println!("mtx = {:?}", mtx); for i in 0..mtx.len() { for j in 0..mtx[i].len() { if i == j { print!("diagonal: {} ", mtx[i][j]); } } } println!(); } union IntOrFloat { i: i32, f: f32, } fn option_t() { let x = 3.0; 
let y = 1.0; //Option let result = if y!= 0.0 { Some(x / y) } else { None }; match result { Some(z) => { println!("{}/{} ={}", x, y, z) } None => println!("cannot divide by zero") } if let Some(z) = result { println!("result = {}", z) } } fn process_value(iof: IntOrFloat) { unsafe { match iof { IntOrFloat { i: 42 } => { println!("meaning of life value 42", ); } IntOrFloat { f } => { println!("value = {}", f) } } } } fn unions() { let mut iof = IntOrFloat { i: 123 }; iof.i = 234; let value = unsafe { iof.i }; println!("iof.i = {}", value); process_value(IntOrFloat { i: 5 }) } fn enums() { enum Color { Red, Green, Blue, RgbColor(u8, u8, u8), //tuple Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct } let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 }; match c { Color::Red => println!("r"), Color::Green => println!("g"), Color::Blue => println!("b"), Color::RgbColor(0, 0, 0) => println!("color: black"), Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b), Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } => println!("black"), Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b, c, d), } } fn structures() { struct Point { x: f64, y: f64, } let p = Point { x: 34.5, y: 4.0 }; println!("point p is at ({}, {})", p.x, p.y); let p2 = Point { x: 3.0, y: 4.0 }; struct Line { start: Point, end: Point, } let myline = Line { start: p, end: p2 }; } enum State { Locked, Failed, Unlocked, } fn combination_lock() { let code = String::from("1234"); let mut state = State::Locked; let mut entry = String::new(); println!(" string = {}, code = {}", entry, code); loop { match state { State::Locked => { let mut input = String::new(); match stdin().read_line(&mut input) { Ok(_) => entry.push_str(&input.trim_end()), Err(_) => continue, } if entry == code { state = State::Unlocked; continue; } if!code.starts_with(&entry) { state = State::Failed } } State::Failed => { println!("Failed"); entry.clear(); state = State::Locked; continue; } State::Unlocked => { println!("Unlocked"); return; } } } } fn match_statement() { let country_code = 44; let country = match country_code { 44 => "UK", 46 => "Sweden", 7 => "Russia", 1..=999 => "unknown", _ => "invalid", }; println!("the country code {} is {}", country_code, country) } fn for_loop() { for x in 1..11 { if x == 3 { continue; } if x == 8 { break; } println!("x = {}", x) } for (pos, y) in (30..42).enumerate() { println!("{} : {}", pos, y) } } fn while_and_loop() { let mut x = 1; while x < 1000 { x *= 2; if x == 64 { continue; } println!("x = {}", x) } let mut y = 1; loop { y *= 2; println!("y = {}", y); if y == 1 << 10 { break; } } } fn if_statement() { let temp = 25; if temp > 30 { println!("really hot outside") } else if temp < 10 { println!("really cold!") } else { println!("temperature is OK") } let day = if temp > 20 { "sunny" } else { "cloudy" }; println!("today is {}", day); println!( "is it {}", if temp > 20 { "hot" } else if temp < 10 { "cold" } else { "OK" } ); println!( "it is {}", if temp > 20 { if temp > 30 { "very hot" } else { "hot" } } else if temp < 10 { "cold" } else { "OK" } ) } fn
() { let a = 123; println!("a = {}", a); let a = 777; println!("a = {}", a); { let a = 888; let b = 456; println!("a = {}, b = {}", a, b); } } fn operators() { //arithmetic operators let mut a = 2 + 3 * 4; println!("{}", a); a += 1; a -= 2; println!("remainder of {}/{} = {}", a, 3, (a % 3)); // let mut a_cubed = i16::pow(a, 3); // let mut a_cubed = i32::pow( 4); let b = 2.5; let b_cubed = f64::powi(b, 3); println!("b = {}", b); let b_to_pi = f64::powf(b, std::f64::consts::PI); println!("{} cubed = {}", b, b_cubed); println!("{} pied = {}", b, b_to_pi); //bitwise rotate let c = 1 | 2; println!("1 | 2 = {}", c); let two_to_10 = 1 << 10; println!("2^10 = {}", two_to_10); //logical let pi_less_4 = std::f64::consts::PI < 4.0; let x = 5; let x_is_5 = x == 5; } fn primitive_types() { let a: u8 = 123; let b: i8 = -123; // println!("a = {}, b ={}", a, b); // a = 432; // b = 567; // b = 122; let mut c = 123456789; // 32-bit signed integer println!("c = {}, size = {} bytes", c, mem::size_of_val(&c)); c = -1; println!("c = {} after modification", c); let z: isize = 123456789; let size_of_z = mem::size_of_val(&z); println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8); let d: char = 'x'; println!("d = {}, size = {}", d, mem::size_of_val(&d)); let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64 println!("e = {}, size = {}", e, mem::size_of_val(&e)); let g = false; println!("g = {}, size = {}", g, mem::size_of_val(&g)); }
scope_and_shadowing
identifier_name
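The middle filled in above is just the identifier `scope_and_shadowing`. One property of shadowing that the sample's function does not show is that a `let` rebinding may change the variable's type, which `mut` alone cannot. A small hypothetical sketch:

```rust
fn main() {
    let input = "42";                        // &str
    let input: i32 = input.parse().unwrap(); // same name, new type
    {
        let input = input * 2;               // inner scope shadows the outer binding
        println!("inner: {}", input);        // 84
    }
    println!("outer: {}", input);            // 42, unchanged
}
```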
main.rs
#![allow(dead_code)] #![allow(unused_variables)] use std::collections::{HashMap, HashSet}; use std::io::stdin; use std::mem; mod pm; // const MEANING_OF_LIFE: u16 = 456; // no fixed address fn main() { // primitive_types(); // operators(); // scope_and_shadowing(); // println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE) // if_statement(); // while_and_loop(); // match_statement(); // for_loop(); // combination_lock(); // structures(); // enums(); // unions(); // process_value(); // option_t() // array(); // slices(); // tuples(); // pm::pattern_matching(); // generics(); // vectors(); // hashmaps(); // hashsets(); // functions(); // methods(); closures(); // h_o_functions(); } fn h_o_functions() { } fn closures() { let sh = say_hello; sh(); let plus_one = |x:i32| -> i32 {x+1}; let a = 6; println!("{} +1 = {}", a, plus_one(a)); let plus_two = |x:isize| { let mut z = x; z+=2; z }; println!("{} +2 = {}", 3, plus_two(3)); } fn say_hello() {println!("Hello")} fn methods() { struct Point { x: f64, y: f64 } struct Line { start: Point, end: Point, } impl Line { fn len(&self) -> f64 { let dx = self.start.x - self.end.x; let dy = self.start.y - self.end.y; (dx*dx + dy*dy).sqrt() } } let p = Point {x: 3.0, y: 4.0}; let p2 = Point {x: 5.0, y: 10.0}; let myline = Line { start: p, end: p2}; println!("length = {}", myline.len()) } fn functions() { print_value(33); let mut z = 1; increase1(&mut z); println!("z is {}", z); let a = 3; let b = 5; let p = product(a, b); } fn product(x: i32, y: i32) -> i32 { // return x*y; x * y } fn increase1(x: &mut i32) { *x += 1; } fn print_value(x: i32) { println!("x is {}", x) } fn hashsets() { let mut greeks = HashSet::new(); greeks.insert("alfa"); greeks.insert("delta"); greeks.insert("gamma"); greeks.insert("delta"); println!("{:?}", greeks); let added_delta = greeks.insert("delta"); if added_delta { println!("We added delta! hooray!") } let added_vega = greeks.insert("vega"); if added_vega { println!("We added vega! 
hooray!") } if!greeks.contains("kappa") { println!("We don't have kappa") } } fn hashmaps() { let mut shapes = HashMap::new(); shapes.insert(String::from("triangle"), 3); shapes.insert(String::from("square"), 4); println!("hashmaps: {:?}", shapes); println!("a square has {} sides", shapes["square"]); shapes.insert("square".into(), 5); println!("{:?}", shapes); for (key, value) in &shapes { println!("key: {}, value: {}", key, value); } shapes.entry("circle".into()).or_insert(1); { let actual = shapes.entry("circle".into()).or_insert(2); *actual = 0; } println!("{:?}", shapes); let _1_5: HashSet<_> = (1..=5).collect(); let _6_10: HashSet<_> = (6..=10).collect(); let _1_10: HashSet<_> = (1..=10).collect(); let _2_8: HashSet<_> = (2..=8).collect(); //subset } fn vectors() { let mut a = Vec::new(); a.push(1); a.push(2); a.push(3); println!("a = {:?}", a); a.push(44); println!("a = {:?}", a); //usize isize let idx: usize = 2; println!("a[2] = {}", a[idx]); match a.get(6) { Some(x) => println!("a[6] = {}", x), None => println!("error, no such element") } for x in &a { println!("{}", x) } a.push(77); println!("{:?}", a); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); while let Some(x) = a.pop() { println!("{}", x) } } fn generics() { struct Point<T, V> { x: T, y: V, } struct Point1<T> { x: T, y: T, } struct Line<T> { start: Point1<T>, end: Point1<T>, } let a: Point<u16, i32> = Point { x: 0, y: 0 }; let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 }; let c: Point<i32, f64> = Point { x: 3, y: 5.0 }; let d: Point<i32, f64> = Point { x: 1, y: 4.5 }; let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 }; let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 }; let myline = Line { start: x, end: y }; } fn tuples() { let x = 3; let y = 4; let sp = sum_and_product(x, y); let (sum, product) = sp; println!("sp = {:?}", (sum, product)); println!("{0} + {1} = {2}", x, y, sum); println!("{0} + {1} = {2}", x, y, product); let sp2 = sum_and_product(4, 7); let combined = (sp, sp2); println!("{1:?}, {2:?}, {0:?} ", combined.0, combined.1, combined); println!("{1:?}, {2:?}, {0:?} ", combined.0, (combined.0).0, (combined .0).1); let ((c, d), (e, f)) = combined; println!("{},{},{},{}", c, d, e, f); let foo = (true, 42.0, -1i8); println!("{:?}", foo); let meaning = 42; println!("{:?}", meaning); } fn sum_and_product(x: i32, y: i32) -> (i32, i32) { (x + y, x * y) } fn use_slices(slice: &mut [i32]) { println!("first elem = {}, len = {}", slice[0], slice.len()); slice[0] = 4444; } fn slices() { let mut data = [1, 2, 3, 4, 5]; use_slices(&mut data[1..4]); use_slices(&mut data); println!("{:?}", data) } fn array() { let mut a: [i32; 5] = [1, 2, 3, 4, 5, ]; println!("a has {} elements, first is {}", a.len(), a[0]); a[0] = 321; println!("a has {} elements, first is {}", a.len(), a[0]); println!("{:?}", a); if a == [321, 2, 3, 4, 5] { println!("match"); } let b = [1u64; 10]; for i in 0..b.len() { println!("{}", b[i]) }; println!("b took up {} bytes", mem::size_of_val(&b)); println!("b {:?}", b); let mtx: [[f64; 3]; 2] = [ [0.1, 0.2, 0.3], [0.4, 0.5, 0.6] ]; println!("mtx = {:?}", mtx); for i in 0..mtx.len() { for j in 0..mtx[i].len() { if i == j { print!("diagonal: {} ", mtx[i][j]); } } } println!(); } union IntOrFloat { i: i32, f: f32, } fn option_t() { let x = 3.0; 
let y = 1.0; //Option let result = if y!= 0.0 { Some(x / y) } else { None }; match result { Some(z) => { println!("{}/{} ={}", x, y, z) } None => println!("cannot divide by zero") } if let Some(z) = result { println!("result = {}", z) } } fn process_value(iof: IntOrFloat) { unsafe { match iof { IntOrFloat { i: 42 } => { println!("meaning of life value 42", ); } IntOrFloat { f } => { println!("value = {}", f) } } } } fn unions() { let mut iof = IntOrFloat { i: 123 }; iof.i = 234; let value = unsafe { iof.i }; println!("iof.i = {}", value); process_value(IntOrFloat { i: 5 }) } fn enums()
Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b, c, d), } } fn structures() { struct Point { x: f64, y: f64, } let p = Point { x: 34.5, y: 4.0 }; println!("point p is at ({}, {})", p.x, p.y); let p2 = Point { x: 3.0, y: 4.0 }; struct Line { start: Point, end: Point, } let myline = Line { start: p, end: p2 }; } enum State { Locked, Failed, Unlocked, } fn combination_lock() { let code = String::from("1234"); let mut state = State::Locked; let mut entry = String::new(); println!(" string = {}, code = {}", entry, code); loop { match state { State::Locked => { let mut input = String::new(); match stdin().read_line(&mut input) { Ok(_) => entry.push_str(&input.trim_end()), Err(_) => continue, } if entry == code { state = State::Unlocked; continue; } if!code.starts_with(&entry) { state = State::Failed } } State::Failed => { println!("Failed"); entry.clear(); state = State::Locked; continue; } State::Unlocked => { println!("Unlocked"); return; } } } } fn match_statement() { let country_code = 44; let country = match country_code { 44 => "UK", 46 => "Sweden", 7 => "Russia", 1..=999 => "unknown", _ => "invalid", }; println!("the country code {} is {}", country_code, country) } fn for_loop() { for x in 1..11 { if x == 3 { continue; } if x == 8 { break; } println!("x = {}", x) } for (pos, y) in (30..42).enumerate() { println!("{} : {}", pos, y) } } fn while_and_loop() { let mut x = 1; while x < 1000 { x *= 2; if x == 64 { continue; } println!("x = {}", x) } let mut y = 1; loop { y *= 2; println!("y = {}", y); if y == 1 << 10 { break; } } } fn if_statement() { let temp = 25; if temp > 30 { println!("really hot outside") } else if temp < 10 { println!("really cold!") } else { println!("temperature is OK") } let day = if temp > 20 { "sunny" } else { "cloudy" }; println!("today is {}", day); println!( "is it {}", if temp > 20 { "hot" } else if temp < 10 { "cold" } else { "OK" } ); println!( "it is {}", if temp > 20 { if temp > 30 { "very hot" } else { "hot" } } else if temp < 10 { "cold" } else { "OK" } ) } fn scope_and_shadowing() { let a = 123; println!("a = {}", a); let a = 777; println!("a = {}", a); { let a = 888; let b = 456; println!("a = {}, b = {}", a, b); } } fn operators() { //arithmetic operators let mut a = 2 + 3 * 4; println!("{}", a); a += 1; a -= 2; println!("remainder of {}/{} = {}", a, 3, (a % 3)); // let mut a_cubed = i16::pow(a, 3); // let mut a_cubed = i32::pow( 4); let b = 2.5; let b_cubed = f64::powi(b, 3); println!("b = {}", b); let b_to_pi = f64::powf(b, std::f64::consts::PI); println!("{} cubed = {}", b, b_cubed); println!("{} pied = {}", b, b_to_pi); //bitwise rotate let c = 1 | 2; println!("1 | 2 = {}", c); let two_to_10 = 1 << 10; println!("2^10 = {}", two_to_10); //logical let pi_less_4 = std::f64::consts::PI < 4.0; let x = 5; let x_is_5 = x == 5; } fn primitive_types() { let a: u8 = 123; let b: i8 = -123; // println!("a = {}, b ={}", a, b); // a = 432; // b = 567; // b = 122; let mut c = 123456789; // 32-bit signed integer println!("c = {}, size = {} bytes", c, mem::size_of_val(&c)); c = -1; println!("c = {} after modification", c); let z: isize = 123456789; let size_of_z = mem::size_of_val(&z); println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8); let d: char = 'x'; println!("d = {}, size = {}", d, mem::size_of_val(&d)); let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64 println!("e = {}, size = {}", e, mem::size_of_val(&e)); let g = false; println!("g = {}, size = {}", g, mem::size_of_val(&g)); }
{ enum Color { Red, Green, Blue, RgbColor(u8, u8, u8), //tuple Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct } let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 }; match c { Color::Red => println!("r"), Color::Green => println!("g"), Color::Blue => println!("b"), Color::RgbColor(0, 0, 0) => println!("color: black"), Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b), Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } => println!("black"),
identifier_body
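The body filled in for `fn enums()` is a single long `match` over variant payloads. The same idea is often factored into a method so that the `match` evaluates to a value instead of printing. A condensed, hypothetical variation on the sample's `Color` type:

```rust
enum Color {
    Red,
    RgbColor(u8, u8, u8),                                  // tuple variant
    Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, // struct variant
}

impl Color {
    // Match arms can bind payload fields and return a value.
    fn describe(&self) -> String {
        match self {
            Color::Red => "r".to_string(),
            Color::RgbColor(0, 0, 0) => "black".to_string(),
            Color::RgbColor(r, g, b) => format!("rgb({},{},{})", r, g, b),
            Color::Cmyk { black: 255, .. } => "black".to_string(),
            Color::Cmyk { cyan, magenta, yellow, black } => {
                format!("cmyk({},{},{},{})", cyan, magenta, yellow, black)
            }
        }
    }
}

fn main() {
    println!("{}", Color::RgbColor(1, 2, 3).describe());
}
```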
main.rs
#![allow(dead_code)] #![allow(unused_variables)] use std::collections::{HashMap, HashSet}; use std::io::stdin; use std::mem; mod pm; // const MEANING_OF_LIFE: u16 = 456; // no fixed address fn main() { // primitive_types(); // operators(); // scope_and_shadowing(); // println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE) // if_statement(); // while_and_loop(); // match_statement(); // for_loop(); // combination_lock(); // structures(); // enums(); // unions(); // process_value(); // option_t() // array(); // slices(); // tuples(); // pm::pattern_matching(); // generics(); // vectors(); // hashmaps(); // hashsets(); // functions(); // methods(); closures(); // h_o_functions(); } fn h_o_functions() { } fn closures() { let sh = say_hello; sh(); let plus_one = |x:i32| -> i32 {x+1}; let a = 6; println!("{} +1 = {}", a, plus_one(a)); let plus_two = |x:isize| { let mut z = x; z+=2; z }; println!("{} +2 = {}", 3, plus_two(3)); } fn say_hello() {println!("Hello")} fn methods() { struct Point { x: f64, y: f64 } struct Line { start: Point, end: Point, } impl Line { fn len(&self) -> f64 { let dx = self.start.x - self.end.x; let dy = self.start.y - self.end.y; (dx*dx + dy*dy).sqrt() } } let p = Point {x: 3.0, y: 4.0}; let p2 = Point {x: 5.0, y: 10.0}; let myline = Line { start: p, end: p2}; println!("length = {}", myline.len()) } fn functions() { print_value(33); let mut z = 1; increase1(&mut z); println!("z is {}", z); let a = 3; let b = 5; let p = product(a, b); } fn product(x: i32, y: i32) -> i32 { // return x*y; x * y } fn increase1(x: &mut i32) { *x += 1; } fn print_value(x: i32) { println!("x is {}", x) } fn hashsets() { let mut greeks = HashSet::new(); greeks.insert("alfa"); greeks.insert("delta"); greeks.insert("gamma"); greeks.insert("delta"); println!("{:?}", greeks); let added_delta = greeks.insert("delta"); if added_delta { println!("We added delta! hooray!") } let added_vega = greeks.insert("vega"); if added_vega { println!("We added vega! hooray!") } if!greeks.contains("kappa") { println!("We don't have kappa") } } fn hashmaps() { let mut shapes = HashMap::new(); shapes.insert(String::from("triangle"), 3); shapes.insert(String::from("square"), 4); println!("hashmaps: {:?}", shapes);
for (key, value) in &shapes { println!("key: {}, value: {}", key, value); } shapes.entry("circle".into()).or_insert(1); { let actual = shapes.entry("circle".into()).or_insert(2); *actual = 0; } println!("{:?}", shapes); let _1_5: HashSet<_> = (1..=5).collect(); let _6_10: HashSet<_> = (6..=10).collect(); let _1_10: HashSet<_> = (1..=10).collect(); let _2_8: HashSet<_> = (2..=8).collect(); //subset } fn vectors() { let mut a = Vec::new(); a.push(1); a.push(2); a.push(3); println!("a = {:?}", a); a.push(44); println!("a = {:?}", a); //usize isize let idx: usize = 2; println!("a[2] = {}", a[idx]); match a.get(6) { Some(x) => println!("a[6] = {}", x), None => println!("error, no such element") } for x in &a { println!("{}", x) } a.push(77); println!("{:?}", a); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); while let Some(x) = a.pop() { println!("{}", x) } } fn generics() { struct Point<T, V> { x: T, y: V, } struct Point1<T> { x: T, y: T, } struct Line<T> { start: Point1<T>, end: Point1<T>, } let a: Point<u16, i32> = Point { x: 0, y: 0 }; let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 }; let c: Point<i32, f64> = Point { x: 3, y: 5.0 }; let d: Point<i32, f64> = Point { x: 1, y: 4.5 }; let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 }; let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 }; let myline = Line { start: x, end: y }; } fn tuples() { let x = 3; let y = 4; let sp = sum_and_product(x, y); let (sum, product) = sp; println!("sp = {:?}", (sum, product)); println!("{0} + {1} = {2}", x, y, sum); println!("{0} + {1} = {2}", x, y, product); let sp2 = sum_and_product(4, 7); let combined = (sp, sp2); println!("{1:?}, {2:?}, {0:?} ", combined.0, combined.1, combined); println!("{1:?}, {2:?}, {0:?} ", combined.0, (combined.0).0, (combined .0).1); let ((c, d), (e, f)) = combined; println!("{},{},{},{}", c, d, e, f); let foo = (true, 42.0, -1i8); println!("{:?}", foo); let meaning = 42; println!("{:?}", meaning); } fn sum_and_product(x: i32, y: i32) -> (i32, i32) { (x + y, x * y) } fn use_slices(slice: &mut [i32]) { println!("first elem = {}, len = {}", slice[0], slice.len()); slice[0] = 4444; } fn slices() { let mut data = [1, 2, 3, 4, 5]; use_slices(&mut data[1..4]); use_slices(&mut data); println!("{:?}", data) } fn array() { let mut a: [i32; 5] = [1, 2, 3, 4, 5, ]; println!("a has {} elements, first is {}", a.len(), a[0]); a[0] = 321; println!("a has {} elements, first is {}", a.len(), a[0]); println!("{:?}", a); if a == [321, 2, 3, 4, 5] { println!("match"); } let b = [1u64; 10]; for i in 0..b.len() { println!("{}", b[i]) }; println!("b took up {} bytes", mem::size_of_val(&b)); println!("b {:?}", b); let mtx: [[f64; 3]; 2] = [ [0.1, 0.2, 0.3], [0.4, 0.5, 0.6] ]; println!("mtx = {:?}", mtx); for i in 0..mtx.len() { for j in 0..mtx[i].len() { if i == j { print!("diagonal: {} ", mtx[i][j]); } } } println!(); } union IntOrFloat { i: i32, f: f32, } fn option_t() { let x = 3.0; let y = 1.0; //Option let result = if y!= 0.0 { Some(x / y) } else { None }; match result { Some(z) => { println!("{}/{} ={}", x, y, z) } None => println!("cannot divide by zero") } if let Some(z) = result { println!("result = {}", z) } } fn process_value(iof: IntOrFloat) { unsafe { match iof { IntOrFloat { i: 42 } => { println!("meaning of life value 42", ); 
} IntOrFloat { f } => { println!("value = {}", f) } } } } fn unions() { let mut iof = IntOrFloat { i: 123 }; iof.i = 234; let value = unsafe { iof.i }; println!("iof.i = {}", value); process_value(IntOrFloat { i: 5 }) } fn enums() { enum Color { Red, Green, Blue, RgbColor(u8, u8, u8), //tuple Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct } let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 }; match c { Color::Red => println!("r"), Color::Green => println!("g"), Color::Blue => println!("b"), Color::RgbColor(0, 0, 0) => println!("color: black"), Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b), Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } => println!("black"), Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b, c, d), } } fn structures() { struct Point { x: f64, y: f64, } let p = Point { x: 34.5, y: 4.0 }; println!("point p is at ({}, {})", p.x, p.y); let p2 = Point { x: 3.0, y: 4.0 }; struct Line { start: Point, end: Point, } let myline = Line { start: p, end: p2 }; } enum State { Locked, Failed, Unlocked, } fn combination_lock() { let code = String::from("1234"); let mut state = State::Locked; let mut entry = String::new(); println!(" string = {}, code = {}", entry, code); loop { match state { State::Locked => { let mut input = String::new(); match stdin().read_line(&mut input) { Ok(_) => entry.push_str(&input.trim_end()), Err(_) => continue, } if entry == code { state = State::Unlocked; continue; } if!code.starts_with(&entry) { state = State::Failed } } State::Failed => { println!("Failed"); entry.clear(); state = State::Locked; continue; } State::Unlocked => { println!("Unlocked"); return; } } } } fn match_statement() { let country_code = 44; let country = match country_code { 44 => "UK", 46 => "Sweden", 7 => "Russia", 1..=999 => "unknown", _ => "invalid", }; println!("the country code {} is {}", country_code, country) } fn for_loop() { for x in 1..11 { if x == 3 { continue; } if x == 8 { break; } println!("x = {}", x) } for (pos, y) in (30..42).enumerate() { println!("{} : {}", pos, y) } } fn while_and_loop() { let mut x = 1; while x < 1000 { x *= 2; if x == 64 { continue; } println!("x = {}", x) } let mut y = 1; loop { y *= 2; println!("y = {}", y); if y == 1 << 10 { break; } } } fn if_statement() { let temp = 25; if temp > 30 { println!("really hot outside") } else if temp < 10 { println!("really cold!") } else { println!("temperature is OK") } let day = if temp > 20 { "sunny" } else { "cloudy" }; println!("today is {}", day); println!( "is it {}", if temp > 20 { "hot" } else if temp < 10 { "cold" } else { "OK" } ); println!( "it is {}", if temp > 20 { if temp > 30 { "very hot" } else { "hot" } } else if temp < 10 { "cold" } else { "OK" } ) } fn scope_and_shadowing() { let a = 123; println!("a = {}", a); let a = 777; println!("a = {}", a); { let a = 888; let b = 456; println!("a = {}, b = {}", a, b); } } fn operators() { //arithmetic operators let mut a = 2 + 3 * 4; println!("{}", a); a += 1; a -= 2; println!("remainder of {}/{} = {}", a, 3, (a % 3)); // let mut a_cubed = i16::pow(a, 3); // let mut a_cubed = i32::pow( 4); let b = 2.5; let b_cubed = f64::powi(b, 3); println!("b = {}", b); let b_to_pi = f64::powf(b, std::f64::consts::PI); println!("{} cubed = {}", b, b_cubed); println!("{} pied = {}", b, b_to_pi); //bitwise rotate let c = 1 | 2; println!("1 | 2 = {}", c); let two_to_10 = 1 << 10; println!("2^10 = {}", two_to_10); //logical let pi_less_4 = std::f64::consts::PI < 
4.0; let x = 5; let x_is_5 = x == 5; } fn primitive_types() { let a: u8 = 123; let b: i8 = -123; // println!("a = {}, b ={}", a, b); // a = 432; // b = 567; // b = 122; let mut c = 123456789; // 32-bit signed integer println!("c = {}, size = {} bytes", c, mem::size_of_val(&c)); c = -1; println!("c = {} after modification", c); let z: isize = 123456789; let size_of_z = mem::size_of_val(&z); println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8); let d: char = 'x'; println!("d = {}, size = {}", d, mem::size_of_val(&d)); let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64 println!("e = {}, size = {}", e, mem::size_of_val(&e)); let g = false; println!("g = {}, size = {}", g, mem::size_of_val(&g)); }
println!("a square has {} sides", shapes["square"]); shapes.insert("square".into(), 5); println!("{:?}", shapes);
random_line_split
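This record's random split lands inside `hashmaps()`. Its `entry(...).or_insert(...)` calls are the key idiom: `or_insert` writes only when the key is vacant and returns `&mut V` either way, which is what makes counting work in one line. A tiny sketch:

```rust
use std::collections::HashMap;

fn main() {
    let mut counts: HashMap<&str, i32> = HashMap::new();
    // The first occurrence inserts 0; later ones reuse the existing entry.
    for word in &["a", "b", "a", "a"] {
        *counts.entry(*word).or_insert(0) += 1;
    }
    println!("{:?}", counts); // e.g. {"a": 3, "b": 1}
}
```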
main.rs
#![allow(dead_code)] #![allow(unused_variables)] use std::collections::{HashMap, HashSet}; use std::io::stdin; use std::mem; mod pm; // const MEANING_OF_LIFE: u16 = 456; // no fixed address fn main() { // primitive_types(); // operators(); // scope_and_shadowing(); // println!("const MEANING_OF_LIFE = {}", MEANING_OF_LIFE) // if_statement(); // while_and_loop(); // match_statement(); // for_loop(); // combination_lock(); // structures(); // enums(); // unions(); // process_value(); // option_t() // array(); // slices(); // tuples(); // pm::pattern_matching(); // generics(); // vectors(); // hashmaps(); // hashsets(); // functions(); // methods(); closures(); // h_o_functions(); } fn h_o_functions() { } fn closures() { let sh = say_hello; sh(); let plus_one = |x:i32| -> i32 {x+1}; let a = 6; println!("{} +1 = {}", a, plus_one(a)); let plus_two = |x:isize| { let mut z = x; z+=2; z }; println!("{} +2 = {}", 3, plus_two(3)); } fn say_hello() {println!("Hello")} fn methods() { struct Point { x: f64, y: f64 } struct Line { start: Point, end: Point, } impl Line { fn len(&self) -> f64 { let dx = self.start.x - self.end.x; let dy = self.start.y - self.end.y; (dx*dx + dy*dy).sqrt() } } let p = Point {x: 3.0, y: 4.0}; let p2 = Point {x: 5.0, y: 10.0}; let myline = Line { start: p, end: p2}; println!("length = {}", myline.len()) } fn functions() { print_value(33); let mut z = 1; increase1(&mut z); println!("z is {}", z); let a = 3; let b = 5; let p = product(a, b); } fn product(x: i32, y: i32) -> i32 { // return x*y; x * y } fn increase1(x: &mut i32) { *x += 1; } fn print_value(x: i32) { println!("x is {}", x) } fn hashsets() { let mut greeks = HashSet::new(); greeks.insert("alfa"); greeks.insert("delta"); greeks.insert("gamma"); greeks.insert("delta"); println!("{:?}", greeks); let added_delta = greeks.insert("delta"); if added_delta { println!("We added delta! hooray!") } let added_vega = greeks.insert("vega"); if added_vega { println!("We added vega! 
hooray!") } if!greeks.contains("kappa") { println!("We don't have kappa") } } fn hashmaps() { let mut shapes = HashMap::new(); shapes.insert(String::from("triangle"), 3); shapes.insert(String::from("square"), 4); println!("hashmaps: {:?}", shapes); println!("a square has {} sides", shapes["square"]); shapes.insert("square".into(), 5); println!("{:?}", shapes); for (key, value) in &shapes { println!("key: {}, value: {}", key, value); } shapes.entry("circle".into()).or_insert(1); { let actual = shapes.entry("circle".into()).or_insert(2); *actual = 0; } println!("{:?}", shapes); let _1_5: HashSet<_> = (1..=5).collect(); let _6_10: HashSet<_> = (6..=10).collect(); let _1_10: HashSet<_> = (1..=10).collect(); let _2_8: HashSet<_> = (2..=8).collect(); //subset } fn vectors() { let mut a = Vec::new(); a.push(1); a.push(2); a.push(3); println!("a = {:?}", a); a.push(44); println!("a = {:?}", a); //usize isize let idx: usize = 2; println!("a[2] = {}", a[idx]); match a.get(6) { Some(x) => println!("a[6] = {}", x), None => println!("error, no such element") } for x in &a { println!("{}", x) } a.push(77); println!("{:?}", a); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); let last_elem = a.pop(); println!("{:?}", last_elem); while let Some(x) = a.pop() { println!("{}", x) } } fn generics() { struct Point<T, V> { x: T, y: V, } struct Point1<T> { x: T, y: T, } struct Line<T> { start: Point1<T>, end: Point1<T>, } let a: Point<u16, i32> = Point { x: 0, y: 0 }; let b: Point<f64, f32> = Point { x: 1.2, y: 3.4 }; let c: Point<i32, f64> = Point { x: 3, y: 5.0 }; let d: Point<i32, f64> = Point { x: 1, y: 4.5 }; let x: Point1<f64> = Point1 { x: 1f64, y: 2f64 }; let y: Point1<f64> = Point1 { x: 3f64, y: 4f64 }; let myline = Line { start: x, end: y }; } fn tuples() { let x = 3; let y = 4; let sp = sum_and_product(x, y); let (sum, product) = sp; println!("sp = {:?}", (sum, product)); println!("{0} + {1} = {2}", x, y, sum); println!("{0} + {1} = {2}", x, y, product); let sp2 = sum_and_product(4, 7); let combined = (sp, sp2); println!("{1:?}, {2:?}, {0:?} ", combined.0, combined.1, combined); println!("{1:?}, {2:?}, {0:?} ", combined.0, (combined.0).0, (combined .0).1); let ((c, d), (e, f)) = combined; println!("{},{},{},{}", c, d, e, f); let foo = (true, 42.0, -1i8); println!("{:?}", foo); let meaning = 42; println!("{:?}", meaning); } fn sum_and_product(x: i32, y: i32) -> (i32, i32) { (x + y, x * y) } fn use_slices(slice: &mut [i32]) { println!("first elem = {}, len = {}", slice[0], slice.len()); slice[0] = 4444; } fn slices() { let mut data = [1, 2, 3, 4, 5]; use_slices(&mut data[1..4]); use_slices(&mut data); println!("{:?}", data) } fn array() { let mut a: [i32; 5] = [1, 2, 3, 4, 5, ]; println!("a has {} elements, first is {}", a.len(), a[0]); a[0] = 321; println!("a has {} elements, first is {}", a.len(), a[0]); println!("{:?}", a); if a == [321, 2, 3, 4, 5] { println!("match"); } let b = [1u64; 10]; for i in 0..b.len() { println!("{}", b[i]) }; println!("b took up {} bytes", mem::size_of_val(&b)); println!("b {:?}", b); let mtx: [[f64; 3]; 2] = [ [0.1, 0.2, 0.3], [0.4, 0.5, 0.6] ]; println!("mtx = {:?}", mtx); for i in 0..mtx.len() { for j in 0..mtx[i].len() { if i == j { print!("diagonal: {} ", mtx[i][j]); } } } println!(); } union IntOrFloat { i: i32, f: f32, } fn option_t() { let x = 3.0; 
let y = 1.0; //Option let result = if y!= 0.0 { Some(x / y) } else { None }; match result { Some(z) => { println!("{}/{} ={}", x, y, z) } None => println!("cannot divide by zero") } if let Some(z) = result { println!("result = {}", z) } } fn process_value(iof: IntOrFloat) { unsafe { match iof { IntOrFloat { i: 42 } => { println!("meaning of life value 42", ); } IntOrFloat { f } =>
} } } fn unions() { let mut iof = IntOrFloat { i: 123 }; iof.i = 234; let value = unsafe { iof.i }; println!("iof.i = {}", value); process_value(IntOrFloat { i: 5 }) } fn enums() { enum Color { Red, Green, Blue, RgbColor(u8, u8, u8), //tuple Cmyk { cyan: u8, magenta: u8, yellow: u8, black: u8 }, //struct } let c: Color = Color::Cmyk { cyan: 0, magenta: 128, yellow: 0, black: 0 }; match c { Color::Red => println!("r"), Color::Green => println!("g"), Color::Blue => println!("b"), Color::RgbColor(0, 0, 0) => println!("color: black"), Color::RgbColor(r, g, b) => println!("rgb({},{},{})", r, g, b), Color::Cmyk { cyan: _, magenta: _, yellow: _, black: 255 } => println!("black"), Color::Cmyk { cyan: a, magenta: b, yellow: c, black: d } => println!("cmyk({},{},{},{})", a, b, c, d), } } fn structures() { struct Point { x: f64, y: f64, } let p = Point { x: 34.5, y: 4.0 }; println!("point p is at ({}, {})", p.x, p.y); let p2 = Point { x: 3.0, y: 4.0 }; struct Line { start: Point, end: Point, } let myline = Line { start: p, end: p2 }; } enum State { Locked, Failed, Unlocked, } fn combination_lock() { let code = String::from("1234"); let mut state = State::Locked; let mut entry = String::new(); println!(" string = {}, code = {}", entry, code); loop { match state { State::Locked => { let mut input = String::new(); match stdin().read_line(&mut input) { Ok(_) => entry.push_str(&input.trim_end()), Err(_) => continue, } if entry == code { state = State::Unlocked; continue; } if!code.starts_with(&entry) { state = State::Failed } } State::Failed => { println!("Failed"); entry.clear(); state = State::Locked; continue; } State::Unlocked => { println!("Unlocked"); return; } } } } fn match_statement() { let country_code = 44; let country = match country_code { 44 => "UK", 46 => "Sweden", 7 => "Russia", 1..=999 => "unknown", _ => "invalid", }; println!("the country code {} is {}", country_code, country) } fn for_loop() { for x in 1..11 { if x == 3 { continue; } if x == 8 { break; } println!("x = {}", x) } for (pos, y) in (30..42).enumerate() { println!("{} : {}", pos, y) } } fn while_and_loop() { let mut x = 1; while x < 1000 { x *= 2; if x == 64 { continue; } println!("x = {}", x) } let mut y = 1; loop { y *= 2; println!("y = {}", y); if y == 1 << 10 { break; } } } fn if_statement() { let temp = 25; if temp > 30 { println!("really hot outside") } else if temp < 10 { println!("really cold!") } else { println!("temperature is OK") } let day = if temp > 20 { "sunny" } else { "cloudy" }; println!("today is {}", day); println!( "is it {}", if temp > 20 { "hot" } else if temp < 10 { "cold" } else { "OK" } ); println!( "it is {}", if temp > 20 { if temp > 30 { "very hot" } else { "hot" } } else if temp < 10 { "cold" } else { "OK" } ) } fn scope_and_shadowing() { let a = 123; println!("a = {}", a); let a = 777; println!("a = {}", a); { let a = 888; let b = 456; println!("a = {}, b = {}", a, b); } } fn operators() { //arithmetic operators let mut a = 2 + 3 * 4; println!("{}", a); a += 1; a -= 2; println!("remainder of {}/{} = {}", a, 3, (a % 3)); // let mut a_cubed = i16::pow(a, 3); // let mut a_cubed = i32::pow( 4); let b = 2.5; let b_cubed = f64::powi(b, 3); println!("b = {}", b); let b_to_pi = f64::powf(b, std::f64::consts::PI); println!("{} cubed = {}", b, b_cubed); println!("{} pied = {}", b, b_to_pi); //bitwise rotate let c = 1 | 2; println!("1 | 2 = {}", c); let two_to_10 = 1 << 10; println!("2^10 = {}", two_to_10); //logical let pi_less_4 = std::f64::consts::PI < 4.0; let x = 5; let x_is_5 = x == 5; } fn 
primitive_types() { let a: u8 = 123; let b: i8 = -123; // println!("a = {}, b ={}", a, b); // a = 432; // b = 567; // b = 122; let mut c = 123456789; // 32-bit signed integer println!("c = {}, size = {} bytes", c, mem::size_of_val(&c)); c = -1; println!("c = {} after modification", c); let z: isize = 123456789; let size_of_z = mem::size_of_val(&z); println!("z = {}, size = {}, {}-bit os", z, size_of_z, size_of_z * 8); let d: char = 'x'; println!("d = {}, size = {}", d, mem::size_of_val(&d)); let e = 2.5; // double precision value, 8 bytes or 64 bits, f 64 println!("e = {}, size = {}", e, mem::size_of_val(&e)); let g = false; println!("g = {}, size = {}", g, mem::size_of_val(&g)); }
{ println!("value = {}", f) }
conditional_block
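`process_value` needs `unsafe` because a union does not track which field was last written. When the goal is only to reinterpret bits, the standard library offers a safe equivalent. A hypothetical side-by-side:

```rust
union IntOrFloat {
    i: u32,
    f: f32,
}

fn main() {
    let iof = IntOrFloat { f: 1.0 };
    // Union reads are unsafe: the compiler cannot know the active field.
    let bits = unsafe { iof.i };
    // Safe alternative for pure bit reinterpretation:
    assert_eq!(bits, 1.0_f32.to_bits());
    println!("{:#010x}", bits); // 0x3f800000
}
```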
main.rs
use std::{env, io, fmt}; use std::time::{Duration, SystemTime}; use std::error::Error; use std::collections::HashMap; use tokio::sync; use tokio::net::UdpSocket; use log::{debug, info, warn}; use futures::select; use futures::future::FutureExt; // Delta between NTP epoch (1900-01-01 00:00:00) and Unix epoch (1970-01-01 00:00:00). // Contains 53 non-leap years, and 17 leap years, in seconds, this is: // (53 * 365 + 17 * 366) * 86400 = 2208988800. const EPOCH_DELTA: u64 = 2_208_988_800; // Tag name to use for messages without an explicit tag (i.e. currently those sent via // `/send_after`). const DEFAULT_TAG: &str = "default"; // Convert an OSC timetag into unix timestamp seconds and microseconds. // // [OSC timetags](http://opensoundcontrol.org/spec-1_0) use NTP timestamps // (https://en.wikipedia.org/wiki/Network_Time_Protocol#Timestamps). // // TODO: verify time conversions are actually correct, check against other implementations fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) { let unix_secs = ntp_secs as u64 - EPOCH_DELTA; let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32; (unix_secs, unix_micros as u32) } // TODO: verify time conversions are actually correct, check roundtrips fn timetag_to_duration(ntp_secs: u32, ntp_frac_secs: u32) -> Duration { let (unix_secs, unix_micros) = timetag_to_unix(ntp_secs, ntp_frac_secs); // duration of time tag since epoch let tt_since_epoch = Duration::new(unix_secs, unix_micros * 1000); // duration of current system time since epoch let now_since_epoch = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("System is set to before Unix epoch, check clock"); tt_since_epoch - now_since_epoch } /* fn unix_to_timetag(unix_secs: u64, unix_micros: u32) -> (u32, u32) { let ntp_secs = unix_secs + EPOCH_DELTA; let ntp_frac_secs = ((unix_micros as u64 + 1) << 32) / 1_000_000; (ntp_secs as u32, ntp_frac_secs as u32) } */ struct Server { /// Server's listening UDP socket. socket: UdpSocket, /// Internal buffer used for reading/writing UDP packets into. buf: Vec<u8>, /// Maps a tag name to sender/receiver pair. Used for signalling cancellations. tags: HashMap<String, (sync::watch::Sender<bool>, sync::watch::Receiver<bool>)>, } impl Server { pub async fn new(bind_addr: &str) -> Result<Self, io::Error> { debug!("Attempting to bind to: {}", bind_addr); let socket = UdpSocket::bind(bind_addr).await?; info!("Listening on: {}", socket.local_addr()?); Ok(Self { socket, buf: vec![0; 1024], tags: HashMap::new(), }) } /// Main event loop, runs forever after server is started. async fn run(&mut self) -> Result<(), io::Error> { debug!("Starting main event loop"); loop { if let Err(err) = self.next_event().await { warn!("{}", err); } } } /// Called from main server event loop (`run()`) on each iteration. /// /// Waits for incoming UDP packets containing OSC packets, either handling them immediately (in /// the case of e.g. `/flush` messages), or spawning futures to handle them in the future (in /// the case of e.g. `/send_after` bundles). 
async fn next_event(&mut self) -> Result<(), ServerError> { debug!("Waiting for UDP packet..."); let raw_packet = self.recv_udp_packet().await?; debug!("Received UDP packet (size={})", raw_packet.len()); debug!("Parsing OSC packet..."); let osc_packet = rosc::decoder::decode(raw_packet)?; debug!("Received OSC packet: {:?}", osc_packet); match osc_packet { rosc::OscPacket::Message(msg) => { match msg.addr.as_ref() { "/flush" => self.handle_msg_flush(&msg), addr => { let msg = format!("Ignoring unhandled OSC address: {}", addr); return Err(ServerError::Protocol(msg)); } } }, rosc::OscPacket::Bundle(bundle) => { if let rosc::OscType::Time(ntp_secs, ntp_subsecs) = bundle.timetag { match bundle.content.first() { Some(rosc::OscPacket::Message(msg)) => { match msg.addr.as_ref() { "/send_after" => self.handle_bundle_send_after( DEFAULT_TAG, timetag_to_duration(ntp_secs, ntp_subsecs), &msg.args ), "/send_after_tagged" => { match Self::parse_send_after_tag(&msg.args) { Ok(tag) => self.handle_bundle_send_after( &tag, timetag_to_duration(ntp_secs, ntp_subsecs), &msg.args[1..], // 1st argument is tag, already parsed ), Err(err) => { let msg = format!("Unexpected tag argument: {}", err); return Err(ServerError::Protocol(msg)); }, } }, addr => { let msg = format!("Unhandled OSC address: {}", addr); return Err(ServerError::Protocol(msg)); }, } }, other => { let msg = format!("Unexpected OSC bundle content: {:?}", other); return Err(ServerError::Protocol(msg)); } } } }, } Ok(()) } /// Await UDP packet. Returns slice into server's buffer. async fn recv_udp_packet(&mut self) -> Result<&[u8], io::Error> { let (size, _) = self.socket.recv_from(&mut self.buf).await?; Ok(&self.buf[..size]) } /// Handles /flush messages. fn handle_msg_flush(&mut self, msg: &rosc::OscMessage) { match msg.args.first() { Some(rosc::OscType::String(tag)) => { // Remove tag entry from hash map, and send termination signal to all listening // receivers. if let Some((_k, (tx, _rx))) = self.tags.remove_entry(tag) { debug!("Flushing tag: {}", tag); tx.broadcast(true).unwrap_or_else(|e| { warn!("Failed to broadcast: {}", e); }); } }, other => warn!("Ignoring unexpected /flush message: {:?}", other), }; } /// Handles /send_after and /send_after_tagged bundles. fn handle_bundle_send_after(&mut self, tag: &str, send_after: Duration, msg_args: &[rosc::OscType]) { let udp_addr = match Self::parse_command_address(msg_args) { Ok(addr) => addr, Err(err) => { warn!("Ignoring message: {}", err); return; }, }; // addr and OSX /<foo> addr let osc_cmd_addr = match msg_args.get(2) { Some(rosc::OscType::String(addr)) => addr, other => { warn!("Unexpected addr argument: {:?}", other); return; }, }; // remove host, port, address from command let remaining_args = &msg_args[3..]; debug!("Sending OSC command {:?} in: {}ms", remaining_args, send_after.as_millis()); let new_msg = rosc::OscMessage { addr: osc_cmd_addr.to_owned(), args: remaining_args.to_vec(), }; let packet = rosc::OscPacket::Message(new_msg); let new_buf = match rosc::encoder::encode(&packet) { Ok(buf) => buf, Err(err) => { warn!("Failed to encode requested OSC message: {:?}", err); return; } }; let (_tx, rx) = self.tags.entry(tag.to_owned()) .or_insert_with(|| tokio::sync::watch::channel(false)); let mut rx = rx.clone(); tokio::spawn(async move { // TODO: better way of doing this, configurable addr, etc. 
let loopback = std::net::Ipv4Addr::new(127, 0, 0, 1); let addr = std::net::SocketAddrV4::new(loopback, 0); // TODO: error handling let mut socket = UdpSocket::bind(addr).await.unwrap(); // check if already cancelled, disregard initial value if not if let Some(true) = rx.recv().await { debug!("cancelled timer"); return; } loop { select! { _ = tokio::time::delay_for(send_after).fuse() => break, cancel = rx.recv().fuse() => { match cancel { Some(true) => { debug!("cancelled timer"); return; }, // `false` should never be set, but ignore if received _ => {}, } }, } } // TODO: error handling debug!("Sending OSC command to: {}", &udp_addr); match socket.send_to(&new_buf, &udp_addr).await { Ok(_) => debug!("OSC command sent"), Err(err) => warn!("Failed to send UDP OSC message: {}", err), } }); } fn parse_send_after_tag(msg_args: &[rosc::OscType]) -> Result<String, String> { match msg_args.first() { Some(rosc::OscType::String(tag)) => Ok(tag.to_owned()), other => Err(format!("Unexpected tag argument: {:?}", other)), } } // TODO: error type /// Parse OSC server address (host and port) from given OSC message arguments (typically from /// `/send_after` messages). fn parse_command_address(msg_args: &[rosc::OscType]) -> Result<String, String> { let host = match msg_args.first() { Some(rosc::OscType::String(host)) => { // Workaround for https://github.com/rust-lang/rust/issues/34202 // affecting OS X / Windows // TODO: check v6 status of Sonic Pi if host == "localhost" { "127.0.0.1" } else { host } }, other => return Err(format!("Unexpected host argument: {:?}", other)), }; let port = match msg_args.get(1) { Some(rosc::OscType::Int(port)) => port, other => return Err(format!("Unexpected port argument: {:?}", other)), }; Ok(format!("{}:{}", host, port)) } } #[derive(Debug)] enum ServerError { /// Network error, typically caused by UDP send/recv here. Io(io::Error), /// OSC error, typically caused by failing to encode/decode OSC data structures. Osc(rosc::OscError), /// Error in cases where valid OSC packets were received, but containing invalid payloads (e.g. /// a `/send_after` containing unexpected arguments). Protocol(String), } impl fmt::Display for ServerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::Io(err) => write!(f, "IO error: {}", err), Self::Osc(err) => write!(f, "Failed to decode OSC packet: {:?}", err), Self::Protocol(err) => write!(f, "{}", err), } } } impl Error for ServerError {} impl From<io::Error> for ServerError { fn from(err: io::Error) -> Self { Self::Io(err) } } impl From<rosc::OscError> for ServerError { fn from(err: rosc::OscError) -> Self { Self::Osc(err) } } #[tokio::main] async fn main() -> Result<(), io::Error> { env_logger::init();
#[cfg(test)] mod tests { use crate::timetag_to_unix; #[test] fn time_tag_to_unix_1() { // 2^32 / 2 fractional seconds, i.e. 500,000μs assert_eq!(timetag_to_unix(3_608_146_800, 2_147_483_648), (1_399_158_000, 500_000)); } #[test] fn time_tag_to_unix_2() { assert_eq!(timetag_to_unix(3549086042, 4010129359), (1340097242, 933680)); } #[test] fn time_tag_to_unix_seconds_only() { assert_eq!(timetag_to_unix(3_608_146_800, 0), (1_399_158_000, 0)); } // TODO: tests for time tags in the past, invalid time tags, once error requirement determined }
let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:4560".to_string()); Server::new(&addr).await?.run().await }
random_line_split
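The core of this sample's time handling is `timetag_to_unix`: NTP stores fractional seconds as a 32-bit count of 2^-32 s units, so multiplying by 1_000_000 and shifting right by 32 yields microseconds. A standalone restatement of that conversion, checked against one of the sample's own test vectors:

```rust
// Seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01).
const EPOCH_DELTA: u64 = 2_208_988_800;

fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) {
    let unix_secs = ntp_secs as u64 - EPOCH_DELTA;
    // frac is in units of 2^-32 s; scale to microseconds, then truncate.
    let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32;
    (unix_secs, unix_micros as u32)
}

fn main() {
    // 2^31 fractional units are exactly half a second: 500_000 µs.
    assert_eq!(
        timetag_to_unix(3_608_146_800, 2_147_483_648),
        (1_399_158_000, 500_000)
    );
    println!("ok");
}
```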
main.rs
use std::{env, io, fmt}; use std::time::{Duration, SystemTime}; use std::error::Error; use std::collections::HashMap; use tokio::sync; use tokio::net::UdpSocket; use log::{debug, info, warn}; use futures::select; use futures::future::FutureExt; // Delta between NTP epoch (1900-01-01 00:00:00) and Unix epoch (1970-01-01 00:00:00). // Contains 53 non-leap years, and 17 leap years, in seconds, this is: // (53 * 365 + 17 * 366) * 86400 = 2208988800. const EPOCH_DELTA: u64 = 2_208_988_800; // Tag name to use for messages without an explicit tag (i.e. currently those sent via // `/send_after`). const DEFAULT_TAG: &str = "default"; // Convert an OSC timetag into unix timestamp seconds and microseconds. // // [OSC timetags](http://opensoundcontrol.org/spec-1_0) use NTP timestamps // (https://en.wikipedia.org/wiki/Network_Time_Protocol#Timestamps). // // TODO: verify time conversions are actually correct, check against other implementations fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) { let unix_secs = ntp_secs as u64 - EPOCH_DELTA; let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32; (unix_secs, unix_micros as u32) } // TODO: verify time conversions are actually correct, check roundtrips fn timetag_to_duration(ntp_secs: u32, ntp_frac_secs: u32) -> Duration { let (unix_secs, unix_micros) = timetag_to_unix(ntp_secs, ntp_frac_secs); // duration of time tag since epoch let tt_since_epoch = Duration::new(unix_secs, unix_micros * 1000); // duration of current system time since epoch let now_since_epoch = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("System is set to before Unix epoch, check clock"); tt_since_epoch - now_since_epoch } /* fn unix_to_timetag(unix_secs: u64, unix_micros: u32) -> (u32, u32) { let ntp_secs = unix_secs + EPOCH_DELTA; let ntp_frac_secs = ((unix_micros as u64 + 1) << 32) / 1_000_000; (ntp_secs as u32, ntp_frac_secs as u32) } */ struct Server { /// Server's listening UDP socket. socket: UdpSocket, /// Internal buffer used for reading/writing UDP packets into. buf: Vec<u8>, /// Maps a tag name to sender/receiver pair. Used for signalling cancellations. tags: HashMap<String, (sync::watch::Sender<bool>, sync::watch::Receiver<bool>)>, } impl Server { pub async fn new(bind_addr: &str) -> Result<Self, io::Error> { debug!("Attempting to bind to: {}", bind_addr); let socket = UdpSocket::bind(bind_addr).await?; info!("Listening on: {}", socket.local_addr()?); Ok(Self { socket, buf: vec![0; 1024], tags: HashMap::new(), }) } /// Main event loop, runs forever after server is started. async fn run(&mut self) -> Result<(), io::Error> { debug!("Starting main event loop"); loop { if let Err(err) = self.next_event().await { warn!("{}", err); } } } /// Called from main server event loop (`run()`) on each iteration. /// /// Waits for incoming UDP packets containing OSC packets, either handling them immediately (in /// the case of e.g. `/flush` messages), or spawning futures to handle them in the future (in /// the case of e.g. `/send_after` bundles). 
async fn next_event(&mut self) -> Result<(), ServerError> { debug!("Waiting for UDP packet..."); let raw_packet = self.recv_udp_packet().await?; debug!("Received UDP packet (size={})", raw_packet.len()); debug!("Parsing OSC packet..."); let osc_packet = rosc::decoder::decode(raw_packet)?; debug!("Received OSC packet: {:?}", osc_packet); match osc_packet { rosc::OscPacket::Message(msg) => { match msg.addr.as_ref() { "/flush" => self.handle_msg_flush(&msg), addr => { let msg = format!("Ignoring unhandled OSC address: {}", addr); return Err(ServerError::Protocol(msg)); } } }, rosc::OscPacket::Bundle(bundle) => { if let rosc::OscType::Time(ntp_secs, ntp_subsecs) = bundle.timetag { match bundle.content.first() { Some(rosc::OscPacket::Message(msg)) => { match msg.addr.as_ref() { "/send_after" => self.handle_bundle_send_after( DEFAULT_TAG, timetag_to_duration(ntp_secs, ntp_subsecs), &msg.args ), "/send_after_tagged" => { match Self::parse_send_after_tag(&msg.args) { Ok(tag) => self.handle_bundle_send_after( &tag, timetag_to_duration(ntp_secs, ntp_subsecs), &msg.args[1..], // 1st argument is tag, already parsed ), Err(err) => { let msg = format!("Unexpected tag argument: {}", err); return Err(ServerError::Protocol(msg)); }, } }, addr => { let msg = format!("Unhandled OSC address: {}", addr); return Err(ServerError::Protocol(msg)); }, } }, other => { let msg = format!("Unexpected OSC bundle content: {:?}", other); return Err(ServerError::Protocol(msg)); } } } }, } Ok(()) } /// Await UDP packet. Returns slice into server's buffer. async fn
(&mut self) -> Result<&[u8], io::Error> { let (size, _) = self.socket.recv_from(&mut self.buf).await?; Ok(&self.buf[..size]) } /// Handles /flush messages. fn handle_msg_flush(&mut self, msg: &rosc::OscMessage) { match msg.args.first() { Some(rosc::OscType::String(tag)) => { // Remove tag entry from hash map, and send termination signal to all listening // receivers. if let Some((_k, (tx, _rx))) = self.tags.remove_entry(tag) { debug!("Flushing tag: {}", tag); tx.broadcast(true).unwrap_or_else(|e| { warn!("Failed to broadcast: {}", e); }); } }, other => warn!("Ignoring unexpected /flush message: {:?}", other), }; } /// Handles /send_after and /send_after_tagged bundles. fn handle_bundle_send_after(&mut self, tag: &str, send_after: Duration, msg_args: &[rosc::OscType]) { let udp_addr = match Self::parse_command_address(msg_args) { Ok(addr) => addr, Err(err) => { warn!("Ignoring message: {}", err); return; }, }; // OSC /<foo> address for the command let osc_cmd_addr = match msg_args.get(2) { Some(rosc::OscType::String(addr)) => addr, other => { warn!("Unexpected addr argument: {:?}", other); return; }, }; // remove host, port, address from command let remaining_args = &msg_args[3..]; debug!("Sending OSC command {:?} in: {}ms", remaining_args, send_after.as_millis()); let new_msg = rosc::OscMessage { addr: osc_cmd_addr.to_owned(), args: remaining_args.to_vec(), }; let packet = rosc::OscPacket::Message(new_msg); let new_buf = match rosc::encoder::encode(&packet) { Ok(buf) => buf, Err(err) => { warn!("Failed to encode requested OSC message: {:?}", err); return; } }; let (_tx, rx) = self.tags.entry(tag.to_owned()) .or_insert_with(|| tokio::sync::watch::channel(false)); let mut rx = rx.clone(); tokio::spawn(async move { // TODO: better way of doing this, configurable addr, etc. let loopback = std::net::Ipv4Addr::new(127, 0, 0, 1); let addr = std::net::SocketAddrV4::new(loopback, 0); // TODO: error handling let mut socket = UdpSocket::bind(addr).await.unwrap(); // check if already cancelled, disregard initial value if not if let Some(true) = rx.recv().await { debug!("cancelled timer"); return; } loop { select! { _ = tokio::time::delay_for(send_after).fuse() => break, cancel = rx.recv().fuse() => { match cancel { Some(true) => { debug!("cancelled timer"); return; }, // `false` should never be set, but ignore if received _ => {}, } }, } } // TODO: error handling debug!("Sending OSC command to: {}", &udp_addr); match socket.send_to(&new_buf, &udp_addr).await { Ok(_) => debug!("OSC command sent"), Err(err) => warn!("Failed to send UDP OSC message: {}", err), } }); } fn parse_send_after_tag(msg_args: &[rosc::OscType]) -> Result<String, String> { match msg_args.first() { Some(rosc::OscType::String(tag)) => Ok(tag.to_owned()), other => Err(format!("Unexpected tag argument: {:?}", other)), } } // TODO: error type /// Parse OSC server address (host and port) from given OSC message arguments (typically from /// `/send_after` messages). 
fn parse_command_address(msg_args: &[rosc::OscType]) -> Result<String, String> { let host = match msg_args.first() { Some(rosc::OscType::String(host)) => { // Workaround for https://github.com/rust-lang/rust/issues/34202 // affecting OS X / Windows // TODO: check v6 status of Sonic Pi if host == "localhost" { "127.0.0.1" } else { host } }, other => return Err(format!("Unexpected host argument: {:?}", other)), }; let port = match msg_args.get(1) { Some(rosc::OscType::Int(port)) => port, other => return Err(format!("Unexpected port argument: {:?}", other)), }; Ok(format!("{}:{}", host, port)) } } #[derive(Debug)] enum ServerError { /// Network error, typically caused by UDP send/recv here. Io(io::Error), /// OSC error, typically caused by failing to encode/decode OSC data structures. Osc(rosc::OscError), /// Error in cases where valid OSC packets were received, but containing invalid payloads (e.g. /// a `/send_after` containing unexpected arguments). Protocol(String), } impl fmt::Display for ServerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::Io(err) => write!(f, "IO error: {}", err), Self::Osc(err) => write!(f, "Failed to decode OSC packet: {:?}", err), Self::Protocol(err) => write!(f, "{}", err), } } } impl Error for ServerError {} impl From<io::Error> for ServerError { fn from(err: io::Error) -> Self { Self::Io(err) } } impl From<rosc::OscError> for ServerError { fn from(err: rosc::OscError) -> Self { Self::Osc(err) } } #[tokio::main] async fn main() -> Result<(), io::Error> { env_logger::init(); let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:4560".to_string()); Server::new(&addr).await?.run().await } #[cfg(test)] mod tests { use crate::timetag_to_unix; #[test] fn time_tag_to_unix_1() { // 2^32 / 2 fractional seconds, i.e. 500,000μs assert_eq!(timetag_to_unix(3_608_146_800, 2_147_483_648), (1_399_158_000, 500_000)); } #[test] fn time_tag_to_unix_2() { assert_eq!(timetag_to_unix(3549086042, 4010129359), (1340097242, 933680)); } #[test] fn time_tag_to_unix_seconds_only() { assert_eq!(timetag_to_unix(3_608_146_800, 0), (1_399_158_000, 0)); } // TODO: tests for time tags in the past, invalid time tags, once error requirement determined }
recv_udp_packet
identifier_name
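The `identifier_name` middle above fills in the name of the buffer-reuse receive helper. A minimal standalone sketch of the same pattern follows, written against std's blocking `UdpSocket` so it runs without the record's tokio/rosc dependencies (the struct and the loopback test traffic here are illustrative, not the record's code):

use std::io;
use std::net::UdpSocket;

// One reusable buffer per server; each receive returns a slice of
// only the bytes that actually arrived.
struct Server {
    socket: UdpSocket,
    buf: Vec<u8>,
}

impl Server {
    fn recv_udp_packet(&mut self) -> io::Result<&[u8]> {
        let (size, _peer) = self.socket.recv_from(&mut self.buf)?;
        Ok(&self.buf[..size])
    }
}

fn main() -> io::Result<()> {
    let mut server = Server {
        socket: UdpSocket::bind("127.0.0.1:0")?,
        buf: vec![0; 1024],
    };
    // Send ourselves a datagram so recv_udp_packet has something to read.
    let addr = server.socket.local_addr()?;
    server.socket.send_to(b"ping", addr)?;
    let packet = server.recv_udp_packet()?;
    assert_eq!(packet, b"ping");
    Ok(())
}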
main.rs
use std::{env, io, fmt}; use std::time::{Duration, SystemTime}; use std::error::Error; use std::collections::HashMap; use tokio::sync; use tokio::net::UdpSocket; use log::{debug, info, warn}; use futures::select; use futures::future::FutureExt; // Delta between NTP epoch (1900-01-01 00:00:00) and Unix epoch (1970-01-01 00:00:00). // Contains 53 non-leap years, and 17 leap years, in seconds, this is: // (53 * 365 + 17 * 366) * 86400 = 2208988800. const EPOCH_DELTA: u64 = 2_208_988_800; // Tag name to use for messages without an explicit tag (i.e. currently those sent via // `/send_after`). const DEFAULT_TAG: &str = "default"; // Convert an OSC timetag into unix timestamp seconds and microseconds. // // [OSC timetags](http://opensoundcontrol.org/spec-1_0) use NTP timestamps // (https://en.wikipedia.org/wiki/Network_Time_Protocol#Timestamps). // // TODO: verify time conversions are actually correct, check against other implementations fn timetag_to_unix(ntp_secs: u32, ntp_frac_secs: u32) -> (u64, u32) { let unix_secs = ntp_secs as u64 - EPOCH_DELTA; let unix_micros = ((ntp_frac_secs as u64) * 1_000_000) >> 32; (unix_secs, unix_micros as u32) } // TODO: verify time conversions are actually correct, check roundtrips fn timetag_to_duration(ntp_secs: u32, ntp_frac_secs: u32) -> Duration { let (unix_secs, unix_micros) = timetag_to_unix(ntp_secs, ntp_frac_secs); // duration of time tag since epoch let tt_since_epoch = Duration::new(unix_secs, unix_micros * 1000); // duration of current system time since epoch let now_since_epoch = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("System is set to before Unix epoch, check clock"); tt_since_epoch - now_since_epoch } /* fn unix_to_timetag(unix_secs: u64, unix_micros: u32) -> (u32, u32) { let ntp_secs = unix_secs + EPOCH_DELTA; let ntp_frac_secs = ((unix_micros as u64 + 1) << 32) / 1_000_000; (ntp_secs as u32, ntp_frac_secs as u32) } */ struct Server { /// Server's listening UDP socket. socket: UdpSocket, /// Internal buffer used for reading/writing UDP packets into. buf: Vec<u8>, /// Maps a tag name to sender/receiver pair. Used for signalling cancellations. tags: HashMap<String, (sync::watch::Sender<bool>, sync::watch::Receiver<bool>)>, } impl Server { pub async fn new(bind_addr: &str) -> Result<Self, io::Error> { debug!("Attempting to bind to: {}", bind_addr); let socket = UdpSocket::bind(bind_addr).await?; info!("Listening on: {}", socket.local_addr()?); Ok(Self { socket, buf: vec![0; 1024], tags: HashMap::new(), }) } /// Main event loop, runs forever after server is started. async fn run(&mut self) -> Result<(), io::Error> { debug!("Starting main event loop"); loop { if let Err(err) = self.next_event().await { warn!("{}", err); } } } /// Called from main server event loop (`run()`) on each iteration. /// /// Waits for incoming UDP packets containing OSC packets, either handling them immediately (in /// the case of e.g. `/flush` messages), or spawning futures to handle them in the future (in /// the case of e.g. `/send_after` bundles). 
async fn next_event(&mut self) -> Result<(), ServerError> { debug!("Waiting for UDP packet..."); let raw_packet = self.recv_udp_packet().await?; debug!("Received UDP packet (size={})", raw_packet.len()); debug!("Parsing OSC packet..."); let osc_packet = rosc::decoder::decode(raw_packet)?; debug!("Received OSC packet: {:?}", osc_packet); match osc_packet { rosc::OscPacket::Message(msg) => { match msg.addr.as_ref() { "/flush" => self.handle_msg_flush(&msg), addr => { let msg = format!("Ignoring unhandled OSC address: {}", addr); return Err(ServerError::Protocol(msg)); } } }, rosc::OscPacket::Bundle(bundle) => { if let rosc::OscType::Time(ntp_secs, ntp_subsecs) = bundle.timetag { match bundle.content.first() { Some(rosc::OscPacket::Message(msg)) => { match msg.addr.as_ref() { "/send_after" => self.handle_bundle_send_after( DEFAULT_TAG, timetag_to_duration(ntp_secs, ntp_subsecs), &msg.args ), "/send_after_tagged" => { match Self::parse_send_after_tag(&msg.args) { Ok(tag) => self.handle_bundle_send_after( &tag, timetag_to_duration(ntp_secs, ntp_subsecs), &msg.args[1..], // 1st argument is tag, already parsed ), Err(err) => { let msg = format!("Unexpected tag argument: {}", err); return Err(ServerError::Protocol(msg)); }, } }, addr => { let msg = format!("Unhandled OSC address: {}", addr); return Err(ServerError::Protocol(msg)); }, } }, other => { let msg = format!("Unexpected OSC bundle content: {:?}", other); return Err(ServerError::Protocol(msg)); } } } }, } Ok(()) } /// Await UDP packet. Returns slice into server's buffer. async fn recv_udp_packet(&mut self) -> Result<&[u8], io::Error> { let (size, _) = self.socket.recv_from(&mut self.buf).await?; Ok(&self.buf[..size]) } /// Handles /flush messages. fn handle_msg_flush(&mut self, msg: &rosc::OscMessage) { match msg.args.first() { Some(rosc::OscType::String(tag)) => { // Remove tag entry from hash map, and send termination signal to all listening // receivers. if let Some((_k, (tx, _rx))) = self.tags.remove_entry(tag) { debug!("Flushing tag: {}", tag); tx.broadcast(true).unwrap_or_else(|e| { warn!("Failed to broadcast: {}", e); }); } }, other => warn!("Ignoring unexpected /flush message: {:?}", other), }; } /// Handles /send_after and /send_after_tagged bundles. fn handle_bundle_send_after(&mut self, tag: &str, send_after: Duration, msg_args: &[rosc::OscType]) { let udp_addr = match Self::parse_command_address(msg_args) { Ok(addr) => addr, Err(err) => { warn!("Ignoring message: {}", err); return; }, }; // OSC /<foo> address for the command let osc_cmd_addr = match msg_args.get(2) { Some(rosc::OscType::String(addr)) => addr, other => { warn!("Unexpected addr argument: {:?}", other); return; }, }; // remove host, port, address from command let remaining_args = &msg_args[3..]; debug!("Sending OSC command {:?} in: {}ms", remaining_args, send_after.as_millis()); let new_msg = rosc::OscMessage { addr: osc_cmd_addr.to_owned(), args: remaining_args.to_vec(), }; let packet = rosc::OscPacket::Message(new_msg); let new_buf = match rosc::encoder::encode(&packet) { Ok(buf) => buf, Err(err) => { warn!("Failed to encode requested OSC message: {:?}", err); return; } }; let (_tx, rx) = self.tags.entry(tag.to_owned()) .or_insert_with(|| tokio::sync::watch::channel(false)); let mut rx = rx.clone(); tokio::spawn(async move { // TODO: better way of doing this, configurable addr, etc. 
let loopback = std::net::Ipv4Addr::new(127, 0, 0, 1); let addr = std::net::SocketAddrV4::new(loopback, 0); // TODO: error handling let mut socket = UdpSocket::bind(addr).await.unwrap(); // check if already cancelled, disregard initial value if not if let Some(true) = rx.recv().await { debug!("cancelled timer"); return; } loop { select! { _ = tokio::time::delay_for(send_after).fuse() => break, cancel = rx.recv().fuse() => { match cancel { Some(true) => { debug!("cancelled timer"); return; }, // `false` should never be set, but ignore if received _ => {}, } }, } } // TODO: error handling debug!("Sending OSC command to: {}", &udp_addr); match socket.send_to(&new_buf, &udp_addr).await { Ok(_) => debug!("OSC command sent"), Err(err) => warn!("Failed to send UDP OSC message: {}", err), } }); } fn parse_send_after_tag(msg_args: &[rosc::OscType]) -> Result<String, String> { match msg_args.first() { Some(rosc::OscType::String(tag)) => Ok(tag.to_owned()), other => Err(format!("Unexpected tag argument: {:?}", other)), } } // TODO: error type /// Parse OSC server address (host and port) from given OSC message arguments (typically from /// `/send_after` messages). fn parse_command_address(msg_args: &[rosc::OscType]) -> Result<String, String> { let host = match msg_args.first() { Some(rosc::OscType::String(host)) => { // Workaround for https://github.com/rust-lang/rust/issues/34202 // affecting OS X / Windows // TODO: check v6 status of Sonic Pi if host == "localhost" { "127.0.0.1" } else { host } }, other => return Err(format!("Unexpected host argument: {:?}", other)), }; let port = match msg_args.get(1) { Some(rosc::OscType::Int(port)) => port, other => return Err(format!("Unexpected port argument: {:?}", other)), }; Ok(format!("{}:{}", host, port)) } } #[derive(Debug)] enum ServerError { /// Network error, typically caused by UDP send/recv here. Io(io::Error), /// OSC error, typically caused by failing to encode/decode OSC data structures. Osc(rosc::OscError), /// Error in cases where valid OSC packets were received, but containing invalid payloads (e.g. /// a `/send_after` containing unexpected arguments). Protocol(String), } impl fmt::Display for ServerError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} impl Error for ServerError {} impl From<io::Error> for ServerError { fn from(err: io::Error) -> Self { Self::Io(err) } } impl From<rosc::OscError> for ServerError { fn from(err: rosc::OscError) -> Self { Self::Osc(err) } } #[tokio::main] async fn main() -> Result<(), io::Error> { env_logger::init(); let addr = env::args().nth(1).unwrap_or_else(|| "127.0.0.1:4560".to_string()); Server::new(&addr).await?.run().await } #[cfg(test)] mod tests { use crate::timetag_to_unix; #[test] fn time_tag_to_unix_1() { // 2^32 / 2 fractional seconds, i.e. 500,000μs assert_eq!(timetag_to_unix(3_608_146_800, 2_147_483_648), (1_399_158_000, 500_000)); } #[test] fn time_tag_to_unix_2() { assert_eq!(timetag_to_unix(3549086042, 4010129359), (1340097242, 933680)); } #[test] fn time_tag_to_unix_seconds_only() { assert_eq!(timetag_to_unix(3_608_146_800, 0), (1_399_158_000, 0)); } // TODO: tests for time tags in the past, invalid time tags, once error requirement determined }
{ match self { Self::Io(err) => write!(f, "IO error: {}", err), Self::Osc(err) => write!(f, "Failed to decode OSC packet: {:?}", err), Self::Protocol(err) => write!(f, "{}", err), } }
identifier_body
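The `identifier_body` middle above supplies the `Display` body for `ServerError`. Here is a compact, compilable sketch of the same error-wrapping shape; the `rosc` variant is omitted so the sketch stands alone without that crate:

use std::{error::Error, fmt, io};

// Wrap lower-level errors and carry protocol violations as strings,
// mirroring the record's ServerError.
#[derive(Debug)]
enum ServerError {
    Io(io::Error),
    Protocol(String),
}

impl fmt::Display for ServerError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::Io(err) => write!(f, "IO error: {}", err),
            Self::Protocol(err) => write!(f, "{}", err),
        }
    }
}

impl Error for ServerError {}

impl From<io::Error> for ServerError {
    fn from(err: io::Error) -> Self {
        Self::Io(err)
    }
}

fn main() {
    // The `?` operator relies on exactly this From conversion.
    let err: ServerError = io::Error::new(io::ErrorKind::Other, "boom").into();
    println!("{}", err); // prints: IO error: boom
}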
lz4.rs
} if self.cur == self.input.len() { break } // Read off the next u16 offset { let back = (self.bump() as usize) | ((self.bump() as usize) << 8); debug!("found back {}", back); self.start = self.end - back; } // Slosh around some bytes now { let mut len = self.length(code & 0xf); let literal = self.end - self.start; if literal < 4 { static DECR: [usize; 4] = [0, 3, 2, 3]; self.cp(4, DECR[literal]); } else { len += 4; } self.cp(len, 0); } } self.end } fn length(&mut self, code: u8) -> usize { let mut ret = code as usize; if code == 0xf { loop { let tmp = self.bump(); ret += tmp as usize; if tmp != 0xff { break } } } ret } fn bump(&mut self) -> u8 { let ret = self.input[self.cur]; self.cur += 1; ret } #[inline] fn cp(&mut self, len: usize, decr: usize) { let end = self.end; self.grow_output(end + len); for i in 0..len { self.output[end + i] = (*self.output)[self.start + i]; } self.end += len; self.start += len - decr; } // Extends the output vector to a target number of bytes (in total), but // does not actually initialize the new data. The length of the vector is // updated, but the bytes will all have undefined values. It is assumed that // the next operation is to pave over these bytes (so the initialization is // unnecessary). #[inline] fn grow_output(&mut self, target: usize) { if self.output.capacity() < target { debug!("growing {} to {}", self.output.capacity(), target); //let additional = target - self.output.capacity(); //self.output.reserve(additional); while self.output.len() < target { self.output.push(0); } } else { unsafe { self.output.set_len(target); } } } } struct BlockEncoder<'a> { input: &'a [u8], output: &'a mut Vec<u8>, hash_table: Vec<u32>, pos: u32, anchor: u32, dest_pos: u32 } /// Returns maximum possible size of compressed output /// given source size pub fn compression_bound(size: u32) -> Option<u32> { if size > MAX_INPUT_SIZE { None } else { Some(size + (size / 255) + 16 + 4) } } impl<'a> BlockEncoder<'a> { #[inline(always)] fn seq_at(&self, pos: u32) -> u32 { (self.input[pos as usize + 3] as u32) << 24 | (self.input[pos as usize + 2] as u32) << 16 | (self.input[pos as usize + 1] as u32) << 8 | (self.input[pos as usize] as u32) } fn write_literals(&mut self, len: u32, ml_len: u32, pos: u32) { let mut ln = len; let code = if ln > RUN_MASK - 1 { RUN_MASK as u8 } else { ln as u8 }; if ml_len > ML_MASK - 1 { self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ML_MASK as u8; } else { self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ml_len as u8; } self.dest_pos += 1; if code == RUN_MASK as u8 {
// FIXME: find out why slicing syntax fails tests //self.output[self.dest_pos as usize.. (self.dest_pos + len) as usize] = self.input[pos as uint.. (pos + len) as uint]; for i in 0..(len as usize) { self.output[self.dest_pos as usize + i] = self.input[pos as usize + i]; } self.dest_pos += len; } fn encode(&mut self) -> u32 { let input_len = self.input.len() as u32; match compression_bound(input_len) { None => 0, Some(out_size) => { let out_size_usize = out_size as usize; if self.output.capacity() < out_size_usize { let additional = out_size_usize - self.output.capacity(); self.output.reserve(additional); } unsafe { self.output.set_len(out_size_usize); } let mut step = 1u32; let mut limit = INCOMPRESSIBLE; loop { if self.pos + 12 > input_len { let tmp = self.anchor; self.write_literals(self.input.len() as u32 - tmp, 0, tmp); unsafe { self.output.set_len(self.dest_pos as usize) }; return self.dest_pos; } let seq = self.seq_at(self.pos); let hash = (Wrapping(seq) * Wrapping(2654435761)).shr(HASH_SHIFT as usize).0; let mut r = (Wrapping(self.hash_table[hash as usize]) + Wrapping(UNINITHASH)).0; self.hash_table[hash as usize] = (Wrapping(self.pos) - Wrapping(UNINITHASH)).0; if (Wrapping(self.pos) - Wrapping(r)).shr(16).0 != 0 || seq != self.seq_at(r) { if self.pos - self.anchor > limit { limit = limit << 1; step += 1 + (step >> 2); } self.pos += step; continue; } if step > 1 { self.hash_table[hash as usize] = r - UNINITHASH; self.pos -= step - 1; step = 1; continue; } limit = INCOMPRESSIBLE; let ln = self.pos - self.anchor; let back = self.pos - r; let anchor = self.anchor; self.pos += MIN_MATCH; r += MIN_MATCH; self.anchor = self.pos; while (self.pos < input_len - 5) && self.input[self.pos as usize] == self.input[r as usize] { self.pos += 1; r += 1 } let mut ml_len = self.pos - self.anchor; self.write_literals(ln, ml_len, anchor); self.output[self.dest_pos as usize] = back as u8; self.output[self.dest_pos as usize + 1] = (back >> 8) as u8; self.dest_pos += 2; if ml_len > ML_MASK - 1 { ml_len -= ML_MASK; while ml_len > 254 { ml_len -= 255; self.output[self.dest_pos as usize] = 255; self.dest_pos += 1; } self.output[self.dest_pos as usize] = ml_len as u8; self.dest_pos += 1; } self.anchor = self.pos; } } } } } /// This structure is used to decode a stream of LZ4 blocks. This wraps an /// internal reader which is read from when this decoder's read method is /// called. pub struct Decoder<R> { /// The internally wrapped reader. This is exposed so it may be moved out /// of. Note that if data is read from the reader while decoding is in /// progress the output stream will get corrupted. pub r: R, temp: Vec<u8>, output: Vec<u8>, start: usize, end: usize, eof: bool, header: bool, blk_checksum: bool, stream_checksum: bool, max_block_size: usize, } impl<R: Read + Sized> Decoder<R> { /// Creates a new decoder which will read data from the given stream. The /// inner stream can be re-acquired by moving out of the `r` field of this /// structure. pub fn new(r: R) -> Decoder<R> { Decoder { r: r, temp: Vec::new(), output: Vec::new(), header: false, blk_checksum: false, stream_checksum: false, start: 0, end: 0, eof: false, max_block_size: 0, } } /// Resets this decoder back to its initial state. Note that the underlying /// stream is not seeked on or has any alterations performed on it. pub fn reset(&mut self) { self.header = false; self.eof = false; self.start = 0; self.end = 0; } fn read_header(&mut self) -> io::Result<()> { // Make sure the magic number is what's expected. 
if try!(self.r.read_u32::<LittleEndian>()) != MAGIC { return Err(io::Error::new(io::ErrorKind::InvalidInput, "")) } let mut bits = [0; 3]; try!(self.r.read(&mut bits[..2])); let flg = bits[0]; let bd = bits[1]; // bits 7/6, the version number. Right now this must be 1 if (flg >> 6) != 0b01 { return Err(io::Error::new(io::ErrorKind::InvalidInput, "")) } // bit 5 is the "block independence", don't care about this yet // bit 4 is whether blocks have checksums or not self.blk_checksum = (flg & 0x10) != 0; // bit 3 is whether there is a following stream size let stream_size = (flg & 0x08) != 0; // bit 2 is whether there is a stream checksum self.stream_checksum = (flg & 0x04) != 0; // bit 1 is reserved // bit 0 is whether there is a preset dictionary let preset_dictionary = (flg & 0x01) != 0; static MAX_SIZES: [usize; 8] = [0, 0, 0, 0, // all N/A 64 << 10, // 64KB 256 << 10, // 256 KB 1 << 20, // 1MB 4 << 20]; // 4MB // bit 7 is reserved // bits 6-4 are the maximum block size let max_block_size = MAX_SIZES[(bd >> 4) as usize & 0x7]; // bits 3-0 are reserved // read off other portions of the stream let size = if stream_size { Some(try!(self.r.read_u64::<LittleEndian>())) } else { None }; assert!(!preset_dictionary, "preset dictionaries not supported yet"); debug!("blk: {}", self.blk_checksum); debug!("stream: {}", self.stream_checksum); debug!("max size: {}", max_block_size); debug!("stream size: {:?}", size); self.max_block_size = max_block_size; // XXX: implement checksums let cksum = try!(self.r.read_u8()); debug!("ignoring header checksum: {}", cksum); return Ok(()); } fn decode_block(&mut self) -> io::Result<bool> { match try!(self.r.read_u32::<LittleEndian>()) { // final block, we're done here 0 => return Ok(false), // raw block to read n if n & 0x80000000 != 0 => { let amt = (n & 0x7fffffff) as usize; self.output.truncate(0); self.output.reserve(amt); try!(self.r.push_exactly(amt as u64, &mut self.output)); self.start = 0; self.end = amt; } // actual block to decompress n => { let n = n as usize; self.temp.truncate(0); self.temp.reserve(n); try!(self.r.push_exactly(n as u64, &mut self.temp)); let target = cmp::min(self.max_block_size, 4 * n / 3); self.output.truncate(0); self.output.reserve(target); let mut decoder = BlockDecoder { input: &self.temp[..n], output: &mut self.output, cur: 0, start: 0, end: 0, }; self.start = 0; self.end = decoder.decode(); } } if self.blk_checksum { let cksum = try!(self.r.read_u32::<LittleEndian>()); debug!("ignoring block checksum {}", cksum); } return Ok(true); } /// Tests whether the end of this LZ4 stream has been reached pub fn eof(&mut self) -> bool { self.eof } } impl<R: Read> Read for Decoder<R> { fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { if self.eof { return Ok(0) } if !self.header { try!(self.read_header()); self.header = true; } let mut amt = dst.len(); let len = amt; while amt > 0 { if self.start == self.end { let keep_going = try!(self.decode_block()); if !keep_going { self.eof = true; break; } } let n = cmp::min(amt, self.end - self.start); unsafe { copy_nonoverlapping( &self.output[self.start], &mut dst[len - amt], n )}; self.start += n; amt -= n; } Ok(len - amt) } } /// This structure is used to compress a stream of bytes using the LZ4 /// compression algorithm. This is a wrapper around an internal writer which /// bytes will be written to. 
pub struct Encoder<W> { w: W, buf: Vec<u8>, tmp: Vec<u8>, wrote_header: bool, limit: usize, } impl<W: Write> Encoder<W> { /// Creates a new encoder which will have its output written to the given /// output stream. The output stream can be re-acquired by calling /// `finish()` /// /// NOTE: compression isn't actually implemented just yet, this is just a /// skeleton of a future implementation. pub fn new(w: W) -> Encoder<W> { Encoder { w: w, wrote_header: false, buf: Vec::with_capacity(1024), tmp: Vec::new(), limit: 256 * 1024, } } fn encode_block(&mut self) -> io::Result<()> { self.tmp.truncate(0); if self.compress() { try!(self.w.write_u32::<LittleEndian>(self.tmp.len() as u32)); try!(self.w.write(&self.tmp)); } else { try!(self.w.write_u32::<LittleEndian>((self.buf.len() as u32) | 0x80000000)); try!(self.w.write(&self.buf)); } self.buf.truncate(0); Ok(()) } fn compress(&mut self) -> bool { false } /// This function is used to flag that this session of compression is done /// with. The stream is finished up (final bytes are written), and then the /// wrapped writer is returned. pub fn finish(mut self) -> (W, io::Result<()>) { let mut result = self.flush(); for _ in 0..2 { let tmp = self.w.write_u32::<LittleEndian>(0) .map_err(byteorder_err_to_io); result = result.and_then(|_| tmp); } (self.w, result) } } impl<W: Write> Write for Encoder<W> { fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> { if !self.wrote_header { try!(self.w.write_u32::<LittleEndian>(MAGIC)); // version 01, turn on block independence, but turn off // everything else (we have no checksums right now). try!(self.w.write_u8(0b01_100000)); // Maximum block size is 256KB try!(self.w.write_u8(0b0_101_0000));
ln -= RUN_MASK; while ln > 254 { self.output[self.dest_pos as usize] = 255; self.dest_pos += 1; ln -= 255; } self.output[self.dest_pos as usize] = ln as u8; self.dest_pos += 1; }
conditional_block
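The `conditional_block` middle above is the `RUN_MASK` overflow loop from `write_literals`. The sketch below shows the variable-length count scheme it implements, together with its decoding counterpart (the record's `length()`): a 4-bit nibble holds counts 0..=14, and 15 means "keep adding the following bytes, stopping at the first one below 255". Function names here are illustrative, not from the record:

// In real LZ4 the returned nibble is packed into the token byte;
// here it is returned separately to keep the sketch small.
const RUN_MASK: usize = 15;

fn encode_length(mut len: usize, out: &mut Vec<u8>) -> u8 {
    if len < RUN_MASK {
        return len as u8; // fits in the nibble, no extra bytes
    }
    len -= RUN_MASK;
    while len > 254 {
        out.push(255);
        len -= 255;
    }
    out.push(len as u8);
    RUN_MASK as u8
}

fn decode_length(nibble: u8, extra: &mut impl Iterator<Item = u8>) -> usize {
    let mut len = nibble as usize;
    if len == RUN_MASK {
        loop {
            let b = extra.next().expect("truncated length");
            len += b as usize;
            if b != 255 {
                break;
            }
        }
    }
    len
}

fn main() {
    // Round-trip a few counts, including the 15 and 255-byte boundaries.
    for &n in &[0, 14, 15, 269, 270, 1000] {
        let mut extra = Vec::new();
        let nibble = encode_length(n, &mut extra);
        assert_eq!(decode_length(nibble, &mut extra.into_iter()), n);
    }
}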
lz4.rs
} if self.cur == self.input.len() { break } // Read off the next u16 offset { let back = (self.bump() as usize) | ((self.bump() as usize) << 8); debug!("found back {}", back); self.start = self.end - back; } // Slosh around some bytes now { let mut len = self.length(code & 0xf); let literal = self.end - self.start; if literal < 4 { static DECR: [usize; 4] = [0, 3, 2, 3]; self.cp(4, DECR[literal]); } else { len += 4; } self.cp(len, 0); } } self.end } fn length(&mut self, code: u8) -> usize { let mut ret = code as usize; if code == 0xf { loop { let tmp = self.bump(); ret += tmp as usize; if tmp != 0xff { break } } } ret } fn bump(&mut self) -> u8 { let ret = self.input[self.cur]; self.cur += 1; ret } #[inline] fn cp(&mut self, len: usize, decr: usize) { let end = self.end; self.grow_output(end + len); for i in 0..len { self.output[end + i] = (*self.output)[self.start + i]; } self.end += len; self.start += len - decr; } // Extends the output vector to a target number of bytes (in total), but // does not actually initialize the new data. The length of the vector is // updated, but the bytes will all have undefined values. It is assumed that // the next operation is to pave over these bytes (so the initialization is // unnecessary). #[inline] fn grow_output(&mut self, target: usize) { if self.output.capacity() < target { debug!("growing {} to {}", self.output.capacity(), target); //let additional = target - self.output.capacity(); //self.output.reserve(additional); while self.output.len() < target { self.output.push(0); } } else { unsafe { self.output.set_len(target); } } } } struct BlockEncoder<'a> { input: &'a [u8], output: &'a mut Vec<u8>, hash_table: Vec<u32>, pos: u32, anchor: u32, dest_pos: u32 } /// Returns maximum possible size of compressed output /// given source size pub fn compression_bound(size: u32) -> Option<u32> { if size > MAX_INPUT_SIZE { None } else { Some(size + (size / 255) + 16 + 4) } } impl<'a> BlockEncoder<'a> { #[inline(always)] fn seq_at(&self, pos: u32) -> u32 { (self.input[pos as usize + 3] as u32) << 24 | (self.input[pos as usize + 2] as u32) << 16 | (self.input[pos as usize + 1] as u32) << 8 | (self.input[pos as usize] as u32) } fn write_literals(&mut self, len: u32, ml_len: u32, pos: u32) { let mut ln = len; let code = if ln > RUN_MASK - 1 { RUN_MASK as u8 } else { ln as u8 }; if ml_len > ML_MASK - 1 { self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ML_MASK as u8; } else { self.output[self.dest_pos as usize] = (code << ML_BITS as usize) + ml_len as u8; } self.dest_pos += 1; if code == RUN_MASK as u8 { ln -= RUN_MASK; while ln > 254 { self.output[self.dest_pos as usize] = 255; self.dest_pos += 1; ln -= 255; } self.output[self.dest_pos as usize] = ln as u8; self.dest_pos += 1; } // FIXME: find out why slicing syntax fails tests //self.output[self.dest_pos as usize.. (self.dest_pos + len) as usize] = self.input[pos as uint.. 
(pos + len) as uint]; for i in 0..(len as usize) { self.output[self.dest_pos as usize + i] = self.input[pos as usize + i]; } self.dest_pos += len; } fn encode(&mut self) -> u32 { let input_len = self.input.len() as u32; match compression_bound(input_len) { None => 0, Some(out_size) => { let out_size_usize = out_size as usize; if self.output.capacity() < out_size_usize { let additional = out_size_usize - self.output.capacity(); self.output.reserve(additional); } unsafe { self.output.set_len(out_size_usize); } let mut step = 1u32; let mut limit = INCOMPRESSIBLE; loop { if self.pos + 12 > input_len { let tmp = self.anchor; self.write_literals(self.input.len() as u32 - tmp, 0, tmp); unsafe { self.output.set_len(self.dest_pos as usize) }; return self.dest_pos; } let seq = self.seq_at(self.pos); let hash = (Wrapping(seq) * Wrapping(2654435761)).shr(HASH_SHIFT as usize).0; let mut r = (Wrapping(self.hash_table[hash as usize]) + Wrapping(UNINITHASH)).0; self.hash_table[hash as usize] = (Wrapping(self.pos) - Wrapping(UNINITHASH)).0; if (Wrapping(self.pos) - Wrapping(r)).shr(16).0 != 0 || seq != self.seq_at(r) { if self.pos - self.anchor > limit { limit = limit << 1; step += 1 + (step >> 2); } self.pos += step; continue; } if step > 1 { self.hash_table[hash as usize] = r - UNINITHASH; self.pos -= step - 1; step = 1; continue; } limit = INCOMPRESSIBLE; let ln = self.pos - self.anchor; let back = self.pos - r; let anchor = self.anchor; self.pos += MIN_MATCH; r += MIN_MATCH; self.anchor = self.pos; while (self.pos < input_len - 5) && self.input[self.pos as usize] == self.input[r as usize] { self.pos += 1; r += 1 } let mut ml_len = self.pos - self.anchor; self.write_literals(ln, ml_len, anchor); self.output[self.dest_pos as usize] = back as u8; self.output[self.dest_pos as usize + 1] = (back >> 8) as u8; self.dest_pos += 2; if ml_len > ML_MASK - 1 { ml_len -= ML_MASK; while ml_len > 254 { ml_len -= 255; self.output[self.dest_pos as usize] = 255; self.dest_pos += 1; } self.output[self.dest_pos as usize] = ml_len as u8; self.dest_pos += 1; } self.anchor = self.pos; } } } } } /// This structure is used to decode a stream of LZ4 blocks. This wraps an /// internal reader which is read from when this decoder's read method is /// called. pub struct Decoder<R> { /// The internally wrapped reader. This is exposed so it may be moved out /// of. Note that if data is read from the reader while decoding is in /// progress the output stream will get corrupted. pub r: R, temp: Vec<u8>, output: Vec<u8>, start: usize, end: usize, eof: bool, header: bool, blk_checksum: bool, stream_checksum: bool, max_block_size: usize, } impl<R: Read + Sized> Decoder<R> { /// Creates a new decoder which will read data from the given stream. The /// inner stream can be re-acquired by moving out of the `r` field of this /// structure. pub fn new(r: R) -> Decoder<R> { Decoder { r: r, temp: Vec::new(), output: Vec::new(), header: false, blk_checksum: false, stream_checksum: false, start: 0, end: 0, eof: false, max_block_size: 0, } } /// Resets this decoder back to its initial state. Note that the underlying /// stream is not seeked on or has any alterations performed on it. pub fn reset(&mut self) { self.header = false; self.eof = false; self.start = 0; self.end = 0; } fn read_header(&mut self) -> io::Result<()> { // Make sure the magic number is what's expected. 
if try!(self.r.read_u32::<LittleEndian>()) != MAGIC { return Err(io::Error::new(io::ErrorKind::InvalidInput, "")) } let mut bits = [0; 3]; try!(self.r.read(&mut bits[..2])); let flg = bits[0]; let bd = bits[1]; // bits 7/6, the version number. Right now this must be 1 if (flg >> 6) != 0b01 { return Err(io::Error::new(io::ErrorKind::InvalidInput, "")) } // bit 5 is the "block independence", don't care about this yet // bit 4 is whether blocks have checksums or not self.blk_checksum = (flg & 0x10) != 0; // bit 3 is whether there is a following stream size let stream_size = (flg & 0x08) != 0; // bit 2 is whether there is a stream checksum self.stream_checksum = (flg & 0x04) != 0; // bit 1 is reserved // bit 0 is whether there is a preset dictionary let preset_dictionary = (flg & 0x01) != 0; static MAX_SIZES: [usize; 8] = [0, 0, 0, 0, // all N/A 64 << 10, // 64KB 256 << 10, // 256 KB 1 << 20, // 1MB 4 << 20]; // 4MB // bit 7 is reserved // bits 6-4 are the maximum block size let max_block_size = MAX_SIZES[(bd >> 4) as usize & 0x7]; // bits 3-0 are reserved // read off other portions of the stream let size = if stream_size { Some(try!(self.r.read_u64::<LittleEndian>())) } else { None }; assert!(!preset_dictionary, "preset dictionaries not supported yet"); debug!("blk: {}", self.blk_checksum); debug!("stream: {}", self.stream_checksum); debug!("max size: {}", max_block_size); debug!("stream size: {:?}", size); self.max_block_size = max_block_size; // XXX: implement checksums let cksum = try!(self.r.read_u8()); debug!("ignoring header checksum: {}", cksum); return Ok(()); } fn decode_block(&mut self) -> io::Result<bool> { match try!(self.r.read_u32::<LittleEndian>()) { // final block, we're done here 0 => return Ok(false), // raw block to read n if n & 0x80000000 != 0 => { let amt = (n & 0x7fffffff) as usize; self.output.truncate(0); self.output.reserve(amt); try!(self.r.push_exactly(amt as u64, &mut self.output)); self.start = 0; self.end = amt; } // actual block to decompress n => { let n = n as usize; self.temp.truncate(0); self.temp.reserve(n); try!(self.r.push_exactly(n as u64, &mut self.temp)); let target = cmp::min(self.max_block_size, 4 * n / 3); self.output.truncate(0); self.output.reserve(target); let mut decoder = BlockDecoder { input: &self.temp[..n], output: &mut self.output, cur: 0, start: 0, end: 0, }; self.start = 0; self.end = decoder.decode(); } } if self.blk_checksum { let cksum = try!(self.r.read_u32::<LittleEndian>()); debug!("ignoring block checksum {}", cksum); } return Ok(true); } /// Tests whether the end of this LZ4 stream has been reached pub fn eof(&mut self) -> bool { self.eof } } impl<R: Read> Read for Decoder<R> { fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> { if self.eof { return Ok(0) } if !self.header { try!(self.read_header()); self.header = true; } let mut amt = dst.len(); let len = amt; while amt > 0 { if self.start == self.end { let keep_going = try!(self.decode_block()); if !keep_going { self.eof = true; break; } } let n = cmp::min(amt, self.end - self.start); unsafe { copy_nonoverlapping( &self.output[self.start], &mut dst[len - amt], n )}; self.start += n; amt -= n; } Ok(len - amt) } } /// This structure is used to compress a stream of bytes using the LZ4 /// compression algorithm. This is a wrapper around an internal writer which /// bytes will be written to. pub struct En
> { w: W, buf: Vec<u8>, tmp: Vec<u8>, wrote_header: bool, limit: usize, } impl<W: Write> Encoder<W> { /// Creates a new encoder which will have its output written to the given /// output stream. The output stream can be re-acquired by calling /// `finish()` /// /// NOTE: compression isn't actually implemented just yet, this is just a /// skeleton of a future implementation. pub fn new(w: W) -> Encoder<W> { Encoder { w: w, wrote_header: false, buf: Vec::with_capacity(1024), tmp: Vec::new(), limit: 256 * 1024, } } fn encode_block(&mut self) -> io::Result<()> { self.tmp.truncate(0); if self.compress() { try!(self.w.write_u32::<LittleEndian>(self.tmp.len() as u32)); try!(self.w.write(&self.tmp)); } else { try!(self.w.write_u32::<LittleEndian>((self.buf.len() as u32) | 0x80000000)); try!(self.w.write(&self.buf)); } self.buf.truncate(0); Ok(()) } fn compress(&mut self) -> bool { false } /// This function is used to flag that this session of compression is done /// with. The stream is finished up (final bytes are written), and then the /// wrapped writer is returned. pub fn finish(mut self) -> (W, io::Result<()>) { let mut result = self.flush(); for _ in 0..2 { let tmp = self.w.write_u32::<LittleEndian>(0) .map_err(byteorder_err_to_io); result = result.and_then(|_| tmp); } (self.w, result) } } impl<W: Write> Write for Encoder<W> { fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> { if !self.wrote_header { try!(self.w.write_u32::<LittleEndian>(MAGIC)); // version 01, turn on block independence, but turn off // everything else (we have no checksums right now). try!(self.w.write_u8(0b01_100000)); // Maximum block size is 256KB try!(self.w.write_u8(0b0_101_0000));
coder<W
identifier_name
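The `read_header` routine in the records above unpacks several flags from the two frame-descriptor bytes, and the `Encoder` writes 0b01_100000 and 0b0_101_0000 for them. Below is a standalone sketch of that bit layout as pure functions (names illustrative), checked against those exact bytes:

// FLG byte: version in bits 7-6, then block-independence (5),
// block checksums (4), content size (3), stream checksum (2),
// reserved (1), preset dictionary (0), mirroring read_header's comments.
fn parse_flg(flg: u8) -> Result<(bool, bool, bool, bool), String> {
    if (flg >> 6) != 0b01 {
        return Err(format!("unsupported version: {:#04b}", flg >> 6));
    }
    let blk_checksum = (flg & 0x10) != 0;
    let stream_size = (flg & 0x08) != 0;
    let stream_checksum = (flg & 0x04) != 0;
    let preset_dictionary = (flg & 0x01) != 0;
    Ok((blk_checksum, stream_size, stream_checksum, preset_dictionary))
}

// BD byte: bits 6-4 select the maximum block size; indices 0-3 are reserved.
fn max_block_size(bd: u8) -> usize {
    static MAX_SIZES: [usize; 8] =
        [0, 0, 0, 0, 64 << 10, 256 << 10, 1 << 20, 4 << 20];
    MAX_SIZES[(bd >> 4) as usize & 0x7]
}

fn main() {
    // The bytes the Encoder writes: version 01 with block independence
    // and no checksums, and a 256 KB maximum block size.
    assert_eq!(parse_flg(0b01_100000), Ok((false, false, false, false)));
    assert_eq!(max_block_size(0b0_101_0000), 256 << 10);
}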