file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---
app.rs | use audio;
use audio::cpal;
use find_folder;
use glium::glutin;
use state;
use std;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use window::{self, Window};
use ui;
/// An **App** represents the entire context of your application.
///
/// The **App** owns and manages:
///
/// - the event loop (used to drive the application forward)
/// - all OpenGL windows (for graphics and user input, can be referenced via IDs).
pub struct App {
pub(crate) events_loop: glutin::EventsLoop,
pub(crate) windows: RefCell<HashMap<window::Id, Window>>,
pub(super) exit_on_escape: Cell<bool>,
pub(crate) ui: ui::Arrangement,
loop_mode: Cell<LoopMode>,
/// The `App`'s audio-related API.
pub audio: Audio,
/// The current state of the `Mouse`.
pub mouse: state::Mouse,
/// State of the window currently in focus.
pub window: state::Window,
/// State of the keyboard keys.
///
/// `mods` provides state of each of the modifier keys: `shift`, `ctrl`, `alt`, `logo`.
///
/// `down` is the set of keys that are currently pressed.
///
/// NOTE: `down` is tracked by the nannou `App`, so issues might occur if e.g. a key is
/// pressed while the app is in focus and then released when out of focus. Eventually we should
/// change this to query the OS somehow, but I don't think `winit` provides a way to do this
/// yet.
pub keys: state::Keys,
}
/// An **App**'s audio API.
pub struct Audio {
event_loop: Arc<cpal::EventLoop>,
process_fn_tx: RefCell<Option<mpsc::Sender<audio::stream::ProcessFnMsg>>>,
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub struct Proxy {
events_loop_proxy: glutin::EventsLoopProxy,
}
/// The mode in which the **App** is currently running the event loop.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum LoopMode {
/// Specifies that the application is continuously looping at a consistent rate.
///
/// An application running in the **Rate** loop mode will behave as follows:
///
/// 1. Poll for and collect all pending user input.
/// `update` is then called with all application events that have occurred.
///
/// 2. `update` is called with an `Event::Update`.
///
/// 3. `draw` is called.
///
/// 4. Check the time and sleep for the remainder of the `update_interval`.
Rate {
/// The minimum interval between emitted updates.
update_interval: Duration,
},
Wait {
/// The number of `update`s (and in turn `draw`s) that should occur since the application
/// last received a non-`Update` event.
updates_following_event: usize,
/// The minimum interval between emitted updates.
update_interval: Duration,
},
}
fn update_interval(fps: f64) -> Duration {
assert!(fps > 0.0);
const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
let interval_nanosecs = NANOSEC_PER_SEC / fps;
let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
Duration::new(secs, nanosecs)
}
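// A small sanity check of the fps-to-interval arithmetic above (a hypothetical test added
// for illustration, not part of the original file): at 60 fps the interval should come out
// to 0 s and 16_666_666 ns (1_000_000_000 / 60, truncated by the casts).
#[cfg(test)]
mod update_interval_sketch {
    use super::*;

    #[test]
    fn sixty_fps_interval() {
        let d = update_interval(60.0);
        assert_eq!(d.as_secs(), 0);
        assert_eq!(d.subsec_nanos(), 16_666_666);
    }
}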
impl LoopMode {
pub const DEFAULT_RATE_FPS: f64 = 60.0;
pub const DEFAULT_UPDATES_FOLLOWING_EVENT: usize = 3;
/// Specify the **Rate** mode with the given frames-per-second.
pub fn rate_fps(fps: f64) -> Self {
let update_interval = update_interval(fps);
LoopMode::Rate { update_interval }
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Uses the default update interval.
pub fn wait(updates_following_event: usize) -> Self {
let update_interval = update_interval(Self::DEFAULT_RATE_FPS);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
pub fn wait_with_max_fps(updates_following_event: usize, max_fps: f64) -> Self {
let update_interval = update_interval(max_fps);
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
/// Specify the **Wait** mode with the given number of updates following each non-`Update`
/// event.
///
/// Waits long enough to ensure loop iterations never occur faster than the given `update_interval`.
pub fn wait_with_interval(updates_following_event: usize, update_interval: Duration) -> Self {
LoopMode::Wait {
updates_following_event,
update_interval,
}
}
}
impl Default for LoopMode {
fn default() -> Self {
LoopMode::rate_fps(Self::DEFAULT_RATE_FPS)
}
}
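// Hypothetical usage sketch (not part of the original file): the loop mode is normally
// picked once near startup, e.g.
//
//     app.set_loop_mode(LoopMode::rate_fps(120.0)); // fixed-rate loop at ~120 updates per second
//     app.set_loop_mode(LoopMode::wait(3));         // only update/draw a few times after each event
//
// As noted on `set_loop_mode` below, the change only takes effect at the end of the current
// loop iteration.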
impl App {
pub const ASSETS_DIRECTORY_NAME: &'static str = "assets";
pub const DEFAULT_EXIT_ON_ESCAPE: bool = true;
// Create a new `App`.
pub(super) fn new(events_loop: glutin::EventsLoop) -> Self {
let windows = RefCell::new(HashMap::new());
let exit_on_escape = Cell::new(Self::DEFAULT_EXIT_ON_ESCAPE);
let loop_mode = Cell::new(LoopMode::default());
let cpal_event_loop = Arc::new(cpal::EventLoop::new());
let process_fn_tx = RefCell::new(None);
let audio = Audio { event_loop: cpal_event_loop, process_fn_tx };
let ui = ui::Arrangement::new();
let mouse = state::Mouse::new();
let window = state::Window::new();
let keys = state::Keys::default();
App {
events_loop,
windows,
exit_on_escape,
loop_mode,
audio,
ui,
mouse,
window,
keys,
}
}
/// Find and return the absolute path to the project's `assets` directory.
///
/// This method looks for the assets directory in the following order:
///
/// 1. Checks the same directory as the executable.
/// 2. Recursively checks exe's parent directories (to a max depth of 5).
/// 3. Recursively checks exe's children directories (to a max depth of 3).
pub fn assets_path(&self) -> Result<PathBuf, find_folder::Error> {
let exe_path = std::env::current_exe()?;
find_folder::Search::ParentsThenKids(5, 3)
.of(exe_path.parent().expect("executable has no parent directory to search").into())
.for_folder(Self::ASSETS_DIRECTORY_NAME)
}
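// Hypothetical usage sketch (not part of the original file): resolving a file that lives
// under the project's `assets` directory. The file name here is made up for illustration.
//
//     let font_path = app.assets_path()?.join("fonts").join("NotoSans-Regular.ttf");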
/// Begin building a new OpenGL window.
pub fn new_window<'a>(&'a self) -> window::Builder<'a, 'static> {
window::Builder::new(self)
}
/// The number of windows currently in the application.
pub fn window_count(&self) -> usize {
self.windows.borrow().len()
}
/// A reference to the window with the given `Id`.
pub fn window(&self, id: window::Id) -> Option<std::cell::Ref<Window>> {
let windows = self.windows.borrow();
if !windows.contains_key(&id) {
None
} else {
Some(std::cell::Ref::map(windows, |ws| &ws[&id]))
}
}
/// Return whether or not the `App` is currently set to exit when the `Escape` key is pressed.
pub fn exit_on_escape(&self) -> bool {
self.exit_on_escape.get()
}
/// Specify whether or not the app should close when the `Escape` key is pressed.
///
/// By default this is `true`.
pub fn set_exit_on_escape(&self, b: bool) {
self.exit_on_escape.set(b);
}
/// Returns the **App**'s current **LoopMode**.
pub fn loop_mode(&self) -> LoopMode {
self.loop_mode.get()
}
/// Sets the loop mode of the **App**.
///
/// Note: Setting the loop mode will not affect anything until the end of the current loop
/// iteration. The behaviour of a single loop iteration is described under each of the
/// **LoopMode** variants.
pub fn set_loop_mode(&self, mode: LoopMode) {
self.loop_mode.set(mode);
}
/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub fn create_proxy(&self) -> Proxy {
let events_loop_proxy = self.events_loop.create_proxy();
Proxy { events_loop_proxy }
}
/// Create a new `Ui` for the window with the given `Id`.
///
/// Returns `None` if there is no window for the given `window_id`.
pub fn new_ui(&self, window_id: window::Id) -> ui::Builder {
ui::Builder::new(self, window_id)
}
}
impl Audio {
/// Enumerate the available audio devices on the system.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn devices(&self) -> audio::Devices {
let devices = cpal::devices();
audio::Devices { devices }
}
/// Enumerate the available audio devices on the system that support input streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn input_devices(&self) -> audio::stream::input::Devices {
let devices = cpal::input_devices();
audio::stream::input::Devices { devices }
}
/// Enumerate the available audio devices on the system that support output streams.
///
/// Produces an iterator yielding `audio::Device`s.
pub fn output_devices(&self) -> audio::stream::output::Devices {
let devices = cpal::output_devices();
audio::stream::output::Devices { devices }
}
/// The current default audio input device.
pub fn default_input_device(&self) -> Option<audio::Device> {
cpal::default_input_device()
.map(|device| audio::Device { device })
}
/// The current default audio output device.
pub fn default_output_device(&self) -> Option<audio::Device> {
cpal::default_output_device()
.map(|device| audio::Device { device })
}
/// Begin building a new input audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_input_stream<M, F, S>(&self, model: M, capture: F)
-> audio::stream::input::Builder<M, F, S>
{
audio::stream::input::Builder {
capture,
builder: self.new_stream(model),
}
}
/// Begin building a new output audio stream.
///
/// If this is the first time a stream has been created, this method will spawn the
/// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
pub fn new_output_stream<M, F, S>(&self, model: M, render: F)
-> audio::stream::output::Builder<M, F, S>
{
audio::stream::output::Builder {
render,
builder: self.new_stream(model),
}
}
// Builder initialisation shared between input and output streams.
//
// If this is the first time a stream has been created, this method will spawn the
// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
fn new_stream<M, S>(&self, model: M) -> audio::stream::Builder<M, S> {
let process_fn_tx = if self.process_fn_tx.borrow().is_none() | else {
self.process_fn_tx.borrow().as_ref().unwrap().clone()
};
audio::stream::Builder {
event_loop: self.event_loop.clone(),
process_fn_tx: process_fn_tx,
model,
sample_rate: None,
channels: None,
frames_per_buffer: None,
device: None,
sample_format: PhantomData,
}
}
}
impl Proxy {
/// Wake up the application!
///
/// This wakes up the **App**'s inner event loop and inserts an **Awakened** event.
pub fn wakeup(&self) -> Result<(), glutin::EventsLoopClosed> {
self.events_loop_proxy.wakeup()
}
}
| {
let event_loop = self.event_loop.clone();
let (tx, rx) = mpsc::channel();
let mut loop_context = audio::stream::LoopContext::new(rx);
thread::Builder::new()
.name("cpal::EventLoop::run thread".into())
.spawn(move || event_loop.run(move |id, data| loop_context.process(id, data)))
.expect("failed to spawn cpal::EventLoop::run thread");
*self.process_fn_tx.borrow_mut() = Some(tx.clone());
tx
} | conditional_block |
unlinked_file.rs | //! Diagnostic emitted for files that aren't part of any crate.
use std::iter;
use hir::{db::DefDatabase, DefMap, InFile, ModuleSource};
use ide_db::{
base_db::{FileId, FileLoader, SourceDatabase, SourceDatabaseExt},
source_change::SourceChange,
RootDatabase,
};
use syntax::{
ast::{self, edit::IndentLevel, HasModuleItem, HasName},
AstNode, TextRange,
};
use text_edit::TextEdit;
use crate::{fix, Assist, Diagnostic, DiagnosticCode, DiagnosticsContext, Severity};
// Diagnostic: unlinked-file
//
// This diagnostic is shown for files that are not included in any crate, or files that are part of
// crates rust-analyzer failed to discover. The file will not have IDE features available.
pub(crate) fn unlinked_file(
ctx: &DiagnosticsContext<'_>,
acc: &mut Vec<Diagnostic>,
file_id: FileId,
) {
// Limit diagnostic to the first few characters in the file. This matches how VS Code
// renders it with the full span, but is less invasive on other editors.
let fixes = fixes(ctx, file_id);
// FIXME: This is a hack for the vscode extension to notice whether there is an autofix or not before having to resolve diagnostics.
// This is to prevent project linking popups from appearing when there is an autofix. https://github.com/rust-lang/rust-analyzer/issues/14523
let message = if fixes.is_none() {
"file not included in crate hierarchy"
} else {
"file not included in module tree"
};
let range = ctx.sema.db.parse(file_id).syntax_node().text_range();
let range = FileLoader::file_text(ctx.sema.db, file_id)
.char_indices()
.take(3)
.last()
.map(|(i, _)| i)
.map(|i| TextRange::up_to(i.try_into().unwrap()))
.unwrap_or(range);
acc.push(
Diagnostic::new(DiagnosticCode::Ra("unlinked-file", Severity::WeakWarning), message, range)
.with_fixes(fixes),
);
}
fn fixes(ctx: &DiagnosticsContext<'_>, file_id: FileId) -> Option<Vec<Assist>> {
// If there's an existing module that could add `mod` or `pub mod` items to include the unlinked file,
// suggest that as a fix.
let source_root = ctx.sema.db.source_root(ctx.sema.db.file_source_root(file_id));
let our_path = source_root.path_for_file(&file_id)?;
let parent = our_path.parent()?;
let (module_name, _) = our_path.name_and_extension()?;
let (parent, module_name) = match module_name {
// for mod.rs we need to actually look up one higher
// and take the parent as our to be module name
"mod" => {
let (name, _) = parent.name_and_extension()?;
(parent.parent()?, name.to_owned())
}
_ => (parent, module_name.to_owned()),
};
// check crate roots, i.e. main.rs, lib.rs,...
'crates: for &krate in &*ctx.sema.db.relevant_crates(file_id) {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let root_module = &crate_def_map[DefMap::ROOT];
let Some(root_file_id) = root_module.origin.file_id() else { continue };
let Some(crate_root_path) = source_root.path_for_file(&root_file_id) else { continue };
let Some(rel) = parent.strip_prefix(&crate_root_path.parent()?) else { continue };
// try resolving the relative difference of the paths as inline modules
let mut current = root_module;
for ele in rel.as_ref().components() {
let seg = match ele {
std::path::Component::Normal(seg) => seg.to_str()?,
std::path::Component::RootDir => continue,
// shouldn't occur
_ => continue 'crates,
};
match current.children.iter().find(|(name, _)| name.to_smol_str() == seg) {
Some((_, &child)) => current = &crate_def_map[child],
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
// if we aren't adding to a crate root, walk backwards such that we support `#[path =...]` overrides if possible
// build all parent paths of the form `../module_name/mod.rs` and `../module_name.rs`
let paths = iter::successors(Some(parent), |prev| prev.parent()).filter_map(|path| {
let parent = path.parent()?;
let (name, _) = path.name_and_extension()?;
Some(([parent.join(&format!("{name}.rs"))?, path.join("mod.rs")?], name.to_owned()))
});
let mut stack = vec![];
let &parent_id =
paths.inspect(|(_, name)| stack.push(name.clone())).find_map(|(paths, _)| {
paths.into_iter().find_map(|path| source_root.file_for_path(&path))
})?;
stack.pop();
'crates: for &krate in ctx.sema.db.relevant_crates(parent_id).iter() {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let Some((_, module)) = crate_def_map.modules().find(|(_, module)| {
module.origin.file_id() == Some(parent_id) && !module.origin.is_inline()
}) else {
continue;
};
if stack.is_empty() {
return make_fixes(
ctx.sema.db,
parent_id,
module.definition_source(ctx.sema.db).value,
&module_name,
file_id,
);
} else {
// direct parent file is missing,
// try finding a parent that has an inline tree from here on
let mut current = module;
for s in stack.iter().rev() {
match module.children.iter().find(|(name, _)| name.to_smol_str() == s) {
Some((_, child)) => {
current = &crate_def_map[*child];
}
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
}
None
}
fn make_fixes(
db: &RootDatabase,
parent_file_id: FileId,
source: ModuleSource,
new_mod_name: &str,
added_file_id: FileId,
) -> Option<Vec<Assist>> {
fn is_outline_mod(item: &ast::Item) -> bool {
matches!(item, ast::Item::Module(m) if m.item_list().is_none())
}
let mod_decl = format!("mod {new_mod_name};");
let pub_mod_decl = format!("pub mod {new_mod_name};");
let mut mod_decl_builder = TextEdit::builder();
let mut pub_mod_decl_builder = TextEdit::builder();
let mut items = match &source {
ModuleSource::SourceFile(it) => it.items(),
ModuleSource::Module(it) => it.item_list()?.items(),
ModuleSource::BlockExpr(_) => return None,
};
// If there's an existing `mod m;` statement matching the new one, don't emit a fix (it's
// probably `#[cfg]`d out).
for item in items.clone() {
if let ast::Item::Module(m) = item {
if let Some(name) = m.name() {
if m.item_list().is_none() && name.to_string() == new_mod_name {
cov_mark::hit!(unlinked_file_skip_fix_when_mod_already_exists);
return None;
}
}
}
}
// If there are existing `mod m;` items, append after them (after the first group of them, rather).
match items.clone().skip_while(|item| !is_outline_mod(item)).take_while(is_outline_mod).last() {
Some(last) => {
cov_mark::hit!(unlinked_file_append_to_existing_mods);
let offset = last.syntax().text_range().end();
let indent = IndentLevel::from_node(last.syntax());
mod_decl_builder.insert(offset, format!("\n{indent}{mod_decl}"));
pub_mod_decl_builder.insert(offset, format!("\n{indent}{pub_mod_decl}"));
}
None => {
// Prepend before the first item in the file.
match items.next() {
Some(first) => {
cov_mark::hit!(unlinked_file_prepend_before_first_item);
let offset = first.syntax().text_range().start();
let indent = IndentLevel::from_node(first.syntax());
mod_decl_builder.insert(offset, format!("{mod_decl}\n\n{indent}"));
pub_mod_decl_builder.insert(offset, format!("{pub_mod_decl}\n\n{indent}"));
}
None => {
// No items in the file, so just append at the end.
cov_mark::hit!(unlinked_file_empty_file);
let mut indent = IndentLevel::from(0);
let offset = match &source {
ModuleSource::SourceFile(it) => it.syntax().text_range().end(),
ModuleSource::Module(it) => {
indent = IndentLevel::from_node(it.syntax()) + 1;
it.item_list()?.r_curly_token()?.text_range().start()
}
ModuleSource::BlockExpr(it) => {
it.stmt_list()?.r_curly_token()?.text_range().start()
}
};
mod_decl_builder.insert(offset, format!("{indent}{mod_decl}\n"));
pub_mod_decl_builder.insert(offset, format!("{indent}{pub_mod_decl}\n"));
}
}
}
}
let trigger_range = db.parse(added_file_id).tree().syntax().text_range();
Some(vec![
fix(
"add_mod_declaration",
&format!("Insert `{mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, mod_decl_builder.finish()),
trigger_range,
),
fix(
"add_pub_mod_declaration",
&format!("Insert `{pub_mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, pub_mod_decl_builder.finish()),
trigger_range,
),
])
}
#[cfg(test)]
mod tests {
use crate::tests::{check_diagnostics, check_fix, check_fixes, check_no_fix};
#[test]
fn unlinked_file_prepend_first_item() {
cov_mark::check!(unlinked_file_prepend_before_first_item);
// Only tests the first one for `pub mod` since the rest are the same
check_fixes(
r#"
//- /main.rs
fn f() {}
//- /foo.rs
$0
"#,
vec![
r#"
mod foo;
fn f() {}
"#,
r#"
pub mod foo;
fn f() {}
"#,
],
);
}
#[test]
fn unlinked_file_append_mod() {
cov_mark::check!(unlinked_file_append_to_existing_mods);
check_fix(
r#"
//- /main.rs
//! Comment on top
mod preexisting;
mod preexisting2;
struct S;
mod preexisting_bottom;)
//- /foo.rs
$0
"#,
r#"
//! Comment on top
mod preexisting;
mod preexisting2;
mod foo;
struct S;
mod preexisting_bottom;)
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file() {
cov_mark::check!(unlinked_file_empty_file);
check_fix(
r#"
//- /main.rs
//- /foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file_mod_file() {
check_fix(
r#"
//- /main.rs
//- /foo/mod.rs
$0
"#,
r#"
mod foo;
"#,
);
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
// bar module
//- /bar/foo/mod.rs
$0
"#,
r#"
// bar module
mod foo;
"#,
);
}
#[test]
fn unlinked_file_old_style_modrs() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod/mod.rs
// in mod.rs
//- /submod/foo.rs
$0
"#,
r#"
// in mod.rs
mod foo;
"#,
);
}
#[test]
fn unlinked_file_new_style_mod() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod.rs
//- /submod/foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn | () {
cov_mark::check!(unlinked_file_skip_fix_when_mod_already_exists);
check_no_fix(
r#"
//- /main.rs
#[cfg(never)]
mod foo;
//- /foo.rs
$0
"#,
);
}
#[test]
fn unlinked_file_with_cfg_on() {
check_diagnostics(
r#"
//- /main.rs
#[cfg(not(never))]
mod foo;
//- /foo.rs
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod foo {
}
//- /bar/foo/baz.rs
$0
"#,
r#"
mod foo {
mod baz;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod baz {
}
//- /bar/baz/foo/mod.rs
$0
"#,
r#"
mod baz {
mod foo;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs_main() {
check_fix(
r#"
//- /main.rs
mod bar {
}
//- /bar/foo/mod.rs
$0
"#,
r#"
mod bar {
mod foo;
}
"#,
);
}
}
| unlinked_file_with_cfg_off | identifier_name |
unlinked_file.rs | //! Diagnostic emitted for files that aren't part of any crate.
use std::iter;
use hir::{db::DefDatabase, DefMap, InFile, ModuleSource};
use ide_db::{
base_db::{FileId, FileLoader, SourceDatabase, SourceDatabaseExt},
source_change::SourceChange,
RootDatabase,
};
use syntax::{
ast::{self, edit::IndentLevel, HasModuleItem, HasName},
AstNode, TextRange,
};
use text_edit::TextEdit;
use crate::{fix, Assist, Diagnostic, DiagnosticCode, DiagnosticsContext, Severity};
// Diagnostic: unlinked-file
//
// This diagnostic is shown for files that are not included in any crate, or files that are part of
// crates rust-analyzer failed to discover. The file will not have IDE features available.
pub(crate) fn unlinked_file(
ctx: &DiagnosticsContext<'_>,
acc: &mut Vec<Diagnostic>,
file_id: FileId,
) {
// Limit diagnostic to the first few characters in the file. This matches how VS Code
// renders it with the full span, but is less invasive on other editors.
let fixes = fixes(ctx, file_id);
// FIXME: This is a hack for the vscode extension to notice whether there is an autofix or not before having to resolve diagnostics.
// This is to prevent project linking popups from appearing when there is an autofix. https://github.com/rust-lang/rust-analyzer/issues/14523
let message = if fixes.is_none() {
"file not included in crate hierarchy"
} else {
"file not included in module tree"
};
let range = ctx.sema.db.parse(file_id).syntax_node().text_range();
let range = FileLoader::file_text(ctx.sema.db, file_id)
.char_indices()
.take(3)
.last()
.map(|(i, _)| i)
.map(|i| TextRange::up_to(i.try_into().unwrap()))
.unwrap_or(range);
acc.push(
Diagnostic::new(DiagnosticCode::Ra("unlinked-file", Severity::WeakWarning), message, range)
.with_fixes(fixes),
);
}
fn fixes(ctx: &DiagnosticsContext<'_>, file_id: FileId) -> Option<Vec<Assist>> {
// If there's an existing module that could add `mod` or `pub mod` items to include the unlinked file,
// suggest that as a fix.
let source_root = ctx.sema.db.source_root(ctx.sema.db.file_source_root(file_id));
let our_path = source_root.path_for_file(&file_id)?;
let parent = our_path.parent()?;
let (module_name, _) = our_path.name_and_extension()?;
let (parent, module_name) = match module_name {
// for mod.rs we need to actually look up one higher
// and take the parent as our to be module name
"mod" => {
let (name, _) = parent.name_and_extension()?;
(parent.parent()?, name.to_owned())
}
_ => (parent, module_name.to_owned()),
};
// check crate roots, i.e. main.rs, lib.rs,...
'crates: for &krate in &*ctx.sema.db.relevant_crates(file_id) {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let root_module = &crate_def_map[DefMap::ROOT];
let Some(root_file_id) = root_module.origin.file_id() else { continue };
let Some(crate_root_path) = source_root.path_for_file(&root_file_id) else { continue };
let Some(rel) = parent.strip_prefix(&crate_root_path.parent()?) else { continue };
// try resolving the relative difference of the paths as inline modules
let mut current = root_module;
for ele in rel.as_ref().components() {
let seg = match ele {
std::path::Component::Normal(seg) => seg.to_str()?,
std::path::Component::RootDir => continue,
// shouldn't occur
_ => continue 'crates,
};
match current.children.iter().find(|(name, _)| name.to_smol_str() == seg) {
Some((_, &child)) => current = &crate_def_map[child],
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
// if we aren't adding to a crate root, walk backwards such that we support `#[path =...]` overrides if possible
// build all parent paths of the form `../module_name/mod.rs` and `../module_name.rs`
let paths = iter::successors(Some(parent), |prev| prev.parent()).filter_map(|path| {
let parent = path.parent()?;
let (name, _) = path.name_and_extension()?;
Some(([parent.join(&format!("{name}.rs"))?, path.join("mod.rs")?], name.to_owned()))
});
let mut stack = vec![];
let &parent_id =
paths.inspect(|(_, name)| stack.push(name.clone())).find_map(|(paths, _)| {
paths.into_iter().find_map(|path| source_root.file_for_path(&path))
})?;
stack.pop();
'crates: for &krate in ctx.sema.db.relevant_crates(parent_id).iter() {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let Some((_, module)) = crate_def_map.modules().find(|(_, module)| {
module.origin.file_id() == Some(parent_id) && !module.origin.is_inline()
}) else {
continue;
};
if stack.is_empty() {
return make_fixes(
ctx.sema.db,
parent_id,
module.definition_source(ctx.sema.db).value,
&module_name,
file_id,
);
} else {
// direct parent file is missing,
// try finding a parent that has an inline tree from here on
let mut current = module;
for s in stack.iter().rev() {
match module.children.iter().find(|(name, _)| name.to_smol_str() == s) {
Some((_, child)) => {
current = &crate_def_map[*child];
}
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
}
None
}
fn make_fixes(
db: &RootDatabase,
parent_file_id: FileId,
source: ModuleSource,
new_mod_name: &str,
added_file_id: FileId,
) -> Option<Vec<Assist>> {
fn is_outline_mod(item: &ast::Item) -> bool {
matches!(item, ast::Item::Module(m) if m.item_list().is_none())
}
let mod_decl = format!("mod {new_mod_name};");
let pub_mod_decl = format!("pub mod {new_mod_name};");
let mut mod_decl_builder = TextEdit::builder();
let mut pub_mod_decl_builder = TextEdit::builder();
let mut items = match &source {
ModuleSource::SourceFile(it) => it.items(),
ModuleSource::Module(it) => it.item_list()?.items(),
ModuleSource::BlockExpr(_) => return None,
};
// If there's an existing `mod m;` statement matching the new one, don't emit a fix (it's
// probably `#[cfg]`d out).
for item in items.clone() {
if let ast::Item::Module(m) = item {
if let Some(name) = m.name() {
if m.item_list().is_none() && name.to_string() == new_mod_name {
cov_mark::hit!(unlinked_file_skip_fix_when_mod_already_exists);
return None;
}
}
}
}
// If there are existing `mod m;` items, append after them (after the first group of them, rather).
match items.clone().skip_while(|item| !is_outline_mod(item)).take_while(is_outline_mod).last() {
Some(last) => {
cov_mark::hit!(unlinked_file_append_to_existing_mods);
let offset = last.syntax().text_range().end();
let indent = IndentLevel::from_node(last.syntax());
mod_decl_builder.insert(offset, format!("\n{indent}{mod_decl}"));
pub_mod_decl_builder.insert(offset, format!("\n{indent}{pub_mod_decl}"));
}
None => {
// Prepend before the first item in the file.
match items.next() {
Some(first) => {
cov_mark::hit!(unlinked_file_prepend_before_first_item);
let offset = first.syntax().text_range().start();
let indent = IndentLevel::from_node(first.syntax());
mod_decl_builder.insert(offset, format!("{mod_decl}\n\n{indent}"));
pub_mod_decl_builder.insert(offset, format!("{pub_mod_decl}\n\n{indent}"));
}
None => {
// No items in the file, so just append at the end.
cov_mark::hit!(unlinked_file_empty_file);
let mut indent = IndentLevel::from(0);
let offset = match &source {
ModuleSource::SourceFile(it) => it.syntax().text_range().end(),
ModuleSource::Module(it) => {
indent = IndentLevel::from_node(it.syntax()) + 1;
it.item_list()?.r_curly_token()?.text_range().start()
}
ModuleSource::BlockExpr(it) => {
it.stmt_list()?.r_curly_token()?.text_range().start()
}
};
mod_decl_builder.insert(offset, format!("{indent}{mod_decl}\n"));
pub_mod_decl_builder.insert(offset, format!("{indent}{pub_mod_decl}\n"));
}
}
}
}
let trigger_range = db.parse(added_file_id).tree().syntax().text_range();
Some(vec![
fix(
"add_mod_declaration",
&format!("Insert `{mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, mod_decl_builder.finish()),
trigger_range,
),
fix(
"add_pub_mod_declaration",
&format!("Insert `{pub_mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, pub_mod_decl_builder.finish()),
trigger_range,
),
])
}
#[cfg(test)]
mod tests {
use crate::tests::{check_diagnostics, check_fix, check_fixes, check_no_fix};
#[test]
fn unlinked_file_prepend_first_item() {
cov_mark::check!(unlinked_file_prepend_before_first_item);
// Only tests the first one for `pub mod` since the rest are the same
check_fixes(
r#"
//- /main.rs
fn f() {}
//- /foo.rs
$0
"#,
vec![
r#"
mod foo;
fn f() {}
"#,
r#"
pub mod foo;
fn f() {}
"#,
],
);
}
#[test]
fn unlinked_file_append_mod() {
cov_mark::check!(unlinked_file_append_to_existing_mods);
check_fix(
r#"
//- /main.rs
//! Comment on top
mod preexisting;
mod preexisting2;
struct S;
mod preexisting_bottom;)
//- /foo.rs
$0
"#,
r#"
//! Comment on top
mod preexisting;
mod preexisting2;
mod foo;
struct S;
mod preexisting_bottom;)
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file() {
cov_mark::check!(unlinked_file_empty_file);
check_fix(
r#"
//- /main.rs
//- /foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file_mod_file() {
check_fix(
r#"
//- /main.rs
//- /foo/mod.rs
$0
"#,
r#"
mod foo;
"#,
);
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
// bar module
//- /bar/foo/mod.rs
$0
"#,
r#"
// bar module
mod foo;
"#,
);
}
#[test]
fn unlinked_file_old_style_modrs() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod/mod.rs
// in mod.rs
//- /submod/foo.rs
$0
"#,
r#"
// in mod.rs
mod foo;
"#,
);
}
#[test]
fn unlinked_file_new_style_mod() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod.rs
//- /submod/foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_with_cfg_off() {
cov_mark::check!(unlinked_file_skip_fix_when_mod_already_exists);
check_no_fix(
r#"
//- /main.rs
#[cfg(never)]
mod foo;
//- /foo.rs
$0
"#,
);
}
#[test]
fn unlinked_file_with_cfg_on() {
check_diagnostics(
r#"
//- /main.rs
#[cfg(not(never))]
mod foo;
//- /foo.rs
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod foo {
}
//- /bar/foo/baz.rs
$0
"#,
r#"
mod foo {
mod baz;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod baz {
}
//- /bar/baz/foo/mod.rs
$0
"#,
r#"
mod baz {
mod foo;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs_main() |
}
| {
check_fix(
r#"
//- /main.rs
mod bar {
}
//- /bar/foo/mod.rs
$0
"#,
r#"
mod bar {
mod foo;
}
"#,
);
} | identifier_body |
unlinked_file.rs | //! Diagnostic emitted for files that aren't part of any crate.
use std::iter;
use hir::{db::DefDatabase, DefMap, InFile, ModuleSource};
use ide_db::{
base_db::{FileId, FileLoader, SourceDatabase, SourceDatabaseExt},
source_change::SourceChange,
RootDatabase,
};
use syntax::{
ast::{self, edit::IndentLevel, HasModuleItem, HasName},
AstNode, TextRange,
};
use text_edit::TextEdit;
use crate::{fix, Assist, Diagnostic, DiagnosticCode, DiagnosticsContext, Severity};
// Diagnostic: unlinked-file
//
// This diagnostic is shown for files that are not included in any crate, or files that are part of
// crates rust-analyzer failed to discover. The file will not have IDE features available.
pub(crate) fn unlinked_file(
ctx: &DiagnosticsContext<'_>,
acc: &mut Vec<Diagnostic>,
file_id: FileId,
) {
// Limit diagnostic to the first few characters in the file. This matches how VS Code
// renders it with the full span, but is less invasive on other editors.
let fixes = fixes(ctx, file_id);
// FIXME: This is a hack for the vscode extension to notice whether there is an autofix or not before having to resolve diagnostics.
// This is to prevent project linking popups from appearing when there is an autofix. https://github.com/rust-lang/rust-analyzer/issues/14523
let message = if fixes.is_none() {
"file not included in crate hierarchy"
} else {
"file not included in module tree"
};
let range = ctx.sema.db.parse(file_id).syntax_node().text_range();
let range = FileLoader::file_text(ctx.sema.db, file_id)
.char_indices()
.take(3)
.last()
.map(|(i, _)| i)
.map(|i| TextRange::up_to(i.try_into().unwrap()))
.unwrap_or(range);
acc.push(
Diagnostic::new(DiagnosticCode::Ra("unlinked-file", Severity::WeakWarning), message, range)
.with_fixes(fixes),
);
}
fn fixes(ctx: &DiagnosticsContext<'_>, file_id: FileId) -> Option<Vec<Assist>> {
// If there's an existing module that could add `mod` or `pub mod` items to include the unlinked file,
// suggest that as a fix.
let source_root = ctx.sema.db.source_root(ctx.sema.db.file_source_root(file_id));
let our_path = source_root.path_for_file(&file_id)?;
let parent = our_path.parent()?;
let (module_name, _) = our_path.name_and_extension()?;
let (parent, module_name) = match module_name {
// for mod.rs we need to actually look up one higher
// and take the parent as our to be module name
"mod" => {
let (name, _) = parent.name_and_extension()?;
(parent.parent()?, name.to_owned())
}
_ => (parent, module_name.to_owned()),
};
// check crate roots, i.e. main.rs, lib.rs,...
'crates: for &krate in &*ctx.sema.db.relevant_crates(file_id) {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let root_module = &crate_def_map[DefMap::ROOT];
let Some(root_file_id) = root_module.origin.file_id() else { continue };
let Some(crate_root_path) = source_root.path_for_file(&root_file_id) else { continue };
let Some(rel) = parent.strip_prefix(&crate_root_path.parent()?) else { continue };
// try resolving the relative difference of the paths as inline modules
let mut current = root_module;
for ele in rel.as_ref().components() {
let seg = match ele {
std::path::Component::Normal(seg) => seg.to_str()?,
std::path::Component::RootDir => continue,
// shouldn't occur
_ => continue 'crates,
};
match current.children.iter().find(|(name, _)| name.to_smol_str() == seg) {
Some((_, &child)) => current = &crate_def_map[child],
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
// if we aren't adding to a crate root, walk backwards such that we support `#[path =...]` overrides if possible
// build all parent paths of the form `../module_name/mod.rs` and `../module_name.rs`
let paths = iter::successors(Some(parent), |prev| prev.parent()).filter_map(|path| {
let parent = path.parent()?;
let (name, _) = path.name_and_extension()?;
Some(([parent.join(&format!("{name}.rs"))?, path.join("mod.rs")?], name.to_owned()))
});
let mut stack = vec![];
let &parent_id =
paths.inspect(|(_, name)| stack.push(name.clone())).find_map(|(paths, _)| {
paths.into_iter().find_map(|path| source_root.file_for_path(&path))
})?;
stack.pop();
'crates: for &krate in ctx.sema.db.relevant_crates(parent_id).iter() {
let crate_def_map = ctx.sema.db.crate_def_map(krate);
let Some((_, module)) = crate_def_map.modules().find(|(_, module)| {
module.origin.file_id() == Some(parent_id) && !module.origin.is_inline()
}) else {
continue;
};
if stack.is_empty() {
return make_fixes(
ctx.sema.db,
parent_id,
module.definition_source(ctx.sema.db).value,
&module_name,
file_id,
);
} else {
// direct parent file is missing,
// try finding a parent that has an inline tree from here on
let mut current = module;
for s in stack.iter().rev() {
match module.children.iter().find(|(name, _)| name.to_smol_str() == s) {
Some((_, child)) => {
current = &crate_def_map[*child];
}
None => continue 'crates,
}
if !current.origin.is_inline() {
continue 'crates;
}
}
let InFile { file_id: parent_file_id, value: source } =
current.definition_source(ctx.sema.db);
let parent_file_id = parent_file_id.file_id()?;
return make_fixes(ctx.sema.db, parent_file_id, source, &module_name, file_id);
}
}
None
}
fn make_fixes(
db: &RootDatabase,
parent_file_id: FileId,
source: ModuleSource,
new_mod_name: &str,
added_file_id: FileId,
) -> Option<Vec<Assist>> {
fn is_outline_mod(item: &ast::Item) -> bool {
matches!(item, ast::Item::Module(m) if m.item_list().is_none())
}
let mod_decl = format!("mod {new_mod_name};");
let pub_mod_decl = format!("pub mod {new_mod_name};");
let mut mod_decl_builder = TextEdit::builder();
let mut pub_mod_decl_builder = TextEdit::builder();
let mut items = match &source {
ModuleSource::SourceFile(it) => it.items(),
ModuleSource::Module(it) => it.item_list()?.items(),
ModuleSource::BlockExpr(_) => return None,
};
// If there's an existing `mod m;` statement matching the new one, don't emit a fix (it's
// probably `#[cfg]`d out).
for item in items.clone() {
if let ast::Item::Module(m) = item {
if let Some(name) = m.name() {
if m.item_list().is_none() && name.to_string() == new_mod_name {
cov_mark::hit!(unlinked_file_skip_fix_when_mod_already_exists);
return None;
}
}
}
}
// If there are existing `mod m;` items, append after them (after the first group of them, rather).
match items.clone().skip_while(|item| !is_outline_mod(item)).take_while(is_outline_mod).last() {
Some(last) => {
cov_mark::hit!(unlinked_file_append_to_existing_mods);
let offset = last.syntax().text_range().end();
let indent = IndentLevel::from_node(last.syntax());
mod_decl_builder.insert(offset, format!("\n{indent}{mod_decl}"));
pub_mod_decl_builder.insert(offset, format!("\n{indent}{pub_mod_decl}"));
}
None => {
// Prepend before the first item in the file.
match items.next() {
Some(first) => {
cov_mark::hit!(unlinked_file_prepend_before_first_item);
let offset = first.syntax().text_range().start();
let indent = IndentLevel::from_node(first.syntax());
mod_decl_builder.insert(offset, format!("{mod_decl}\n\n{indent}"));
pub_mod_decl_builder.insert(offset, format!("{pub_mod_decl}\n\n{indent}"));
}
None => {
// No items in the file, so just append at the end.
cov_mark::hit!(unlinked_file_empty_file);
let mut indent = IndentLevel::from(0);
let offset = match &source {
ModuleSource::SourceFile(it) => it.syntax().text_range().end(),
ModuleSource::Module(it) => {
indent = IndentLevel::from_node(it.syntax()) + 1;
it.item_list()?.r_curly_token()?.text_range().start()
}
ModuleSource::BlockExpr(it) => {
it.stmt_list()?.r_curly_token()?.text_range().start()
}
};
mod_decl_builder.insert(offset, format!("{indent}{mod_decl}\n"));
pub_mod_decl_builder.insert(offset, format!("{indent}{pub_mod_decl}\n"));
}
}
}
}
let trigger_range = db.parse(added_file_id).tree().syntax().text_range();
Some(vec![
fix(
"add_mod_declaration",
&format!("Insert `{mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, mod_decl_builder.finish()),
trigger_range,
),
fix(
"add_pub_mod_declaration",
&format!("Insert `{pub_mod_decl}`"),
SourceChange::from_text_edit(parent_file_id, pub_mod_decl_builder.finish()),
trigger_range,
),
])
}
#[cfg(test)]
mod tests {
use crate::tests::{check_diagnostics, check_fix, check_fixes, check_no_fix};
#[test]
fn unlinked_file_prepend_first_item() {
cov_mark::check!(unlinked_file_prepend_before_first_item);
// Only tests the first one for `pub mod` since the rest are the same
check_fixes(
r#"
//- /main.rs
fn f() {}
//- /foo.rs
$0
"#,
vec![
r#"
mod foo;
fn f() {}
"#,
r#"
pub mod foo;
fn f() {}
"#,
],
);
}
#[test]
fn unlinked_file_append_mod() {
cov_mark::check!(unlinked_file_append_to_existing_mods);
check_fix(
r#"
//- /main.rs
//! Comment on top
mod preexisting;
mod preexisting2;
struct S;
mod preexisting_bottom;)
//- /foo.rs
$0
"#,
r#"
//! Comment on top
mod preexisting;
mod preexisting2;
mod foo;
struct S;
mod preexisting_bottom;)
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file() {
cov_mark::check!(unlinked_file_empty_file);
check_fix(
r#"
//- /main.rs
//- /foo.rs
$0 | mod foo;
"#,
);
}
#[test]
fn unlinked_file_insert_in_empty_file_mod_file() {
check_fix(
r#"
//- /main.rs
//- /foo/mod.rs
$0
"#,
r#"
mod foo;
"#,
);
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
// bar module
//- /bar/foo/mod.rs
$0
"#,
r#"
// bar module
mod foo;
"#,
);
}
#[test]
fn unlinked_file_old_style_modrs() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod/mod.rs
// in mod.rs
//- /submod/foo.rs
$0
"#,
r#"
// in mod.rs
mod foo;
"#,
);
}
#[test]
fn unlinked_file_new_style_mod() {
check_fix(
r#"
//- /main.rs
mod submod;
//- /submod.rs
//- /submod/foo.rs
$0
"#,
r#"
mod foo;
"#,
);
}
#[test]
fn unlinked_file_with_cfg_off() {
cov_mark::check!(unlinked_file_skip_fix_when_mod_already_exists);
check_no_fix(
r#"
//- /main.rs
#[cfg(never)]
mod foo;
//- /foo.rs
$0
"#,
);
}
#[test]
fn unlinked_file_with_cfg_on() {
check_diagnostics(
r#"
//- /main.rs
#[cfg(not(never))]
mod foo;
//- /foo.rs
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod foo {
}
//- /bar/foo/baz.rs
$0
"#,
r#"
mod foo {
mod baz;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs() {
check_fix(
r#"
//- /main.rs
mod bar;
//- /bar.rs
mod baz {
}
//- /bar/baz/foo/mod.rs
$0
"#,
r#"
mod baz {
mod foo;
}
"#,
);
}
#[test]
fn unlinked_file_insert_into_inline_simple_modrs_main() {
check_fix(
r#"
//- /main.rs
mod bar {
}
//- /bar/foo/mod.rs
$0
"#,
r#"
mod bar {
mod foo;
}
"#,
);
}
} | "#,
r#" | random_line_split |
lib.rs | NODELAY` (defaults to `true`).
//!
//! - `pool_min` - Lower bound of opened connections for `Pool` (defaults to `10`).
//! - `pool_max` - Upper bound of opened connections for `Pool` (defaults to `20`).
//!
//! - `ping_before_query` - Ping the server every time before executing any query. (defaults to `true`).
//! - `send_retries` - Count of retries to send a request to the server. (defaults to `3`).
//! - `retry_timeout` - Amount of time to wait before next retry. (defaults to `5 sec`).
//! - `ping_timeout` - Timeout for ping (defaults to `500 ms`).
//!
//! - `alt_hosts` - Comma-separated list of single-address hosts for load-balancing.
//!
//! example:
//! ```url
//! tcp://user:password@host:9000/clicks?compression=lz4&ping_timeout=42ms
//! ```
//!
//! ## Optional features
//!
//! `clickhouse-rs` puts some functionality behind optional features to optimize compile time
//! for the most common use cases. The following features are available.
//!
//! - `tokio_io` *(enabled by default)* — I/O based on [Tokio](https://tokio.rs/).
//! - `async_std` — I/O based on [async-std](https://async.rs/) (doesn't work together with `tokio_io`).
//! - `tls` — TLS support (allowed only with `tokio_io`).
//!
//! ### Example
//!
//! ```rust
//! # use std::env;
//! use clickhouse_rs::{Block, Pool, errors::Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//! let ddl = r"
//! CREATE TABLE IF NOT EXISTS payment (
//! customer_id UInt32,
//! amount UInt32,
//! account_name Nullable(FixedString(3))
//! ) Engine=Memory";
//!
//! let block = Block::new()
//! .column("customer_id", vec![1_u32, 3, 5, 7, 9])
//! .column("amount", vec![2_u32, 4, 6, 8, 10])
//! .column("account_name", vec![Some("foo"), None, None, None, Some("bar")]);
//!
//! # let database_url = env::var("DATABASE_URL").unwrap_or("tcp://localhost:9000?compression=lz4".into());
//! let pool = Pool::new(database_url);
//!
//! let mut client = pool.get_handle().await?;
//! client.execute(ddl).await?;
//! client.insert("payment", block).await?;
//! let block = client.query("SELECT * FROM payment").fetch_all().await?;
//!
//! for row in block.rows() {
//! let id: u32 = row.get("customer_id")?;
//! let amount: u32 = row.get("amount")?;
//! let name: Option<&str> = row.get("account_name")?;
//! println!("Found payment {}: {} {:?}", id, amount, name);
//! }
//! Ok(())
//! }
//! ```
#![recursion_limit = "1024"]
use std::{fmt, future::Future, time::Duration};
use futures_util::{
future, future::BoxFuture, future::FutureExt, stream, stream::BoxStream, StreamExt,
};
use log::{info, warn};
use crate::{
connecting_stream::ConnectingStream,
errors::{DriverError, Error, Result},
io::ClickhouseTransport,
pool::PoolBinding,
retry_guard::retry_guard,
types::{
query_result::stream_blocks::BlockStream, Cmd, Context, IntoOptions, OptionsSource, Packet,
Query, QueryResult, SqlType,
},
};
pub use crate::{
pool::Pool,
types::{block::Block, Options},
};
mod binary;
mod client_info;
mod connecting_stream;
/// Error types.
pub mod errors;
mod io;
/// Pool types.
pub mod pool;
mod retry_guard;
/// Clickhouse types.
pub mod types;
/// This macro is a convenient way to pass row into a block.
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{customer_id: 1, amount: 2, account_name: "foo"})?;
/// block.push(row!{customer_id: 4, amount: 4, account_name: "bar"})?;
/// block.push(row!{customer_id: 5, amount: 5, account_name: "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// If a column name has special characters, you can use the alternative syntax
/// with `=>` to pass an expression as column name:
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{"customer.id" => 1, amount: 2, "account.name" => "foo"})?;
/// block.push(row!{"customer.id" => 4, amount: 4, "account.name" => "bar"})?;
/// block.push(row!{"customer.id" => 5, amount: 5, "account.name" => "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// You can also use `Vec<(String, Value)>` to construct a row and insert it into a block:
///
/// ```rust
/// # use clickhouse_rs::{Block, errors::Error, types::Value};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// for i in 1..10 {
/// let mut row = Vec::new();
/// for j in 1..10 {
/// row.push((format!("#{}", j), Value::from(i * j)));
/// }
/// block.push(row)?;
/// }
/// assert_eq!(block.row_count(), 9);
/// # println!("{:?}", block);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
#[macro_export]
macro_rules! row {
() => { $crate::types::RNil };
( $i:ident, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($i).into(), $i.into())
};
( $i:ident ) => { row!($i: $i) };
( $k:ident: $v:expr ) => {
$crate::types::RNil.put(stringify!($k).into(), $v.into())
};
( $k:ident: $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($k).into(), $v.into())
};
( $k:expr => $v:expr ) => {
$crate::types::RNil.put($k.into(), $v.into())
};
( $k:expr => $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connect(options: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
warn!("You should use port 9000 instead of 8123 because clickhouse-rs work through the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
/// Executes Clickhouse `query` on Conn.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
/// Convenience method to insert block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
T:'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send +'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
Box::pin(f(self))
}
}
/// Check connection and try to reconnect if necessary.
pub async fn check_connection(&mut self) -> Result<()> {
self.pool.detach();
let source = self.context.options.clone();
let pool = self.pool.clone();
let (send_retries, retry_timeout) = {
let options = try_opt!(source.get());
(options.send_retries, options.retry_timeout)
};
retry_guard(self, &source, pool.into(), send_retries, retry_timeout).await?;
if !self.pool.is_attached() && self.pool.is_some() {
self.pool.attach();
}
Ok(())
}
pub(crate) fn set_inside(&self, value: bool) {
if let Some(ref inner) = self.inner {
inner.set_inside(value);
} else {
unreachable!()
}
}
}
fn column_name_to_string(name: &str) -> Result<String> {
if name.chars().all(|ch| ch.is_numeric()) {
return Ok(name.to_string());
}
if name.chars().any(|ch| ch == '`') {
let err = format!("Column name {:?} shouldn't contains backticks.", name);
return Err(Error::Other(err.into()));
}
Ok(format!("`{}`", name))
}
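// Hypothetical sanity checks for the quoting rules above (added for illustration, not part
// of the original file).
#[cfg(test)]
mod column_name_sketch {
    use super::*;

    #[test]
    fn quoting_rules() {
        // Purely numeric names are passed through unchanged.
        assert_eq!(column_name_to_string("42").unwrap(), "42");
        // Everything else is wrapped in backticks.
        assert_eq!(column_name_to_string("account_name").unwrap(), "`account_name`");
        // Names that already contain a backtick are rejected.
        assert!(column_name_to_string("bad`name").is_err());
    }
}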
#[cfg(feature = "async_std")]
async fn with_timeout<F, T>(future: F, duration: Duration) -> F::Output
where
F: Future<Output = Result<T>>,
{
use async_std::io;
use futures_util::future::TryFutureExt;
io::timeout(duration, future.map_err(Into::into))
.map_err(Into::into)
.await
}
#[cfg(not(feature = "async_std"))]
async fn with_timeout<F, T>(future: F, timeout: Duration) -> F::Output
where
F: Future<Output = Result<T>>,
{
| tokio::time::timeout(timeout, future).await?
}
#[cf | identifier_body |
|
lib.rs | @host[:port]/database?param1=value1&...¶mN=valueN
//! ```
//!
//! parameters:
//!
//! - `compression` - Whether or not use compression (defaults to `none`). Possible choices:
//! * `none`
//! * `lz4`
//!
//! - `readonly` - Restricts permissions for reading data, writing data, and changing settings (defaults to `none`). Possible choices:
//! * `0` - All queries are allowed.
//! * `1` - Only read data queries are allowed.
//! * `2` - Read data and change settings queries are allowed.
//!
//! - `connection_timeout` - Timeout for connection (defaults to `500 ms`)
//! - `keepalive` - TCP keep alive timeout in milliseconds.
//! - `nodelay` - Whether to enable `TCP_NODELAY` (defaults to `true`).
//!
//! - `pool_min` - Lower bound of opened connections for `Pool` (defaults to `10`).
//! - `pool_max` - Upper bound of opened connections for `Pool` (defaults to `20`).
//!
//! - `ping_before_query` - Ping the server before executing each query (defaults to `true`).
//! - `send_retries` - Number of retries when sending a request to the server (defaults to `3`).
//! - `retry_timeout` - Amount of time to wait before the next retry (defaults to `5 sec`).
//! - `ping_timeout` - Timeout for ping (defaults to `500 ms`).
//!
//! - `alt_hosts` - Comma-separated list of single-address hosts for load balancing.
//!
//! example:
//! ```url
//! tcp://user:password@host:9000/clicks?compression=lz4&ping_timeout=42ms
//! ```
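//!
//! As a further illustration (host, credentials, and parameter values below are made
//! up), several of these options can be combined in a single URL and handed to
//! `Pool::new`:
//!
//! ```rust,no_run
//! use clickhouse_rs::Pool;
//!
//! let url = "tcp://user:password@host:9000/clicks?compression=lz4&readonly=1&ping_timeout=42ms".to_string();
//! let pool = Pool::new(url);
//! ```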
//!
//! ## Optional features
//!
//! `clickhouse-rs` puts some functionality behind optional features to optimize compile time
//! for the most common use cases. The following features are available.
//!
//! - `tokio_io` *(enabled by default)* — I/O based on [Tokio](https://tokio.rs/).
//! - `async_std` — I/O based on [async-std](https://async.rs/) (doesn't work together with `tokio_io`).
//! - `tls` — TLS support (allowed only with `tokio_io`).
//!
//! ### Example
//!
//! ```rust
//! # use std::env;
//! use clickhouse_rs::{Block, Pool, errors::Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//! let ddl = r"
//! CREATE TABLE IF NOT EXISTS payment (
//! customer_id UInt32,
//! amount UInt32,
//! account_name Nullable(FixedString(3))
//! ) Engine=Memory";
//!
//! let block = Block::new()
//! .column("customer_id", vec![1_u32, 3, 5, 7, 9])
//! .column("amount", vec![2_u32, 4, 6, 8, 10])
//! .column("account_name", vec![Some("foo"), None, None, None, Some("bar")]);
//!
//! # let database_url = env::var("DATABASE_URL").unwrap_or("tcp://localhost:9000?compression=lz4".into());
//! let pool = Pool::new(database_url);
//!
//! let mut client = pool.get_handle().await?;
//! client.execute(ddl).await?;
//! client.insert("payment", block).await?;
//! let block = client.query("SELECT * FROM payment").fetch_all().await?;
//!
//! for row in block.rows() {
//! let id: u32 = row.get("customer_id")?;
//! let amount: u32 = row.get("amount")?;
//! let name: Option<&str> = row.get("account_name")?;
//! println!("Found payment {}: {} {:?}", id, amount, name);
//! }
//! Ok(())
//! }
//! ```
#![recursion_limit = "1024"]
use std::{fmt, future::Future, time::Duration};
use futures_util::{
future, future::BoxFuture, future::FutureExt, stream, stream::BoxStream, StreamExt,
};
use log::{info, warn};
use crate::{
connecting_stream::ConnectingStream,
errors::{DriverError, Error, Result},
io::ClickhouseTransport,
pool::PoolBinding,
retry_guard::retry_guard,
types::{
query_result::stream_blocks::BlockStream, Cmd, Context, IntoOptions, OptionsSource, Packet,
Query, QueryResult, SqlType,
},
};
pub use crate::{
pool::Pool,
types::{block::Block, Options},
};
mod binary;
mod client_info;
mod connecting_stream;
/// Error types.
pub mod errors;
mod io;
/// Pool types.
pub mod pool;
mod retry_guard;
/// Clickhouse types.
pub mod types;
/// This macro is a convenient way to pass a row into a block.
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{customer_id: 1, amount: 2, account_name: "foo"})?;
/// block.push(row!{customer_id: 4, amount: 4, account_name: "bar"})?;
/// block.push(row!{customer_id: 5, amount: 5, account_name: "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// If a column name has special characters, you can use the alternative syntax
/// with `=>` to pass an expression as the column name:
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{"customer.id" => 1, amount: 2, "account.name" => "foo"})?;
/// block.push(row!{"customer.id" => 4, amount: 4, "account.name" => "bar"})?;
/// block.push(row!{"customer.id" => 5, amount: 5, "account.name" => "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// You can also use `Vec<(String, Value)>` to construct a row and insert it into a block:
///
/// ```rust
/// # use clickhouse_rs::{Block, errors::Error, types::Value};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// for i in 1..10 {
/// let mut row = Vec::new();
/// for j in 1..10 {
/// row.push((format!("#{}", j), Value::from(i * j)));
/// }
/// block.push(row)?;
/// }
/// assert_eq!(block.row_count(), 9);
/// # println!("{:?}", block);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
#[macro_export]
macro_rules! row {
() => { $crate::types::RNil };
( $i:ident, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($i).into(), $i.into())
};
( $i:ident ) => { row!($i: $i) };
( $k:ident: $v:expr ) => {
$crate::types::RNil.put(stringify!($k).into(), $v.into())
};
( $k:ident: $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($k).into(), $v.into())
};
( $k:expr => $v:expr ) => {
$crate::types::RNil.put($k.into(), $v.into())
};
( $k:expr => $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
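// Illustrative usage of `try_opt!` (it plays the role of an explicit `?`). The helper
// below is hypothetical and only sketches the pattern used throughout this file:
//
//     fn ping_timeout_of(ctx: &Context) -> Result<Duration> {
//         let options = try_opt!(ctx.options.get()); // early-returns Err(_) on failure
//         Ok(options.ping_timeout)
//     }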
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connect(options: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
                    warn!("You should use port 9000 instead of 8123 because clickhouse-rs works over the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await { | }
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
    /// Executes a Clickhouse `query` on this connection.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
    /// Convenience method to insert a block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
        T: 'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
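    // Note: `wrap_future` above and `wrap_stream` below both apply the
    // `ping_before_query` option documented in the crate root: when it is enabled,
    // `check_connection` runs (and may reconnect) before the wrapped operation.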
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
        F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send + 'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
Box::pin(f(self))
}
}
/// Check connection and try to reconnect if necessary.
pub async fn check_connection(&mut self) -> Result<()> {
self.pool.detach();
let source = self.context.options.clone();
let pool = self.pool.clone();
let (send_retries, retry_timeout) = {
let options = try_opt!(source.get());
(options.send_retries, options.retry_timeout)
};
retry_guard(self, &source, pool.into(), send_retries, retry_timeout).await?;
        if !self.pool.is_attached() && self.pool.is_some() {
self.pool.attach();
}
Ok(())
}
pub(crate) fn set_inside(&self, value: bool) {
if let Some(ref inner) = self.inner {
inner.set_inside(value);
} else {
unreachable!()
}
}
}
fn column_name_to_string(name: &str) -> Result<String> {
if name.chars().all(|ch| ch.is_numeric()) {
return Ok(name.to_string());
}
if name.chars().any(|ch| ch == '`') {
        let err = format!("Column name {:?} shouldn't contain backticks.", name);
return Err(Error::Other(err.into()));
}
Ok(format!("`{}`", name))
}
| match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info); | random_line_split |
lib.rs | port]/database?param1=value1&...&paramN=valueN
//! ```
//!
//! parameters:
//!
//! - `compression` - Whether or not to use compression (defaults to `none`). Possible choices:
//! * `none`
//! * `lz4`
//!
//! - `readonly` - Restricts permissions for reading data, writing data, and changing settings (defaults to `none`). Possible choices:
//! * `0` - All queries are allowed.
//! * `1` - Only read data queries are allowed.
//! * `2` - Read data and change settings queries are allowed.
//!
//! - `connection_timeout` - Timeout for connection (defaults to `500 ms`)
//! - `keepalive` - TCP keep alive timeout in milliseconds.
//! - `nodelay` - Whether to enable `TCP_NODELAY` (defaults to `true`).
//!
//! - `pool_min` - Lower bound of opened connections for `Pool` (defaults to `10`).
//! - `pool_max` - Upper bound of opened connections for `Pool` (defaults to `20`).
//!
//! - `ping_before_query` - Ping the server before executing each query (defaults to `true`).
//! - `send_retries` - Number of retries when sending a request to the server (defaults to `3`).
//! - `retry_timeout` - Amount of time to wait before the next retry (defaults to `5 sec`).
//! - `ping_timeout` - Timeout for ping (defaults to `500 ms`).
//!
//! - `alt_hosts` - Comma-separated list of single-address hosts for load balancing.
//!
//! example:
//! ```url
//! tcp://user:password@host:9000/clicks?compression=lz4&ping_timeout=42ms
//! ```
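//!
//! Another illustration (credentials and database name are made up): a read-only
//! reporting user that also enables compression could connect with:
//!
//! ```url
//! tcp://report:secret@host:9000/analytics?readonly=1&compression=lz4
//! ```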
//!
//! ## Optional features
//!
//! `clickhouse-rs` puts some functionality behind optional features to optimize compile time
//! for the most common use cases. The following features are available.
//!
//! - `tokio_io` *(enabled by default)* — I/O based on [Tokio](https://tokio.rs/).
//! - `async_std` — I/O based on [async-std](https://async.rs/) (doesn't work together with `tokio_io`).
//! - `tls` — TLS support (allowed only with `tokio_io`).
//!
//! ### Example
//!
//! ```rust
//! # use std::env;
//! use clickhouse_rs::{Block, Pool, errors::Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//! let ddl = r"
//! CREATE TABLE IF NOT EXISTS payment (
//! customer_id UInt32,
//! amount UInt32,
//! account_name Nullable(FixedString(3))
//! ) Engine=Memory";
//!
//! let block = Block::new()
//! .column("customer_id", vec![1_u32, 3, 5, 7, 9])
//! .column("amount", vec![2_u32, 4, 6, 8, 10])
//! .column("account_name", vec![Some("foo"), None, None, None, Some("bar")]);
//!
//! # let database_url = env::var("DATABASE_URL").unwrap_or("tcp://localhost:9000?compression=lz4".into());
//! let pool = Pool::new(database_url);
//!
//! let mut client = pool.get_handle().await?;
//! client.execute(ddl).await?;
//! client.insert("payment", block).await?;
//! let block = client.query("SELECT * FROM payment").fetch_all().await?;
//!
//! for row in block.rows() {
//! let id: u32 = row.get("customer_id")?;
//! let amount: u32 = row.get("amount")?;
//! let name: Option<&str> = row.get("account_name")?;
//! println!("Found payment {}: {} {:?}", id, amount, name);
//! }
//! Ok(())
//! }
//! ```
#![recursion_limit = "1024"]
use std::{fmt, future::Future, time::Duration};
use futures_util::{
future, future::BoxFuture, future::FutureExt, stream, stream::BoxStream, StreamExt,
};
use log::{info, warn};
use crate::{
connecting_stream::ConnectingStream,
errors::{DriverError, Error, Result},
io::ClickhouseTransport,
pool::PoolBinding,
retry_guard::retry_guard,
types::{
query_result::stream_blocks::BlockStream, Cmd, Context, IntoOptions, OptionsSource, Packet,
Query, QueryResult, SqlType,
},
};
pub use crate::{
pool::Pool,
types::{block::Block, Options},
};
mod binary;
mod client_info;
mod connecting_stream;
/// Error types.
pub mod errors;
mod io;
/// Pool types.
pub mod pool;
mod retry_guard;
/// Clickhouse types.
pub mod types;
/// This macro is a convenient way to pass a row into a block.
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{customer_id: 1, amount: 2, account_name: "foo"})?;
/// block.push(row!{customer_id: 4, amount: 4, account_name: "bar"})?;
/// block.push(row!{customer_id: 5, amount: 5, account_name: "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// If a column name has special characters, you can use the alternative syntax
/// with `=>` to pass an expression as the column name:
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{"customer.id" => 1, amount: 2, "account.name" => "foo"})?;
/// block.push(row!{"customer.id" => 4, amount: 4, "account.name" => "bar"})?;
/// block.push(row!{"customer.id" => 5, amount: 5, "account.name" => "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// You can also use `Vec<(String, Value)>` to construct a row and insert it into a block:
///
/// ```rust
/// # use clickhouse_rs::{Block, errors::Error, types::Value};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// for i in 1..10 {
/// let mut row = Vec::new();
/// for j in 1..10 {
/// row.push((format!("#{}", j), Value::from(i * j)));
/// }
/// block.push(row)?;
/// }
/// assert_eq!(block.row_count(), 9);
/// # println!("{:?}", block);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
#[macro_export]
macro_rules! row {
() => { $crate::types::RNil };
( $i:ident, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($i).into(), $i.into())
};
( $i:ident ) => { row!($i: $i) };
( $k:ident: $v:expr ) => {
$crate::types::RNil.put(stringify!($k).into(), $v.into())
};
( $k:ident: $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($k).into(), $v.into())
};
( $k:expr => $v:expr ) => {
$crate::types::RNil.put($k.into(), $v.into())
};
( $k:expr => $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connec | ns: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
                    warn!("You should use port 9000 instead of 8123 because clickhouse-rs works over the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
    /// Executes a Clickhouse `query` on this connection.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
    /// Convenience method to insert a block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
        T: 'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
        F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send + 'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
Box::pin(f(self))
}
}
/// Check connection and try to reconnect if necessary.
pub async fn check_connection(&mut self) -> Result<()> {
self.pool.detach();
let source = self.context.options.clone();
let pool = self.pool.clone();
let (send_retries, retry_timeout) = {
let options = try_opt!(source.get());
(options.send_retries, options.retry_timeout)
};
retry_guard(self, &source, pool.into(), send_retries, retry_timeout).await?;
        if !self.pool.is_attached() && self.pool.is_some() {
self.pool.attach();
}
Ok(())
}
pub(crate) fn set_inside(&self, value: bool) {
if let Some(ref inner) = self.inner {
inner.set_inside(value);
} else {
unreachable!()
}
}
}
fn column_name_to_string(name: &str) -> Result<String> {
if name.chars().all(|ch| ch.is_numeric()) {
return Ok(name.to_string());
}
if name.chars().any(|ch| ch == '`') {
        let err = format!("Column name {:?} shouldn't contain backticks.", name);
return Err(Error::Other(err.into()));
}
Ok(format!("`{}`", name)) | t(optio | identifier_name |
lib.rs | host[:port]/database?param1=value1&...&paramN=valueN
//! ```
//!
//! parameters:
//!
//! - `compression` - Whether or not to use compression (defaults to `none`). Possible choices:
//! * `none`
//! * `lz4`
//!
//! - `readonly` - Restricts permissions for reading data, writing data, and changing settings (defaults to `none`). Possible choices:
//! * `0` - All queries are allowed.
//! * `1` - Only read data queries are allowed.
//! * `2` - Read data and change settings queries are allowed.
//!
//! - `connection_timeout` - Timeout for connection (defaults to `500 ms`)
//! - `keepalive` - TCP keep alive timeout in milliseconds.
//! - `nodelay` - Whether to enable `TCP_NODELAY` (defaults to `true`).
//!
//! - `pool_min` - Lower bound of opened connections for `Pool` (defaults to `10`).
//! - `pool_max` - Upper bound of opened connections for `Pool` (defaults to `20`).
//!
//! - `ping_before_query` - Ping the server before executing each query (defaults to `true`).
//! - `send_retries` - Number of retries when sending a request to the server (defaults to `3`).
//! - `retry_timeout` - Amount of time to wait before the next retry (defaults to `5 sec`).
//! - `ping_timeout` - Timeout for ping (defaults to `500 ms`).
//!
//! - `alt_hosts` - Comma-separated list of single-address hosts for load balancing.
//!
//! example:
//! ```url
//! tcp://user:password@host:9000/clicks?compression=lz4&ping_timeout=42ms
//! ```
//!
//! ## Optional features
//!
//! `clickhouse-rs` puts some functionality behind optional features to optimize compile time
//! for the most common use cases. The following features are available.
//!
//! - `tokio_io` *(enabled by default)* — I/O based on [Tokio](https://tokio.rs/).
//! - `async_std` — I/O based on [async-std](https://async.rs/) (doesn't work together with `tokio_io`).
//! - `tls` — TLS support (allowed only with `tokio_io`).
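//!
//! For example, to switch to the `async_std` backend you would disable the default
//! `tokio_io` feature and enable `async_std` in `Cargo.toml` (the version below is a
//! placeholder):
//!
//! ```toml
//! [dependencies]
//! clickhouse-rs = { version = "*", default-features = false, features = ["async_std"] }
//! ```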
//!
//! ### Example
//!
//! ```rust
//! # use std::env;
//! use clickhouse_rs::{Block, Pool, errors::Error};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Error> {
//! let ddl = r"
//! CREATE TABLE IF NOT EXISTS payment (
//! customer_id UInt32,
//! amount UInt32,
//! account_name Nullable(FixedString(3))
//! ) Engine=Memory";
//!
//! let block = Block::new()
//! .column("customer_id", vec![1_u32, 3, 5, 7, 9])
//! .column("amount", vec![2_u32, 4, 6, 8, 10])
//! .column("account_name", vec![Some("foo"), None, None, None, Some("bar")]);
//!
//! # let database_url = env::var("DATABASE_URL").unwrap_or("tcp://localhost:9000?compression=lz4".into());
//! let pool = Pool::new(database_url);
//!
//! let mut client = pool.get_handle().await?;
//! client.execute(ddl).await?;
//! client.insert("payment", block).await?;
//! let block = client.query("SELECT * FROM payment").fetch_all().await?;
//!
//! for row in block.rows() {
//! let id: u32 = row.get("customer_id")?;
//! let amount: u32 = row.get("amount")?;
//! let name: Option<&str> = row.get("account_name")?;
//! println!("Found payment {}: {} {:?}", id, amount, name);
//! }
//! Ok(())
//! }
//! ```
#![recursion_limit = "1024"]
use std::{fmt, future::Future, time::Duration};
use futures_util::{
future, future::BoxFuture, future::FutureExt, stream, stream::BoxStream, StreamExt,
};
use log::{info, warn};
use crate::{
connecting_stream::ConnectingStream,
errors::{DriverError, Error, Result},
io::ClickhouseTransport,
pool::PoolBinding,
retry_guard::retry_guard,
types::{
query_result::stream_blocks::BlockStream, Cmd, Context, IntoOptions, OptionsSource, Packet,
Query, QueryResult, SqlType,
},
};
pub use crate::{
pool::Pool,
types::{block::Block, Options},
};
mod binary;
mod client_info;
mod connecting_stream;
/// Error types.
pub mod errors;
mod io;
/// Pool types.
pub mod pool;
mod retry_guard;
/// Clickhouse types.
pub mod types;
/// This macro is a convenient way to pass a row into a block.
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{customer_id: 1, amount: 2, account_name: "foo"})?;
/// block.push(row!{customer_id: 4, amount: 4, account_name: "bar"})?;
/// block.push(row!{customer_id: 5, amount: 5, account_name: "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// If a column name has special characters, you can use the alternative syntax
/// with `=>` to pass an expression as the column name:
///
/// ```rust
/// # use clickhouse_rs::{Block, row, errors::Error};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// block.push(row!{"customer.id" => 1, amount: 2, "account.name" => "foo"})?;
/// block.push(row!{"customer.id" => 4, amount: 4, "account.name" => "bar"})?;
/// block.push(row!{"customer.id" => 5, amount: 5, "account.name" => "baz"})?;
/// # assert_eq!(block.row_count(), 3);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
///
/// You can also use `Vec<(String, Value)>` to construct a row and insert it into a block:
///
/// ```rust
/// # use clickhouse_rs::{Block, errors::Error, types::Value};
/// # fn make_block() -> Result<(), Error> {
/// let mut block = Block::new();
/// for i in 1..10 {
/// let mut row = Vec::new();
/// for j in 1..10 {
/// row.push((format!("#{}", j), Value::from(i * j)));
/// }
/// block.push(row)?;
/// }
/// assert_eq!(block.row_count(), 9);
/// # println!("{:?}", block);
/// # Ok(())
/// # }
/// # make_block().unwrap()
/// ```
#[macro_export]
macro_rules! row {
() => { $crate::types::RNil };
( $i:ident, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($i).into(), $i.into())
};
( $i:ident ) => { row!($i: $i) };
( $k:ident: $v:expr ) => {
$crate::types::RNil.put(stringify!($k).into(), $v.into())
};
( $k:ident: $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put(stringify!($k).into(), $v.into())
};
( $k:expr => $v:expr ) => {
$crate::types::RNil.put($k.into(), $v.into())
};
( $k:expr => $v:expr, $($tail:tt)* ) => {
row!( $($tail)* ).put($k.into(), $v.into())
};
}
#[macro_export]
macro_rules! try_opt {
($expr:expr) => {
match $expr {
Ok(val) => val,
Err(err) => return Err(err),
}
};
}
#[doc(hidden)]
pub struct Client {
_private: (),
}
/// Clickhouse client handle.
pub struct ClientHandle {
inner: Option<ClickhouseTransport>,
context: Context,
pool: PoolBinding,
}
impl fmt::Debug for ClientHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ClientHandle")
.field("server_info", &self.context.server_info)
.finish()
}
}
impl Client {
#[deprecated(since = "0.1.4", note = "please use Pool to connect")]
pub async fn connect(options: Options) -> Result<ClientHandle> {
let source = options.into_options_src();
Self::open(source, None).await
}
pub(crate) async fn open(source: OptionsSource, pool: Option<Pool>) -> Result<ClientHandle> {
let options = try_opt!(source.get());
let compress = options.compression;
let timeout = options.connection_timeout;
let context = Context {
options: source.clone(),
..Context::default()
};
with_timeout(
async move {
let addr = match &pool {
None => &options.addr,
Some(p) => p.get_addr(),
};
info!("try to connect to {}", addr);
if addr.port() == Some(8123) {
                    warn!("You should use port 9000 instead of 8123 because clickhouse-rs works over the binary interface.");
}
let mut stream = ConnectingStream::new(addr, &options).await?;
stream.set_nodelay(options.nodelay)?;
stream.set_keepalive(options.keepalive)?;
let transport = ClickhouseTransport::new(stream, compress, pool.clone());
let mut handle = ClientHandle {
inner: Some(transport),
context,
pool: match pool {
None => PoolBinding::None,
Some(p) => PoolBinding::Detached(p),
},
};
handle.hello().await?;
Ok(handle)
},
timeout,
)
.await
}
}
impl ClientHandle {
pub(crate) async fn hello(&mut self) -> Result<()> {
let context = self.context.clone();
info!("[hello] -> {:?}", &context);
let mut h = None;
let mut info = None;
let mut stream = self.inner.take().unwrap().call(Cmd::Hello(context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Hello(inner, server_info)) => {
info!("[hello] <- {:?}", &server_info);
h = Some(inner);
info = Some(server_info);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
self.context.server_info = info.unwrap();
Ok(())
}
pub async fn ping(&mut self) -> Result<()> {
let timeout = try_opt!(self.context.options.get()).ping_timeout;
with_timeout(
async move {
info!("[ping]");
let mut h = None;
let transport = self.inner.take().unwrap().clear().await?;
let mut stream = transport.call(Cmd::Ping);
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Pong(inner)) => {
info!("[pong]");
h = Some(inner);
}
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
self.inner = h;
Ok(())
},
timeout,
)
.await
}
    /// Executes a Clickhouse `query` on this connection.
pub fn query<Q>(&mut self, sql: Q) -> QueryResult
where
Query: From<Q>,
{
let query = Query::from(sql);
QueryResult {
client: self,
query,
}
}
/// Convenience method to prepare and execute a single SQL statement.
pub async fn execute<Q>(&mut self, sql: Q) -> Result<()>
where
Query: From<Q>,
{
let transport = self.execute_(sql).await?;
self.inner = Some(transport);
Ok(())
}
async fn execute_<Q>(&mut self, sql: Q) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.execute_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let context = self.context.clone();
let query = Query::from(sql);
with_timeout(
async {
self.wrap_future(move |c| {
info!("[execute query] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let mut h = None;
let transport = transport.clear().await?;
let mut stream = transport.call(Cmd::SendQuery(query, context.clone()));
while let Some(packet) = stream.next().await {
match packet {
Ok(Packet::Eof(inner)) => h = Some(inner),
Ok(Packet::Block(_))
| Ok(Packet::ProfileInfo(_))
| Ok(Packet::Progress(_)) => (),
Ok(Packet::Exception(e)) => return Err(Error::Server(e)),
Err(e) => return Err(Error::Io(e)),
_ => return Err(Error::Driver(DriverError::UnexpectedPacket)),
}
}
Ok(h.unwrap())
}
})
.await
},
timeout,
)
.await
}
    /// Convenience method to insert a block of data.
pub async fn insert<Q, B>(&mut self, table: Q, block: B) -> Result<()>
where
Query: From<Q>,
B: AsRef<Block>,
{
let transport = self.insert_(table, block.as_ref()).await?;
self.inner = Some(transport);
Ok(())
}
async fn insert_<Q>(&mut self, table: Q, block: &Block) -> Result<ClickhouseTransport>
where
Query: From<Q>,
{
let timeout = try_opt!(self.context.options.get())
.insert_timeout
.unwrap_or_else(|| Duration::from_secs(0));
let mut names: Vec<_> = Vec::with_capacity(block.column_count());
for column in block.columns() {
names.push(try_opt!(column_name_to_string(column.name())));
}
let fields = names.join(", ");
let query = Query::from(table)
.map_sql(|table| format!("INSERT INTO {} ({}) VALUES", table, fields));
let context = self.context.clone();
with_timeout(
async {
self.wrap_future(move |c| {
info!("[insert] {}", query.get_sql());
let transport = c.inner.take().unwrap();
async move {
let transport = transport.clear().await?;
let stream = transport.call(Cmd::SendQuery(query, context.clone()));
let (transport, b) = stream.read_block().await?;
let dst_block = b.unwrap();
let casted_block = match block.cast_to(&dst_block) {
Ok(value) => value,
Err(err) => return Err(err),
};
let send_cmd = Cmd::Union(
Box::new(Cmd::SendData(casted_block, context.clone())),
Box::new(Cmd::SendData(Block::default(), context.clone())),
);
let (transport, _) = transport.call(send_cmd).read_block().await?;
Ok(transport)
}
})
.await
},
timeout,
)
.await
}
pub(crate) async fn wrap_future<T, R, F>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> R + Send,
R: Future<Output = Result<T>>,
        T: 'static,
{
let ping_before_query = try_opt!(self.context.options.get()).ping_before_query;
if ping_before_query {
self.check_connection().await?;
}
f(self).await
}
pub(crate) fn wrap_stream<'a, F>(&'a mut self, f: F) -> BoxStream<'a, Result<Block>>
where
        F: (FnOnce(&'a mut Self) -> BlockStream<'a>) + Send + 'static,
{
let ping_before_query = match self.context.options.get() {
Ok(val) => val.ping_before_query,
Err(err) => return Box::pin(stream::once(future::err(err))),
};
if ping_before_query {
let fut: BoxFuture<'a, BoxStream<'a, Result<Block>>> = Box::pin(async move {
let inner: BoxStream<'a, Result<Block>> = match self.check_connection().await {
Ok(_) => Box::pin(f(self)),
Err(err) => Box::pin(stream::once(future::err(err))),
};
inner
});
Box::pin(fut.flatten_stream())
} else {
|
/// Check connection and try to reconnect if necessary.
pub async fn check_connection(&mut self) -> Result<()> {
self.pool.detach();
let source = self.context.options.clone();
let pool = self.pool.clone();
let (send_retries, retry_timeout) = {
let options = try_opt!(source.get());
(options.send_retries, options.retry_timeout)
};
retry_guard(self, &source, pool.into(), send_retries, retry_timeout).await?;
        if !self.pool.is_attached() && self.pool.is_some() {
self.pool.attach();
}
Ok(())
}
pub(crate) fn set_inside(&self, value: bool) {
if let Some(ref inner) = self.inner {
inner.set_inside(value);
} else {
unreachable!()
}
}
}
fn column_name_to_string(name: &str) -> Result<String> {
if name.chars().all(|ch| ch.is_numeric()) {
return Ok(name.to_string());
}
if name.chars().any(|ch| ch == '`') {
        let err = format!("Column name {:?} shouldn't contain backticks.", name);
return Err(Error::Other(err.into()));
}
Ok(format!("`{}`", name))
} | Box::pin(f(self))
}
} | conditional_block |
main.rs | //! A cargo subcommand for displaying line counts of source code in projects,
//! including a naive `unsafe` counter for Rust source files. This subcommand
//! was originally based off and inspired by the project
//! [tokei](https://github.com/aaronepower/tokei) by
//! [Aaronepower](https://github.com/aaronepower)
//!
//! ## Demo
//!
//! To count the source code in the [Rust](https://github.com/rust-lang/rust)
//! repository (checkout `4c99649`), and print some naive statistics on how much
//! "unsafe" code exists.
//!
//! **NOTE:** The Rust repository is quite large, if you're on a slow internet
//! connection consider using a smaller repository, such as the `cargo-count`
//! repo.
//!
//! ```ignore
//! $ git clone https://github.com/rust-lang/rust
//! $ cd rust
//! $ cargo count --separator , --unsafe-statistics
//! Gathering information...
//! Language Files Lines Blanks Comments Code Unsafe (%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Rust 6,018 528,510 66,984 133,698 327,792 3,163
//! (0.96%)
//! C 54 9,962 1,445 1,492 7,025 7,025
//! (100.00%)
//! CSS 4 1,266 149 52 1,065
//! JavaScript 4 1,118 131 166 821
//! Python 31 4,797 843 585 3,369
//! C Header 13 1,865 284 585 996 996
//! (100.00%)
//! C++ 4 1,611 185 81 1,345 1,345
//! (100.00%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Totals: 6,128 549,129 70,021 136,659 342,413 12,529
//! (3.66%)
//!
//! ```
//!
//! The `--separator ,` sets a `,` character as the thousands separator, and
//! `--unsafe-statistics` looks for, and counts lines of `unsafe`.
//!
//! ## Compiling
//!
//! Follow these instructions to compile `cargo-count`, then skip down to
//! Installation.
//!
//! 1. Ensure you have current version of `cargo` and
//! [Rust](https://www.rust-lang.org) installed
//! 2. Clone the project
//! `$ git clone https://github.com/kbknapp/cargo-count && cd cargo-count`
//! 3. Build the project `$ cargo build --release` (**NOTE:** There is a large
//! performance difference when compiling without optimizations, so I
//! recommend always using `--release` to enable them)
//! 4. Once complete, the binary will be located at
//! `target/release/cargo-count`
//!
//! ## Installation and Usage
//!
//! All you need to do is place `cargo-count` somewhere in your `$PATH`. Then
//! run `cargo count` anywhere in your project directory. For full details see
//! below.
//!
//! ### Linux / OS X
//!
//! You have two options, place `cargo-count` into a directory that is already
//! located in your `$PATH` variable (To see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`, the quotation marks are
//! important), or you can add a custom directory to your `$PATH`
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy the `cargo-count` to that
//! directory `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
//!
//! ### Windows
//!
//! On Windows 7/8 you can add directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure you have the `cargo-count` binary in the directory which
//! you operating in the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line to
//! `C:\my_project\` to use `cargo-count` ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self-explanatory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languages (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//!'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current
//! directory, or absolute. When '<path>' is a file, it must be relative to the
//! current
//! directory or it will not be found. For example, if the current directory has a
//! child
//! directory named 'target' with a child file 'test.rs' and you use `--exclude
//! target/test.rs'
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories
//! of the current directory you could do '--exclude */test.rs'.
//! ```
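//!
//! For instance, a run that narrows the count to Rust sources, skips the build
//! directory, and prints unsafe statistics might look like this (paths are
//! illustrative):
//!
//! ```ignore
//! $ cargo count src tests --language rs --exclude target --unsafe-statistics --separator ,
//! ```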
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT. See the LICENSE-MIT
//! file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications
)]
#[macro_use]
extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <[email protected]>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languages (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file or path, it must be relative to the current directory \
or it will not be found. Example, if the current directory has a child directory named 'target' \
with a child fild 'test.rs' and you use `--exclude target/test.rs'
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> | counts.fill_from();
cli_try!(counts.count());
cli_try!(counts.write_results());
Ok(())
}
fn single_char(s: String) -> Result<(), String> {
if s.len() == 1 {
Ok(())
} else {
Err(format!(
"the --separator argument option only accepts a single character but found '{}'",
Format::Warning(s)
))
}
}
| {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg); | identifier_body |
main.rs | //! A cargo subcommand for displaying line counts of source code in projects,
//! including a naive `unsafe` counter for Rust source files. This subcommand
//! was originally based off and inspired by the project
//! [tokei](https://github.com/aaronepower/tokei) by
//! [Aaronepower](https://github.com/aaronepower)
//!
//! ## Demo
//!
//! To count the source code in the [Rust](https://github.com/rust-lang/rust)
//! repository (checkout `4c99649`), and print some naive statistics on how much
//! "unsafe" code exists.
//!
//! **NOTE:** The Rust repository is quite large, if you're on a slow internet
//! connection consider using a smaller repository, such as the `cargo-count`
//! repo.
//!
//! ```ignore
//! $ git clone https://github.com/rust-lang/rust
//! $ cd rust
//! $ cargo count --separator , --unsafe-statistics
//! Gathering information...
//! Language Files Lines Blanks Comments Code Unsafe (%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Rust 6,018 528,510 66,984 133,698 327,792 3,163
//! (0.96%)
//! C 54 9,962 1,445 1,492 7,025 7,025
//! (100.00%)
//! CSS 4 1,266 149 52 1,065
//! JavaScript 4 1,118 131 166 821
//! Python 31 4,797 843 585 3,369
//! C Header 13 1,865 284 585 996 996
//! (100.00%)
//! C++ 4 1,611 185 81 1,345 1,345
//! (100.00%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Totals: 6,128 549,129 70,021 136,659 342,413 12,529
//! (3.66%)
//!
//! ```
//!
//! The `--separator ,` sets a `,` character as the thousands separator, and
//! `--unsafe-statistics` looks for, and counts lines of `unsafe`.
//!
//! ## Compiling
//!
//! Follow these instructions to compile `cargo-count`, then skip down to
//! Installation.
//!
//! 1. Ensure you have current version of `cargo` and
//! [Rust](https://www.rust-lang.org) installed
//! 2. Clone the project
//! `$ git clone https://github.com/kbknapp/cargo-count && cd cargo-count`
//! 3. Build the project `$ cargo build --release` (**NOTE:** There is a large
//! performance difference when compiling without optimizations, so I
//! recommend always using `--release` to enable them)
//! 4. Once complete, the binary will be located at
//! `target/release/cargo-count`
//!
//! ## Installation and Usage
//!
//! All you need to do is place `cargo-count` somewhere in your `$PATH`. Then
//! run `cargo count` anywhere in your project directory. For full details see
//! below.
//!
//! ### Linux / OS X
//!
//! You have two options, place `cargo-count` into a directory that is already
//! located in your `$PATH` variable (To see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`, the quotation marks are
//! important), or you can add a custom directory to your `$PATH`
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy the `cargo-count` to that
//! directory `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
//!
//! ### Windows
//!
//! On Windows 7/8 you can add directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure you have the `cargo-count` binary in the directory which
//! you operating in the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line to
//! `C:\my_project\` to use `cargo-count` ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self-explanatory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languages (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//!'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current directory, or absolute. When '<path>' is a file, it must be
//! relative to the current directory or it will not be found. For example, if
//! the current directory has a child directory named 'target' with a child
//! file 'test.rs', you would use '--exclude target/test.rs'.
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories of the current directory you could do
//! '--exclude */test.rs'.
//! ```
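//!
//! For example, to count only Rust sources, skip the `target` directory, and
//! report `unsafe` statistics (an illustrative invocation; adjust the
//! extensions and paths to your own project):
//!
//! ```sh
//! $ cargo count -l rs -e target --unsafe-statistics
//! ```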
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT license. See the
//! LICENSE-MIT file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications | extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to be generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <[email protected]>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languages (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file, it must be relative to the current directory \
or it will not be found. For example, if the current directory has a child directory named 'target' \
with a child file 'test.rs', you would use '--exclude target/test.rs'.
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg);
counts.fill_from();
cli_try!(counts.count());
cli_try!(counts.write_results());
Ok(())
}
fn single_char(s: String) -> Result<(), String> {
if s.len() == 1 {
Ok(())
} else {
Err(format!(
"the --separator argument option only accepts a single character but found '{}'",
Format::Warning(s)
))
}
} | )]
#[macro_use] | random_line_split |
main.rs | //! A cargo subcommand for displaying line counts of source code in projects,
//! including a naive `unsafe` counter for Rust source files. This subcommand
//! was originally based off and inspired by the project
//! [tokei](https://github.com/aaronepower/tokei) by
//! [Aaronepower](https://github.com/aaronepower)
//!
//! ## Demo
//!
//! To count the source code in the [Rust](https://github.com/rust-lang/rust)
//! repository (checkout `4c99649`), and print some naive statistics on how much
//! "unsafe" code exists.
//!
//! **NOTE:** The Rust repository is quite large; if you're on a slow internet
//! connection, consider using a smaller repository, such as the `cargo-count`
//! repo.
//!
//! ```ignore
//! $ git clone https://github.com/rust-lang/rust
//! $ cd rust
//! $ cargo count --separator, --unsafe-statistics
//! Gathering information...
//! Language Files Lines Blanks Comments Code Unsafe (%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Rust 6,018 528,510 66,984 133,698 327,792 3,163
//! (0.96%)
//! C 54 9,962 1,445 1,492 7,025 7,025
//! (100.00%)
//! CSS 4 1,266 149 52 1,065
//! JavaScript 4 1,118 131 166 821
//! Python 31 4,797 843 585 3,369
//! C Header 13 1,865 284 585 996 996
//! (100.00%)
//! C++ 4 1,611 185 81 1,345 1,345
//! (100.00%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Totals: 6,128 549,129 70,021 136,659 342,413 12,529
//! (3.66%)
//!
//! ```
//!
//! The `--separator,` sets a `,` character as the thousands separator, and
//! `--unsafe-statistics` looks for, and counts lines of `unsafe`.
//!
//! ## Compiling
//!
//! Follow these instructions to compile `cargo-count`, then skip down to
//! Installation.
//!
//! 1. Ensure you have current version of `cargo` and
//! [Rust](https://www.rust-lang.org) installed
//! 2. Clone the project
//! `$ git clone https://github.com/kbknapp/cargo-count && cd cargo-count`
//! 3. Build the project `$ cargo build --release` (**NOTE:** There is a large
//! performance difference when compiling without optimizations, so I
//! recommend always using `--release` to enable them)
//! 4. Once complete, the binary will be located at
//! `target/release/cargo-count`
//!
//! ## Installation and Usage
//!
//! All you need to do is place `cargo-count` somewhere in your `$PATH`. Then
//! run `cargo count` anywhere in your project directory. For full details see
//! below.
//!
//! ### Linux / OS X
//!
//! You have two options: place `cargo-count` into a directory that is already
//! located in your `$PATH` variable (to see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`; the quotation marks are
//! important), or add a custom directory to your `$PATH`.
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy `cargo-count` to that
//! directory: `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
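//!
//! To confirm that the binary is now picked up from your `$PATH` (an
//! illustrative check; the exact output depends on where you copied the
//! file), you can run:
//!
//! ```sh
//! $ which cargo-count
//! $ cargo count --version
//! ```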
//!
//! ### Windows
//!
//! On Windows 7/8 you can add a directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure the `cargo-count` binary is in the directory you are
//! running the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line at
//! `C:\my_project\` to use `cargo-count`, ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self-explanatory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languages (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//! 'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current directory, or absolute. When '<path>' is a file, it must be
//! relative to the current directory or it will not be found. For example, if
//! the current directory has a child directory named 'target' with a child
//! file 'test.rs', you would use '--exclude target/test.rs'.
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories of the current directory you could do
//! '--exclude */test.rs'.
//! ```
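//!
//! For example, to count only Rust sources, skip the `target` directory, and
//! report `unsafe` statistics (an illustrative invocation; adjust the
//! extensions and paths to your own project):
//!
//! ```sh
//! $ cargo count -l rs -e target --unsafe-statistics
//! ```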
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT license. See the
//! LICENSE-MIT file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications
)]
#[macro_use]
extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to be generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <[email protected]>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languages (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file, it must be relative to the current directory \
or it will not be found. For example, if the current directory has a child directory named 'target' \
with a child file 'test.rs', you would use '--exclude target/test.rs'.
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg);
counts.fill_from();
cli_try!(counts.count());
cli_try!(counts.write_results());
Ok(())
}
fn single_char(s: String) -> Result<(), String> {
if s.len() == 1 | else {
Err(format!(
"the --separator argument option only accepts a single character but found '{}'",
Format::Warning(s)
))
}
}
| {
Ok(())
} | conditional_block |
main.rs | //! A cargo subcommand for displaying line counts of source code in projects,
//! including a naive `unsafe` counter for Rust source files. This subcommand
//! was originally based off and inspired by the project
//! [tokei](https://github.com/aaronepower/tokei) by
//! [Aaronepower](https://github.com/aaronepower)
//!
//! ## Demo
//!
//! To count the source code in the [Rust](https://github.com/rust-lang/rust)
//! repository (checkout `4c99649`), and print some naive statistics on how much
//! "unsafe" code exists.
//!
//! **NOTE:** The Rust repository is quite large; if you're on a slow internet
//! connection, consider using a smaller repository, such as the `cargo-count`
//! repo.
//!
//! ```ignore
//! $ git clone https://github.com/rust-lang/rust
//! $ cd rust
//! $ cargo count --separator, --unsafe-statistics
//! Gathering information...
//! Language Files Lines Blanks Comments Code Unsafe (%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Rust 6,018 528,510 66,984 133,698 327,792 3,163
//! (0.96%)
//! C 54 9,962 1,445 1,492 7,025 7,025
//! (100.00%)
//! CSS 4 1,266 149 52 1,065
//! JavaScript 4 1,118 131 166 821
//! Python 31 4,797 843 585 3,369
//! C Header 13 1,865 284 585 996 996
//! (100.00%)
//! C++ 4 1,611 185 81 1,345 1,345
//! (100.00%)
//! -------- ----- ----- ------ -------- ---- ----------
//! Totals: 6,128 549,129 70,021 136,659 342,413 12,529
//! (3.66%)
//!
//! ```
//!
//! The `--separator,` sets a `,` character as the thousands separator, and
//! `--unsafe-statistics` looks for, and counts lines of `unsafe`.
//!
//! ## Compiling
//!
//! Follow these instructions to compile `cargo-count`, then skip down to
//! Installation.
//!
//! 1. Ensure you have current version of `cargo` and
//! [Rust](https://www.rust-lang.org) installed
//! 2. Clone the project
//! `$ git clone https://github.com/kbknapp/cargo-count && cd cargo-count`
//! 3. Build the project `$ cargo build --release` (**NOTE:** There is a large
//! performance difference when compiling without optimizations, so I
//! recommend always using `--release` to enable them)
//! 4. Once complete, the binary will be located at
//! `target/release/cargo-count`
//!
//! ## Installation and Usage
//!
//! All you need to do is place `cargo-count` somewhere in your `$PATH`. Then
//! run `cargo count` anywhere in your project directory. For full details see
//! below.
//!
//! ### Linux / OS X
//!
//! You have two options: place `cargo-count` into a directory that is already
//! located in your `$PATH` variable (to see which directories those are, open
//! a terminal and type `echo "${PATH//:/\n}"`; the quotation marks are
//! important), or add a custom directory to your `$PATH`.
//!
//! **Option 1**
//! If you have write permission to a directory listed in your `$PATH` or you
//! have root permission (or via `sudo`), simply copy `cargo-count` to that
//! directory: `# sudo cp cargo-count /usr/local/bin`
//!
//! **Option 2**
//! If you do not have root, `sudo`, or write permission to any directory
//! already in `$PATH` you can create a directory inside your home directory,
//! and add that. Many people use `$HOME/.bin` to keep it hidden (and not
//! clutter your home directory), or `$HOME/bin` if you want it to be always
//! visible. Here is an example to make the directory, add it to `$PATH`, and
//! copy `cargo-count` there.
//!
//! Simply change `bin` to whatever you'd like to name the directory, and
//! `.bashrc` to whatever your shell startup file is (usually `.bashrc`,
//! `.bash_profile`, or `.zshrc`)
//!
//! ```sh
//! $ mkdir ~/bin
//! $ echo "export PATH=$PATH:$HOME/bin" >> ~/.bashrc
//! $ cp cargo-count ~/bin
//! $ source ~/.bashrc
//! ```
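//!
//! To confirm that the binary is now picked up from your `$PATH` (an
//! illustrative check; the exact output depends on where you copied the
//! file), you can run:
//!
//! ```sh
//! $ which cargo-count
//! $ cargo count --version
//! ```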
//!
//! ### Windows
//!
//! On Windows 7/8 you can add a directory to the `PATH` variable by opening a
//! command line as an administrator and running
//!
//! ```sh
//! C:\> setx path "%path%;C:\path\to\cargo-count\binary"
//! ```
//!
//! Otherwise, ensure the `cargo-count` binary is in the directory you are
//! running the command line from, because Windows automatically adds
//! your current directory to PATH (i.e. if you open a command line at
//! `C:\my_project\` to use `cargo-count`, ensure `cargo-count.exe` is inside
//! that directory as well).
//!
//!
//! ### Options
//!
//! There are a few options for using `cargo-count` which should be somewhat
//! self-explanatory.
//!
//! ```ignore
//! USAGE:
//! cargo count [FLAGS] [OPTIONS] [--] [ARGS]
//!
//! FLAGS:
//! -S, --follow-symlinks Follows symlinks and counts source files it
//! finds
//! -a, --all Do not ignore .gitignored paths
//! (Defaults to false when omitted)
//! -h, --help Prints help information
//! --unsafe-statistics Displays lines and percentages of "unsafe"
//! code
//! -V, --version Prints version information
//! -v, --verbose Print verbose output
//!
//! OPTIONS:
//! -l, --language <exts>... Only count these languages (by source code
//! extension)
//! (i.e. '-l js py cpp')
//! -e, --exclude <paths>... Files or directories to exclude
//! (automatically includes '.git')
//! --utf8-rule <rule> Sets the UTF-8 parsing rule (Defaults to
//! 'strict')
//! [values: ignore lossy strict]
//! -s, --separator <sep> Set the thousands separator for pretty
//! printing
//!
//! ARGS:
//! to_count... The files or directories (including children) to count
//! (defaults to current working directory when omitted)
//!
//! When using '--exclude <path>' the path given can either be relative to the
//! current directory, or absolute. When '<path>' is a file, it must be
//! relative to the current directory or it will not be found. For example, if
//! the current directory has a child directory named 'target' with a child
//! file 'test.rs', you would use '--exclude target/test.rs'.
//!
//! Globs are also supported. For example, to exclude 'test.rs' files from all
//! child directories of the current directory you could do
//! '--exclude */test.rs'.
//! ```
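//!
//! For example, to count only Rust sources, skip the `target` directory, and
//! report `unsafe` statistics (an illustrative invocation; adjust the
//! extensions and paths to your own project):
//!
//! ```sh
//! $ cargo count -l rs -e target --unsafe-statistics
//! ```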
//!
//! ## License
//!
//! `cargo-count` is released under the terms of the MIT license. See the
//! LICENSE-MIT file for the details.
#![cfg_attr(feature = "nightly", feature(plugin))]
#![cfg_attr(feature = "lints", plugin(clippy))]
#![cfg_attr(feature = "lints", allow(explicit_iter_loop))]
#![cfg_attr(feature = "lints", allow(should_implement_trait))]
#![cfg_attr(feature = "lints", allow(unstable_features))]
#![cfg_attr(feature = "lints", deny(warnings))]
#![cfg_attr(
not(any(feature = "nightly", feature = "unstable")),
deny(unstable_features)
)]
#![deny(
missing_docs,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications
)]
#[macro_use]
extern crate clap;
#[cfg(feature = "color")]
extern crate ansi_term;
extern crate gitignore;
extern crate glob;
extern crate regex;
extern crate tabwriter;
#[cfg(feature = "debug")]
use std::env;
use clap::{App, AppSettings, Arg, SubCommand};
use config::Config;
use count::Counts;
use error::{CliError, CliResult};
use fmt::Format;
#[macro_use]
mod macros;
mod comment;
mod config;
mod count;
mod error;
mod fmt;
mod fsutil;
mod language;
static UTF8_RULES: [&'static str; 3] = ["strict", "lossy", "ignore"];
fn main() {
debugln!(
"executing; cmd=cargo-count; args={:?}",
env::args().collect::<Vec<_>>()
);
let m = App::new("cargo-count")
.version(concat!("v", crate_version!()))
// We have to lie about our binary name since this will be a third party
// subcommand for cargo but we want usage strings to be generated properly
.bin_name("cargo")
// Global version uses the version we supplied (Cargo.toml) for all subcommands
// as well
.settings(&[AppSettings::GlobalVersion,
AppSettings::SubcommandRequired])
// We use a subcommand because everything parsed after `cargo` is sent to the
// third party
// plugin which will then be interpreted as a subcommand/positional arg by clap
.subcommand(SubCommand::with_name("count")
.author("Kevin K. <[email protected]>")
.about("Displays line counts of code for cargo projects")
.args_from_usage("
-e, --exclude [PATH]... 'Files or directories to exclude (automatically includes \'.git\')'
-a, --all 'Do not ignore .gitignore'd paths'
--unsafe-statistics 'Displays lines and percentages of \"unsafe\" code'
-l, --language [EXT]... 'Only count these languages (i.e. \'-l js py cpp\')'
-v, --verbose 'Print verbose output'
-S, --follow-symlinks 'Follows symlinks and counts source files it finds [default: false]'
[PATH]... 'The files or directories (including children) to count (defaults to \
current working directory when omitted)'")
.arg(Arg::from_usage(
"-s, --separator [CHAR] 'Set the thousands separator for pretty printing'")
.use_delimiter(false)
.validator(single_char))
.arg(Arg::from_usage("--utf8-rule [RULE] 'Sets the UTF-8 parsing rule'")
.default_value("strict")
.possible_values(&UTF8_RULES))
.after_help("\
When using '--exclude <PATH>' the path given can either be relative to the current directory, or \
absolute. When '--exclude <PATH>' is a file, it must be relative to the current directory \
or it will not be found. For example, if the current directory has a child directory named 'target' \
with a child file 'test.rs', you would use '--exclude target/test.rs'.
\n\
Globs are also supported. For example, to exclude 'test.rs' files from all child directories of \
the current directory you could do '--exclude */test.rs'."))
.get_matches();
if let Some(m) = m.subcommand_matches("count") {
let cfg = Config::from_matches(m).unwrap_or_else(|e| e.exit());
println!("Gathering information...");
if let Err(e) = execute(cfg) {
e.exit();
}
}
}
fn execute(cfg: Config) -> CliResult<()> {
debugln!("executing; cmd=execute;");
verboseln!(cfg, "{}: {:?}", Format::Warning("Excluding"), cfg.exclude);
verbose!(
cfg,
"{}",
if cfg.exts.is_some() {
format!(
"{} including files with extension: {}\n",
Format::Warning("Only"),
cfg.exts.as_ref().unwrap().join(", ")
)
} else {
"".to_owned()
}
);
debugln!("Checking for files or dirs to count from cli");
let mut counts = Counts::new(&cfg);
counts.fill_from();
cli_try!(counts.count());
cli_try!(counts.write_results());
Ok(())
}
fn | (s: String) -> Result<(), String> {
if s.len() == 1 {
Ok(())
} else {
Err(format!(
"the --separator argument option only accepts a single character but found '{}'",
Format::Warning(s)
))
}
}
| single_char | identifier_name |
route.rs | Lit};
use crate::syn_ext::{syn_to_diag, IdentExt};
use self::syn::{Attribute, parse::Parser};
use crate::http_codegen::{Method, MediaType, RoutePath, DataSegment, Optional};
use crate::attribute::segments::{Source, Kind, Segment};
use crate::{ROUTE_FN_PREFIX, ROUTE_STRUCT_PREFIX, URI_MACRO_PREFIX, ROCKET_PARAM_PREFIX};
/// The raw, parsed `#[route]` attribute.
#[derive(Debug, FromMeta)]
struct RouteAttribute {
#[meta(naked)]
method: SpanWrapped<Method>,
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// The raw, parsed `#[method]` (e.g, `get`, `put`, `post`, etc.) attribute.
#[derive(Debug, FromMeta)]
struct MethodRouteAttribute {
#[meta(naked)]
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// This structure represents the parsed `route` attribute and associated items.
#[derive(Debug)]
struct Route {
/// The parsed `#[route]` attribute and its arguments.
attribute: RouteAttribute,
/// The function that was decorated with the `route` attribute.
function: syn::ItemFn,
/// The non-static parameters declared in the route segments.
segments: IndexSet<Segment>,
/// The parsed inputs to the user's function. The first ident is the ident
/// as the user wrote it, while the second ident is the identifier that
/// should be used during code generation, the `rocket_ident`.
inputs: Vec<(syn::Ident, syn::Ident, syn::Type)>,
}
fn parse_route(attr: RouteAttribute, function: syn::ItemFn) -> Result<Route> {
// Gather diagnostics as we proceed.
let mut diags = Diagnostics::new();
// Emit a warning if a `data` param was supplied for non-payload methods.
if let Some(ref data) = attr.data {
if !attr.method.0.supports_payload() {
let msg = format!("'{}' does not typically support payloads", attr.method.0);
data.full_span.warning("`data` used with non-payload-supporting method")
.span_note(attr.method.span, msg)
.emit()
}
}
// Collect all of the dynamic segments in an `IndexSet`, checking for dups.
let mut segments: IndexSet<Segment> = IndexSet::new();
fn dup_check<I>(set: &mut IndexSet<Segment>, iter: I, diags: &mut Diagnostics)
where I: Iterator<Item = Segment>
{
for segment in iter.filter(|s| s.kind != Kind::Static) {
let span = segment.span;
if let Some(previous) = set.replace(segment) {
diags.push(span.error(format!("duplicate parameter: `{}`", previous.name))
.span_note(previous.span, "previous parameter with the same name here"))
}
}
}
dup_check(&mut segments, attr.path.path.iter().cloned(), &mut diags);
attr.path.query.as_ref().map(|q| dup_check(&mut segments, q.iter().cloned(), &mut diags));
dup_check(&mut segments, attr.data.clone().map(|s| s.value.0).into_iter(), &mut diags);
// Check the validity of function arguments.
let mut inputs = vec![];
let mut fn_segments: IndexSet<Segment> = IndexSet::new();
for input in &function.sig.inputs {
let help = "all handler arguments must be of the form: `ident: Type`";
let span = input.span();
let (ident, ty) = match input {
syn::FnArg::Typed(arg) => match *arg.pat {
syn::Pat::Ident(ref pat) => (&pat.ident, &arg.ty),
syn::Pat::Wild(_) => {
diags.push(span.error("handler arguments cannot be ignored").help(help));
continue;
}
_ => {
diags.push(span.error("invalid use of pattern").help(help));
continue;
}
}
// Other cases shouldn't happen since we parsed an `ItemFn`.
_ => {
diags.push(span.error("invalid handler argument").help(help));
continue;
}
};
let rocket_ident = ident.prepend(ROCKET_PARAM_PREFIX);
inputs.push((ident.clone(), rocket_ident, ty.with_stripped_lifetimes()));
fn_segments.insert(ident.into());
}
// Check that all of the declared parameters are function inputs.
let span = match function.sig.inputs.is_empty() {
false => function.sig.inputs.span(),
true => function.span()
};
for missing in segments.difference(&fn_segments) {
diags.push(missing.span.error("unused dynamic parameter")
.span_note(span, format!("expected argument named `{}` here", missing.name)))
}
diags.head_err_or(Route { attribute: attr, function, inputs, segments })
}
fn param_expr(seg: &Segment, ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, error, log, request, _None, _Some, _Ok, _Err, Outcome);
let i = seg.index.expect("dynamic parameters must be indexed");
let span = ident.span().unstable().join(ty.span()).unwrap().into();
let name = ident.to_string();
// All dynamic parameters should be found if this function is being called;
// that's the point of statically checking the URI parameters.
let internal_error = quote!({
#log::error("Internal invariant error: expected dynamic parameter not found.");
#log::error("Please report this error to the Rocket issue tracker.");
#Outcome::Forward(#data)
});
// Returned when a dynamic parameter fails to parse.
let parse_error = quote!({
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, #error));
#Outcome::Forward(#data)
});
let expr = match seg.kind {
Kind::Single => quote_spanned! { span =>
match #req.raw_segment_str(#i) {
#_Some(__s) => match <#ty as #request::FromParam>::from_param(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Multi => quote_spanned! { span =>
match #req.raw_segments(#i) {
#_Some(__s) => match <#ty as #request::FromSegments>::from_segments(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Static => return quote!() | #[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = #expr;
}
}
fn data_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, FromData, Outcome, Transform);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
let __transform = <#ty as #FromData>::transform(#req, #data);
#[allow(unreachable_patterns, unreachable_code)]
let __outcome = match __transform {
#Transform::Owned(#Outcome::Success(__v)) => {
#Transform::Owned(#Outcome::Success(__v))
},
#Transform::Borrowed(#Outcome::Success(ref __v)) => {
#Transform::Borrowed(#Outcome::Success(::std::borrow::Borrow::borrow(__v)))
},
#Transform::Borrowed(__o) => #Transform::Borrowed(__o.map(|_| {
unreachable!("Borrowed(Success(..)) case handled in previous block")
})),
#Transform::Owned(__o) => #Transform::Owned(__o),
};
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #FromData>::from_data(#req, __outcome) {
#Outcome::Success(__d) => __d,
#Outcome::Forward(__d) => return #Outcome::Forward(__d),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn query_exprs(route: &Route) -> Option<TokenStream2> {
define_vars_and_mods!(_None, _Some, _Ok, _Err, _Option);
define_vars_and_mods!(data, trail, log, request, req, Outcome, SmallVec, Query);
let query_segments = route.attribute.path.query.as_ref()?;
let (mut decls, mut matchers, mut builders) = (vec![], vec![], vec![]);
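// For each query segment we build up three pieces of generated code: a local
// declaration, a match arm that captures the raw form item, and a builder
// that finalizes (or defaults) the value once all items have been visited.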
for segment in query_segments {
let name = &segment.name;
let (ident, ty, span) = if segment.kind != Kind::Static {
let (ident, ty) = route.inputs.iter()
.find(|(ident, _, _)| ident == &segment.name)
.map(|(_, rocket_ident, ty)| (rocket_ident, ty))
.unwrap();
let span = ident.span().unstable().join(ty.span()).unwrap();
(Some(ident), Some(ty), span.into())
} else {
(None, None, segment.span.into())
};
let decl = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #ident: #_Option<#ty> = #_None;
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #trail = #SmallVec::<[#request::FormItem; 8]>::new();
},
Kind::Static => quote!()
};
let matcher = match segment.kind {
Kind::Single => quote_spanned! { span =>
(_, #name, __v) => {
#[allow(unreachable_patterns, unreachable_code)]
let __v = match <#ty as #request::FromFormValue>::from_form_value(__v) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
#ident = #_Some(__v);
}
},
Kind::Static => quote! {
(#name, _, _) => continue,
},
Kind::Multi => quote! {
_ => #trail.push(__i),
}
};
let builder = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match #ident.or_else(<#ty as #request::FromFormValue>::default) {
#_Some(__v) => __v,
#_None => {
#log::warn_(&format!("Missing required query parameter '{}'.", #name));
return #Outcome::Forward(#data);
}
};
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match <#ty as #request::FromQuery>::from_query(#Query(&#trail)) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
},
Kind::Static => quote!()
};
decls.push(decl);
matchers.push(matcher);
builders.push(builder);
}
matchers.push(quote!(_ => continue));
Some(quote! {
#(#decls)*
if let #_Some(__items) = #req.raw_query_items() {
for __i in __items {
match (__i.raw.as_str(), __i.key.as_str(), __i.value) {
#(
#[allow(unreachable_patterns, unreachable_code)]
#matchers
)*
}
}
}
#(
#[allow(unreachable_patterns, unreachable_code)]
#builders
)*
})
}
fn request_guard_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, request, Outcome);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #request::FromRequest>::from_request(#req) {
#Outcome::Success(__v) => __v,
#Outcome::Forward(_) => return #Outcome::Forward(#data),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn generate_internal_uri_macro(route: &Route) -> TokenStream2 {
let dynamic_args = route.segments.iter()
.filter(|seg| seg.source == Source::Path || seg.source == Source::Query)
.filter(|seg| seg.kind != Kind::Static)
.map(|seg| &seg.name)
.map(|name| route.inputs.iter().find(|(ident, ..)| ident == name).unwrap())
.map(|(ident, _, ty)| quote!(#ident: #ty));
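// Hash the route's source file path and start position so that the exported
// helper macro below gets a name that is unique to this route definition.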
let mut hasher = DefaultHasher::new();
let route_span = route.function.span();
route_span.source_file().path().hash(&mut hasher);
let line_column = route_span.start();
line_column.line.hash(&mut hasher);
line_column.column.hash(&mut hasher);
let mut generated_macro_name = route.function.sig.ident.prepend(URI_MACRO_PREFIX);
generated_macro_name.set_span(Span::call_site().into());
let inner_generated_macro_name = generated_macro_name.append(&hasher.finish().to_string());
let route_uri = route.attribute.path.origin.0.to_string();
quote! {
#[doc(hidden)]
#[macro_export]
macro_rules! #inner_generated_macro_name {
($($token:tt)*) => {{
extern crate std;
extern crate rocket;
rocket::rocket_internal_uri!(#route_uri, (#(#dynamic_args),*), $($token)*)
}};
}
#[doc(hidden)]
pub use #inner_generated_macro_name as #generated_macro_name;
}
}
fn generate_respond_expr(route: &Route) -> TokenStream2 {
let ret_span = match route.function.sig.output {
syn::ReturnType::Default => route.function.sig.ident.span(),
syn::ReturnType::Type(_, ref ty) => ty.span().into()
};
define_vars_and_mods!(req);
define_vars_and_mods!(ret_span => handler);
let user_handler_fn_name = &route.function.sig.ident;
let parameter_names = route.inputs.iter()
.map(|(_, rocket_ident, _)| rocket_ident);
quote_spanned! { ret_span =>
let ___responder = #user_handler_fn_name(#(#parameter_names),*);
#handler::Outcome::from(#req, ___responder)
}
}
fn codegen_route(route: Route) -> Result<TokenStream> {
// Generate the declarations for path, data, and request guard parameters.
let mut data_stmt = None;
let mut req_guard_definitions = vec![];
let mut parameter_definitions = vec![];
for (ident, rocket_ident, ty) in &route.inputs {
let fn_segment: Segment = ident.into();
match route.segments.get(&fn_segment) {
Some(seg) if seg.source == Source::Path => {
parameter_definitions.push(param_expr(seg, rocket_ident, &ty));
}
Some(seg) if seg.source == Source::Data => {
// the data statement needs to come last, so record it specially
data_stmt = Some(data_expr(rocket_ident, &ty));
}
Some(_) => continue, // handle query parameters later
None => {
req_guard_definitions.push(request_guard_expr(rocket_ident, &ty));
}
};
}
// Generate the declarations for query parameters.
if let Some(exprs) = query_exprs(&route) {
parameter_definitions.push(exprs);
}
// Gather everything we need.
define_vars_and_mods!(req, data, handler, Request, Data, StaticRouteInfo);
let (vis, user_handler_fn) = (&route.function.vis, &route.function);
let user_handler_fn_name = &user_handler_fn.sig.ident;
let generated_fn_name = user_handler_fn_name.prepend(ROUTE_FN_PREFIX);
let generated_struct_name = user_handler_fn_name.prepend(ROUTE_STRUCT_PREFIX);
let generated_internal_uri_macro = generate_internal_uri_macro(&route);
let generated_respond_expr = generate_respond_expr(&route);
let method = route.attribute.method;
let path = route.attribute.path.origin.0.to_string();
let rank = Optional(route.attribute.rank);
let format = Optional(route.attribute.format);
Ok(quote! {
#user_handler_fn
/// Rocket code generated wrapping route function.
#vis fn #generated_fn_name<'_b>(
#req: &'_b #Request,
#data: #Data
) -> #handler::Outcome<'_b> {
#(#req_guard_definitions)*
#(#parameter_definitions)*
#data_stmt
#generated_respond_expr
}
/// Rocket code generated wrapping URI macro.
#generated_internal_uri_macro
/// Rocket code generated static route info.
#[allow(non_upper_case_globals)]
#vis static #generated_struct_name: #StaticRouteInfo =
#StaticRouteInfo {
name: stringify!(#user_handler_fn_name),
method: #method,
path: #path,
handler: #generated_fn_name,
format: #format,
rank: #rank,
};
}.into())
}
fn complete_route(args: TokenStream2, input: TokenStream) -> Result<TokenStream> {
let function: syn::ItemFn = syn::parse(input).map_err(syn_to_diag)
.map_err(|diag| diag.help("`#[route]` can only be used on functions"))?;
let full_attr = quote!(#[route(#args)]);
let attrs = Attribute::parse_outer.parse2(full_attr).map_err(syn_to_diag)?;
let attribute = match RouteAttribute::from_attrs("route", &attrs) {
Some(result) => result?,
None => return Err(Span::call_site().error("internal error: bad attribute"))
};
codegen_route(parse_route(attribute, function)?)
}
fn incomplete_route(
method: crate::http::Method,
args: TokenStream2,
input: TokenStream
) -> Result<TokenStream> {
let method_str = method.to_string().to_lowercase();
// FIXME(proc_macro): there should be a way to get this `Span`.
let method_span = StringLit::new(format!("#[{}]", method), Span::call_site())
.subspan(2..2 + method_str.len());
let method_ident = syn::Ident::new(&method_str, method_span.into());
let function: syn::ItemFn = syn::parse(input).map_err(syn_to_diag)
.map_err(|d| d.help(format!("#[{}] can only be used on functions", method_str)))?;
let full_attr = quote!(#[#method_ident(#args)]);
let attrs = Attribute::parse_outer.parse2(full_attr).map_err(syn_to_diag)?;
let method_attribute = match MethodRouteAttribute::from_attrs(&method_str, &attrs) {
Some(result) => result?,
None => return Err(Span::call_site().error("internal error: bad attribute"))
};
let attribute = RouteAttribute {
method: SpanWrapped {
full_span: method_span, span: method_span, value: Method(method)
},
path: method_attribute. | };
quote! { | random_line_split |
route.rs | };
use crate::syn_ext::{syn_to_diag, IdentExt};
use self::syn::{Attribute, parse::Parser};
use crate::http_codegen::{Method, MediaType, RoutePath, DataSegment, Optional};
use crate::attribute::segments::{Source, Kind, Segment};
use crate::{ROUTE_FN_PREFIX, ROUTE_STRUCT_PREFIX, URI_MACRO_PREFIX, ROCKET_PARAM_PREFIX};
/// The raw, parsed `#[route]` attribute.
#[derive(Debug, FromMeta)]
struct | {
#[meta(naked)]
method: SpanWrapped<Method>,
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// The raw, parsed `#[method]` (e.g, `get`, `put`, `post`, etc.) attribute.
#[derive(Debug, FromMeta)]
struct MethodRouteAttribute {
#[meta(naked)]
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// This structure represents the parsed `route` attribute and associated items.
#[derive(Debug)]
struct Route {
/// The parsed `#[route]` attribute and its arguments.
attribute: RouteAttribute,
/// The function that was decorated with the `route` attribute.
function: syn::ItemFn,
/// The non-static parameters declared in the route segments.
segments: IndexSet<Segment>,
/// The parsed inputs to the user's function. The first ident is the ident
/// as the user wrote it, while the second ident is the identifier that
/// should be used during code generation, the `rocket_ident`.
inputs: Vec<(syn::Ident, syn::Ident, syn::Type)>,
}
fn parse_route(attr: RouteAttribute, function: syn::ItemFn) -> Result<Route> {
// Gather diagnostics as we proceed.
let mut diags = Diagnostics::new();
// Emit a warning if a `data` param was supplied for non-payload methods.
if let Some(ref data) = attr.data {
if !attr.method.0.supports_payload() {
let msg = format!("'{}' does not typically support payloads", attr.method.0);
data.full_span.warning("`data` used with non-payload-supporting method")
.span_note(attr.method.span, msg)
.emit()
}
}
// Collect all of the dynamic segments in an `IndexSet`, checking for dups.
let mut segments: IndexSet<Segment> = IndexSet::new();
fn dup_check<I>(set: &mut IndexSet<Segment>, iter: I, diags: &mut Diagnostics)
where I: Iterator<Item = Segment>
{
for segment in iter.filter(|s| s.kind != Kind::Static) {
let span = segment.span;
if let Some(previous) = set.replace(segment) {
diags.push(span.error(format!("duplicate parameter: `{}`", previous.name))
.span_note(previous.span, "previous parameter with the same name here"))
}
}
}
dup_check(&mut segments, attr.path.path.iter().cloned(), &mut diags);
attr.path.query.as_ref().map(|q| dup_check(&mut segments, q.iter().cloned(), &mut diags));
dup_check(&mut segments, attr.data.clone().map(|s| s.value.0).into_iter(), &mut diags);
// Check the validity of function arguments.
let mut inputs = vec![];
let mut fn_segments: IndexSet<Segment> = IndexSet::new();
for input in &function.sig.inputs {
let help = "all handler arguments must be of the form: `ident: Type`";
let span = input.span();
let (ident, ty) = match input {
syn::FnArg::Typed(arg) => match *arg.pat {
syn::Pat::Ident(ref pat) => (&pat.ident, &arg.ty),
syn::Pat::Wild(_) => {
diags.push(span.error("handler arguments cannot be ignored").help(help));
continue;
}
_ => {
diags.push(span.error("invalid use of pattern").help(help));
continue;
}
}
// Other cases shouldn't happen since we parsed an `ItemFn`.
_ => {
diags.push(span.error("invalid handler argument").help(help));
continue;
}
};
let rocket_ident = ident.prepend(ROCKET_PARAM_PREFIX);
inputs.push((ident.clone(), rocket_ident, ty.with_stripped_lifetimes()));
fn_segments.insert(ident.into());
}
// Check that all of the declared parameters are function inputs.
let span = match function.sig.inputs.is_empty() {
false => function.sig.inputs.span(),
true => function.span()
};
for missing in segments.difference(&fn_segments) {
diags.push(missing.span.error("unused dynamic parameter")
.span_note(span, format!("expected argument named `{}` here", missing.name)))
}
diags.head_err_or(Route { attribute: attr, function, inputs, segments })
}
fn param_expr(seg: &Segment, ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, error, log, request, _None, _Some, _Ok, _Err, Outcome);
let i = seg.index.expect("dynamic parameters must be indexed");
let span = ident.span().unstable().join(ty.span()).unwrap().into();
let name = ident.to_string();
// All dynamic parameters should be found if this function is being called;
// that's the point of statically checking the URI parameters.
let internal_error = quote!({
#log::error("Internal invariant error: expected dynamic parameter not found.");
#log::error("Please report this error to the Rocket issue tracker.");
#Outcome::Forward(#data)
});
// Returned when a dynamic parameter fails to parse.
let parse_error = quote!({
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, #error));
#Outcome::Forward(#data)
});
let expr = match seg.kind {
Kind::Single => quote_spanned! { span =>
match #req.raw_segment_str(#i) {
#_Some(__s) => match <#ty as #request::FromParam>::from_param(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Multi => quote_spanned! { span =>
match #req.raw_segments(#i) {
#_Some(__s) => match <#ty as #request::FromSegments>::from_segments(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Static => return quote!()
};
quote! {
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = #expr;
}
}
fn data_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, FromData, Outcome, Transform);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
let __transform = <#ty as #FromData>::transform(#req, #data);
#[allow(unreachable_patterns, unreachable_code)]
let __outcome = match __transform {
#Transform::Owned(#Outcome::Success(__v)) => {
#Transform::Owned(#Outcome::Success(__v))
},
#Transform::Borrowed(#Outcome::Success(ref __v)) => {
#Transform::Borrowed(#Outcome::Success(::std::borrow::Borrow::borrow(__v)))
},
#Transform::Borrowed(__o) => #Transform::Borrowed(__o.map(|_| {
unreachable!("Borrowed(Success(..)) case handled in previous block")
})),
#Transform::Owned(__o) => #Transform::Owned(__o),
};
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #FromData>::from_data(#req, __outcome) {
#Outcome::Success(__d) => __d,
#Outcome::Forward(__d) => return #Outcome::Forward(__d),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn query_exprs(route: &Route) -> Option<TokenStream2> {
define_vars_and_mods!(_None, _Some, _Ok, _Err, _Option);
define_vars_and_mods!(data, trail, log, request, req, Outcome, SmallVec, Query);
let query_segments = route.attribute.path.query.as_ref()?;
let (mut decls, mut matchers, mut builders) = (vec![], vec![], vec![]);
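// For each query segment we build up three pieces of generated code: a local
// declaration, a match arm that captures the raw form item, and a builder
// that finalizes (or defaults) the value once all items have been visited.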
for segment in query_segments {
let name = &segment.name;
let (ident, ty, span) = if segment.kind != Kind::Static {
let (ident, ty) = route.inputs.iter()
.find(|(ident, _, _)| ident == &segment.name)
.map(|(_, rocket_ident, ty)| (rocket_ident, ty))
.unwrap();
let span = ident.span().unstable().join(ty.span()).unwrap();
(Some(ident), Some(ty), span.into())
} else {
(None, None, segment.span.into())
};
let decl = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #ident: #_Option<#ty> = #_None;
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #trail = #SmallVec::<[#request::FormItem; 8]>::new();
},
Kind::Static => quote!()
};
let matcher = match segment.kind {
Kind::Single => quote_spanned! { span =>
(_, #name, __v) => {
#[allow(unreachable_patterns, unreachable_code)]
let __v = match <#ty as #request::FromFormValue>::from_form_value(__v) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
#ident = #_Some(__v);
}
},
Kind::Static => quote! {
(#name, _, _) => continue,
},
Kind::Multi => quote! {
_ => #trail.push(__i),
}
};
let builder = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match #ident.or_else(<#ty as #request::FromFormValue>::default) {
#_Some(__v) => __v,
#_None => {
#log::warn_(&format!("Missing required query parameter '{}'.", #name));
return #Outcome::Forward(#data);
}
};
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match <#ty as #request::FromQuery>::from_query(#Query(&#trail)) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
},
Kind::Static => quote!()
};
decls.push(decl);
matchers.push(matcher);
builders.push(builder);
}
matchers.push(quote!(_ => continue));
Some(quote! {
#(#decls)*
if let #_Some(__items) = #req.raw_query_items() {
for __i in __items {
match (__i.raw.as_str(), __i.key.as_str(), __i.value) {
#(
#[allow(unreachable_patterns, unreachable_code)]
#matchers
)*
}
}
}
#(
#[allow(unreachable_patterns, unreachable_code)]
#builders
)*
})
}
fn request_guard_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, request, Outcome);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #request::FromRequest>::from_request(#req) {
#Outcome::Success(__v) => __v,
#Outcome::Forward(_) => return #Outcome::Forward(#data),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn generate_internal_uri_macro(route: &Route) -> TokenStream2 {
let dynamic_args = route.segments.iter()
.filter(|seg| seg.source == Source::Path || seg.source == Source::Query)
.filter(|seg| seg.kind != Kind::Static)
.map(|seg| &seg.name)
.map(|name| route.inputs.iter().find(|(ident, ..)| ident == name).unwrap())
.map(|(ident, _, ty)| quote!(#ident: #ty));
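// Hash the route's source file path and start position so that the exported
// helper macro below gets a name that is unique to this route definition.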
let mut hasher = DefaultHasher::new();
let route_span = route.function.span();
route_span.source_file().path().hash(&mut hasher);
let line_column = route_span.start();
line_column.line.hash(&mut hasher);
line_column.column.hash(&mut hasher);
let mut generated_macro_name = route.function.sig.ident.prepend(URI_MACRO_PREFIX);
generated_macro_name.set_span(Span::call_site().into());
let inner_generated_macro_name = generated_macro_name.append(&hasher.finish().to_string());
let route_uri = route.attribute.path.origin.0.to_string();
quote! {
#[doc(hidden)]
#[macro_export]
macro_rules! #inner_generated_macro_name {
($($token:tt)*) => {{
extern crate std;
extern crate rocket;
rocket::rocket_internal_uri!(#route_uri, (#(#dynamic_args),*), $($token)*)
}};
}
#[doc(hidden)]
pub use #inner_generated_macro_name as #generated_macro_name;
}
}
fn generate_respond_expr(route: &Route) -> TokenStream2 {
let ret_span = match route.function.sig.output {
syn::ReturnType::Default => route.function.sig.ident.span(),
syn::ReturnType::Type(_, ref ty) => ty.span().into()
};
define_vars_and_mods!(req);
define_vars_and_mods!(ret_span => handler);
let user_handler_fn_name = &route.function.sig.ident;
let parameter_names = route.inputs.iter()
.map(|(_, rocket_ident, _)| rocket_ident);
quote_spanned! { ret_span =>
let ___responder = #user_handler_fn_name(#(#parameter_names),*);
#handler::Outcome::from(#req, ___responder)
}
}
fn codegen_route(route: Route) -> Result<TokenStream> {
// Generate the declarations for path, data, and request guard parameters.
let mut data_stmt = None;
let mut req_guard_definitions = vec![];
let mut parameter_definitions = vec![];
for (ident, rocket_ident, ty) in &route.inputs {
let fn_segment: Segment = ident.into();
match route.segments.get(&fn_segment) {
Some(seg) if seg.source == Source::Path => {
parameter_definitions.push(param_expr(seg, rocket_ident, &ty));
}
Some(seg) if seg.source == Source::Data => {
// the data statement needs to come last, so record it specially
data_stmt = Some(data_expr(rocket_ident, &ty));
}
Some(_) => continue, // handle query parameters later
None => {
req_guard_definitions.push(request_guard_expr(rocket_ident, &ty));
}
};
}
// Generate the declarations for query parameters.
if let Some(exprs) = query_exprs(&route) {
parameter_definitions.push(exprs);
}
// Gather everything we need.
define_vars_and_mods!(req, data, handler, Request, Data, StaticRouteInfo);
let (vis, user_handler_fn) = (&route.function.vis, &route.function);
let user_handler_fn_name = &user_handler_fn.sig.ident;
let generated_fn_name = user_handler_fn_name.prepend(ROUTE_FN_PREFIX);
let generated_struct_name = user_handler_fn_name.prepend(ROUTE_STRUCT_PREFIX);
let generated_internal_uri_macro = generate_internal_uri_macro(&route);
let generated_respond_expr = generate_respond_expr(&route);
let method = route.attribute.method;
let path = route.attribute.path.origin.0.to_string();
let rank = Optional(route.attribute.rank);
let format = Optional(route.attribute.format);
Ok(quote! {
#user_handler_fn
/// Rocket code generated wrapping route function.
#vis fn #generated_fn_name<'_b>(
#req: &'_b #Request,
#data: #Data
) -> #handler::Outcome<'_b> {
#(#req_guard_definitions)*
#(#parameter_definitions)*
#data_stmt
#generated_respond_expr
}
/// Rocket code generated wrapping URI macro.
#generated_internal_uri_macro
/// Rocket code generated static route info.
#[allow(non_upper_case_globals)]
#vis static #generated_struct_name: #StaticRouteInfo =
#StaticRouteInfo {
name: stringify!(#user_handler_fn_name),
method: #method,
path: #path,
handler: #generated_fn_name,
format: #format,
rank: #rank,
};
}.into())
}
fn complete_route(args: TokenStream2, input: TokenStream) -> Result<TokenStream> {
let function: syn::ItemFn = syn::parse(input).map_err(syn_to_diag)
.map_err(|diag| diag.help("`#[route]` can only be used on functions"))?;
let full_attr = quote!(#[route(#args)]);
let attrs = Attribute::parse_outer.parse2(full_attr).map_err(syn_to_diag)?;
let attribute = match RouteAttribute::from_attrs("route", &attrs) {
Some(result) => result?,
None => return Err(Span::call_site().error("internal error: bad attribute"))
};
codegen_route(parse_route(attribute, function)?)
}
fn incomplete_route(
method: crate::http::Method,
args: TokenStream2,
input: TokenStream
) -> Result<TokenStream> {
let method_str = method.to_string().to_lowercase();
// FIXME(proc_macro): there should be a way to get this `Span`.
let method_span = StringLit::new(format!("#[{}]", method), Span::call_site())
.subspan(2..2 + method_str.len());
let method_ident = syn::Ident::new(&method_str, method_span.into());
let function: syn::ItemFn = syn::parse(input).map_err(syn_to_diag)
.map_err(|d| d.help(format!("#[{}] can only be used on functions", method_str)))?;
let full_attr = quote!(#[#method_ident(#args)]);
let attrs = Attribute::parse_outer.parse2(full_attr).map_err(syn_to_diag)?;
let method_attribute = match MethodRouteAttribute::from_attrs(&method_str, &attrs) {
Some(result) => result?,
None => return Err(Span::call_site().error("internal error: bad attribute"))
};
let attribute = RouteAttribute {
method: SpanWrapped {
full_span: method_span, span: method_span, value: Method(method)
},
path: method_ | RouteAttribute | identifier_name |
route.rs | };
use crate::syn_ext::{syn_to_diag, IdentExt};
use self::syn::{Attribute, parse::Parser};
use crate::http_codegen::{Method, MediaType, RoutePath, DataSegment, Optional};
use crate::attribute::segments::{Source, Kind, Segment};
use crate::{ROUTE_FN_PREFIX, ROUTE_STRUCT_PREFIX, URI_MACRO_PREFIX, ROCKET_PARAM_PREFIX};
/// The raw, parsed `#[route]` attribute.
#[derive(Debug, FromMeta)]
struct RouteAttribute {
#[meta(naked)]
method: SpanWrapped<Method>,
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
/// The raw, parsed `#[method]` (e.g. `get`, `put`, `post`) attribute.
#[derive(Debug, FromMeta)]
struct MethodRouteAttribute {
#[meta(naked)]
path: RoutePath,
data: Option<SpanWrapped<DataSegment>>,
format: Option<MediaType>,
rank: Option<isize>,
}
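// Illustrative attribute forms these two structs correspond to (the paths and
// values below are made up, not taken from this crate):
//
//   #[route(GET, path = "/hello/<name>?<age>", format = "json", rank = 2)]  // -> RouteAttribute
//   #[post("/submit", data = "<form>", format = "json", rank = 2)]          // -> MethodRouteAttribute
//
// In the first form the method is the naked (positional) argument; in the second
// the method comes from the attribute name itself and the path is the naked argument.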
/// This structure represents the parsed `route` attribute and associated items.
#[derive(Debug)]
struct Route {
    /// The parsed `#[route]` attribute: the method, path, and optional data, format, and rank arguments.
attribute: RouteAttribute,
/// The function that was decorated with the `route` attribute.
function: syn::ItemFn,
/// The non-static parameters declared in the route segments.
segments: IndexSet<Segment>,
/// The parsed inputs to the user's function. The first ident is the ident
/// as the user wrote it, while the second ident is the identifier that
/// should be used during code generation, the `rocket_ident`.
inputs: Vec<(syn::Ident, syn::Ident, syn::Type)>,
}
fn parse_route(attr: RouteAttribute, function: syn::ItemFn) -> Result<Route> {
// Gather diagnostics as we proceed.
let mut diags = Diagnostics::new();
// Emit a warning if a `data` param was supplied for non-payload methods.
if let Some(ref data) = attr.data {
        if !attr.method.0.supports_payload() {
let msg = format!("'{}' does not typically support payloads", attr.method.0);
data.full_span.warning("`data` used with non-payload-supporting method")
.span_note(attr.method.span, msg)
.emit()
}
}
// Collect all of the dynamic segments in an `IndexSet`, checking for dups.
let mut segments: IndexSet<Segment> = IndexSet::new();
fn dup_check<I>(set: &mut IndexSet<Segment>, iter: I, diags: &mut Diagnostics)
where I: Iterator<Item = Segment>
|
dup_check(&mut segments, attr.path.path.iter().cloned(), &mut diags);
attr.path.query.as_ref().map(|q| dup_check(&mut segments, q.iter().cloned(), &mut diags));
dup_check(&mut segments, attr.data.clone().map(|s| s.value.0).into_iter(), &mut diags);
// Check the validity of function arguments.
let mut inputs = vec![];
let mut fn_segments: IndexSet<Segment> = IndexSet::new();
for input in &function.sig.inputs {
let help = "all handler arguments must be of the form: `ident: Type`";
let span = input.span();
let (ident, ty) = match input {
syn::FnArg::Typed(arg) => match *arg.pat {
syn::Pat::Ident(ref pat) => (&pat.ident, &arg.ty),
syn::Pat::Wild(_) => {
diags.push(span.error("handler arguments cannot be ignored").help(help));
continue;
}
_ => {
diags.push(span.error("invalid use of pattern").help(help));
continue;
}
}
// Other cases shouldn't happen since we parsed an `ItemFn`.
_ => {
diags.push(span.error("invalid handler argument").help(help));
continue;
}
};
let rocket_ident = ident.prepend(ROCKET_PARAM_PREFIX);
inputs.push((ident.clone(), rocket_ident, ty.with_stripped_lifetimes()));
fn_segments.insert(ident.into());
}
// Check that all of the declared parameters are function inputs.
let span = match function.sig.inputs.is_empty() {
false => function.sig.inputs.span(),
true => function.span()
};
for missing in segments.difference(&fn_segments) {
diags.push(missing.span.error("unused dynamic parameter")
.span_note(span, format!("expected argument named `{}` here", missing.name)))
}
diags.head_err_or(Route { attribute: attr, function, inputs, segments })
}
fn param_expr(seg: &Segment, ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, error, log, request, _None, _Some, _Ok, _Err, Outcome);
let i = seg.index.expect("dynamic parameters must be indexed");
let span = ident.span().unstable().join(ty.span()).unwrap().into();
let name = ident.to_string();
    // All dynamic parameters should be found if this function is being called;
// that's the point of statically checking the URI parameters.
let internal_error = quote!({
#log::error("Internal invariant error: expected dynamic parameter not found.");
#log::error("Please report this error to the Rocket issue tracker.");
#Outcome::Forward(#data)
});
// Returned when a dynamic parameter fails to parse.
let parse_error = quote!({
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, #error));
#Outcome::Forward(#data)
});
let expr = match seg.kind {
Kind::Single => quote_spanned! { span =>
match #req.raw_segment_str(#i) {
#_Some(__s) => match <#ty as #request::FromParam>::from_param(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Multi => quote_spanned! { span =>
match #req.raw_segments(#i) {
#_Some(__s) => match <#ty as #request::FromSegments>::from_segments(__s) {
#_Ok(__v) => __v,
#_Err(#error) => return #parse_error,
},
#_None => return #internal_error
}
},
Kind::Static => return quote!()
};
quote! {
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = #expr;
}
}
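// Rough sketch of what the expansion above produces for a single dynamic segment
// such as `<name>` typed as `String` (identifiers and the index are illustrative;
// the real code uses the prefixed `rocket_ident` and fully qualified paths):
//
//   let __rocket_name: String = match __req.raw_segment_str(0) {
//       Some(__s) => match <String as FromParam>::from_param(__s) {
//           Ok(__v) => __v,
//           Err(__e) => return Outcome::Forward(__data), // logs the parse failure, then forwards
//       },
//       None => return Outcome::Forward(__data), // internal invariant error
//   };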
fn data_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, FromData, Outcome, Transform);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
let __transform = <#ty as #FromData>::transform(#req, #data);
#[allow(unreachable_patterns, unreachable_code)]
let __outcome = match __transform {
#Transform::Owned(#Outcome::Success(__v)) => {
#Transform::Owned(#Outcome::Success(__v))
},
#Transform::Borrowed(#Outcome::Success(ref __v)) => {
#Transform::Borrowed(#Outcome::Success(::std::borrow::Borrow::borrow(__v)))
},
#Transform::Borrowed(__o) => #Transform::Borrowed(__o.map(|_| {
unreachable!("Borrowed(Success(..)) case handled in previous block")
})),
#Transform::Owned(__o) => #Transform::Owned(__o),
};
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #FromData>::from_data(#req, __outcome) {
#Outcome::Success(__d) => __d,
#Outcome::Forward(__d) => return #Outcome::Forward(__d),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
fn query_exprs(route: &Route) -> Option<TokenStream2> {
define_vars_and_mods!(_None, _Some, _Ok, _Err, _Option);
define_vars_and_mods!(data, trail, log, request, req, Outcome, SmallVec, Query);
let query_segments = route.attribute.path.query.as_ref()?;
let (mut decls, mut matchers, mut builders) = (vec![], vec![], vec![]);
for segment in query_segments {
let name = &segment.name;
        let (ident, ty, span) = if segment.kind != Kind::Static {
let (ident, ty) = route.inputs.iter()
.find(|(ident, _, _)| ident == &segment.name)
.map(|(_, rocket_ident, ty)| (rocket_ident, ty))
.unwrap();
let span = ident.span().unstable().join(ty.span()).unwrap();
(Some(ident), Some(ty), span.into())
} else {
(None, None, segment.span.into())
};
let decl = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #ident: #_Option<#ty> = #_None;
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let mut #trail = #SmallVec::<[#request::FormItem; 8]>::new();
},
Kind::Static => quote!()
};
let matcher = match segment.kind {
Kind::Single => quote_spanned! { span =>
(_, #name, __v) => {
#[allow(unreachable_patterns, unreachable_code)]
let __v = match <#ty as #request::FromFormValue>::from_form_value(__v) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
#ident = #_Some(__v);
}
},
Kind::Static => quote! {
(#name, _, _) => continue,
},
Kind::Multi => quote! {
_ => #trail.push(__i),
}
};
let builder = match segment.kind {
Kind::Single => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match #ident.or_else(<#ty as #request::FromFormValue>::default) {
#_Some(__v) => __v,
#_None => {
#log::warn_(&format!("Missing required query parameter '{}'.", #name));
return #Outcome::Forward(#data);
}
};
},
Kind::Multi => quote_spanned! { span =>
#[allow(non_snake_case)]
let #ident = match <#ty as #request::FromQuery>::from_query(#Query(&#trail)) {
#_Ok(__v) => __v,
#_Err(__e) => {
#log::warn_(&format!("Failed to parse '{}': {:?}", #name, __e));
return #Outcome::Forward(#data);
}
};
},
Kind::Static => quote!()
};
decls.push(decl);
matchers.push(matcher);
builders.push(builder);
}
matchers.push(quote!(_ => continue));
Some(quote! {
#(#decls)*
if let #_Some(__items) = #req.raw_query_items() {
for __i in __items {
match (__i.raw.as_str(), __i.key.as_str(), __i.value) {
#(
#[allow(unreachable_patterns, unreachable_code)]
#matchers
)*
}
}
}
#(
#[allow(unreachable_patterns, unreachable_code)]
#builders
)*
})
}
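// For a query such as `?<age>&<rest..>` (names illustrative), the generated code
// declares `age` as an `Option<_>`, fills it from the matching form item via
// `FromFormValue`, pushes unmatched items into the `SmallVec` trail handed to
// `FromQuery` for `rest`, and finally either applies `FromFormValue::default()`
// for a missing single parameter or forwards the request.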
fn request_guard_expr(ident: &syn::Ident, ty: &syn::Type) -> TokenStream2 {
define_vars_and_mods!(req, data, request, Outcome);
let span = ident.span().unstable().join(ty.span()).unwrap().into();
quote_spanned! { span =>
#[allow(non_snake_case, unreachable_patterns, unreachable_code)]
let #ident: #ty = match <#ty as #request::FromRequest>::from_request(#req) {
#Outcome::Success(__v) => __v,
#Outcome::Forward(_) => return #Outcome::Forward(#data),
#Outcome::Failure((__c, _)) => return #Outcome::Failure(__c),
};
}
}
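// i.e. any handler argument that is neither a declared path/query segment nor the
// `data` parameter is treated as a request guard: the generated `match` above calls
// `FromRequest::from_request(req)` and forwards or fails the request when the guard
// does not succeed.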
fn generate_internal_uri_macro(route: &Route) -> TokenStream2 {
let dynamic_args = route.segments.iter()
.filter(|seg| seg.source == Source::Path || seg.source == Source::Query)
        .filter(|seg| seg.kind != Kind::Static)
.map(|seg| &seg.name)
.map(|name| route.inputs.iter().find(|(ident,..)| ident == name).unwrap())
.map(|(ident, _, ty)| quote!(#ident: #ty));
let mut hasher = DefaultHasher::new();
let route_span = route.function.span();
route_span.source_file().path().hash(&mut hasher);
let line_column = route_span.start();
line_column.line.hash(&mut hasher);
line_column.column.hash(&mut hasher);
let mut generated_macro_name = route.function.sig.ident.prepend(URI_MACRO_PREFIX);
generated_macro_name.set_span(Span::call_site().into());
let inner_generated_macro_name = generated_macro_name.append(&hasher.finish().to_string());
let route_uri = route.attribute.path.origin.0.to_string();
quote! {
#[doc(hidden)]
#[macro_export]
macro_rules! #inner_generated_macro_name {
($($token:tt)*) => {{
extern crate std;
extern crate rocket;
rocket::rocket_internal_uri!(#route_uri, (#(#dynamic_args),*), $($token)*)
}};
}
#[doc(hidden)]
pub use #inner_generated_macro_name as #generated_macro_name;
}
}
fn generate_respond_expr(route: &Route) -> TokenStream2 {
let ret_span = match route.function.sig.output {
syn::ReturnType::Default => route.function.sig.ident.span(),
syn::ReturnType::Type(_, ref ty) => ty.span().into()
};
define_vars_and_mods!(req);
define_vars_and_mods!(ret_span => handler);
let user_handler_fn_name = &route.function.sig.ident;
let parameter_names = route.inputs.iter()
.map(|(_, rocket_ident, _)| rocket_ident);
quote_spanned! { ret_span =>
let ___responder = #user_handler_fn_name(#(#parameter_names),*);
#handler::Outcome::from(#req, ___responder)
}
}
fn codegen_route(route: Route) -> Result<TokenStream> {
// Generate the declarations for path, data, and request guard parameters.
let mut data_stmt = None;
let mut req_guard_definitions = vec![];
let mut parameter_definitions = vec![];
for (ident, rocket_ident, ty) in &route.inputs {
let fn_segment: Segment = ident.into();
match route.segments.get(&fn_segment) {
Some(seg) if seg.source == Source::Path => {
parameter_definitions.push(param_expr(seg, rocket_ident, &ty));
}
Some(seg) if seg.source == Source::Data => {
// the data statement needs to come last, so record it specially
data_stmt = Some(data_expr(rocket_ident, &ty));
}
Some(_) => continue, // handle query parameters later
None => {
req_guard_definitions.push(request_guard_expr(rocket_ident, &ty));
}
};
}
// Generate the declarations for query parameters.
if let Some(exprs) = query_exprs(&route) {
parameter_definitions.push(exprs);
}
// Gather everything we need.
define_vars_and_mods!(req, data, handler, Request, Data, StaticRouteInfo);
let (vis, user_handler_fn) = (&route.function.vis, &route.function);
let user_handler_fn_name = &user_handler_fn.sig.ident;
let generated_fn_name = user_handler_fn_name.prepend(ROUTE_FN_PREFIX);
let generated_struct_name = user_handler_fn_name.prepend(ROUTE_STRUCT_PREFIX);
let generated_internal_uri_macro = generate_internal_uri_macro(&route);
let generated_respond_expr = generate_respond_expr(&route);
let method = route.attribute.method;
let path = route.attribute.path.origin.0.to_string();
let rank = Optional(route.attribute.rank);
let format = Optional(route.attribute.format);
Ok(quote! {
#user_handler_fn
/// Rocket code generated wrapping route function.
#vis fn #generated_fn_name<'_b>(
#req: &'_b #Request,
#data: #Data
) -> #handler::Outcome<'_b> {
#(#req_guard_definitions)*
#(#parameter_definitions)*
#data_stmt
#generated_respond_expr
}
/// Rocket code generated wrapping URI macro.
#generated_internal_uri_macro
/// Rocket code generated static route info.
#[allow(non_upper_case_globals)]
#vis static #generated_struct_name: #StaticRouteInfo =
#StaticRouteInfo {
name: stringify!(#user_handler_fn_name),
method: #method,
path: #path,
handler: #generated_fn_name,
format: #format,
rank: #rank,
};
}.into())
}
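// Putting the pieces together: for a route as simple as
//
//   #[get("/")]
//   fn index() -> &'static str { "Hello, world!" }
//
// (illustrative user code), the expansion keeps `index` as written and adds a
// generated wrapper function (name prefixed with `ROUTE_FN_PREFIX`), the hidden
// `uri!`-support macro, and a `StaticRouteInfo` static (name prefixed with
// `ROUTE_STRUCT_PREFIX`) describing the method, path, rank and format.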
fn complete_route(args: TokenStream2, input: TokenStream) -> Result<TokenStream> {
let function: syn::ItemFn = syn::parse(input).map_err(syn_to_diag)
.map_err(|diag| diag.help("`#[route]` can only be used on functions"))?;
let full_attr = quote!(#[route(#args)]);
let attrs = Attribute::parse_outer.parse2(full_attr).map_err(syn_to_diag)?;
let attribute = match RouteAttribute::from_attrs("route", &attrs) {
Some(result) => result?,
None => return Err(Span::call_site().error("internal error: bad attribute"))
};
codegen_route(parse_route(attribute, function)?)
}
fn incomplete_route(
method: crate::http::Method,
args: TokenStream2,
input: TokenStream
) -> Result<TokenStream> {
let method_str = method.to_string().to_lowercase();
// FIXME(proc_macro): there should be a way to get this `Span`.
let method_span = StringLit::new(format!("#[{}]", method), Span::call_site())
.subspan(2..2 + method_str.len());
let method_ident = syn::Ident::new(&method_str, method_span.into());
let function: syn::ItemFn = syn::parse(input).map_err(syn_to_diag)
.map_err(|d| d.help(format!("#[{}] can only be used on functions", method_str)))?;
let full_attr = quote!(#[#method_ident(#args)]);
let attrs = Attribute::parse_outer.parse2(full_attr).map_err(syn_to_diag)?;
let method_attribute = match MethodRouteAttribute::from_attrs(&method_str, &attrs) {
Some(result) => result?,
None => return Err(Span::call_site().error("internal error: bad attribute"))
};
let attribute = RouteAttribute {
method: SpanWrapped {
full_span: method_span, span: method_span, value: Method(method)
},
path: method_ | {
for segment in iter.filter(|s| s.kind != Kind::Static) {
let span = segment.span;
if let Some(previous) = set.replace(segment) {
diags.push(span.error(format!("duplicate parameter: `{}`", previous.name))
.span_note(previous.span, "previous parameter with the same name here"))
}
}
} | identifier_body |
sendmail.rs | c_char) ->!;
//char FAST_FUNC *parse_url(char *url, char **user, char **pass);
#[no_mangle]
fn launch_helper(argv: *mut *const libc::c_char);
#[no_mangle]
fn get_cred_or_die(fd: libc::c_int);
#[no_mangle]
fn send_mail_command(fmt: *const libc::c_char, param: *const libc::c_char) -> *mut libc::c_char;
#[no_mangle]
fn printbuf_base64(buf: *const libc::c_char, len: libc::c_uint);
#[no_mangle]
fn printstr_base64(buf: *const libc::c_char);
}
use crate::librb::size_t;
use libc::pid_t;
use libc::uid_t;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct hostent {
pub h_name: *mut libc::c_char,
pub h_aliases: *mut *mut libc::c_char,
pub h_addrtype: libc::c_int,
pub h_length: libc::c_int,
pub h_addr_list: *mut *mut libc::c_char,
}
use crate::libbb::llist::llist_t;
use libc::FILE;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct globals {
pub helper_pid: pid_t,
pub timeout: libc::c_uint,
pub verbose: libc::c_uint,
pub opts: libc::c_uint,
pub user: *mut libc::c_char,
pub pass: *mut libc::c_char,
pub fp0: *mut FILE,
pub opt_charset: *mut libc::c_char,
}
pub type C2RustUnnamed = libc::c_uint;
pub const HDR_BCC: C2RustUnnamed = 2;
pub const HDR_TOCC: C2RustUnnamed = 1;
pub const HDR_OTHER: C2RustUnnamed = 0;
pub type C2RustUnnamed_0 = libc::c_uint;
//--- standard options
// read message for recipients, append them to those on cmdline
pub const OPT_t: C2RustUnnamed_0 = 1;
// sender address
pub const OPT_f: C2RustUnnamed_0 = 2;
// various options. -oi IMPLIED! others are IGNORED!
pub const OPT_o: C2RustUnnamed_0 = 4;
// IMPLIED!
pub const OPT_i: C2RustUnnamed_0 = 8;
//--- BB specific options
// network timeout
pub const OPT_w: C2RustUnnamed_0 = 16;
// use external connection helper
pub const OPT_H: C2RustUnnamed_0 = 32;
// specify connection string
pub const OPT_S: C2RustUnnamed_0 = 64;
// authentication tokens
pub const OPT_a: C2RustUnnamed_0 = 128;
// verbosity
pub const OPT_v: C2RustUnnamed_0 = 256;
//--- for -amMETHOD
// AUTH PLAIN
pub const OPT_am_plain: C2RustUnnamed_0 = 512;
#[inline(always)]
unsafe extern "C" fn bb_ascii_isalnum(mut a: libc::c_uchar) -> libc::c_int {
let mut b: libc::c_uchar = (a as libc::c_int - '0' as i32) as libc::c_uchar;
if b as libc::c_int <= 9i32 {
return (b as libc::c_int <= 9i32) as libc::c_int;
}
b = ((a as libc::c_int | 0x20i32) - 'a' as i32) as libc::c_uchar;
return (b as libc::c_int <= 'z' as i32 - 'a' as i32) as libc::c_int;
}
#[inline(always)]
unsafe extern "C" fn not_const_pp(mut p: *const libc::c_void) -> *mut libc::c_void {
return p as *mut libc::c_void;
}
unsafe extern "C" fn send_r_n(mut s: *const libc::c_char) {
    if (*ptr_to_globals).verbose != 0 {
bb_error_msg(b"send:\'%s\'\x00" as *const u8 as *const libc::c_char, s);
}
printf(b"%s\r\n\x00" as *const u8 as *const libc::c_char, s);
}
unsafe extern "C" fn smtp_checkp(
mut fmt: *const libc::c_char,
mut param: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int | );
}
        if strlen(answer) <= 3i32 as libc::c_ulong || '-' as i32 != *answer.offset(3) as libc::c_int {
break;
}
free(answer as *mut libc::c_void);
}
    if !answer.is_null() {
        let mut n: libc::c_int = atoi(answer);
        if (*ptr_to_globals).timeout != 0 {
alarm(0i32 as libc::c_uint);
}
free(answer as *mut libc::c_void);
if -1i32 == code || n == code {
free(msg as *mut libc::c_void);
return n;
}
}
bb_error_msg_and_die(b"%s failed\x00" as *const u8 as *const libc::c_char, msg);
}
unsafe extern "C" fn smtp_check(
mut fmt: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
return smtp_checkp(fmt, 0 as *const libc::c_char, code);
}
// strip argument of bad chars
unsafe extern "C" fn sane_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
trim(str);
s = str;
    while *s != 0 {
/* Standard allows these chars in username without quoting:
* /!#$%&'*+-=?^_`{|}~
* and allows dot (.) with some restrictions.
* I chose to only allow a saner subset.
* I propose to expand it only on user's request.
*/
if bb_ascii_isalnum(*s as libc::c_uchar) == 0
&& strchr(
b"=+_-.@\x00" as *const u8 as *const libc::c_char,
*s as libc::c_int,
)
.is_null()
{
bb_error_msg(
b"bad address \'%s\'\x00" as *const u8 as *const libc::c_char,
str,
);
/* returning "": */
*str.offset(0) = '\u{0}' as i32 as libc::c_char;
return str;
}
s = s.offset(1)
}
return str;
}
// check for an address inside angle brackets, if not found fall back to normal
unsafe extern "C" fn angle_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut e: *mut libc::c_char = 0 as *mut libc::c_char;
e = trim(str);
    if e != str && {
e = e.offset(-1);
(*e as libc::c_int) == '>' as i32
} {
s = strrchr(str, '<' as i32);
        if !s.is_null() {
*e = '\u{0}' as i32 as libc::c_char;
str = s.offset(1)
}
}
return sane_address(str);
}
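// Illustrative behaviour of the two helpers above (addresses are made up):
//   sane_address("  bob.smith@example.com \n")         -> "bob.smith@example.com"
//   sane_address("bob smith@example.com")              -> "" (space rejected, error printed)
//   angle_address("\"Smith, Bob\" <bob@example.com> ")  -> "bob@example.com"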
unsafe extern "C" fn rcptto(mut s: *const libc::c_char) {
if *s == 0 {
return;
}
// N.B. we don't die if recipient is rejected, for the other recipients may be accepted
if 250i32
!= smtp_checkp(
b"RCPT TO:<%s>\x00" as *const u8 as *const libc::c_char,
s,
-1i32,
)
{
bb_error_msg(
b"Bad recipient: <%s>\x00" as *const u8 as *const libc::c_char,
s,
);
};
}
// send to a list of comma separated addresses
unsafe extern "C" fn rcptto_list(mut list: *const libc::c_char) {
let mut free_me: *mut libc::c_char = xstrdup(list);
let mut str: *mut libc::c_char = free_me;
let mut s: *mut libc::c_char = free_me;
let mut prev: libc::c_char = 0i32 as libc::c_char;
let mut in_quote: libc::c_int = 0i32;
    while *s != 0 {
let fresh0 = s;
s = s.offset(1);
let mut ch: libc::c_char = *fresh0;
if ch as libc::c_int == '\"' as i32 && prev as libc::c_int!= '\\' as i32 {
in_quote = (in_quote == 0) as libc::c_int
} else if in_quote == 0 && ch as libc::c_int == ',' as i32 {
*s.offset(-1i32 as isize) = '\u{0}' as i32 as libc::c_char;
rcptto(angle_address(str));
str = s
}
prev = ch
}
    if prev as libc::c_int != ',' as i32 {
rcptto(angle_address(str));
}
free(free_me as *mut libc::c_void);
}
#[no_mangle]
pub unsafe extern "C" fn sendmail_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut current_block: u64;
let mut opt_connect: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt_from: *mut libc::c_char = 0 as *mut libc::c_char;
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut list: *mut llist_t = 0 as *mut llist_t;
let mut host: *mut libc::c_char = sane_address(safe_gethostname());
let mut nheaders: libc::c_uint = 0i32 as libc::c_uint;
let mut code: libc::c_int = 0;
let mut last_hdr: C2RustUnnamed = HDR_OTHER;
let mut check_hdr: libc::c_int = 0;
let mut has_to: libc::c_int = 0i32;
// init global variables
let ref mut fresh1 = *(not_const_pp(&ptr_to_globals as *const *mut globals as *const libc::c_void)
as *mut *mut globals);
*fresh1 = xzalloc(::std::mem::size_of::<globals>() as libc::c_ulong) as *mut globals;
asm!("" : : : "memory" : "volatile");
(*ptr_to_globals).opt_charset =
b"us-ascii\x00" as *const u8 as *const libc::c_char as *mut libc::c_char;
// default HOST[:PORT] is $SMTPHOST, or localhost
opt_connect = getenv(b"SMTPHOST\x00" as *const u8 as *const libc::c_char);
if opt_connect.is_null() {
opt_connect = b"127.0.0.1\x00" as *const u8 as *const libc::c_char as *mut libc::c_char
}
// save initial stdin since body is piped!
xdup2(0i32, 3i32);
(*ptr_to_globals).fp0 = xfdopen_for_read(3i32);
// parse options
// N.B. since -H and -S are mutually exclusive they do not interfere in opt_connect
// -a is for ssmtp (http://downloads.openwrt.org/people/nico/man/man8/ssmtp.8.html) compatibility,
// it is still under development.
(*ptr_to_globals).opts = getopt32(
argv,
b"^tf:o:iw:+H:S:a:*:v\x00vv:H--S:S--H\x00" as *const u8 as *const libc::c_char,
&mut opt_from as *mut *mut libc::c_char,
0 as *mut libc::c_void,
&mut (*ptr_to_globals).timeout as *mut libc::c_uint,
&mut opt_connect as *mut *mut libc::c_char,
&mut opt_connect as *mut *mut libc::c_char,
&mut list as *mut *mut llist_t,
&mut (*ptr_to_globals).verbose as *mut libc::c_uint,
);
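    // Illustrative invocations (hosts, addresses and credentials are made up):
    //   sendmail -S mail.example.com:25 -f alice@example.com \
    //            -amLOGIN -aualice -apsecret bob@example.com < message.txt
    //   sendmail -t -H "openssl s_client -quiet -connect mail.example.com:465" < message.txt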
//argc -= optind;
argv = argv.offset(optind as isize);
// process -a[upm]<token> options
    if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint != 0 && list.is_null() {
bb_show_usage();
}
    while !list.is_null() {
let mut a: *mut libc::c_char = llist_pop(&mut list) as *mut libc::c_char;
if 'u' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).user = xstrdup(a.offset(1))
}
if 'p' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).pass = xstrdup(a.offset(1))
}
        if 'm' as i32 == *a.offset(0) as libc::c_int {
if *a.offset(1) as libc::c_int | 0x20i32 == 'p' as i32 {
// PLAIN
(*ptr_to_globals).opts |= OPT_am_plain as libc::c_int as libc::c_uint
} else if *a.offset(1) as libc::c_int | 0x20i32 == 'l' as i32 {
} else {
bb_error_msg_and_die(
b"unsupported AUTH method %s\x00" as *const u8 as *const libc::c_char,
a.offset(1),
);
}
}
}
// N.B. list == NULL here
//bb_error_msg("OPT[%x] AU[%s], AP[%s], AM[%s], ARGV[%s]", opts, au, ap, am, *argv);
// connect to server
// connection helper ordered? ->
    if (*ptr_to_globals).opts & OPT_H as libc::c_int as libc::c_uint != 0 {
let mut delay: *const libc::c_char = 0 as *const libc::c_char;
let mut args: [*const libc::c_char; 4] = [
b"sh\x00" as *const u8 as *const libc::c_char,
b"-c\x00" as *const u8 as *const libc::c_char,
opt_connect as *const libc::c_char,
0 as *const libc::c_char,
];
// plug it in
launch_helper(args.as_mut_ptr());
// Now:
// our stdout will go to helper's stdin,
// helper's stdout will be available on our stdin.
// Wait for initial server message.
// If helper (such as openssl) invokes STARTTLS, the initial 220
// is swallowed by helper (and not repeated after TLS is initiated).
// We will send NOOP cmd to server and check the response.
// We should get 220+250 on plain connection, 250 on STARTTLSed session.
//
// The problem here is some servers delay initial 220 message,
// and consider client to be a spammer if it starts sending cmds
// before 220 reached it. The code below is unsafe in this regard:
// in non-STARTTLSed case, we potentially send NOOP before 220
// is sent by server.
//
// If $SMTP_ANTISPAM_DELAY is set, we pause before sending NOOP.
//
delay = getenv(b"SMTP_ANTISPAM_DELAY\x00" as *const u8 as *const libc::c_char);
        if !delay.is_null() {
sleep(atoi(delay) as libc::c_uint);
}
code = smtp_check(b"NOOP\x00" as *const u8 as *const libc::c_char, -1i32);
if code == 220i32 {
// we got 220 - this is not STARTTLSed connection,
// eat 250 response to our NOOP
smtp_check(0 as *const libc::c_char, 250i32);
        } else if code != 250i32 {
bb_simple_error_msg_and_die(b"SMTP init failed\x00" as *const u8 as *const libc::c_char);
}
} else {
// vanilla connection
let mut fd: libc::c_int = 0;
fd = create_and_connect_stream_or_die(opt_connect, 25i32);
// and make ourselves a simple IO filter
xmove_fd(fd, 0i32);
xdup2(0i32, 1i32);
// Wait for initial server 220 message
smtp_check(0 as *const libc::c_char, 220i32);
}
// we should start with modern EHLO
if 250i32
!= smtp_checkp(
b"EHLO %s\x00" as *const u8 as *const libc::c_char,
host,
-1i32,
)
{
smtp_checkp(
b"HELO %s\x00" as *const u8 as *const libc::c_char,
host,
250i32,
);
}
// perform authentication
    if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint != 0 {
// read credentials unless they are given via -a[up] options
if (*ptr_to_globals).user.is_null() || (*ptr_to_globals).pass.is_null() {
get_cred_or_die(4i32);
}
        if (*ptr_to_globals).opts & OPT_am_plain as libc::c_int as libc::c_uint != 0 {
// C: AUTH PLAIN
// S: 334
// C: base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
//Note: a shorter format is allowed:
// C: AUTH PLAIN base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH PLAIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
let mut user_len: libc::c_uint = strlen((*ptr_to_globals).user) as libc::c_uint;
let mut pass_len: libc::c_uint = strlen((*ptr_to_globals).pass) as libc::c_uint;
let mut sz: libc::c_uint = (1i32 as libc::c_uint)
.wrapping_add(user_len)
.wrapping_add(1i32 as libc::c_uint)
.wrapping_add(pass_len);
let vla = sz.wrapping_add(1i32 as libc::c_uint) as usize;
let mut plain_auth: Vec<libc::c_char> = ::std::vec::from_elem(0, vla);
// the format is:
// "authorization identity<NUL>username<NUL>password"
// authorization identity is empty.
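            // e.g. user "joe" and password "secret" (illustrative values) yield the
            // buffer "\0joe\0secret", printed below as base64 "AGpvZQBzZWNyZXQ=".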
*plain_auth.as_mut_ptr().offset(0) = '\u{0}' as i32 as libc::c_char;
strcpy(
stpcpy(plain_auth.as_mut_ptr().offset(1), (*ptr_to_globals).user).offset(1),
(*ptr_to_globals).pass,
);
printbuf_base64(plain_auth.as_mut_ptr(), sz);
} else {
// C: AUTH LOGIN
// S: 334 VXNlcm5hbWU6
// ^^^^^^^^^^^^ server says "Username:"
// C: base64encoded(user)
// S: 334 UGFzc3dvcmQ6
// ^^^^^^^^^^^^ server says "Password:"
// C: base64encoded(pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH LOGIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
printstr_base64((*ptr_to_globals).user);
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 334i32);
printstr_base64((*ptr_to_globals).pass);
}
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 235i32);
}
// set sender
    // N.B. we have here a very loosely defined algorithm
// since sendmail historically offers no means to specify secrets on cmdline.
// 1) server can require no authentication ->
// we must just provide a (possibly fake) reply address.
// 2) server can require AUTH ->
// we must provide valid username and password along with a (possibly fake) reply address.
// For the sake of security username and password are to be read either from console or from a secured file.
// Since reading from console may defeat usability, the solution is either to read from a predefined
// file descriptor (e.g. 4), or again from a secured file.
// got no sender address? use auth name, then UID username as a last resort
if opt_from.is_null() {
opt_from = xasprintf(
b"%s@%s\x00" as *const u8 as *const libc::c_char,
            if !(*ptr_to_globals).user.is_null() {
(*ptr_to_globals).user
} else {
xuid2uname(getuid())
},
(*xgethostbyname(host)).h_name,
)
}
free(host as *mut libc::c_void);
smtp_checkp(
b"MAIL FROM:<%s>\x00" as *const u8 as *const libc::c_char,
opt_from,
250i32,
);
// process message
// read recipients from message and add them to those given on cmdline.
// this means we scan stdin for To:, Cc:, Bcc: lines until an empty line
// and then use the rest of stdin as message body
code = 0i32; // set "analyze headers" mode
's_369: loop {
s = xmalloc_fgetline((*ptr_to_globals).fp0);
        if !s.is_null() {
current_block = 16252544171633782868;
} else {
current_block = 228501038991332163;
}
loop {
match current_block {
228501038991332163 =>
// odd case: we didn't stop "analyze headers" mode -> message body is empty. Reenter the loop
// N.B. after reenter code will be > 0
{
                    if !(code == 0) {
                        // finalize the message
                        smtp_check(b".\x00" as *const u8 as *const libc::c_char, 250i32);
                        break 's_369;
}
}
_ =>
// put message lines doubling leading dots
{
                    if code != 0 {
// escape leading dots
// N.B. this feature is implied even if no -i (-oi) switch given
// N.B. we need to escape the leading dot regardless of
                        // whether or not it is the only character on the line
if '.' as i32 == *s.offset(0) as libc::c_int {
/*&& '\0' == s[1] */
bb_putchar('.' as i32);
}
// dump read line
send_r_n(s);
free(s as *mut libc::c_void);
                        continue 's_369;
} else {
// analyze headers
// To: or Cc: headers add recipients
check_hdr = (0i32
== strncasecmp(
b"To:\x00" as *const u8 as *const libc::c_char,
s,
3i32 as libc::c_ulong,
)) as libc::c_int;
has_to |= check_hdr;
                        if (*ptr_to_globals).opts & OPT_t as libc::c_int as libc::c_uint != 0 {
                            if check_hdr != 0
|| 0i32
== strncasecmp(
(b"Bcc:\x00" as *const u8 as *const libc::c_char).offset(1),
s,
3i32 as libc::c_ulong,
)
{
rcptto_list(s.offset(3));
last_hdr = HDR_TOCC;
current_block = 2265380199544777579;
break;
} else if 0i32
== strncasecmp(
| {
let mut answer: *mut libc::c_char = 0 as *mut libc::c_char;
let mut msg: *mut libc::c_char = send_mail_command(fmt, param);
loop
// read stdin
// if the string has a form NNN- -- read next string. E.g. EHLO response
// parse first bytes to a number
// if code = -1 then just return this number
// if code != -1 then checks whether the number equals the code
// if not equal -> die saying msg
{
answer = xmalloc_fgetline(stdin);
if answer.is_null() {
break;
}
if (*ptr_to_globals).verbose != 0 {
bb_error_msg(
b"recv:\'%.*s\'\x00" as *const u8 as *const libc::c_char,
strchrnul(answer, '\r' as i32).wrapping_offset_from(answer) as libc::c_long as libc::c_int,
answer, | identifier_body |
sendmail.rs | c_char) ->!;
//char FAST_FUNC *parse_url(char *url, char **user, char **pass);
#[no_mangle]
fn launch_helper(argv: *mut *const libc::c_char);
#[no_mangle]
fn get_cred_or_die(fd: libc::c_int);
#[no_mangle]
fn send_mail_command(fmt: *const libc::c_char, param: *const libc::c_char) -> *mut libc::c_char;
#[no_mangle]
fn printbuf_base64(buf: *const libc::c_char, len: libc::c_uint);
#[no_mangle]
fn printstr_base64(buf: *const libc::c_char);
}
use crate::librb::size_t;
use libc::pid_t;
use libc::uid_t;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct hostent {
pub h_name: *mut libc::c_char,
pub h_aliases: *mut *mut libc::c_char,
pub h_addrtype: libc::c_int,
pub h_length: libc::c_int,
pub h_addr_list: *mut *mut libc::c_char,
}
use crate::libbb::llist::llist_t;
use libc::FILE;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct globals {
pub helper_pid: pid_t,
pub timeout: libc::c_uint,
pub verbose: libc::c_uint,
pub opts: libc::c_uint,
pub user: *mut libc::c_char,
pub pass: *mut libc::c_char,
pub fp0: *mut FILE,
pub opt_charset: *mut libc::c_char,
}
pub type C2RustUnnamed = libc::c_uint;
pub const HDR_BCC: C2RustUnnamed = 2;
pub const HDR_TOCC: C2RustUnnamed = 1;
pub const HDR_OTHER: C2RustUnnamed = 0;
pub type C2RustUnnamed_0 = libc::c_uint;
//--- standard options
// read message for recipients, append them to those on cmdline
pub const OPT_t: C2RustUnnamed_0 = 1;
// sender address
pub const OPT_f: C2RustUnnamed_0 = 2;
// various options. -oi IMPLIED! others are IGNORED!
pub const OPT_o: C2RustUnnamed_0 = 4;
// IMPLIED!
pub const OPT_i: C2RustUnnamed_0 = 8;
//--- BB specific options
// network timeout
pub const OPT_w: C2RustUnnamed_0 = 16;
// use external connection helper
pub const OPT_H: C2RustUnnamed_0 = 32;
// specify connection string
pub const OPT_S: C2RustUnnamed_0 = 64;
// authentication tokens
pub const OPT_a: C2RustUnnamed_0 = 128;
// verbosity
pub const OPT_v: C2RustUnnamed_0 = 256;
//--- for -amMETHOD
// AUTH PLAIN
pub const OPT_am_plain: C2RustUnnamed_0 = 512;
#[inline(always)]
unsafe extern "C" fn bb_ascii_isalnum(mut a: libc::c_uchar) -> libc::c_int {
let mut b: libc::c_uchar = (a as libc::c_int - '0' as i32) as libc::c_uchar;
if b as libc::c_int <= 9i32 {
return (b as libc::c_int <= 9i32) as libc::c_int;
}
b = ((a as libc::c_int | 0x20i32) - 'a' as i32) as libc::c_uchar;
return (b as libc::c_int <= 'z' as i32 - 'a' as i32) as libc::c_int;
}
#[inline(always)]
unsafe extern "C" fn not_const_pp(mut p: *const libc::c_void) -> *mut libc::c_void {
return p as *mut libc::c_void;
}
unsafe extern "C" fn send_r_n(mut s: *const libc::c_char) {
    if (*ptr_to_globals).verbose != 0 {
bb_error_msg(b"send:\'%s\'\x00" as *const u8 as *const libc::c_char, s);
}
printf(b"%s\r\n\x00" as *const u8 as *const libc::c_char, s);
}
unsafe extern "C" fn smtp_checkp(
mut fmt: *const libc::c_char,
mut param: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
let mut answer: *mut libc::c_char = 0 as *mut libc::c_char;
let mut msg: *mut libc::c_char = send_mail_command(fmt, param);
loop
// read stdin
// if the string has a form NNN- -- read next string. E.g. EHLO response
// parse first bytes to a number
// if code = -1 then just return this number
// if code!= -1 then checks whether the number equals the code
// if not equal -> die saying msg
{
answer = xmalloc_fgetline(stdin);
if answer.is_null() {
break;
}
        if (*ptr_to_globals).verbose != 0 {
bb_error_msg(
b"recv:\'%.*s\'\x00" as *const u8 as *const libc::c_char,
strchrnul(answer, '\r' as i32).wrapping_offset_from(answer) as libc::c_long as libc::c_int,
answer,
);
}
        if strlen(answer) <= 3i32 as libc::c_ulong || '-' as i32 != *answer.offset(3) as libc::c_int {
break;
}
free(answer as *mut libc::c_void);
}
    if !answer.is_null() {
        let mut n: libc::c_int = atoi(answer);
        if (*ptr_to_globals).timeout != 0 {
alarm(0i32 as libc::c_uint);
}
free(answer as *mut libc::c_void);
if -1i32 == code || n == code {
free(msg as *mut libc::c_void);
return n;
}
}
bb_error_msg_and_die(b"%s failed\x00" as *const u8 as *const libc::c_char, msg);
}
unsafe extern "C" fn smtp_check(
mut fmt: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
return smtp_checkp(fmt, 0 as *const libc::c_char, code);
}
// strip argument of bad chars
unsafe extern "C" fn sane_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
trim(str);
s = str;
    while *s != 0 {
/* Standard allows these chars in username without quoting:
* /!#$%&'*+-=?^_`{|}~
* and allows dot (.) with some restrictions.
* I chose to only allow a saner subset.
* I propose to expand it only on user's request.
*/
if bb_ascii_isalnum(*s as libc::c_uchar) == 0
&& strchr(
b"=+_-.@\x00" as *const u8 as *const libc::c_char,
*s as libc::c_int,
)
.is_null()
{
bb_error_msg(
b"bad address \'%s\'\x00" as *const u8 as *const libc::c_char,
str,
);
/* returning "": */
*str.offset(0) = '\u{0}' as i32 as libc::c_char;
return str;
}
s = s.offset(1)
}
return str;
}
// check for an address inside angle brackets, if not found fall back to normal
unsafe extern "C" fn angle_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut e: *mut libc::c_char = 0 as *mut libc::c_char;
e = trim(str);
    if e != str && {
e = e.offset(-1);
(*e as libc::c_int) == '>' as i32
} {
s = strrchr(str, '<' as i32);
        if !s.is_null() {
*e = '\u{0}' as i32 as libc::c_char;
str = s.offset(1)
}
}
return sane_address(str);
}
unsafe extern "C" fn rcptto(mut s: *const libc::c_char) {
if *s == 0 {
return;
}
// N.B. we don't die if recipient is rejected, for the other recipients may be accepted
if 250i32
!= smtp_checkp(
b"RCPT TO:<%s>\x00" as *const u8 as *const libc::c_char,
s,
-1i32,
)
{
bb_error_msg(
b"Bad recipient: <%s>\x00" as *const u8 as *const libc::c_char,
s,
);
};
}
// send to a list of comma separated addresses
unsafe extern "C" fn rcptto_list(mut list: *const libc::c_char) {
let mut free_me: *mut libc::c_char = xstrdup(list);
let mut str: *mut libc::c_char = free_me;
let mut s: *mut libc::c_char = free_me;
let mut prev: libc::c_char = 0i32 as libc::c_char;
let mut in_quote: libc::c_int = 0i32;
    while *s != 0 {
let fresh0 = s;
s = s.offset(1);
let mut ch: libc::c_char = *fresh0;
if ch as libc::c_int == '\"' as i32 && prev as libc::c_int!= '\\' as i32 {
in_quote = (in_quote == 0) as libc::c_int
} else if in_quote == 0 && ch as libc::c_int == ',' as i32 {
*s.offset(-1i32 as isize) = '\u{0}' as i32 as libc::c_char;
rcptto(angle_address(str));
str = s
}
prev = ch
}
    if prev as libc::c_int != ',' as i32 {
rcptto(angle_address(str));
}
free(free_me as *mut libc::c_void);
}
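// Illustrative behaviour (addresses are made up): for the list
//   "Smith, Bob" <bob@example.com>, alice@example.com
// the comma inside the quoted display name does not split, so rcptto() is called
// twice: once with "bob@example.com" and once with "alice@example.com".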
#[no_mangle]
pub unsafe extern "C" fn sendmail_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut current_block: u64;
let mut opt_connect: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt_from: *mut libc::c_char = 0 as *mut libc::c_char;
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut list: *mut llist_t = 0 as *mut llist_t;
let mut host: *mut libc::c_char = sane_address(safe_gethostname());
let mut nheaders: libc::c_uint = 0i32 as libc::c_uint;
let mut code: libc::c_int = 0;
let mut last_hdr: C2RustUnnamed = HDR_OTHER;
let mut check_hdr: libc::c_int = 0;
let mut has_to: libc::c_int = 0i32;
// init global variables
let ref mut fresh1 = *(not_const_pp(&ptr_to_globals as *const *mut globals as *const libc::c_void)
as *mut *mut globals);
*fresh1 = xzalloc(::std::mem::size_of::<globals>() as libc::c_ulong) as *mut globals;
asm!("" : : : "memory" : "volatile");
(*ptr_to_globals).opt_charset =
b"us-ascii\x00" as *const u8 as *const libc::c_char as *mut libc::c_char;
// default HOST[:PORT] is $SMTPHOST, or localhost
opt_connect = getenv(b"SMTPHOST\x00" as *const u8 as *const libc::c_char);
if opt_connect.is_null() {
opt_connect = b"127.0.0.1\x00" as *const u8 as *const libc::c_char as *mut libc::c_char
}
// save initial stdin since body is piped!
xdup2(0i32, 3i32);
(*ptr_to_globals).fp0 = xfdopen_for_read(3i32);
// parse options
// N.B. since -H and -S are mutually exclusive they do not interfere in opt_connect
// -a is for ssmtp (http://downloads.openwrt.org/people/nico/man/man8/ssmtp.8.html) compatibility,
// it is still under development.
(*ptr_to_globals).opts = getopt32(
argv,
b"^tf:o:iw:+H:S:a:*:v\x00vv:H--S:S--H\x00" as *const u8 as *const libc::c_char,
&mut opt_from as *mut *mut libc::c_char,
0 as *mut libc::c_void,
&mut (*ptr_to_globals).timeout as *mut libc::c_uint,
&mut opt_connect as *mut *mut libc::c_char,
&mut opt_connect as *mut *mut libc::c_char,
&mut list as *mut *mut llist_t,
&mut (*ptr_to_globals).verbose as *mut libc::c_uint,
);
//argc -= optind;
argv = argv.offset(optind as isize);
// process -a[upm]<token> options
    if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint != 0 && list.is_null() {
bb_show_usage();
}
    while !list.is_null() {
let mut a: *mut libc::c_char = llist_pop(&mut list) as *mut libc::c_char;
if 'u' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).user = xstrdup(a.offset(1))
}
if 'p' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).pass = xstrdup(a.offset(1))
}
        if 'm' as i32 == *a.offset(0) as libc::c_int {
if *a.offset(1) as libc::c_int | 0x20i32 == 'p' as i32 {
// PLAIN
(*ptr_to_globals).opts |= OPT_am_plain as libc::c_int as libc::c_uint
} else if *a.offset(1) as libc::c_int | 0x20i32 == 'l' as i32 {
} else {
bb_error_msg_and_die(
b"unsupported AUTH method %s\x00" as *const u8 as *const libc::c_char,
a.offset(1),
);
}
}
}
// N.B. list == NULL here
//bb_error_msg("OPT[%x] AU[%s], AP[%s], AM[%s], ARGV[%s]", opts, au, ap, am, *argv);
// connect to server
// connection helper ordered? ->
    if (*ptr_to_globals).opts & OPT_H as libc::c_int as libc::c_uint != 0 {
let mut delay: *const libc::c_char = 0 as *const libc::c_char;
let mut args: [*const libc::c_char; 4] = [
b"sh\x00" as *const u8 as *const libc::c_char,
b"-c\x00" as *const u8 as *const libc::c_char,
opt_connect as *const libc::c_char,
0 as *const libc::c_char,
];
// plug it in
launch_helper(args.as_mut_ptr());
// Now:
// our stdout will go to helper's stdin,
// helper's stdout will be available on our stdin.
// Wait for initial server message.
// If helper (such as openssl) invokes STARTTLS, the initial 220
// is swallowed by helper (and not repeated after TLS is initiated).
// We will send NOOP cmd to server and check the response.
// We should get 220+250 on plain connection, 250 on STARTTLSed session.
//
// The problem here is some servers delay initial 220 message,
// and consider client to be a spammer if it starts sending cmds
// before 220 reached it. The code below is unsafe in this regard:
// in non-STARTTLSed case, we potentially send NOOP before 220
// is sent by server.
//
// If $SMTP_ANTISPAM_DELAY is set, we pause before sending NOOP.
//
delay = getenv(b"SMTP_ANTISPAM_DELAY\x00" as *const u8 as *const libc::c_char);
        if !delay.is_null() {
sleep(atoi(delay) as libc::c_uint);
}
code = smtp_check(b"NOOP\x00" as *const u8 as *const libc::c_char, -1i32);
if code == 220i32 {
// we got 220 - this is not STARTTLSed connection,
// eat 250 response to our NOOP
smtp_check(0 as *const libc::c_char, 250i32);
        } else if code != 250i32 {
bb_simple_error_msg_and_die(b"SMTP init failed\x00" as *const u8 as *const libc::c_char);
}
} else {
// vanilla connection
let mut fd: libc::c_int = 0;
fd = create_and_connect_stream_or_die(opt_connect, 25i32);
// and make ourselves a simple IO filter
xmove_fd(fd, 0i32);
xdup2(0i32, 1i32);
// Wait for initial server 220 message
smtp_check(0 as *const libc::c_char, 220i32);
}
// we should start with modern EHLO
if 250i32
!= smtp_checkp(
b"EHLO %s\x00" as *const u8 as *const libc::c_char,
host,
-1i32,
)
{
smtp_checkp(
b"HELO %s\x00" as *const u8 as *const libc::c_char,
host,
250i32,
);
}
// perform authentication
    if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint != 0 {
// read credentials unless they are given via -a[up] options
if (*ptr_to_globals).user.is_null() || (*ptr_to_globals).pass.is_null() {
get_cred_or_die(4i32);
}
        if (*ptr_to_globals).opts & OPT_am_plain as libc::c_int as libc::c_uint != 0 {
// C: AUTH PLAIN
// S: 334
// C: base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
//Note: a shorter format is allowed:
// C: AUTH PLAIN base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH PLAIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
let mut user_len: libc::c_uint = strlen((*ptr_to_globals).user) as libc::c_uint;
let mut pass_len: libc::c_uint = strlen((*ptr_to_globals).pass) as libc::c_uint;
let mut sz: libc::c_uint = (1i32 as libc::c_uint)
.wrapping_add(user_len)
.wrapping_add(1i32 as libc::c_uint)
.wrapping_add(pass_len);
let vla = sz.wrapping_add(1i32 as libc::c_uint) as usize;
let mut plain_auth: Vec<libc::c_char> = ::std::vec::from_elem(0, vla);
// the format is:
// "authorization identity<NUL>username<NUL>password"
// authorization identity is empty.
*plain_auth.as_mut_ptr().offset(0) = '\u{0}' as i32 as libc::c_char;
strcpy(
stpcpy(plain_auth.as_mut_ptr().offset(1), (*ptr_to_globals).user).offset(1),
(*ptr_to_globals).pass,
);
printbuf_base64(plain_auth.as_mut_ptr(), sz);
} else {
// C: AUTH LOGIN
// S: 334 VXNlcm5hbWU6
// ^^^^^^^^^^^^ server says "Username:"
// C: base64encoded(user)
// S: 334 UGFzc3dvcmQ6
// ^^^^^^^^^^^^ server says "Password:"
// C: base64encoded(pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH LOGIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
printstr_base64((*ptr_to_globals).user);
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 334i32);
printstr_base64((*ptr_to_globals).pass);
}
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 235i32);
}
// set sender
    // N.B. we have here a very loosely defined algorithm
// since sendmail historically offers no means to specify secrets on cmdline.
// 1) server can require no authentication ->
// we must just provide a (possibly fake) reply address.
// 2) server can require AUTH ->
// we must provide valid username and password along with a (possibly fake) reply address.
// For the sake of security username and password are to be read either from console or from a secured file.
// Since reading from console may defeat usability, the solution is either to read from a predefined
// file descriptor (e.g. 4), or again from a secured file.
// got no sender address? use auth name, then UID username as a last resort
if opt_from.is_null() {
opt_from = xasprintf(
b"%s@%s\x00" as *const u8 as *const libc::c_char,
            if !(*ptr_to_globals).user.is_null() {
(*ptr_to_globals).user
} else {
xuid2uname(getuid())
},
(*xgethostbyname(host)).h_name,
)
}
free(host as *mut libc::c_void);
smtp_checkp(
b"MAIL FROM:<%s>\x00" as *const u8 as *const libc::c_char,
opt_from,
250i32,
);
// process message
// read recipients from message and add them to those given on cmdline.
// this means we scan stdin for To:, Cc:, Bcc: lines until an empty line
// and then use the rest of stdin as message body
code = 0i32; // set "analyze headers" mode
's_369: loop {
s = xmalloc_fgetline((*ptr_to_globals).fp0);
if!s.is_null() | else {
current_block = 228501038991332163;
}
loop {
match current_block {
228501038991332163 =>
// odd case: we didn't stop "analyze headers" mode -> message body is empty. Reenter the loop
// N.B. after reenter code will be > 0
{
                    if !(code == 0) {
                        // finalize the message
                        smtp_check(b".\x00" as *const u8 as *const libc::c_char, 250i32);
                        break 's_369;
}
}
_ =>
// put message lines doubling leading dots
{
                    if code != 0 {
// escape leading dots
// N.B. this feature is implied even if no -i (-oi) switch given
// N.B. we need to escape the leading dot regardless of
                        // whether or not it is the only character on the line
if '.' as i32 == *s.offset(0) as libc::c_int {
/*&& '\0' == s[1] */
bb_putchar('.' as i32);
}
// dump read line
send_r_n(s);
free(s as *mut libc::c_void);
                        continue 's_369;
} else {
// analyze headers
// To: or Cc: headers add recipients
check_hdr = (0i32
== strncasecmp(
b"To:\x00" as *const u8 as *const libc::c_char,
s,
3i32 as libc::c_ulong,
)) as libc::c_int;
has_to |= check_hdr;
                        if (*ptr_to_globals).opts & OPT_t as libc::c_int as libc::c_uint != 0 {
                            if check_hdr != 0
|| 0i32
== strncasecmp(
(b"Bcc:\x00" as *const u8 as *const libc::c_char).offset(1),
s,
3i32 as libc::c_ulong,
)
{
rcptto_list(s.offset(3));
last_hdr = HDR_TOCC;
current_block = 2265380199544777579;
break;
} else if 0i32
== strncasecmp(
| {
current_block = 16252544171633782868;
} | conditional_block |
sendmail.rs | c_char) ->!;
//char FAST_FUNC *parse_url(char *url, char **user, char **pass);
#[no_mangle]
fn launch_helper(argv: *mut *const libc::c_char);
#[no_mangle]
fn get_cred_or_die(fd: libc::c_int);
#[no_mangle]
fn send_mail_command(fmt: *const libc::c_char, param: *const libc::c_char) -> *mut libc::c_char;
#[no_mangle]
fn printbuf_base64(buf: *const libc::c_char, len: libc::c_uint);
#[no_mangle]
fn printstr_base64(buf: *const libc::c_char);
}
use crate::librb::size_t;
use libc::pid_t;
use libc::uid_t;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct hostent {
pub h_name: *mut libc::c_char,
pub h_aliases: *mut *mut libc::c_char,
pub h_addrtype: libc::c_int,
pub h_length: libc::c_int,
pub h_addr_list: *mut *mut libc::c_char,
}
use crate::libbb::llist::llist_t;
use libc::FILE;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct globals {
pub helper_pid: pid_t,
pub timeout: libc::c_uint,
pub verbose: libc::c_uint,
pub opts: libc::c_uint,
pub user: *mut libc::c_char,
pub pass: *mut libc::c_char,
pub fp0: *mut FILE,
pub opt_charset: *mut libc::c_char,
}
pub type C2RustUnnamed = libc::c_uint;
pub const HDR_BCC: C2RustUnnamed = 2;
pub const HDR_TOCC: C2RustUnnamed = 1;
pub const HDR_OTHER: C2RustUnnamed = 0;
pub type C2RustUnnamed_0 = libc::c_uint;
//--- standard options
// read message for recipients, append them to those on cmdline
pub const OPT_t: C2RustUnnamed_0 = 1;
// sender address
pub const OPT_f: C2RustUnnamed_0 = 2;
// various options. -oi IMPLIED! others are IGNORED!
pub const OPT_o: C2RustUnnamed_0 = 4;
// IMPLIED!
pub const OPT_i: C2RustUnnamed_0 = 8;
//--- BB specific options
// network timeout
pub const OPT_w: C2RustUnnamed_0 = 16;
// use external connection helper
pub const OPT_H: C2RustUnnamed_0 = 32;
// specify connection string
pub const OPT_S: C2RustUnnamed_0 = 64;
// authentication tokens
pub const OPT_a: C2RustUnnamed_0 = 128;
// verbosity
pub const OPT_v: C2RustUnnamed_0 = 256;
//--- for -amMETHOD
// AUTH PLAIN
pub const OPT_am_plain: C2RustUnnamed_0 = 512;
#[inline(always)]
unsafe extern "C" fn bb_ascii_isalnum(mut a: libc::c_uchar) -> libc::c_int {
let mut b: libc::c_uchar = (a as libc::c_int - '0' as i32) as libc::c_uchar;
if b as libc::c_int <= 9i32 {
return (b as libc::c_int <= 9i32) as libc::c_int;
}
b = ((a as libc::c_int | 0x20i32) - 'a' as i32) as libc::c_uchar;
return (b as libc::c_int <= 'z' as i32 - 'a' as i32) as libc::c_int;
}
#[inline(always)]
unsafe extern "C" fn not_const_pp(mut p: *const libc::c_void) -> *mut libc::c_void {
return p as *mut libc::c_void;
}
unsafe extern "C" fn send_r_n(mut s: *const libc::c_char) {
    if (*ptr_to_globals).verbose != 0 {
bb_error_msg(b"send:\'%s\'\x00" as *const u8 as *const libc::c_char, s);
}
printf(b"%s\r\n\x00" as *const u8 as *const libc::c_char, s);
}
unsafe extern "C" fn smtp_checkp(
mut fmt: *const libc::c_char,
mut param: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
let mut answer: *mut libc::c_char = 0 as *mut libc::c_char;
let mut msg: *mut libc::c_char = send_mail_command(fmt, param);
loop
// read stdin
// if the string has a form NNN- -- read next string. E.g. EHLO response
// parse first bytes to a number
// if code = -1 then just return this number
// if code!= -1 then checks whether the number equals the code
// if not equal -> die saying msg
{
answer = xmalloc_fgetline(stdin);
if answer.is_null() {
break;
}
        if (*ptr_to_globals).verbose != 0 {
bb_error_msg(
b"recv:\'%.*s\'\x00" as *const u8 as *const libc::c_char,
strchrnul(answer, '\r' as i32).wrapping_offset_from(answer) as libc::c_long as libc::c_int,
answer,
);
}
        if strlen(answer) <= 3i32 as libc::c_ulong || '-' as i32 != *answer.offset(3) as libc::c_int {
break;
}
free(answer as *mut libc::c_void);
}
    if !answer.is_null() {
        let mut n: libc::c_int = atoi(answer);
        if (*ptr_to_globals).timeout != 0 {
alarm(0i32 as libc::c_uint);
}
free(answer as *mut libc::c_void);
if -1i32 == code || n == code {
free(msg as *mut libc::c_void);
return n;
}
}
bb_error_msg_and_die(b"%s failed\x00" as *const u8 as *const libc::c_char, msg);
}
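// Illustrative exchange (server strings are made up): after sending "EHLO host",
// a multi-line reply such as
//   250-mail.example.com
//   250-PIPELINING
//   250 8BITMIME
// is read line by line above; a '-' as the fourth character marks a continuation
// line, and the numeric code of the final line is either returned as-is
// (code == -1) or checked against the expected `code`.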
unsafe extern "C" fn smtp_check(
mut fmt: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
return smtp_checkp(fmt, 0 as *const libc::c_char, code);
}
// strip argument of bad chars
unsafe extern "C" fn sane_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
trim(str);
s = str;
    while *s != 0 {
/* Standard allows these chars in username without quoting:
* /!#$%&'*+-=?^_`{|}~
* and allows dot (.) with some restrictions.
* I chose to only allow a saner subset.
* I propose to expand it only on user's request.
*/
if bb_ascii_isalnum(*s as libc::c_uchar) == 0
&& strchr(
b"=+_-.@\x00" as *const u8 as *const libc::c_char,
*s as libc::c_int,
)
.is_null()
{
bb_error_msg(
b"bad address \'%s\'\x00" as *const u8 as *const libc::c_char,
str,
);
/* returning "": */
*str.offset(0) = '\u{0}' as i32 as libc::c_char;
return str;
}
s = s.offset(1)
}
return str;
}
// check for an address inside angle brackets, if not found fall back to normal
unsafe extern "C" fn angle_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut e: *mut libc::c_char = 0 as *mut libc::c_char;
e = trim(str);
    if e != str && {
e = e.offset(-1);
(*e as libc::c_int) == '>' as i32
} {
s = strrchr(str, '<' as i32);
        if !s.is_null() {
*e = '\u{0}' as i32 as libc::c_char;
str = s.offset(1)
}
}
return sane_address(str);
}
unsafe extern "C" fn rcptto(mut s: *const libc::c_char) {
if *s == 0 {
return;
}
// N.B. we don't die if recipient is rejected, for the other recipients may be accepted
if 250i32
!= smtp_checkp(
b"RCPT TO:<%s>\x00" as *const u8 as *const libc::c_char,
s,
-1i32,
)
{
bb_error_msg(
b"Bad recipient: <%s>\x00" as *const u8 as *const libc::c_char,
s,
);
};
}
// send to a list of comma separated addresses
unsafe extern "C" fn | (mut list: *const libc::c_char) {
let mut free_me: *mut libc::c_char = xstrdup(list);
let mut str: *mut libc::c_char = free_me;
let mut s: *mut libc::c_char = free_me;
let mut prev: libc::c_char = 0i32 as libc::c_char;
let mut in_quote: libc::c_int = 0i32;
    while *s != 0 {
let fresh0 = s;
s = s.offset(1);
let mut ch: libc::c_char = *fresh0;
if ch as libc::c_int == '\"' as i32 && prev as libc::c_int!= '\\' as i32 {
in_quote = (in_quote == 0) as libc::c_int
} else if in_quote == 0 && ch as libc::c_int == ',' as i32 {
*s.offset(-1i32 as isize) = '\u{0}' as i32 as libc::c_char;
rcptto(angle_address(str));
str = s
}
prev = ch
}
    if prev as libc::c_int != ',' as i32 {
rcptto(angle_address(str));
}
free(free_me as *mut libc::c_void);
}
#[no_mangle]
pub unsafe extern "C" fn sendmail_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut current_block: u64;
let mut opt_connect: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt_from: *mut libc::c_char = 0 as *mut libc::c_char;
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut list: *mut llist_t = 0 as *mut llist_t;
let mut host: *mut libc::c_char = sane_address(safe_gethostname());
let mut nheaders: libc::c_uint = 0i32 as libc::c_uint;
let mut code: libc::c_int = 0;
let mut last_hdr: C2RustUnnamed = HDR_OTHER;
let mut check_hdr: libc::c_int = 0;
let mut has_to: libc::c_int = 0i32;
// init global variables
let ref mut fresh1 = *(not_const_pp(&ptr_to_globals as *const *mut globals as *const libc::c_void)
as *mut *mut globals);
*fresh1 = xzalloc(::std::mem::size_of::<globals>() as libc::c_ulong) as *mut globals;
asm!("" : : : "memory" : "volatile");
(*ptr_to_globals).opt_charset =
b"us-ascii\x00" as *const u8 as *const libc::c_char as *mut libc::c_char;
// default HOST[:PORT] is $SMTPHOST, or localhost
opt_connect = getenv(b"SMTPHOST\x00" as *const u8 as *const libc::c_char);
if opt_connect.is_null() {
opt_connect = b"127.0.0.1\x00" as *const u8 as *const libc::c_char as *mut libc::c_char
}
// save initial stdin since body is piped!
xdup2(0i32, 3i32);
(*ptr_to_globals).fp0 = xfdopen_for_read(3i32);
// parse options
// N.B. since -H and -S are mutually exclusive they do not interfere in opt_connect
// -a is for ssmtp (http://downloads.openwrt.org/people/nico/man/man8/ssmtp.8.html) compatibility,
// it is still under development.
(*ptr_to_globals).opts = getopt32(
argv,
b"^tf:o:iw:+H:S:a:*:v\x00vv:H--S:S--H\x00" as *const u8 as *const libc::c_char,
&mut opt_from as *mut *mut libc::c_char,
0 as *mut libc::c_void,
&mut (*ptr_to_globals).timeout as *mut libc::c_uint,
&mut opt_connect as *mut *mut libc::c_char,
&mut opt_connect as *mut *mut libc::c_char,
&mut list as *mut *mut llist_t,
&mut (*ptr_to_globals).verbose as *mut libc::c_uint,
);
//argc -= optind;
argv = argv.offset(optind as isize);
// process -a[upm]<token> options
if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint!= 0 && list.is_null() {
bb_show_usage();
}
while!list.is_null() {
let mut a: *mut libc::c_char = llist_pop(&mut list) as *mut libc::c_char;
if 'u' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).user = xstrdup(a.offset(1))
}
if 'p' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).pass = xstrdup(a.offset(1))
}
if 'm' as i32 == *a.offset(0) as libc::c_int {
if *a.offset(1) as libc::c_int | 0x20i32 == 'p' as i32 {
// PLAIN
(*ptr_to_globals).opts |= OPT_am_plain as libc::c_int as libc::c_uint
} else if *a.offset(1) as libc::c_int | 0x20i32 == 'l' as i32 {
} else {
bb_error_msg_and_die(
b"unsupported AUTH method %s\x00" as *const u8 as *const libc::c_char,
a.offset(1),
);
}
}
}
// N.B. list == NULL here
//bb_error_msg("OPT[%x] AU[%s], AP[%s], AM[%s], ARGV[%s]", opts, au, ap, am, *argv);
// connect to server
// connection helper ordered? ->
if (*ptr_to_globals).opts & OPT_H as libc::c_int as libc::c_uint!= 0 {
let mut delay: *const libc::c_char = 0 as *const libc::c_char;
let mut args: [*const libc::c_char; 4] = [
b"sh\x00" as *const u8 as *const libc::c_char,
b"-c\x00" as *const u8 as *const libc::c_char,
opt_connect as *const libc::c_char,
0 as *const libc::c_char,
];
// plug it in
launch_helper(args.as_mut_ptr());
// Now:
// our stdout will go to helper's stdin,
// helper's stdout will be available on our stdin.
// Wait for initial server message.
// If helper (such as openssl) invokes STARTTLS, the initial 220
// is swallowed by helper (and not repeated after TLS is initiated).
// We will send NOOP cmd to server and check the response.
// We should get 220+250 on plain connection, 250 on STARTTLSed session.
//
// The problem here is some servers delay initial 220 message,
// and consider client to be a spammer if it starts sending cmds
// before 220 reached it. The code below is unsafe in this regard:
// in non-STARTTLSed case, we potentially send NOOP before 220
// is sent by server.
//
// If $SMTP_ANTISPAM_DELAY is set, we pause before sending NOOP.
//
delay = getenv(b"SMTP_ANTISPAM_DELAY\x00" as *const u8 as *const libc::c_char);
if!delay.is_null() {
sleep(atoi(delay) as libc::c_uint);
}
code = smtp_check(b"NOOP\x00" as *const u8 as *const libc::c_char, -1i32);
if code == 220i32 {
// we got 220 - this is not STARTTLSed connection,
// eat 250 response to our NOOP
smtp_check(0 as *const libc::c_char, 250i32);
} else if code!= 250i32 {
bb_simple_error_msg_and_die(b"SMTP init failed\x00" as *const u8 as *const libc::c_char);
}
} else {
// vanilla connection
let mut fd: libc::c_int = 0;
fd = create_and_connect_stream_or_die(opt_connect, 25i32);
// and make ourselves a simple IO filter
xmove_fd(fd, 0i32);
xdup2(0i32, 1i32);
// Wait for initial server 220 message
smtp_check(0 as *const libc::c_char, 220i32);
}
// we should start with modern EHLO
if 250i32
!= smtp_checkp(
b"EHLO %s\x00" as *const u8 as *const libc::c_char,
host,
-1i32,
)
{
smtp_checkp(
b"HELO %s\x00" as *const u8 as *const libc::c_char,
host,
250i32,
);
}
// perform authentication
if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint!= 0 {
// read credentials unless they are given via -a[up] options
if (*ptr_to_globals).user.is_null() || (*ptr_to_globals).pass.is_null() {
get_cred_or_die(4i32);
}
if (*ptr_to_globals).opts & OPT_am_plain as libc::c_int as libc::c_uint!= 0 {
// C: AUTH PLAIN
// S: 334
// C: base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
//Note: a shorter format is allowed:
// C: AUTH PLAIN base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH PLAIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
let mut user_len: libc::c_uint = strlen((*ptr_to_globals).user) as libc::c_uint;
let mut pass_len: libc::c_uint = strlen((*ptr_to_globals).pass) as libc::c_uint;
let mut sz: libc::c_uint = (1i32 as libc::c_uint)
.wrapping_add(user_len)
.wrapping_add(1i32 as libc::c_uint)
.wrapping_add(pass_len);
let vla = sz.wrapping_add(1i32 as libc::c_uint) as usize;
let mut plain_auth: Vec<libc::c_char> = ::std::vec::from_elem(0, vla);
// the format is:
// "authorization identity<NUL>username<NUL>password"
// authorization identity is empty.
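// Illustrative example (added, not in the original source): for user "joe" and
// password "secret" the buffer holds the bytes "\0joe\0secret", which
// printbuf_base64() emits as "AGpvZQBzZWNyZXQ=".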
*plain_auth.as_mut_ptr().offset(0) = '\u{0}' as i32 as libc::c_char;
strcpy(
stpcpy(plain_auth.as_mut_ptr().offset(1), (*ptr_to_globals).user).offset(1),
(*ptr_to_globals).pass,
);
printbuf_base64(plain_auth.as_mut_ptr(), sz);
} else {
// C: AUTH LOGIN
// S: 334 VXNlcm5hbWU6
// ^^^^^^^^^^^^ server says "Username:"
// C: base64encoded(user)
// S: 334 UGFzc3dvcmQ6
// ^^^^^^^^^^^^ server says "Password:"
// C: base64encoded(pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH LOGIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
printstr_base64((*ptr_to_globals).user);
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 334i32);
printstr_base64((*ptr_to_globals).pass);
}
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 235i32);
}
// set sender
// N.B. we have here a very loosely defined algorithm
// since sendmail historically offers no means to specify secrets on cmdline.
// 1) server can require no authentication ->
// we must just provide a (possibly fake) reply address.
// 2) server can require AUTH ->
// we must provide valid username and password along with a (possibly fake) reply address.
// For the sake of security username and password are to be read either from console or from a secured file.
// Since reading from console may defeat usability, the solution is either to read from a predefined
// file descriptor (e.g. 4), or again from a secured file.
// got no sender address? use auth name, then UID username as a last resort
if opt_from.is_null() {
opt_from = xasprintf(
b"%s@%s\x00" as *const u8 as *const libc::c_char,
if!(*ptr_to_globals).user.is_null() {
(*ptr_to_globals).user
} else {
xuid2uname(getuid())
},
(*xgethostbyname(host)).h_name,
)
}
free(host as *mut libc::c_void);
smtp_checkp(
b"MAIL FROM:<%s>\x00" as *const u8 as *const libc::c_char,
opt_from,
250i32,
);
// process message
// read recipients from message and add them to those given on cmdline.
// this means we scan stdin for To:, Cc:, Bcc: lines until an empty line
// and then use the rest of stdin as message body
code = 0i32; // set "analyze headers" mode
's_369: loop {
s = xmalloc_fgetline((*ptr_to_globals).fp0);
if!s.is_null() {
current_block = 16252544171633782868;
} else {
current_block = 228501038991332163;
}
loop {
match current_block {
228501038991332163 =>
// odd case: we didn't stop "analyze headers" mode -> message body is empty. Reenter the loop
// N.B. after reenter code will be > 0
{
if!(code == 0) {
// finalize the message
smtp_check(b".\x00" as *const u8 as *const libc::c_char, 250i32);
break 's_369;
}
}
_ =>
// put message lines doubling leading dots
{
if code!= 0 {
// escape leading dots
// N.B. this feature is implied even if no -i (-oi) switch given
// N.B. we need to escape the leading dot regardless of
// whether it is single or not character on the line
if '.' as i32 == *s.offset(0) as libc::c_int {
/*&& '\0' == s[1] */
bb_putchar('.' as i32);
}
// dump read line
send_r_n(s);
free(s as *mut libc::c_void);
continue 's_369;
} else {
// analyze headers
// To: or Cc: headers add recipients
check_hdr = (0i32
== strncasecmp(
b"To:\x00" as *const u8 as *const libc::c_char,
s,
3i32 as libc::c_ulong,
)) as libc::c_int;
has_to |= check_hdr;
if (*ptr_to_globals).opts & OPT_t as libc::c_int as libc::c_uint!= 0 {
if check_hdr!= 0
|| 0i32
== strncasecmp(
(b"Bcc:\x00" as *const u8 as *const libc::c_char).offset(1),
s,
3i32 as libc::c_ulong,
)
{
rcptto_list(s.offset(3));
last_hdr = HDR_TOCC;
current_block = 2265380199544777579;
break;
} else if 0i32
== strncasecmp(
| rcptto_list | identifier_name |
sendmail.rs | fmt: *const libc::c_char, param: *const libc::c_char) -> *mut libc::c_char;
#[no_mangle]
fn printbuf_base64(buf: *const libc::c_char, len: libc::c_uint);
#[no_mangle]
fn printstr_base64(buf: *const libc::c_char);
}
use crate::librb::size_t;
use libc::pid_t;
use libc::uid_t;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct hostent {
pub h_name: *mut libc::c_char,
pub h_aliases: *mut *mut libc::c_char,
pub h_addrtype: libc::c_int,
pub h_length: libc::c_int,
pub h_addr_list: *mut *mut libc::c_char,
}
use crate::libbb::llist::llist_t;
use libc::FILE;
#[derive(Copy, Clone)]
#[repr(C)]
pub struct globals {
pub helper_pid: pid_t,
pub timeout: libc::c_uint,
pub verbose: libc::c_uint,
pub opts: libc::c_uint,
pub user: *mut libc::c_char,
pub pass: *mut libc::c_char,
pub fp0: *mut FILE,
pub opt_charset: *mut libc::c_char,
}
pub type C2RustUnnamed = libc::c_uint;
pub const HDR_BCC: C2RustUnnamed = 2;
pub const HDR_TOCC: C2RustUnnamed = 1;
pub const HDR_OTHER: C2RustUnnamed = 0;
//--- standard options
pub const OPT_t: C2RustUnnamed_0 = 1;
// verbosity
//--- for -amMETHOD
pub const OPT_am_plain: C2RustUnnamed_0 = 512;
// specify connection string
pub const OPT_a: C2RustUnnamed_0 = 128;
// network timeout
pub const OPT_H: C2RustUnnamed_0 = 32;
pub type C2RustUnnamed_0 = libc::c_uint;
// AUTH PLAIN
// authentication tokens
pub const OPT_v: C2RustUnnamed_0 = 256;
// use external connection helper
pub const OPT_S: C2RustUnnamed_0 = 64;
// IMPLIED!
//--- BB specific options
pub const OPT_w: C2RustUnnamed_0 = 16;
// various options. -oi IMPLIED! others are IGNORED!
pub const OPT_i: C2RustUnnamed_0 = 8;
// sender address
pub const OPT_o: C2RustUnnamed_0 = 4;
// read message for recipients, append them to those on cmdline
pub const OPT_f: C2RustUnnamed_0 = 2;
#[inline(always)]
unsafe extern "C" fn bb_ascii_isalnum(mut a: libc::c_uchar) -> libc::c_int {
let mut b: libc::c_uchar = (a as libc::c_int - '0' as i32) as libc::c_uchar;
if b as libc::c_int <= 9i32 {
return (b as libc::c_int <= 9i32) as libc::c_int;
}
b = ((a as libc::c_int | 0x20i32) - 'a' as i32) as libc::c_uchar;
return (b as libc::c_int <= 'z' as i32 - 'a' as i32) as libc::c_int;
}
#[inline(always)]
unsafe extern "C" fn not_const_pp(mut p: *const libc::c_void) -> *mut libc::c_void {
return p as *mut libc::c_void;
}
unsafe extern "C" fn send_r_n(mut s: *const libc::c_char) {
if (*ptr_to_globals).verbose!= 0 {
bb_error_msg(b"send:\'%s\'\x00" as *const u8 as *const libc::c_char, s);
}
printf(b"%s\r\n\x00" as *const u8 as *const libc::c_char, s);
}
unsafe extern "C" fn smtp_checkp(
mut fmt: *const libc::c_char,
mut param: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
let mut answer: *mut libc::c_char = 0 as *mut libc::c_char;
let mut msg: *mut libc::c_char = send_mail_command(fmt, param);
loop
// read stdin
// if the string has a form NNN- -- read next string. E.g. EHLO response
// parse first bytes to a number
// if code = -1 then just return this number
// if code!= -1 then checks whether the number equals the code
// if not equal -> die saying msg
{
answer = xmalloc_fgetline(stdin);
if answer.is_null() {
break;
}
if (*ptr_to_globals).verbose!= 0 {
bb_error_msg(
b"recv:\'%.*s\'\x00" as *const u8 as *const libc::c_char,
strchrnul(answer, '\r' as i32).wrapping_offset_from(answer) as libc::c_long as libc::c_int,
answer,
);
}
if strlen(answer) <= 3i32 as libc::c_ulong || '-' as i32!= *answer.offset(3) as libc::c_int {
break;
}
free(answer as *mut libc::c_void);
}
if!answer.is_null() {
let mut n: libc::c_int = atoi(answer);
if (*ptr_to_globals).timeout!= 0 {
alarm(0i32 as libc::c_uint);
}
free(answer as *mut libc::c_void);
if -1i32 == code || n == code {
free(msg as *mut libc::c_void);
return n;
}
}
bb_error_msg_and_die(b"%s failed\x00" as *const u8 as *const libc::c_char, msg);
}
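// Illustrative note (added, not in the original source): smtp_checkp() treats any
// reply line whose fourth character is '-' as a continuation line, so a typical
// EHLO exchange is consumed like this:
//
// C: EHLO host
// S: 250-mail.example.com
// S: 250-PIPELINING
// S: 250 SIZE 10240000 <- space after the code ends the loop
//
// Only the numeric code of that final line is compared against `code`
// (or returned directly when `code` is -1).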
unsafe extern "C" fn smtp_check(
mut fmt: *const libc::c_char,
mut code: libc::c_int,
) -> libc::c_int {
return smtp_checkp(fmt, 0 as *const libc::c_char, code);
}
// strip argument of bad chars
unsafe extern "C" fn sane_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
trim(str);
s = str;
while *s!= 0 {
/* Standard allows these chars in username without quoting:
* /!#$%&'*+-=?^_`{|}~
* and allows dot (.) with some restrictions.
* I chose to only allow a saner subset.
* I propose to expand it only on user's request.
*/
if bb_ascii_isalnum(*s as libc::c_uchar) == 0
&& strchr(
b"=+_-.@\x00" as *const u8 as *const libc::c_char,
*s as libc::c_int,
)
.is_null()
{
bb_error_msg(
b"bad address \'%s\'\x00" as *const u8 as *const libc::c_char,
str,
);
/* returning "": */
*str.offset(0) = '\u{0}' as i32 as libc::c_char;
return str;
}
s = s.offset(1)
}
return str;
}
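// Illustrative example (added, not in the original source): with the character
// set above, sane_address() passes "bob.smith+list@example.com" through
// unchanged, while an address containing e.g. a space or a quote triggers the
// "bad address" message and an empty string is returned instead.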
// check for an address inside angle brackets, if not found fall back to normal
unsafe extern "C" fn angle_address(mut str: *mut libc::c_char) -> *mut libc::c_char {
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut e: *mut libc::c_char = 0 as *mut libc::c_char;
e = trim(str);
if e!= str && {
e = e.offset(-1);
(*e as libc::c_int) == '>' as i32
} {
s = strrchr(str, '<' as i32);
if!s.is_null() {
*e = '\u{0}' as i32 as libc::c_char;
str = s.offset(1)
}
}
return sane_address(str);
}
unsafe extern "C" fn rcptto(mut s: *const libc::c_char) {
if *s == 0 {
return;
}
// N.B. we don't die if recipient is rejected, for the other recipients may be accepted
if 250i32
!= smtp_checkp(
b"RCPT TO:<%s>\x00" as *const u8 as *const libc::c_char,
s,
-1i32,
)
{
bb_error_msg(
b"Bad recipient: <%s>\x00" as *const u8 as *const libc::c_char,
s,
);
};
}
// send to a list of comma separated addresses
unsafe extern "C" fn rcptto_list(mut list: *const libc::c_char) {
let mut free_me: *mut libc::c_char = xstrdup(list);
let mut str: *mut libc::c_char = free_me;
let mut s: *mut libc::c_char = free_me;
let mut prev: libc::c_char = 0i32 as libc::c_char;
let mut in_quote: libc::c_int = 0i32;
while *s!= 0 {
let fresh0 = s;
s = s.offset(1);
let mut ch: libc::c_char = *fresh0;
if ch as libc::c_int == '\"' as i32 && prev as libc::c_int!= '\\' as i32 {
in_quote = (in_quote == 0) as libc::c_int
} else if in_quote == 0 && ch as libc::c_int == ',' as i32 {
*s.offset(-1i32 as isize) = '\u{0}' as i32 as libc::c_char;
rcptto(angle_address(str));
str = s
}
prev = ch
}
if prev as libc::c_int!= ',' as i32 {
rcptto(angle_address(str));
}
free(free_me as *mut libc::c_void);
}
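// Illustrative example (added, not in the original source): for the list
//
// "Smith, John" <js@example.com>, mary@example.com
//
// the comma inside the quoted display name is skipped (in_quote is set), so
// rcptto() is invoked twice, once per address, after angle_address() has
// reduced the first entry to js@example.com.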
#[no_mangle]
pub unsafe extern "C" fn sendmail_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut current_block: u64;
let mut opt_connect: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt_from: *mut libc::c_char = 0 as *mut libc::c_char;
let mut s: *mut libc::c_char = 0 as *mut libc::c_char;
let mut list: *mut llist_t = 0 as *mut llist_t;
let mut host: *mut libc::c_char = sane_address(safe_gethostname());
let mut nheaders: libc::c_uint = 0i32 as libc::c_uint;
let mut code: libc::c_int = 0;
let mut last_hdr: C2RustUnnamed = HDR_OTHER;
let mut check_hdr: libc::c_int = 0;
let mut has_to: libc::c_int = 0i32;
// init global variables
let ref mut fresh1 = *(not_const_pp(&ptr_to_globals as *const *mut globals as *const libc::c_void)
as *mut *mut globals);
*fresh1 = xzalloc(::std::mem::size_of::<globals>() as libc::c_ulong) as *mut globals;
asm!("" : : : "memory" : "volatile");
(*ptr_to_globals).opt_charset =
b"us-ascii\x00" as *const u8 as *const libc::c_char as *mut libc::c_char;
// default HOST[:PORT] is $SMTPHOST, or localhost
opt_connect = getenv(b"SMTPHOST\x00" as *const u8 as *const libc::c_char);
if opt_connect.is_null() {
opt_connect = b"127.0.0.1\x00" as *const u8 as *const libc::c_char as *mut libc::c_char
}
// save initial stdin since body is piped!
xdup2(0i32, 3i32);
(*ptr_to_globals).fp0 = xfdopen_for_read(3i32);
// parse options
// N.B. since -H and -S are mutually exclusive they do not interfere in opt_connect
// -a is for ssmtp (http://downloads.openwrt.org/people/nico/man/man8/ssmtp.8.html) compatibility,
// it is still under development.
(*ptr_to_globals).opts = getopt32(
argv,
b"^tf:o:iw:+H:S:a:*:v\x00vv:H--S:S--H\x00" as *const u8 as *const libc::c_char,
&mut opt_from as *mut *mut libc::c_char,
0 as *mut libc::c_void,
&mut (*ptr_to_globals).timeout as *mut libc::c_uint,
&mut opt_connect as *mut *mut libc::c_char,
&mut opt_connect as *mut *mut libc::c_char,
&mut list as *mut *mut llist_t,
&mut (*ptr_to_globals).verbose as *mut libc::c_uint,
);
//argc -= optind;
argv = argv.offset(optind as isize);
// process -a[upm]<token> options
if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint!= 0 && list.is_null() {
bb_show_usage();
}
while!list.is_null() {
let mut a: *mut libc::c_char = llist_pop(&mut list) as *mut libc::c_char;
if 'u' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).user = xstrdup(a.offset(1))
}
if 'p' as i32 == *a.offset(0) as libc::c_int {
(*ptr_to_globals).pass = xstrdup(a.offset(1))
}
if 'm' as i32 == *a.offset(0) as libc::c_int {
if *a.offset(1) as libc::c_int | 0x20i32 == 'p' as i32 {
// PLAIN
(*ptr_to_globals).opts |= OPT_am_plain as libc::c_int as libc::c_uint
} else if *a.offset(1) as libc::c_int | 0x20i32 == 'l' as i32 {
} else {
bb_error_msg_and_die(
b"unsupported AUTH method %s\x00" as *const u8 as *const libc::c_char,
a.offset(1),
);
}
}
}
// N.B. list == NULL here
//bb_error_msg("OPT[%x] AU[%s], AP[%s], AM[%s], ARGV[%s]", opts, au, ap, am, *argv);
// connect to server
// connection helper ordered? ->
if (*ptr_to_globals).opts & OPT_H as libc::c_int as libc::c_uint!= 0 {
let mut delay: *const libc::c_char = 0 as *const libc::c_char;
let mut args: [*const libc::c_char; 4] = [
b"sh\x00" as *const u8 as *const libc::c_char,
b"-c\x00" as *const u8 as *const libc::c_char,
opt_connect as *const libc::c_char,
0 as *const libc::c_char,
];
// plug it in
launch_helper(args.as_mut_ptr());
// Now:
// our stdout will go to helper's stdin,
// helper's stdout will be available on our stdin.
// Wait for initial server message.
// If helper (such as openssl) invokes STARTTLS, the initial 220
// is swallowed by helper (and not repeated after TLS is initiated).
// We will send NOOP cmd to server and check the response.
// We should get 220+250 on plain connection, 250 on STARTTLSed session.
//
// The problem here is some servers delay initial 220 message,
// and consider client to be a spammer if it starts sending cmds
// before 220 reached it. The code below is unsafe in this regard:
// in non-STARTTLSed case, we potentially send NOOP before 220
// is sent by server.
//
// If $SMTP_ANTISPAM_DELAY is set, we pause before sending NOOP.
//
delay = getenv(b"SMTP_ANTISPAM_DELAY\x00" as *const u8 as *const libc::c_char);
if!delay.is_null() {
sleep(atoi(delay) as libc::c_uint);
}
code = smtp_check(b"NOOP\x00" as *const u8 as *const libc::c_char, -1i32);
if code == 220i32 {
// we got 220 - this is not STARTTLSed connection,
// eat 250 response to our NOOP
smtp_check(0 as *const libc::c_char, 250i32);
} else if code!= 250i32 {
bb_simple_error_msg_and_die(b"SMTP init failed\x00" as *const u8 as *const libc::c_char);
}
} else {
// vanilla connection
let mut fd: libc::c_int = 0;
fd = create_and_connect_stream_or_die(opt_connect, 25i32);
// and make ourselves a simple IO filter
xmove_fd(fd, 0i32);
xdup2(0i32, 1i32);
// Wait for initial server 220 message
smtp_check(0 as *const libc::c_char, 220i32);
}
// we should start with modern EHLO
if 250i32
!= smtp_checkp(
b"EHLO %s\x00" as *const u8 as *const libc::c_char,
host,
-1i32,
)
{
smtp_checkp(
b"HELO %s\x00" as *const u8 as *const libc::c_char,
host,
250i32,
);
}
// perform authentication
if (*ptr_to_globals).opts & OPT_a as libc::c_int as libc::c_uint!= 0 {
// read credentials unless they are given via -a[up] options
if (*ptr_to_globals).user.is_null() || (*ptr_to_globals).pass.is_null() {
get_cred_or_die(4i32);
}
if (*ptr_to_globals).opts & OPT_am_plain as libc::c_int as libc::c_uint!= 0 {
// C: AUTH PLAIN
// S: 334
// C: base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
//Note: a shorter format is allowed:
// C: AUTH PLAIN base64encoded(auth<NUL>user<NUL>pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH PLAIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
let mut user_len: libc::c_uint = strlen((*ptr_to_globals).user) as libc::c_uint;
let mut pass_len: libc::c_uint = strlen((*ptr_to_globals).pass) as libc::c_uint;
let mut sz: libc::c_uint = (1i32 as libc::c_uint)
.wrapping_add(user_len)
.wrapping_add(1i32 as libc::c_uint)
.wrapping_add(pass_len);
let vla = sz.wrapping_add(1i32 as libc::c_uint) as usize;
let mut plain_auth: Vec<libc::c_char> = ::std::vec::from_elem(0, vla);
// the format is:
// "authorization identity<NUL>username<NUL>password"
// authorization identity is empty.
*plain_auth.as_mut_ptr().offset(0) = '\u{0}' as i32 as libc::c_char;
strcpy(
stpcpy(plain_auth.as_mut_ptr().offset(1), (*ptr_to_globals).user).offset(1),
(*ptr_to_globals).pass,
);
printbuf_base64(plain_auth.as_mut_ptr(), sz);
} else {
// C: AUTH LOGIN
// S: 334 VXNlcm5hbWU6
// ^^^^^^^^^^^^ server says "Username:"
// C: base64encoded(user)
// S: 334 UGFzc3dvcmQ6
// ^^^^^^^^^^^^ server says "Password:"
// C: base64encoded(pass)
// S: 235 2.7.0 Authentication successful
smtp_check(
b"AUTH LOGIN\x00" as *const u8 as *const libc::c_char,
334i32,
);
printstr_base64((*ptr_to_globals).user);
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 334i32);
printstr_base64((*ptr_to_globals).pass);
}
smtp_check(b"\x00" as *const u8 as *const libc::c_char, 235i32);
}
// set sender
// N.B. we have here a very loosely defined algorithm
// since sendmail historically offers no means to specify secrets on cmdline.
// 1) server can require no authentication ->
// we must just provide a (possibly fake) reply address.
// 2) server can require AUTH ->
// we must provide valid username and password along with a (possibly fake) reply address.
// For the sake of security username and password are to be read either from console or from a secured file.
// Since reading from console may defeat usability, the solution is either to read from a predefined
// file descriptor (e.g. 4), or again from a secured file.
// got no sender address? use auth name, then UID username as a last resort
if opt_from.is_null() {
opt_from = xasprintf(
b"%s@%s\x00" as *const u8 as *const libc::c_char,
if!(*ptr_to_globals).user.is_null() {
(*ptr_to_globals).user
} else {
xuid2uname(getuid())
},
(*xgethostbyname(host)).h_name,
)
}
free(host as *mut libc::c_void);
smtp_checkp(
b"MAIL FROM:<%s>\x00" as *const u8 as *const libc::c_char,
opt_from,
250i32,
);
// process message
// read recipients from message and add them to those given on cmdline.
// this means we scan stdin for To:, Cc:, Bcc: lines until an empty line
// and then use the rest of stdin as message body
code = 0i32; // set "analyze headers" mode
's_369: loop {
s = xmalloc_fgetline((*ptr_to_globals).fp0);
if!s.is_null() {
current_block = 16252544171633782868;
} else {
current_block = 228501038991332163;
}
loop {
match current_block {
228501038991332163 =>
// odd case: we didn't stop "analyze headers" mode -> message body is empty. Reenter the loop
// N.B. after reenter code will be > 0
{
if!(code == 0) {
// finalize the message
smtp_check(b".\x00" as *const u8 as *const libc::c_char, 250i32);
break 's_369;
}
}
_ =>
// put message lines doubling leading dots
{
if code!= 0 {
// escape leading dots
// N.B. this feature is implied even if no -i (-oi) switch given
// N.B. we need to escape the leading dot regardless of
// whether it is single or not character on the line
if '.' as i32 == *s.offset(0) as libc::c_int {
/*&& '\0' == s[1] */
bb_putchar('.' as i32);
}
// dump read line
send_r_n(s);
free(s as *mut libc::c_void);
continue 's_369;
} else {
// analyze headers
// To: or Cc: headers add recipients
check_hdr = (0i32
== strncasecmp(
b"To:\x00" as *const u8 as *const libc::c_char,
s,
3i32 as libc::c_ulong,
)) as libc::c_int;
has_to |= check_hdr;
if (*ptr_to_globals).opts & OPT_t as libc::c_int as libc::c_uint!= 0 {
if check_hdr!= 0
|| 0i32
== strncasecmp(
(b"Bcc:\x00" as *const u8 as *const libc::c_char).offset(1),
s,
3i32 as libc::c_ulong,
)
{
rcptto_list(s.offset(3));
last_hdr = HDR_TOCC;
current_block = 2265380199544777579;
break;
} else if 0i32
== strncasecmp( | b"Bcc:\x00" as *const u8 as *const libc::c_char,
s,
4i32 as libc::c_ulong,
) | random_line_split |
|
game_tree.rs | //! This file contains code that represents the GameState at any point
//! during the game, in a lazily-evaluated tree structure.
use crate::common::gamestate::GameState;
use crate::common::action::Move;
use std::collections::HashMap;
/// Represents an entire game of Fish, starting from the given GameState
/// passed to GameTree::new.
/// Takes the form of a tree structure with the nodes being Turns,
/// leaves being Ends, and branches being the valid_moves mappings.
/// Each node stores the GameState representing the data about the
/// game at that point in time.
/// Uses lazy evaluation to avoid storing the entire data structure
/// in memory. See the LazyGameTree struct for more info.
///
/// Note that there is no case when a player is stuck; we simply
/// skip their turn if they have no moves and move
/// to the next Turn state.
#[derive(Debug)]
pub enum GameTree {
Turn { state: GameState, valid_moves: HashMap<Move, LazyGameTree> },
End(GameState),
}
impl GameTree {
/// Initialize a GameTree from the given initial GameState.
/// The given state does not have to be the start of a game -
/// it is allowed to be any valid game state. It is referred to
/// as the initial state because the generated tree will start from
/// that state with links to each potential subsequent state, but
/// not any previous states.
pub fn new(initial_state: &GameState) -> GameTree {
assert!(initial_state.all_penguins_are_placed(), "{:?}", initial_state);
let valid_moves = initial_state.get_valid_moves();
if valid_moves.is_empty() {
GameTree::End(initial_state.clone())
} else {
let valid_moves = valid_moves.into_iter().map(|move_| {
let lazy_game = LazyGameTree::from_move(&move_, initial_state);
(move_, lazy_game)
}).collect();
GameTree::Turn {
state: initial_state.clone(),
valid_moves,
}
}
}
/// Returns a shared reference to the GameState of the current node of the GameTree
pub fn | (&self) -> &GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns a mutable reference to the GameState of the current node of the GameTree
pub fn get_state_mut(&mut self) -> &mut GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns the GameState of the current node of the GameTree
pub fn take_state(self) -> GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn get_game_after_move(&mut self, move_: Move) -> Option<&mut GameTree> {
match self {
GameTree::Turn { valid_moves,.. } => {
valid_moves.get_mut(&move_).map(|lazy_game| lazy_game.get_evaluated())
},
GameTree::End(_) => None,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn take_game_after_move(self, move_: Move) -> Option<GameTree> {
match self {
GameTree::Turn { mut valid_moves,.. } => {
valid_moves.remove(&move_).map(|lazy_game| lazy_game.evaluate())
},
GameTree::End(_) => None,
}
}
/// Applies a function to the GameTree for every valid move, returning
/// a HashMap of the same moves mapped to their new results
pub fn map<T, F>(&mut self, mut f: F) -> HashMap<Move, T>
where F: FnMut(&mut GameTree) -> T
{
match self {
GameTree::Turn { valid_moves,.. } => {
valid_moves.iter_mut().map(|(move_, lazy_game)| {
let game = lazy_game.get_evaluated();
(move_.clone(), f(game))
}).collect()
},
GameTree::End(_) => HashMap::new(),
}
}
pub fn is_game_over(&self) -> bool {
match self {
GameTree::Turn {.. } => false,
GameTree::End(_) => true,
}
}
}
/// A LazyGameTree is either an already evaluated GameTree or
/// is an Unevaluated thunk that can be evaluated to return a GameTree.
/// Since Games are stored as recursive trees in memory keeping
/// the branches of each GameTree::Turn as LazyGameTree::Unevaluated saves
/// us from allocating an exponential amount of memory for every
/// possible GameState.
pub enum LazyGameTree {
Evaluated(GameTree),
Unevaluated(Box<dyn FnMut() -> GameTree>),
}
impl LazyGameTree {
/// Retrieves the GameTree from this LazyGameTree,
/// evaluating this LazyGameTree if it hasn't already been
pub fn get_evaluated(&mut self) -> &mut GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(thunk) => {
let game = thunk();
*self = LazyGameTree::Evaluated(game);
self.get_evaluated()
},
}
}
pub fn evaluate(self) -> GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(mut thunk) => thunk(),
}
}
/// Create a Unevaluated LazyGameTree from the given state
/// and the move to take to advance that state. The passed in
/// move must be valid for the given game state.
fn from_move(move_: &Move, state: &GameState) -> LazyGameTree {
let mut state = state.clone();
let move_ = move_.clone();
LazyGameTree::Unevaluated(Box::new(move || {
state.move_avatar_for_current_player(move_)
.expect(&format!("Invalid move for the given GameState passed to LazyGameTree::from_move.\
\nMove: {:?}\nGameState: {:?}", move_, state));
GameTree::new(&state)
}))
}
}
impl std::fmt::Debug for LazyGameTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
LazyGameTree::Evaluated(game) => write!(f, "Evaluated({:?})", game),
LazyGameTree::Unevaluated(_) => write!(f, "Unevaluated(_)"),
}
}
}
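// Illustrative usage sketch (added, not part of the original file). Given a
// GameState `state` with all penguins placed, branches are only forced as they
// are visited; every untaken move stays an unevaluated thunk:
//
// let mut tree = GameTree::new(&state);
// let moves = tree.get_state().get_valid_moves();
// if let Some(subtree) = tree.get_game_after_move(moves[0]) {
// // only this branch has been evaluated; its siblings remain lazy
// let _game_over_after = subtree.map(|after| after.is_game_over());
// }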
#[cfg(test)]
mod tests {
use super::*;
use crate::server::strategy::tests::take_zigzag_placement;
// Starts a game with a 3 row, 5 column board and all penguins placed.
fn start_game() -> GameTree {
let mut state = GameState::with_default_board(5, 3, 2);
while!state.all_penguins_are_placed() {
take_zigzag_placement(&mut state);
}
GameTree::new(&state)
}
fn get_expected_valid_moves(game: &GameTree) -> Vec<Move> {
let mut expected_valid_moves = vec![];
let state = game.get_state();
let occupied_tiles = state.get_occupied_tiles();
for penguin in state.current_player().penguins.iter() {
let current_tile = state.get_tile(penguin.tile_id.unwrap()).unwrap();
for tile in current_tile.all_reachable_tiles(&state.board, &occupied_tiles) {
expected_valid_moves.push(Move::new(current_tile.tile_id, tile.tile_id))
}
}
expected_valid_moves
}
#[test]
fn test_new() {
// valid_moves generated correctly
// - have expected moves, check if same as generated
// starting gamestate is same as one passed to new
let game = start_game();
let mut valid_moves = game.get_state().get_valid_moves();
let mut expected_valid_moves = get_expected_valid_moves(&game);
expected_valid_moves.sort();
valid_moves.sort();
assert_eq!(expected_valid_moves, valid_moves);
}
#[test]
fn is_initially_unevaluated() {
let game = start_game();
match game {
GameTree::Turn { valid_moves,.. } => {
// Assert all the branches to the tree are initially Unevaluated
assert!(valid_moves.iter().all(|(_, lazy_game)| {
match lazy_game {
LazyGameTree::Evaluated(_) => false,
LazyGameTree::Unevaluated(_) => true,
}
}));
},
GameTree::End(_) => unreachable!("start_game should never return a finished game"),
}
}
#[test]
fn test_get_game_after_move() {
let mut initial_game = start_game();
// record initial moves and the identity of the player whose turn it is
let mut initial_valid_moves = initial_game.get_state().get_valid_moves();
let initial_turn = initial_game.get_state().current_turn;
let game_after_move = initial_game.get_game_after_move(initial_valid_moves[0]).unwrap(); // make a move
// record new moves and the identity of the player whose turn it now is
let mut valid_moves = game_after_move.get_state().get_valid_moves();
let current_turn = game_after_move.get_state().current_turn;
let mut expected_valid_moves = get_expected_valid_moves(&game_after_move);
initial_valid_moves.sort();
valid_moves.sort();
expected_valid_moves.sort();
assert_ne!(initial_turn, current_turn); // turn has changed
assert_ne!(initial_valid_moves, valid_moves); // valid moves have changed
assert_eq!(valid_moves, expected_valid_moves); // new valid moves are correct
}
#[test]
fn test_map() {
let mut game = start_game();
// Map is_game_over across each state and assert that each value is the
// same as if we performed the given move then checked is_game_over for
// the new game state after the move
let winning_moves = game.map(|game_after_move| game_after_move.is_game_over());
for (&move_, &game_over) in winning_moves.iter() {
// Clone the current state, then move the avatar and manually
// apply the is_game_over function to emulate map's behaviour.
let mut state_after_move = game.get_state().clone();
state_after_move.move_avatar_for_current_player(move_);
assert_eq!(state_after_move.is_game_over(), game_over);
}
// ensure map produces a result for every game
match &game {
GameTree::Turn { valid_moves,.. } => assert_eq!(winning_moves.len(), valid_moves.len()),
GameTree::End(_) => unreachable!("start_game should return an in-progress game"),
}
}
}
| get_state | identifier_name |
game_tree.rs | //! This file contains code that represents the GameState at any point
//! during the game, in a lazily-evaluated tree structure.
use crate::common::gamestate::GameState;
use crate::common::action::Move;
use std::collections::HashMap;
/// Represents an entire game of Fish, starting from the given GameState
/// passed to GameTree::new.
/// Takes the form of a tree structure with the nodes being Turns,
/// leaves being Ends, and branches being the valid_moves mappings.
/// Each node stores the GameState representing the data about the
/// game at that point in time.
/// Uses lazy evaluation to avoid storing the entire data structure
/// in memory. See the LazyGameTree struct for more info.
///
/// Note that there is no case when a player is stuck; we simply
/// skip their turn if they have no moves and move
/// to the next Turn state.
#[derive(Debug)]
pub enum GameTree {
Turn { state: GameState, valid_moves: HashMap<Move, LazyGameTree> },
End(GameState),
}
impl GameTree {
/// Initialize a GameTree from the given initial GameState.
/// The given state does not have to be the start of a game -
/// it is allowed to be any valid game state. It is referred to
/// as the initial state because the generated tree will start from
/// that state with links to each potential subsequent state, but
/// not any previous states.
pub fn new(initial_state: &GameState) -> GameTree {
assert!(initial_state.all_penguins_are_placed(), "{:?}", initial_state);
let valid_moves = initial_state.get_valid_moves();
if valid_moves.is_empty() {
GameTree::End(initial_state.clone())
} else {
let valid_moves = valid_moves.into_iter().map(|move_| {
let lazy_game = LazyGameTree::from_move(&move_, initial_state);
(move_, lazy_game)
}).collect();
GameTree::Turn {
state: initial_state.clone(),
valid_moves,
}
}
}
/// Returns a shared reference to the GameState of the current node of the GameTree
pub fn get_state(&self) -> &GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns a mutable reference to the GameState of the current node of the GameTree
pub fn get_state_mut(&mut self) -> &mut GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns the GameState of the current node of the GameTree
pub fn take_state(self) -> GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move. | GameTree::Turn { valid_moves,.. } => {
valid_moves.get_mut(&move_).map(|lazy_game| lazy_game.get_evaluated())
},
GameTree::End(_) => None,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn take_game_after_move(self, move_: Move) -> Option<GameTree> {
match self {
GameTree::Turn { mut valid_moves,.. } => {
valid_moves.remove(&move_).map(|lazy_game| lazy_game.evaluate())
},
GameTree::End(_) => None,
}
}
/// Applies a function to the GameTree for every valid move, returning
/// a HashMap of the same moves mapped to their new results
pub fn map<T, F>(&mut self, mut f: F) -> HashMap<Move, T>
where F: FnMut(&mut GameTree) -> T
{
match self {
GameTree::Turn { valid_moves,.. } => {
valid_moves.iter_mut().map(|(move_, lazy_game)| {
let game = lazy_game.get_evaluated();
(move_.clone(), f(game))
}).collect()
},
GameTree::End(_) => HashMap::new(),
}
}
pub fn is_game_over(&self) -> bool {
match self {
GameTree::Turn {.. } => false,
GameTree::End(_) => true,
}
}
}
/// A LazyGameTree is either an already evaluated GameTree or
/// is an Unevaluated thunk that can be evaluated to return a GameTree.
/// Since Games are stored as recursive trees in memory keeping
/// the branches of each GameTree::Turn as LazyGameTree::Unevaluated saves
/// us from allocating an exponential amount of memory for every
/// possible GameState.
pub enum LazyGameTree {
Evaluated(GameTree),
Unevaluated(Box<dyn FnMut() -> GameTree>),
}
impl LazyGameTree {
/// Retrieves the GameTree from this LazyGameTree,
/// evaluating this LazyGameTree if it hasn't already been
pub fn get_evaluated(&mut self) -> &mut GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(thunk) => {
let game = thunk();
*self = LazyGameTree::Evaluated(game);
self.get_evaluated()
},
}
}
pub fn evaluate(self) -> GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(mut thunk) => thunk(),
}
}
/// Create a Unevaluated LazyGameTree from the given state
/// and the move to take to advance that state. The passed in
/// move must be valid for the given game state.
fn from_move(move_: &Move, state: &GameState) -> LazyGameTree {
let mut state = state.clone();
let move_ = move_.clone();
LazyGameTree::Unevaluated(Box::new(move || {
state.move_avatar_for_current_player(move_)
.expect(&format!("Invalid move for the given GameState passed to LazyGameTree::from_move.\
\nMove: {:?}\nGameState: {:?}", move_, state));
GameTree::new(&state)
}))
}
}
impl std::fmt::Debug for LazyGameTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
LazyGameTree::Evaluated(game) => write!(f, "Evaluated({:?})", game),
LazyGameTree::Unevaluated(_) => write!(f, "Unevaluated(_)"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::strategy::tests::take_zigzag_placement;
// Starts a game with a 3 row, 5 column board and all penguins placed.
fn start_game() -> GameTree {
let mut state = GameState::with_default_board(5, 3, 2);
while!state.all_penguins_are_placed() {
take_zigzag_placement(&mut state);
}
GameTree::new(&state)
}
fn get_expected_valid_moves(game: &GameTree) -> Vec<Move> {
let mut expected_valid_moves = vec![];
let state = game.get_state();
let occupied_tiles = state.get_occupied_tiles();
for penguin in state.current_player().penguins.iter() {
let current_tile = state.get_tile(penguin.tile_id.unwrap()).unwrap();
for tile in current_tile.all_reachable_tiles(&state.board, &occupied_tiles) {
expected_valid_moves.push(Move::new(current_tile.tile_id, tile.tile_id))
}
}
expected_valid_moves
}
#[test]
fn test_new() {
// valid_moves generated correctly
// - have expected moves, check if same as generated
// starting gamestate is same as one passed to new
let game = start_game();
let mut valid_moves = game.get_state().get_valid_moves();
let mut expected_valid_moves = get_expected_valid_moves(&game);
expected_valid_moves.sort();
valid_moves.sort();
assert_eq!(expected_valid_moves, valid_moves);
}
#[test]
fn is_initially_unevaluated() {
let game = start_game();
match game {
GameTree::Turn { valid_moves,.. } => {
// Assert all the branches to the tree are initially Unevaluated
assert!(valid_moves.iter().all(|(_, lazy_game)| {
match lazy_game {
LazyGameTree::Evaluated(_) => false,
LazyGameTree::Unevaluated(_) => true,
}
}));
},
GameTree::End(_) => unreachable!("start_game should never return a finished game"),
}
}
#[test]
fn test_get_game_after_move() {
let mut initial_game = start_game();
// record initial moves and the identity of the player whose turn it is
let mut initial_valid_moves = initial_game.get_state().get_valid_moves();
let initial_turn = initial_game.get_state().current_turn;
let game_after_move = initial_game.get_game_after_move(initial_valid_moves[0]).unwrap(); // make a move
// record new moves and the identity of the player whose turn it now is
let mut valid_moves = game_after_move.get_state().get_valid_moves();
let current_turn = game_after_move.get_state().current_turn;
let mut expected_valid_moves = get_expected_valid_moves(&game_after_move);
initial_valid_moves.sort();
valid_moves.sort();
expected_valid_moves.sort();
assert_ne!(initial_turn, current_turn); // turn has changed
assert_ne!(initial_valid_moves, valid_moves); // valid moves have changed
assert_eq!(valid_moves, expected_valid_moves); // new valid moves are correct
}
#[test]
fn test_map() {
let mut game = start_game();
// Map is_game_over across each state and assert that each value is the
// same as if we performed the given move then checked is_game_over for
// the new game state after the move
let winning_moves = game.map(|game_after_move| game_after_move.is_game_over());
for (&move_, &game_over) in winning_moves.iter() {
// Clone the current state, then move the avatar and manually
// apply the is_game_over function to emulate map's behaviour.
let mut state_after_move = game.get_state().clone();
state_after_move.move_avatar_for_current_player(move_);
assert_eq!(state_after_move.is_game_over(), game_over);
}
// ensure map produces a result for every game
match &game {
GameTree::Turn { valid_moves,.. } => assert_eq!(winning_moves.len(), valid_moves.len()),
GameTree::End(_) => unreachable!("start_game should return an in-progress game"),
}
}
} | /// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn get_game_after_move(&mut self, move_: Move) -> Option<&mut GameTree> {
match self { | random_line_split |
game_tree.rs | //! This file contains code that represents the GameState at any point
//! during the game, in a lazily-evaluated tree structure.
use crate::common::gamestate::GameState;
use crate::common::action::Move;
use std::collections::HashMap;
/// Represents an entire game of Fish, starting from the given GameState
/// passed to GameTree::new.
/// Takes the form of a tree structure with the nodes being Turns,
/// leaves being Ends, and branches being the valid_moves mappings.
/// Each node stores the GameState representing the data about the
/// game at that point in time.
/// Uses lazy evaluation to avoid storing the entire data structure
/// in memory. See the LazyGameTree struct for more info.
///
/// Note that there is no case when a player is stuck; we simply
/// skip their turn if they have no moves and move
/// to the next Turn state.
#[derive(Debug)]
pub enum GameTree {
Turn { state: GameState, valid_moves: HashMap<Move, LazyGameTree> },
End(GameState),
}
impl GameTree {
/// Initialize a GameTree from the given initial GameState.
/// The given state does not have to be the start of a game -
/// it is allowed to be any valid game state. It is referred to
/// as the initial state because the generated tree will start from
/// that state with links to each potential subsequent state, but
/// not any previous states.
pub fn new(initial_state: &GameState) -> GameTree {
assert!(initial_state.all_penguins_are_placed(), "{:?}", initial_state);
let valid_moves = initial_state.get_valid_moves();
if valid_moves.is_empty() {
GameTree::End(initial_state.clone())
} else {
let valid_moves = valid_moves.into_iter().map(|move_| {
let lazy_game = LazyGameTree::from_move(&move_, initial_state);
(move_, lazy_game)
}).collect();
GameTree::Turn {
state: initial_state.clone(),
valid_moves,
}
}
}
/// Returns a shared reference to the GameState of the current node of the GameTree
pub fn get_state(&self) -> &GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns a mutable reference to the GameState of the current node of the GameTree
pub fn get_state_mut(&mut self) -> &mut GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns the GameState of the current node of the GameTree
pub fn take_state(self) -> GameState {
match self {
GameTree::Turn { state,.. } => state,
GameTree::End(state) => state,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn get_game_after_move(&mut self, move_: Move) -> Option<&mut GameTree> {
match self {
GameTree::Turn { valid_moves,.. } => {
valid_moves.get_mut(&move_).map(|lazy_game| lazy_game.get_evaluated())
},
GameTree::End(_) => None,
}
}
/// Returns the `GameTree` that would be produced as a result of taking the given Move.
/// If the move is invalid (not in valid_moves or self is `End`) then None is returned
pub fn take_game_after_move(self, move_: Move) -> Option<GameTree> {
match self {
GameTree::Turn { mut valid_moves,.. } => {
valid_moves.remove(&move_).map(|lazy_game| lazy_game.evaluate())
},
GameTree::End(_) => None,
}
}
/// Applies a function to the GameTree for every valid move, returning
/// a HashMap of the same moves mapped to their new results
pub fn map<T, F>(&mut self, mut f: F) -> HashMap<Move, T>
where F: FnMut(&mut GameTree) -> T
{
match self {
GameTree::Turn { valid_moves,.. } => {
valid_moves.iter_mut().map(|(move_, lazy_game)| {
let game = lazy_game.get_evaluated();
(move_.clone(), f(game))
}).collect()
},
GameTree::End(_) => HashMap::new(),
}
}
pub fn is_game_over(&self) -> bool {
match self {
GameTree::Turn {.. } => false,
GameTree::End(_) => true,
}
}
}
/// A LazyGameTree is either an already evaluated GameTree or
/// is an Unevaluated thunk that can be evaluated to return a GameTree.
/// Since Games are stored as recursive trees in memory keeping
/// the branches of each GameTree::Turn as LazyGameTree::Unevaluated saves
/// us from allocating an exponential amount of memory for every
/// possible GameState.
pub enum LazyGameTree {
Evaluated(GameTree),
Unevaluated(Box<dyn FnMut() -> GameTree>),
}
impl LazyGameTree {
/// Retrieves the GameTree from this LazyGameTree,
/// evaluating this LazyGameTree if it hasn't already been
pub fn get_evaluated(&mut self) -> &mut GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(thunk) => {
let game = thunk();
*self = LazyGameTree::Evaluated(game);
self.get_evaluated()
},
}
}
pub fn evaluate(self) -> GameTree {
match self {
LazyGameTree::Evaluated(game) => game,
LazyGameTree::Unevaluated(mut thunk) => thunk(),
}
}
/// Create an Unevaluated LazyGameTree from the given state
/// and the move to take to advance that state. The passed in
/// move must be valid for the given game state.
fn from_move(move_: &Move, state: &GameState) -> LazyGameTree {
let mut state = state.clone();
let move_ = move_.clone();
LazyGameTree::Unevaluated(Box::new(move || {
state.move_avatar_for_current_player(move_)
.expect(&format!("Invalid move for the given GameState passed to LazyGameTree::from_move.\
\nMove: {:?}\nGameState: {:?}", move_, state));
GameTree::new(&state)
}))
}
}
impl std::fmt::Debug for LazyGameTree {
fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
match self {
LazyGameTree::Evaluated(game) => write!(f, "Evaluated({:?})", game),
LazyGameTree::Unevaluated(_) => write!(f, "Unevaluated(_)"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::server::strategy::tests::take_zigzag_placement;
// Starts a game with a 3 row, 5 column board and all penguins placed.
fn start_game() -> GameTree {
let mut state = GameState::with_default_board(5, 3, 2);
while!state.all_penguins_are_placed() {
take_zigzag_placement(&mut state);
}
GameTree::new(&state)
}
fn get_expected_valid_moves(game: &GameTree) -> Vec<Move> |
#[test]
fn test_new() {
// valid_moves generated correctly
// - have expected moves, check if same as generated
// starting gamestate is same as one passed to new
let game = start_game();
let mut valid_moves = game.get_state().get_valid_moves();
let mut expected_valid_moves = get_expected_valid_moves(&game);
expected_valid_moves.sort();
valid_moves.sort();
assert_eq!(expected_valid_moves, valid_moves);
}
#[test]
fn is_initially_unevaluated() {
let game = start_game();
match game {
GameTree::Turn { valid_moves,.. } => {
// Assert all the branches to the tree are initially Unevaluated
assert!(valid_moves.iter().all(|(_, lazy_game)| {
match lazy_game {
LazyGameTree::Evaluated(_) => false,
LazyGameTree::Unevaluated(_) => true,
}
}));
},
GameTree::End(_) => unreachable!("start_game should never return a finished game"),
}
}
#[test]
fn test_get_game_after_move() {
let mut initial_game = start_game();
// record initial moves and the identity of the player whose turn it is
let mut initial_valid_moves = initial_game.get_state().get_valid_moves();
let initial_turn = initial_game.get_state().current_turn;
let game_after_move = initial_game.get_game_after_move(initial_valid_moves[0]).unwrap(); // make a move
// record new moves and the identity of the player whose turn it now is
let mut valid_moves = game_after_move.get_state().get_valid_moves();
let current_turn = game_after_move.get_state().current_turn;
let mut expected_valid_moves = get_expected_valid_moves(&game_after_move);
initial_valid_moves.sort();
valid_moves.sort();
expected_valid_moves.sort();
assert_ne!(initial_turn, current_turn); // turn has changed
assert_ne!(initial_valid_moves, valid_moves); // valid moves have changed
assert_eq!(valid_moves, expected_valid_moves); // new valid moves are correct
}
#[test]
fn test_map() {
let mut game = start_game();
// Map is_game_over across each state and assert that each value is the
// same as if we performed the given move then checked is_game_over for
// the new game state after the move
let winning_moves = game.map(|game_after_move| game_after_move.is_game_over());
for (&move_, &game_over) in winning_moves.iter() {
// Clone the current state, then move the avatar and manually
// apply the is_game_over function to emulate map's behaviour.
let mut state_after_move = game.get_state().clone();
state_after_move.move_avatar_for_current_player(move_);
assert_eq!(state_after_move.is_game_over(), game_over);
}
// ensure map produces a result for every game
match &game {
GameTree::Turn { valid_moves,.. } => assert_eq!(winning_moves.len(), valid_moves.len()),
GameTree::End(_) => unreachable!("start_game should return an in-progress game"),
}
}
}
| {
let mut expected_valid_moves = vec![];
let state = game.get_state();
let occupied_tiles = state.get_occupied_tiles();
for penguin in state.current_player().penguins.iter() {
let current_tile = state.get_tile(penguin.tile_id.unwrap()).unwrap();
for tile in current_tile.all_reachable_tiles(&state.board, &occupied_tiles) {
expected_valid_moves.push(Move::new(current_tile.tile_id, tile.tile_id))
}
}
expected_valid_moves
} | identifier_body |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
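// Illustrative usage sketch (added, not part of the original file):
//
// use num_bigint::RandBigInt;
// let mut rng = rand::thread_rng();
// let a = rng.gen_biguint(256); // at most 256 random bits
// let b = rng.gen_biguint_below(&a); // panics if `a` is zero
// let c = rng.gen_biguint_range(&b, &a); // b <= c < a, requires b < a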
impl<R: Rng +?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducible values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
//...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
//...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn | <R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
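// Illustrative sketch (not part of the original crate): `RandomBits` plugs into rand's
// `Distribution` API, so bit-bounded values can be drawn with `sample`. Assumes a `std`
// build where `rand::thread_rng` is available.
#[cfg(test)]
mod random_bits_example {
    use super::RandomBits;
    use crate::{BigInt, BigUint};
    use rand::distributions::Distribution;
    use rand::thread_rng;

    #[test]
    fn bit_bounded_samples() {
        let mut rng = thread_rng();
        let dist = RandomBits::new(256);
        let unsigned: BigUint = dist.sample(&mut rng);
        let signed: BigInt = dist.sample(&mut rng);
        // `gen_biguint`/`gen_bigint` produce values of *at most* the requested bit size.
        assert!(unsigned.bits() <= 256);
        assert!(signed.bits() <= 256);
    }
}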
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependent on the provided random number generator
/// to actually provide random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where `SMALL_PRIMES_PRODUCT` exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES, which allows us
/// to reduce a candidate prime modulo this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng +?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e., set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
}
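// Illustrative sketch (not part of the original crate): why reducing a candidate modulo
// SMALL_PRIMES_PRODUCT is enough to test divisibility by every entry of SMALL_PRIMES.
// For any prime q dividing the product, p mod q == (p mod SMALL_PRIMES_PRODUCT) mod q,
// so one BigUint reduction is followed only by cheap u64 arithmetic in the sieve loop above.
#[cfg(all(test, feature = "prime"))]
mod small_prime_filter_sketch {
    use super::{SMALL_PRIMES, SMALL_PRIMES_PRODUCT};
    use crate::BigUint;
    use num_traits::ToPrimitive;

    #[test]
    fn residue_preserves_divisibility() {
        // 3 * 5 * 7 * 11 = 1155, so this candidate is divisible by several small primes.
        let p = BigUint::from(1_155u32) * BigUint::from(1_000_003u32);
        let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
        for prime in &SMALL_PRIMES {
            let q = u64::from(*prime);
            let divisible = (&p % BigUint::from(q)).to_u64().unwrap() == 0;
            assert_eq!(divisible, rem % q == 0);
        }
    }
}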
| sample | identifier_name |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng +?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducible values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
//...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
//...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
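// Illustrative usage sketch (not part of the original crate): the `RandBigInt` extension
// trait above in action. Assumes a `std` build where `rand::thread_rng` is available.
#[cfg(test)]
mod rand_bigint_example {
    use super::RandBigInt;
    use crate::{BigInt, BigUint};
    use rand::thread_rng;

    #[test]
    fn bounded_draws_stay_in_range() {
        let mut rng = thread_rng();
        let low = BigUint::from(1_000u32);
        let high = BigUint::from(2_000u32);
        let n = rng.gen_biguint_range(&low, &high);
        assert!(low <= n && n < high);
        let signed = rng.gen_bigint_range(&BigInt::from(-50), &BigInt::from(50));
        assert!(BigInt::from(-50) <= signed && signed < BigInt::from(50));
    }
}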
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependent on the provided random number generator
/// to actually provide random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where `SMALL_PRIMES_PRODUCT` exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES, which allows us
/// to reduce a candidate prime modulo this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
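// Consistency sketch (not part of the original crate): the hard-coded constant above should
// equal the product of SMALL_PRIMES, i.e. 3 * 5 * 7 * ... * 53 = 16_294_579_238_595_022_365,
// which still fits in a u64.
#[cfg(all(test, feature = "prime"))]
mod small_primes_product_check {
    use super::{SMALL_PRIMES, SMALL_PRIMES_PRODUCT};
    use num_traits::ToPrimitive;

    #[test]
    fn product_matches_constant() {
        let product = SMALL_PRIMES.iter().fold(1u64, |acc, &p| acc * u64::from(p));
        assert_eq!(product, 16_294_579_238_595_022_365);
        assert_eq!(SMALL_PRIMES_PRODUCT.to_u64(), Some(product));
    }
}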
#[cfg(feature = "prime")]
impl<R: Rng +?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint | // means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
}
| {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e., set the most significant two bits.
// Setting the top two bits, rather than just the top bit, | identifier_body |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng +?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducible values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
//...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
//...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
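// Illustrative sketch (not part of the original crate): with the `SampleUniform` impls above,
// `BigUint` works with rand's generic `Uniform` distribution. Assumes a rand 0.7/0.8-style
// API where `Uniform::new` returns the distribution directly, and a `std` build.
#[cfg(test)]
mod uniform_example {
    use crate::BigUint;
    use rand::distributions::{Distribution, Uniform};
    use rand::thread_rng;

    #[test]
    fn uniform_biguint_stays_in_bounds() {
        let mut rng = thread_rng();
        let low = BigUint::from(10u32);
        let high = BigUint::from(10_000u32);
        let between = Uniform::new(&low, &high);
        for _ in 0..16 {
            let x = between.sample(&mut rng);
            assert!(low <= x && x < high);
        }
    }
}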
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependent on the provided random number generator
/// to actually provide random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where `SMALL_PRIMES_PRODUCT` exceeds
/// a u64. It does not include two because we ensure that the candidates are | lazy_static! {
/// The product of the values in SMALL_PRIMES, which allows us
/// to reduce a candidate prime modulo this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng +?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e., set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
} | /// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")] | random_line_split |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng +?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducible values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
//...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
//...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng +?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng +?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependent on the provided random number generator
/// to actually provide random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where `SMALL_PRIMES_PRODUCT` exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES, which allows us
/// to reduce a candidate prime modulo this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng +?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e., set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 | else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
}
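// Illustrative property sketch (not part of the original crate): generated primes have the
// exact requested bit length and are odd by construction. Mirrors the doc example above and
// assumes a `std` build with `rand::thread_rng`; Miller-Rabin only makes the result
// *probably* prime.
#[cfg(all(test, feature = "prime"))]
mod gen_prime_sketch {
    use super::RandPrime;
    use crate::BigUint;
    use num_traits::One;
    use rand::thread_rng;

    #[test]
    fn prime_has_requested_size_and_is_odd() {
        let mut rng = thread_rng();
        let p = rng.gen_prime(256);
        assert_eq!(p.bits(), 256);
        // The last byte gets its low bit set, so candidates (and results) are always odd.
        assert_eq!(&p % &BigUint::from(2u32), BigUint::one());
    }
}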
| {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} | conditional_block |
lib.rs | //!
//! Dynamic, plugin-based [Symbol](https://en.wikipedia.org/wiki/Symbol_(programming)) abstraction.
//!
//! A [Symbol] can be used as an _identifier_ in place of the more primitive workhorse [String].
//! There could be multiple reasons to do so:
//!
//! 1. Mixing of different domains in the same runtime code
//! 2. Handling of naming collisions in multiple namespaces
//! 3. Avoiding memory allocations for statically known namespaces
//! 4. Mix of static and dynamic allocation
//! 5. Associating metadata to the symbols themselves
//!
//! The main use case for symbols is as map keys for in-memory key/value stores.
//!
//! Note that there are probably more reasons _not_ to use symbols than to use them! In most cases, something like
//! `enum` or [String] will do just fine. But sometimes applications process a lot of semi-schematic external input,
//! and you just want Rust to work like any old dynamic programming language again.
//!
//! # Example use cases
//! * Namespaced XML/HTML attributes (in HTML, some are static and some are dynamic. i.e. `data-` attributes)
//! * Key/value stores for "anything"
//! * Some way to abstract away string interners? (this is untested)
//!
//! A [Symbol] is just one plain, non-generic type that can represent all possible symbol values. It implements all the traits needed to make it
//! usable as a key in maps.
//!
//! # What this crate does not do
//! * Serialization and deserialization of symbols. [Symbol] should not implement `serde` traits, ser/de should instead be handled by each namespace.
//! * Provide any namespaces.
//!
//! # Static symbols
//! Static symbols originate from a namespace where all possible values are statically known at compile time.
//! One instance of a static symbol requires no memory allocation.
//!
//! Creating a static namespace:
//!
//! ```
//! use dyn_symbol::*;
//!
//! struct MyStaticNS {
//! symbols: &'static [(&'static str, &'static str)],
//! }
//!
//! const MY_STATIC_NS: MyStaticNS = MyStaticNS {
//! symbols: &[
//! ("foo", "the first symbol!"),
//! ("bar", "the second symbol!")
//! ]
//! };
//!
//! impl dyn_symbol::namespace::Static for MyStaticNS {
//! fn namespace_name(&self) -> &str {
//! "my"
//! }
//!
//! fn symbol_name(&self, id: u32) -> &str {
//! self.symbols[id as usize].0
//! }
//! }
//!
//! // Define (and export) some symbol constants
//! pub const FOO: Symbol = Symbol::Static(&MY_STATIC_NS, 0);
//! pub const BAR: Symbol = Symbol::Static(&MY_STATIC_NS, 1);
//!
//! assert_eq!(FOO, FOO);
//! assert_eq!(FOO.clone(), FOO.clone());
//! assert_ne!(FOO, BAR);
//! assert_eq!(format!("{:?}", FOO), "my::foo");
//!
//! // We can find the originating namespace later:
//! assert!(FOO.downcast_static::<MyStaticNS>().is_some());
//!
//! // To implement special metadata-extraction (or similar functionality) for a namespace:
//! fn get_symbol_description(symbol: &Symbol) -> Result<&'static str, &'static str> {
//! if let Some((namespace, id)) = symbol.downcast_static::<MyStaticNS>() {
//! Ok(namespace.symbols[id as usize].1)
//! } else {
//! Err("not from this namespace :(")
//! }
//! }
//!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et al. use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to decide which symbols are considered equal.
//! The `Eq`/`Hash` symmetry needs to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and considers namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> {... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// its `type_id` may be used as a reflection tool to get to know about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T:'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T:'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self { | f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// This size_of Symbol is computed like this:
// It's at least two words, because of `dyn`.
// It's more than two words because it needs to encode the A/B enum value.
// On a 64-bit arch it should be 3 words, because it contains a `u32` too,
// and that should be encoded within the same machine word as the enum discriminant...
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn test_equality() {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
}
#[test]
fn test_inequality() {
let test_state = TestState::new();
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_B_0);
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym0("bar"));
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym1("foo"));
}
#[test]
fn test_ord() {
assert_ne!(STATIC_A_0.cmp(&STATIC_A_1), Ordering::Equal);
assert_ne!(STATIC_A_0.cmp(&STATIC_B_0), Ordering::Equal);
assert_ne!(STATIC_A_1.cmp(&STATIC_B_0), Ordering::Equal);
}
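    // Illustrative sketch (not part of the original tests): symbols as map keys, the primary
    // use case described in the crate docs. Assumes `std`'s HashMap.
    #[test]
    fn test_symbols_as_map_keys() {
        use std::collections::HashMap;
        let mut map: HashMap<Symbol, &'static str> = HashMap::new();
        map.insert(STATIC_A_0, "static a0");
        map.insert(dynamic::sym0("foo"), "dynamic foo");
        assert_eq!(map.get(&STATIC_A_0), Some(&"static a0"));
        assert_eq!(map.get(&dynamic::sym0("foo")), Some(&"dynamic foo"));
        assert_eq!(map.get(&STATIC_A_1), None);
        assert_eq!(map.get(&dynamic::sym1("foo")), None);
    }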
} | Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!( | random_line_split |
lib.rs | //!
//! Dynamic, plugin-based [Symbol](https://en.wikipedia.org/wiki/Symbol_(programming)) abstraction.
//!
//! A [Symbol] can be used as an _identifier_ in place of the more primitive workhorse [String].
//! There could be multiple reasons to do so:
//!
//! 1. Mixing of different domains in the same runtime code
//! 2. Handling of naming collisions in multiple namespaces
//! 3. Avoiding memory allocations for statically known namespaces
//! 4. Mix of static and dynamic allocation
//! 5. Associating metadata to the symbols themselves
//!
//! The main use case for symbols is as map keys for in-memory key/value stores.
//!
//! Note that there are probably more reasons _not_ to use symbols than to use them! In most cases, something like
//! `enum` or [String] will do just fine. But sometimes applications process a lot of semi-schematic external input,
//! and you just want Rust to work like any old dynamic programming language again.
//!
//! # Example use cases
//! * Namespaced XML/HTML attributes (in HTML, some are static and some are dynamic, e.g. `data-` attributes)
//! * Key/value stores for "anything"
//! * Some way to abstract away string interners? (this is untested)
//!
//! A [Symbol] is just one plain, non-generic type that can represent all possible symbol values. It implements all the traits needed to make it
//! usable as a key in maps.
//!
//! # What this crate does not do
//! * Serialization and deserialization of symbols. [Symbol] should not implement `serde` traits, ser/de should instead be handled by each namespace.
//! * Provide any namespaces.
//!
//! # Static symbols
//! Static symbols originate from a namespace where all possible values are statically known at compile time.
//! One instance of a static symbol requires no memory allocation.
//!
//! Creating a static namespace:
//!
//! ```
//! use dyn_symbol::*;
//!
//! struct MyStaticNS {
//! symbols: &'static [(&'static str, &'static str)],
//! }
//!
//! const MY_STATIC_NS: MyStaticNS = MyStaticNS {
//! symbols: &[
//! ("foo", "the first symbol!"),
//! ("bar", "the second symbol!")
//! ]
//! };
//!
//! impl dyn_symbol::namespace::Static for MyStaticNS {
//! fn namespace_name(&self) -> &str {
//! "my"
//! }
//!
//! fn symbol_name(&self, id: u32) -> &str {
//! self.symbols[id as usize].0
//! }
//! }
//!
//! // Define (and export) some symbol constants
//! pub const FOO: Symbol = Symbol::Static(&MY_STATIC_NS, 0);
//! pub const BAR: Symbol = Symbol::Static(&MY_STATIC_NS, 1);
//!
//! assert_eq!(FOO, FOO);
//! assert_eq!(FOO.clone(), FOO.clone());
//! assert_ne!(FOO, BAR);
//! assert_eq!(format!("{:?}", FOO), "my::foo");
//!
//! // We can find the originating namespace later:
//! assert!(FOO.downcast_static::<MyStaticNS>().is_some());
//!
//! // To implement special metadata-extraction (or similar functionality) for a namespace:
//! fn get_symbol_description(symbol: &Symbol) -> Result<&'static str, &'static str> {
//! if let Some((namespace, id)) = symbol.downcast_static::<MyStaticNS>() {
//! Ok(namespace.symbols[id as usize].1)
//! } else {
//! Err("not from this namespace :(")
//! }
//! }
//!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et al. use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to decide which symbols are considered equal.
//! The `Eq`/`Hash` symmetry needs to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and considers namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> {... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// its `type_id` may be used as a reflection tool to get to know about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T:'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T:'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!(
f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). The hash must be consistent with `dyn_eq`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// The expected size of `Symbol` is computed like this:
// It's at least two words, because of `dyn`.
// It's more than two words because it needs to encode the A/B enum value.
// On a 64-bit arch it should be 3 words, because it contains a `u32` too,
// and that should be encoded within the same machine word as the enum discriminant...
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn test_equality() |
#[test]
fn test_inequality() {
let test_state = TestState::new();
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_B_0);
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym0("bar"));
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym1("foo"));
}
#[test]
fn test_ord() {
assert_ne!(STATIC_A_0.cmp(&STATIC_A_1), Ordering::Equal);
assert_ne!(STATIC_A_0.cmp(&STATIC_B_0), Ordering::Equal);
assert_ne!(STATIC_A_1.cmp(&STATIC_B_0), Ordering::Equal);
}
}
| {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
} | identifier_body |
lib.rs | //!
//! Dynamic, plugin-based [Symbol](https://en.wikipedia.org/wiki/Symbol_(programming)) abstraction.
//!
//! A [Symbol] can be used as an _identifier_ in place of the more primitive workhorse [String].
//! There could be multiple reasons to do so:
//!
//! 1. Mixing of different domains in the same runtime code
//! 2. Handling of naming collisions in multiple namespaces
//! 3. Avoiding memory allocations for statically known namespaces
//! 4. Mix of static and dynamic allocation
//! 5. Associating metadata to the symbols themselves
//!
//! The main use case for symbols is as map keys for in-memory key/value stores.
//!
//! Note that there are probably more reasons _not_ to use symbols than to use them! In most cases, something like
//! `enum` or [String] will do just fine. But sometimes applications process a lot of semi-schematic external input,
//! and you just want Rust to work like any old dynamic programming language again.
//!
//! # Example use cases
//! * Namespaced XML/HTML attributes (in HTML, some are static and some are dynamic, i.e. `data-` attributes)
//! * Key/value stores for "anything"
//! * Some way to abstract away string interners? (this is untested)
//!
//! A [Symbol] is just one plain, non-generic type, that can represent all possible symbol values. It implements all traits to make it
//! usable as a key in maps.
//!
//! # What this crate does not do
//! * Serialization and deserialization of symbols. [Symbol] should not implement `serde` traits, ser/de should instead be handled by each namespace.
//! * Provide any namespaces.
//!
//! # Static symbols
//! Static symbols originate from a namespace where all possible values are statically known at compile time.
//! One instance of a static symbol requires no memory allocation.
//!
//! Creating a static namespace:
//!
//! ```
//! use dyn_symbol::*;
//!
//! struct MyStaticNS {
//! symbols: &'static [(&'static str, &'static str)],
//! }
//!
//! const MY_STATIC_NS: MyStaticNS = MyStaticNS {
//! symbols: &[
//! ("foo", "the first symbol!"),
//! ("bar", "the second symbol!")
//! ]
//! };
//!
//! impl dyn_symbol::namespace::Static for MyStaticNS {
//! fn namespace_name(&self) -> &str {
//! "my"
//! }
//!
//! fn symbol_name(&self, id: u32) -> &str {
//! self.symbols[id as usize].0
//! }
//! }
//!
//! // Define (and export) some symbol constants
//! pub const FOO: Symbol = Symbol::Static(&MY_STATIC_NS, 0);
//! pub const BAR: Symbol = Symbol::Static(&MY_STATIC_NS, 1);
//!
//! assert_eq!(FOO, FOO);
//! assert_eq!(FOO.clone(), FOO.clone());
//! assert_ne!(FOO, BAR);
//! assert_eq!(format!("{:?}", FOO), "my::foo");
//!
//! // We can find the originating namespace later:
//! assert!(FOO.downcast_static::<MyStaticNS>().is_some());
//!
//! // To implement special metadata-extraction (or similar functionality) for a namespace:
//! fn get_symbol_description(symbol: &Symbol) -> Result<&'static str, &'static str> {
//! if let Some((namespace, id)) = symbol.downcast_static::<MyStaticNS>() {
//! Ok(namespace.symbols[id as usize].1)
//! } else {
//! Err("not from this namespace :(")
//! }
//! }
//!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et al. use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
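//!
//! For illustration, a small hand-rolled macro along these lines could generate that
//! boilerplate (a sketch only; this crate does not ship such a macro):
//!
//! ```
//! use dyn_symbol::*;
//!
//! // Hypothetical helper macro, shown only to illustrate the generated boilerplate.
//! macro_rules! static_namespace {
//!     ($ty:ident, $ns_name:literal, [$($sym:literal),* $(,)?]) => {
//!         struct $ty;
//!         impl namespace::Static for $ty {
//!             fn namespace_name(&self) -> &str { $ns_name }
//!             fn symbol_name(&self, id: u32) -> &str { [$($sym),*][id as usize] }
//!         }
//!     };
//! }
//!
//! static_namespace!(ColorNs, "color", ["red", "green"]);
//! const COLOR_NS: ColorNs = ColorNs;
//! const RED: Symbol = Symbol::Static(&COLOR_NS, 0);
//! const GREEN: Symbol = Symbol::Static(&COLOR_NS, 1);
//!
//! assert_eq!(format!("{:?}", RED), "color::red");
//! assert_eq!(format!("{:?}", GREEN), "color::green");
//! ```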
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to decide which symbols are considered equal.
//! The `Eq`/`Hash` symmetry needs to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
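//!
//! Because [Symbol] implements [Eq], [Ord] and [Hash](std::hash::Hash) across both variants,
//! static and dynamic symbols can be mixed freely as map keys. A sketch (not compiled here),
//! reusing the `FOO` constant and the `DynamicNS` type from the examples above:
//!
//! ```ignore
//! use std::collections::HashMap;
//!
//! let mut map: HashMap<Symbol, u32> = HashMap::new();
//! map.insert(FOO, 1);
//! map.insert(Symbol::Dynamic(Box::new(DynamicNS("foo".into()))), 2);
//!
//! // Same name, different namespaces: the keys never collide.
//! assert_eq!(map.len(), 2);
//! ```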
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and considers namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> {... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// Its `type_id` may be used as a reflection tool to learn about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T:'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T:'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!(
f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). The hash must be consistent with `dyn_eq`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// The expected size of `Symbol` is computed like this:
// It's at least two words, because of `dyn`.
// It's more than two words because it needs to encode the A/B enum value.
// On a 64-bit arch it should be 3 words, because it contains a `u32` too,
// and that should be encoded within the same machine word as the enum discriminant...
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn | () {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
}
#[test]
fn test_inequality() {
let test_state = TestState::new();
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_B_0);
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym0("bar"));
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym1("foo"));
}
#[test]
fn test_ord() {
assert_ne!(STATIC_A_0.cmp(&STATIC_A_1), Ordering::Equal);
assert_ne!(STATIC_A_0.cmp(&STATIC_B_0), Ordering::Equal);
assert_ne!(STATIC_A_1.cmp(&STATIC_B_0), Ordering::Equal);
}
}
| test_equality | identifier_name |
mod.rs | use futures::{select, StreamExt};
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc::{self, UnboundedSender};
mod trace;
use crate::protocol::{Message, MessageBody, TryIntoMessage};
use crate::{Broker, Error, ErrorKind, Task};
use trace::{build_tracer, TraceBuilder, TracerTrait};
#[derive(Copy, Clone, Default)]
struct TaskOptions {
timeout: Option<usize>,
max_retries: Option<usize>,
min_retry_delay: usize,
max_retry_delay: usize,
}
impl TaskOptions {
fn overrides<T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker +'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker +'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker +'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration.
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asynchronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker +'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task +'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Tries converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() {
self.broker.decrease_prefetch_count().await?;
}
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status { | },
};
}
if pending_tasks > 0 {
// Warm shutdown loop. When there are still pending tasks we wait for them
// to finish. We get updates about pending tasks through the `event_rx` channel.
// We also watch for a second SIGINT, in which case we immediately shutdown.
info!("Waiting on {} pending tasks...", pending_tasks);
loop {
select! {
_ = signals.next() => {
warn!("Okay fine, shutting down now. See ya!");
return Err(ErrorKind::ForcedShutdown.into());
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
if pending_tasks <= 0 {
break;
}
}
},
};
}
}
info!("No more pending tasks. See ya!");
Ok(())
}
} | TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
} | random_line_split |
mod.rs | use futures::{select, StreamExt};
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc::{self, UnboundedSender};
mod trace;
use crate::protocol::{Message, MessageBody, TryIntoMessage};
use crate::{Broker, Error, ErrorKind, Task};
use trace::{build_tracer, TraceBuilder, TracerTrait};
#[derive(Copy, Clone, Default)]
struct TaskOptions {
timeout: Option<usize>,
max_retries: Option<usize>,
min_retry_delay: usize,
max_retry_delay: usize,
}
impl TaskOptions {
fn overrides<T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker +'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker +'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker +'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration.
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asynchronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker +'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task +'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Tries converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() |
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
}
},
};
}
if pending_tasks > 0 {
// Warm shutdown loop. When there are still pending tasks we wait for them
// to finish. We get updates about pending tasks through the `event_rx` channel.
// We also watch for a second SIGINT, in which case we immediately shutdown.
info!("Waiting on {} pending tasks...", pending_tasks);
loop {
select! {
_ = signals.next() => {
warn!("Okay fine, shutting down now. See ya!");
return Err(ErrorKind::ForcedShutdown.into());
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
if pending_tasks <= 0 {
break;
}
}
},
};
}
}
info!("No more pending tasks. See ya!");
Ok(())
}
}
| {
self.broker.decrease_prefetch_count().await?;
} | conditional_block |
mod.rs | use futures::{select, StreamExt};
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc::{self, UnboundedSender};
mod trace;
use crate::protocol::{Message, MessageBody, TryIntoMessage};
use crate::{Broker, Error, ErrorKind, Task};
use trace::{build_tracer, TraceBuilder, TracerTrait};
#[derive(Copy, Clone, Default)]
struct TaskOptions {
timeout: Option<usize>,
max_retries: Option<usize>,
min_retry_delay: usize,
max_retry_delay: usize,
}
impl TaskOptions {
fn | <T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker +'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker +'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker +'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration.
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asynchronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker +'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task +'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Tries converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() {
self.broker.decrease_prefetch_count().await?;
}
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
}
},
};
}
if pending_tasks > 0 {
// Warm shutdown loop. When there are still pending tasks we wait for them
// to finish. We get updates about pending tasks through the `event_rx` channel.
// We also watch for a second SIGINT, in which case we immediately shutdown.
info!("Waiting on {} pending tasks...", pending_tasks);
loop {
select! {
_ = signals.next() => {
warn!("Okay fine, shutting down now. See ya!");
return Err(ErrorKind::ForcedShutdown.into());
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
if pending_tasks <= 0 {
break;
}
}
},
};
}
}
info!("No more pending tasks. See ya!");
Ok(())
}
}
| overrides | identifier_name |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler +'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler +'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures on top of the `TestServer` in your
/// tests.
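///
/// # Example
///
/// ```rust,ignore
/// // Sketch (not compiled here): `test_server` is a running `TestServer` as in the example
/// // above, and `run_future` comes from the `crate::test::Server` trait.
/// let (tx, rx) = tokio::sync::oneshot::channel();
/// test_server.spawn(async move { tx.send(42).unwrap(); });
/// assert_eq!(test_server.run_future(rx).unwrap(), 42);
/// ```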
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send +'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`] can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with its own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`Handler`](crate::handler::Handler) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler +'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler +'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
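// Trust only the CA certificate bundled with the test suite, so the self-signed test server certificate verifies.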
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() |
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
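// Two one-shot channels: the first hands a value to the spawned future, the second returns its result to the test via run_future.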
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_adds_client_address_to_state() {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
}
| {
test::common_tests::async_echo(TestServer::new, TestServer::client)
} | identifier_body |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures on top of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`] can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created, and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with its own runtime and therefore
/// doesn't crash when used inside another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::new_with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn | () {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
}
| async_test_server_adds_client_address_to_state | identifier_name |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures on top of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`] can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created, and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with its own runtime and therefore
/// doesn't crash when used inside another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::new_with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") | else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_adds_client_address_to_state() {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
}
| {
tcp.connected().negotiated_h2()
} | conditional_block |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures on top of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`] can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created, and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with its own runtime and therefore
/// doesn't crash when used inside another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::new_with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr; | Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_adds_client_address_to_state() {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
} |
async move {
match TcpStream::connect(address).await { | random_line_split |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
//.with_gl(gl_request)
//.with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
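// No window-backed default framebuffer exists in headless mode, so render into an offscreen framebuffer instead.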
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the.bin file(s) referenced by the.gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
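// The bounding-box diagonal serves as a size heuristic: it drives the camera distance and the near/far clip planes below.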
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
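// glReadPixels returns rows bottom-up, so flip the image vertically before saving.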
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
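// Rotate the model by an equal step before each shot so `count` screenshots cover one full turn.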
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event,.. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
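// Glutin reports logical (DPI-independent) sizes; convert to physical pixels before resizing the GL surface and camera aspect ratio.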
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed,..} => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
}, | },
WindowEvent::MouseInput { button, state: Released,..} => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position,.. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical),.. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines),.. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input,.. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
} | _ => ()
} | random_line_split |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
//.with_gl(gl_request)
//.with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn | (source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the.bin file(s) referenced by the.gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, .. } => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
},
_ => ()
}
},
WindowEvent::MouseInput { button, state: Released, .. } => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
}
| load | identifier_name |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
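///
/// A minimal usage sketch (illustrative only; the file name, window size and the
/// pre-built `camera_options` value here are assumptions, not part of this crate):
/// ```ignore
/// // render off-screen with an invisible window and save one frame
/// let mut viewer = GltfViewer::new("model.gltf", 800, 600,
/// false /* headless */, false /* visible */,
/// camera_options, 0 /* scene_index */);
/// viewer.screenshot("model.png");
/// ```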
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
//.with_gl(gl_request)
//.with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the.bin file(s) referenced by the.gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else | ;
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent { event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, .. } => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
},
_ => ()
}
},
WindowEvent::MouseInput { button, state: Released, .. } => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
}
| {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
} | conditional_block |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
//.with_gl(gl_request)
//.with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the.bin file(s) referenced by the.gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
| WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, .. } => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
},
_ => ()
}
},
WindowEvent::MouseInput { button, state: Released, .. } => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
}
| {
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
}, | identifier_body |
cq_reactor.rs | //! The Completion Queue Reactor. Functions like any other async/await reactor, but is driven by
//! IRQs triggering wakeups in order to poll NVME completion queues (see `CompletionFuture`).
//!
//! While the reactor is primarily intended to wait for IRQs and then poll completion queues, it
//! can also be used for notifying when a full submission queue can submit a new command (see
//! `AvailableSqEntryFuture`).
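//!
//! A rough usage sketch (assumptions: `nvme` is an `Arc<Nvme>` whose reactor thread was
//! started via `start_cq_reactor_thread`, submission queue 1 exists, and `build_cmd` is a
//! hypothetical `FnOnce(CmdId) -> NvmeCmd` closure; none of these names are defined here):
//! ```ignore
//! let entry: NvmeComp = CompletionFuture {
//! state: CompletionFutureState::PendingSubmission {
//! cmd_init: build_cmd,
//! nvme: &nvme,
//! sq_id: 1,
//! },
//! }.await;
//! // `entry` is the raw completion queue entry for the submitted command.
//! ```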
use std::convert::TryFrom;
use std::fs::File;
use std::future::Future;
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::{mem, task, thread};
use syscall::data::Event;
use syscall::Result;
use crossbeam_channel::Receiver; | use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
}
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id,.. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id,.. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrupts)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn start_cq_reactor_thread(
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
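// A sketch of such a setup (hypothetical; `split_interrupt_sources` does not exist in this
// module and assumes the interrupt sources could be divided into per-thread groups):
//
// for int_group in split_interrupt_sources(interrupt_sources) {
// let nvme = Arc::clone(&nvme);
// let receiver = receiver.clone(); // crossbeam channels are MPMC, so receivers clone cheaply
// thread::spawn(move || CqReactor::new(nvme, int_group, receiver).unwrap().run());
// }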
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState<'a, F> {
// the future is in its initial state: the command has not been submitted yet, and no interest
// has been registered. this state will repeat until a free submission queue entry appears to
// it, which it probably will, since queues aren't supposed to be nearly always full.
PendingSubmission {
cmd_init: F,
nvme: &'a Nvme,
sq_id: SqId,
},
PendingCompletion {
nvme: &'a Nvme,
cq_id: CqId,
cmd_id: CmdId,
sq_id: SqId,
message: Arc<Mutex<Option<CompletionMessage>>>,
},
Finished,
Placeholder,
}
pub struct CompletionFuture<'a, F> {
pub state: CompletionFutureState<'a, F>,
}
// enum not self-referential
impl<F> Unpin for CompletionFuture<'_, F> {}
impl<F> Future for CompletionFuture<'_, F>
where
F: FnOnce(CmdId) -> NvmeCmd,
{
type Output = NvmeComp;
fn poll(self: Pin<&mut Self>, context: &mut task::Context) -> task::Poll<Self::Output> {
let this = &mut self.get_mut().state;
match mem::replace(this, CompletionFutureState::Placeholder) {
CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id } => {
let sqs_read_guard = nvme.submission_queues.read().unwrap();
let &(ref sq_lock, cq_id) = sqs_read_guard
.get(&sq_id)
.expect("nvmed: internal error: given SQ for SQ ID not there");
let mut sq_guard = sq_lock.lock().unwrap();
let sq = &mut *sq_guard;
if sq.is_full() {
// when the CQ reactor gets a new completion queue entry, it'll lock the
// submission queue it came from. since we're holding the same lock, this
// message will always be sent before the reactor is done with the entry.
nvme.reactor_sender.send(NotifReq::RequestAvailSubmission { sq_id, waker: context.waker().clone() }).unwrap();
*this = CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id };
return task::Poll::Pending;
}
let cmd_id =
u16::try_from(sq.tail).expect("nvmed: internal error: CQ has more than 2^16 entries");
let tail = sq.submit_unchecked(cmd_init(cmd_id));
let tail = u16::try_from(tail).unwrap();
// make sure that we register interest before the reactor can get notified
let message = Arc::new(Mutex::new(None));
*this = CompletionFutureState::PendingCompletion { nvme, cq_id, cmd_id, sq_id, message: Arc::clone(&message), };
nvme.reactor_sender.send(NotifReq::RequestCompletion { cq_id, sq_id, cmd_id, message, waker: context.waker().clone() }).expect("reactor dead");
unsafe { nvme.submission_queue_tail(sq_id, tail) };
task::Poll::Pending
}
CompletionFutureState::PendingCompletion {
message,
cq_id,
cmd_id,
sq_id,
nvme,
} => {
if let Some(value) = message.lock().unwrap().take() {
*this = CompletionFutureState::Finished;
return task::Poll::Ready(value.cq_entry);
}
nvme.reactor_sender.send(NotifReq::RequestCompletion {
cq_id,
sq_id,
cmd_id,
waker: context.waker().clone(),
message: Arc::clone(&message),
}).expect("reactor dead");
*this = CompletionFutureState::PendingCompletion { message, cq_id, cmd_id, sq_id, nvme };
task::Poll::Pending
}
CompletionFutureState::Finished => {
panic!("calling poll() on an already finished CompletionFuture")
}
CompletionFutureState::Placeholder => unreachable!(),
}
}
} | random_line_split |
|
cq_reactor.rs | //! The Completion Queue Reactor. Functions like any other async/await reactor, but is driven by
//! IRQs triggering wakeups in order to poll NVME completion queues (see `CompletionFuture`).
//!
//! While the reactor is primarily intended to wait for IRQs and then poll completion queues, it
//! can also be used for notifying when a full submission queue can submit a new command (see
//! `AvailableSqEntryFuture`).
use std::convert::TryFrom;
use std::fs::File;
use std::future::Future;
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::{mem, task, thread};
use syscall::data::Event;
use syscall::Result;
use crossbeam_channel::Receiver;
use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
}
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id,.. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id,.. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrupts)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn | (
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState<'a, F> {
// the future is in its initial state: the command has not been submitted yet, and no interest
// has been registered. this state will repeat until a free submission queue entry appears to
// it, which it probably will, since queues aren't supposed to be nearly always full.
PendingSubmission {
cmd_init: F,
nvme: &'a Nvme,
sq_id: SqId,
},
PendingCompletion {
nvme: &'a Nvme,
cq_id: CqId,
cmd_id: CmdId,
sq_id: SqId,
message: Arc<Mutex<Option<CompletionMessage>>>,
},
Finished,
Placeholder,
}
pub struct CompletionFuture<'a, F> {
pub state: CompletionFutureState<'a, F>,
}
// enum not self-referential
impl<F> Unpin for CompletionFuture<'_, F> {}
impl<F> Future for CompletionFuture<'_, F>
where
F: FnOnce(CmdId) -> NvmeCmd,
{
type Output = NvmeComp;
fn poll(self: Pin<&mut Self>, context: &mut task::Context) -> task::Poll<Self::Output> {
let this = &mut self.get_mut().state;
match mem::replace(this, CompletionFutureState::Placeholder) {
CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id } => {
let sqs_read_guard = nvme.submission_queues.read().unwrap();
let &(ref sq_lock, cq_id) = sqs_read_guard
.get(&sq_id)
.expect("nvmed: internal error: given SQ for SQ ID not there");
let mut sq_guard = sq_lock.lock().unwrap();
let sq = &mut *sq_guard;
if sq.is_full() {
// when the CQ reactor gets a new completion queue entry, it'll lock the
// submission queue it came from. since we're holding the same lock, this
// message will always be sent before the reactor is done with the entry.
nvme.reactor_sender.send(NotifReq::RequestAvailSubmission { sq_id, waker: context.waker().clone() }).unwrap();
*this = CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id };
return task::Poll::Pending;
}
let cmd_id =
u16::try_from(sq.tail).expect("nvmed: internal error: CQ has more than 2^16 entries");
let tail = sq.submit_unchecked(cmd_init(cmd_id));
let tail = u16::try_from(tail).unwrap();
// make sure that we register interest before the reactor can get notified
let message = Arc::new(Mutex::new(None));
*this = CompletionFutureState::PendingCompletion { nvme, cq_id, cmd_id, sq_id, message: Arc::clone(&message), };
nvme.reactor_sender.send(NotifReq::RequestCompletion { cq_id, sq_id, cmd_id, message, waker: context.waker().clone() }).expect("reactor dead");
unsafe { nvme.submission_queue_tail(sq_id, tail) };
task::Poll::Pending
}
CompletionFutureState::PendingCompletion {
message,
cq_id,
cmd_id,
sq_id,
nvme,
} => {
if let Some(value) = message.lock().unwrap().take() {
*this = CompletionFutureState::Finished;
return task::Poll::Ready(value.cq_entry);
}
nvme.reactor_sender.send(NotifReq::RequestCompletion {
cq_id,
sq_id,
cmd_id,
waker: context.waker().clone(),
message: Arc::clone(&message),
}).expect("reactor dead");
*this = CompletionFutureState::PendingCompletion { message, cq_id, cmd_id, sq_id, nvme };
task::Poll::Pending
}
CompletionFutureState::Finished => {
panic!("calling poll() on an already finished CompletionFuture")
}
CompletionFutureState::Placeholder => unreachable!(),
}
}
}
| start_cq_reactor_thread | identifier_name |
cq_reactor.rs | //! The Completion Queue Reactor. Functions like any other async/await reactor, but is driven by
//! IRQs triggering wakeups in order to poll NVME completion queues (see `CompletionFuture`).
//!
//! While the reactor is primarily intended to wait for IRQs and then poll completion queues, it
//! can also be used for notifying when a full submission queue can submit a new command (see
//! `AvailableSqEntryFuture`).
use std::convert::TryFrom;
use std::fs::File;
use std::future::Future;
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::{mem, task, thread};
use syscall::data::Event;
use syscall::Result;
use crossbeam_channel::Receiver;
use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> |
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id,.. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id,.. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrupts)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn start_cq_reactor_thread(
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads: the channel is MPMC and
// everything is properly synchronized. That isn't strictly required, but with multiple
// completion queues it might actually be worth considering. The (in-kernel) IRQ subsystem
// still has some room for improvement when it comes to latency, but MSI-X allows multiple
// vectors to point to different CPUs, so the load can be balanced across the logical
// processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState<'a, F> {
// the future is in its initial state: the command has not been submitted yet, and no interest
// has been registered. this state repeats until a free submission queue entry becomes
// available, which it eventually should, since queues aren't supposed to be full most of the time.
PendingSubmission {
cmd_init: F,
nvme: &'a Nvme,
sq_id: SqId,
},
PendingCompletion {
nvme: &'a Nvme,
cq_id: CqId,
cmd_id: CmdId,
sq_id: SqId,
message: Arc<Mutex<Option<CompletionMessage>>>,
},
Finished,
Placeholder,
}
pub struct CompletionFuture<'a, F> {
pub state: CompletionFutureState<'a, F>,
}
// enum not self-referential
impl<F> Unpin for CompletionFuture<'_, F> {}
impl<F> Future for CompletionFuture<'_, F>
where
F: FnOnce(CmdId) -> NvmeCmd,
{
type Output = NvmeComp;
fn poll(self: Pin<&mut Self>, context: &mut task::Context) -> task::Poll<Self::Output> {
let this = &mut self.get_mut().state;
match mem::replace(this, CompletionFutureState::Placeholder) {
CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id } => {
let sqs_read_guard = nvme.submission_queues.read().unwrap();
let &(ref sq_lock, cq_id) = sqs_read_guard
.get(&sq_id)
.expect("nvmed: internal error: given SQ for SQ ID not there");
let mut sq_guard = sq_lock.lock().unwrap();
let sq = &mut *sq_guard;
if sq.is_full() {
// when the CQ reactor gets a new completion queue entry, it'll lock the
// submission queue it came from. since we're holding the same lock, this
// message will always be sent before the reactor is done with the entry.
nvme.reactor_sender.send(NotifReq::RequestAvailSubmission { sq_id, waker: context.waker().clone() }).unwrap();
*this = CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id };
return task::Poll::Pending;
}
let cmd_id =
u16::try_from(sq.tail).expect("nvmed: internal error: SQ has more than 2^16 entries");
let tail = sq.submit_unchecked(cmd_init(cmd_id));
let tail = u16::try_from(tail).unwrap();
// make sure that we register interest before the reactor can get notified
let message = Arc::new(Mutex::new(None));
*this = CompletionFutureState::PendingCompletion { nvme, cq_id, cmd_id, sq_id, message: Arc::clone(&message), };
nvme.reactor_sender.send(NotifReq::RequestCompletion { cq_id, sq_id, cmd_id, message, waker: context.waker().clone() }).expect("reactor dead");
unsafe { nvme.submission_queue_tail(sq_id, tail) };
task::Poll::Pending
}
CompletionFutureState::PendingCompletion {
message,
cq_id,
cmd_id,
sq_id,
nvme,
} => {
if let Some(value) = message.lock().unwrap().take() {
*this = CompletionFutureState::Finished;
return task::Poll::Ready(value.cq_entry);
}
nvme.reactor_sender.send(NotifReq::RequestCompletion {
cq_id,
sq_id,
cmd_id,
waker: context.waker().clone(),
message: Arc::clone(&message),
}).expect("reactor dead");
*this = CompletionFutureState::PendingCompletion { message, cq_id, cmd_id, sq_id, nvme };
task::Poll::Pending
}
CompletionFutureState::Finished => {
panic!("calling poll() on an already finished CompletionFuture")
}
CompletionFutureState::Placeholder => unreachable!(),
}
}
}
| {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
} | identifier_body |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() |
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
}
}
| {
for v in d.visuals() {
return (v.visual_id(), depth);
}
} | conditional_block |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() {
for v in d.visuals() {
return (v.visual_id(), depth);
}
}
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
| impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
}
} | self.pos = (x as i16, y as i16);
}
}
| random_line_split |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() {
for v in d.visuals() {
return (v.visual_id(), depth);
}
}
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn | (&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
}
}
| get_atom | identifier_name |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() {
for v in d.visuals() {
return (v.visual_id(), depth);
}
}
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) |
}
| {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
} | identifier_body |
lib.rs | that frame has
//! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped
//! when that method returns.
//!
//! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure,
//! it's possible to access the contents of modules and create new values that can be used by
//! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several
//! methods are available to create new values.
//!
//! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first
//! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia
//! code. The most important thing to know about the [`Scope`] trait for now is that it's used
//! by functions that create new values to ensure the result is rooted. Mutable references to
//! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in
//! that frame, so the result is protected from garbage collection until the frame is dropped when
//! that scope ends.
//!
//! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple
//! function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is
//! importing installed packages by evaluating an `import` or `using` statement. A more
//! interesting method, [`Value::new`], can be used with data of any type that implements
//! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type
//! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents
//! of a Julia value.
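//!
//! For example, loading an installed package boils down to evaluating a `using` statement. The
//! package name below is arbitrary and only serves to illustrate the call:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! # fn main() {
//! # let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|_global, frame| unsafe {
//!     Value::eval_string(&mut *frame, "using LinearAlgebra")?.into_jlrs_result()?;
//!     Ok(())
//! }).unwrap();
//! # }
//! ```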
//!
//! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call
//! anything that implements [`Call`] as a Julia function; `Value` implements this trait because
//! any Julia value is potentially callable as a function. Functions can be called with any number
//! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and
//! the trait methods of `Call` are unsafe. It's trivial to write a function like
//! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and
//! call it with these methods.
//!
//! As a simple example, let's convert two numbers to Julia values and add them:
//!
//! ```no_run
//! use jlrs::prelude::*;
//!
//! # fn main() {
//! // Initializing Julia is unsafe because it can race with another crate that does
//! // the same.
//! let mut julia = unsafe { Julia::init().unwrap() };
//! let res = julia.scope(|global, frame| {
//! // Create the two arguments. Note that the first argument, something that
//! // implements Scope, is taken by value and mutable references don't implement
//! // Copy, so it's necessary to mutably reborrow the frame.
//! let i = Value::new(&mut *frame, 2u64)?;
//! let j = Value::new(&mut *frame, 1u32)?;
//!
//! // The `+` function can be found in the base module.
//! let func = Module::base(global).function(&mut *frame, "+")?;
//!
//! // Call the function and unbox the result as a `u64`. The result of the function
//! // call is a nested `Result`; the outer error doesn't contain any Julia
//! // data, while the inner error contains the exception if one is thrown. Here the
//! // exception is converted to the outer error type by calling `into_jlrs_result`, this new
//! // error contains the error message Julia would have shown. Colors can be enabled by
//! // calling `Julia::error_color`.
//! unsafe {
//! func.call2(&mut *frame, i, j)?
//! .into_jlrs_result()?
//! .unbox::<u64>()
//! }
//! }).unwrap();
//!
//! assert_eq!(res, 3);
//! # }
//! ```
//!
//! Many more features are available, including creating and accessing n-dimensional Julia arrays
//! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`]
//! and [`wrappers`] modules.
//!
//!
//! ## Calling Rust from Julia
//!
//! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust, for most
//! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`, with a pointer to
//! the function or a `(:function, "library")` pair.
//!
//! A function can be cast to a void pointer and converted to a [`Value`]:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! // This function will be provided to Julia as a pointer, so its name can be mangled.
//! unsafe extern "C" fn call_me(arg: bool) -> isize {
//! if arg {
//! 1
//! } else {
//! -1
//! }
//! }
//!
//! # fn main() {
//! let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|global, frame| unsafe {
//! // Cast the function to a void pointer
//! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?;
//!
//! // Value::eval_string can be used to create new functions.
//! let func = Value::eval_string(
//! &mut *frame,
//! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)"
//! )?.unwrap();
//!
//! // Call the function and unbox the result.
//! let output = func.call1(&mut *frame, call_me_val)?
//! .into_jlrs_result()?
//! .unbox::<isize>()?;
//!
//! assert_eq!(output, 1);
//!
//! Ok(())
//! }).unwrap();
//! # }
//! ```
//!
//! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such
//! a library you need to add
//!
//! ```toml
//! [lib]
//! crate-type = ["dylib"]
//! ```
//!
//! or
//!
//! ```toml
//! [lib]
//! crate-type = ["cdylib"]
//! ```
//!
//! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other
//! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better
//! choice. On Linux, compiling such a crate produces `lib<crate_name>.so`.
//!
//! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C
//! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find
//! libraries in directories that are either on the default library search path or included by
//! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not
//! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers
//! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous
//! example.
//!
//! If the library is visible to Julia you can access it with the library name. If `call_me` is
//! defined in a crate called `foo`, the following should work if the function is annotated with
//! `#[no_mangle]`:
//!
//! ```julia
//! ccall((:call_me, "libfoo"), Int, (Bool,), false)
//! ```
//!
//! One important aspect of calling Rust from other languages in general is that panicking across
//! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it
//! with `std::panic::catch_unwind`.
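//!
//! For instance, a `ccall`able function can catch panics and report them through its return
//! value. This sketch only uses the standard library; the function name and the error convention
//! (returning `-1`) are made up for the example:
//!
//! ```no_run
//! use std::panic;
//!
//! #[no_mangle]
//! pub extern "C" fn checked_sum(a: i32, b: i32) -> i32 {
//!     // Catch any panic so it can't unwind across the FFI boundary.
//!     match panic::catch_unwind(|| a.checked_add(b).expect("overflow")) {
//!         Ok(sum) => sum,
//!         Err(_) => -1, // report failure with a sentinel value instead of unwinding
//!     }
//! }
//! ```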
//!
//! Most features provided by jlrs including accessing modules, calling functions, and borrowing
//! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`]
//! first. Another method provided by [`CCall`] is [`CCall::uv_async_send`]; this method can be
//! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able
//! function that does its actual work on another thread, return early and `wait` on the async
//! condition, which happens when [`CCall::uv_async_send`] is called when that work is finished.
//! The advantage of this is that the long-running function will not block the Julia runtime.
//! There's an example available on GitHub that shows how to do this.
//!
//!
//! ## Async runtime
//!
//! The async runtime runs Julia in a separate thread and returns a handle that can be shared
//! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can
//! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task`
//! has not completed, the runtime can switch to another task. To use this feature you must enable
//! the `async` feature flag:
//!
//! ```toml
//! [dependencies]
//! jlrs = { version = "0.12", features = ["async"] }
//! ```
//!
//! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in
//! two ways, either as a blocking task or as a thread. The first way should be used if you want
//! to integrate the async runtime into a larger project that uses `async_std`.
//!
//! The easiest way to interact with Julia when using the async runtime is by using
//! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first
//! example and call it. While this closure has not completed the runtime is blocked and the
//! methods that schedule a function call as a new Julia `Task` can't be used.
//!
//! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or
//! [`GeneratorTask`] trait. An `AsyncTask` can be called once; its async `run` method replaces
//! the closure and takes a `Global` and a mutable reference to an [`AsyncGcFrame`]. The
//! `AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to
//! call the methods of the [`CallAsync`] trait. These methods schedule the function call on
//! another thread and return a `Future`. While awaiting the result the runtime can handle another
//! task.
//!
//! A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async
//! `init` method. This method is called when the `GeneratorTask` is created and can be used to
//! prepare the initial state of the task. The frame provided to `init` is not dropped after this
//! method returns, which means this initial state can contain Julia data. Whenever a
//! `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be
//! used to call the `GeneratorTask`, which calls its `run` method once. A `GeneratorHandle` can
//! be cloned and shared across threads.
//!
//! You can find basic examples that show how to implement these traits in
//! [the examples directory of the repo].
//!
//!
//! # Testing
//!
//! The restriction that Julia can be initialized once must be taken into account when running
//! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`:
//!
//! ```no_run
//! use jlrs::prelude::*;
//! use std::cell::RefCell;
//! thread_local! {
//! pub static JULIA: RefCell<Julia> = {
//! let julia = RefCell::new(unsafe { Julia::init().unwrap() });
//! julia.borrow_mut().scope(|_global, _frame| {
//! /* include everything you need to use */
//! Ok(())
//! }).unwrap();
//! julia
//! };
//! }
//! ```
//!
//! Tests that use this construct can only use one thread for testing, so you must use
//! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test
//! tries to call `Julia::init` a second time from another thread.
//!
//! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment
//! variable must be set to a value larger than 2.
//!
//! If you want to run jlrs's tests, both these requirements must be taken into account:
//! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1`
//!
//!
//! # Custom types
//!
//! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`],
//! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can
//! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`].
//!
//! You should normally not need to implement these structs or traits manually. The JlrsReflect.jl
//! package can generate the correct Rust struct and automatically derive the supported traits for
//! types that have no tuple or union fields with type parameters. The reason for this restriction
//! is that the layout of tuple and union fields can be very different depending on these
//! parameters in a way that can't be expressed in Rust.
//!
//! These custom types can also be used when you call Rust from Julia with `ccall`.
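//!
//! As a rough illustration, a Julia bits type such as `struct Point x::Float64; y::Float64 end`
//! maps to a `#[repr(C)]` Rust struct that derives the traits listed above. Any extra attributes
//! that JlrsReflect.jl would normally emit (for example to record the Julia type name) are left
//! out here, so treat this as a sketch rather than generated code:
//!
//! ```ignore
//! #[repr(C)]
//! #[derive(Clone, Copy, Unbox, ValidLayout, Typecheck, IntoJulia)]
//! struct Point {
//!     x: f64,
//!     y: f64,
//! }
//! ```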
//!
//! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html
//! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10
//! [the examples directory of the repo]: https://github.com/Taaitaaiger/jlrs/tree/master/examples
//! [`IntoJulia`]: crate::convert::into_julia::IntoJulia
//! [`Typecheck`]: crate::layout::typecheck::Typecheck
//! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout
//! [`Unbox`]: crate::convert::unbox::Unbox
//! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync
//! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame
//! [`Frame`]: crate::memory::frame::Frame
//! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask
//! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask
//! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle
//! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia
//! [`CallAsync`]: crate::extensions::multitask::call_async::CallAsync
//! [`DataType`]: crate::wrappers::ptr::datatype::DataType
//! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray
//! [`Output`]: crate::memory::output::Output
//! [`OutputScope`]: crate::memory::output::OutputScope
//! [`ScopeExt`]: crate::memory::scope::ScopeExt
//! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope
//! [`Scope`]: crate::memory::scope::Scope
//! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope
//! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope
#![forbid(rustdoc::broken_intra_doc_links)]
pub mod convert;
pub mod error;
pub mod extensions;
pub mod info;
pub mod layout;
pub mod memory;
pub mod prelude;
pub(crate) mod private;
#[doc(hidden)]
pub mod util;
pub mod wrappers;
use convert::into_jlrs_result::IntoJlrsResult;
use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE};
use info::Info;
#[cfg(feature = "uv")]
use jl_sys::uv_async_send;
use jl_sys::{
jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image,
jl_is_initialized,
};
use memory::frame::{GcFrame, NullFrame};
use memory::global::Global;
use memory::mode::Sync;
use memory::stack_page::StackPage;
use prelude::Wrapper;
use private::Private;
use std::ffi::CString;
use std::io::{Error as IOError, ErrorKind};
use std::mem::{self, MaybeUninit};
use std::path::Path;
use std::ptr::null_mut;
use std::slice;
use std::sync::atomic::{AtomicBool, Ordering};
use wrappers::ptr::module::Module;
use wrappers::ptr::string::JuliaString;
use wrappers::ptr::value::Value;
use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _};
pub(crate) static INIT: AtomicBool = AtomicBool::new(false);
pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl");
/// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`]
/// before you can do anything related to Julia. While this struct exists Julia is active,
/// dropping it causes the shutdown code to be called; afterwards Julia cannot be reinitialized in the same process.
pub struct Julia {
page: StackPage,
}
impl Julia {
/// Initialize Julia, this method can only be called once. If it's called a second time it
/// will return an error. If this struct is dropped, you will need to restart your program to
/// be able to call Julia code again.
///
/// This method is unsafe because it can race with another crate initializing Julia.
pub unsafe fn init() -> JlrsResult<Self> {
if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) {
return Err(JlrsError::AlreadyInitialized.into());
}
jl_init();
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// This method is similar to [`Julia::init`] except that it loads a custom system image. A
/// custom image can be generated with the [`PackageCompiler`] package for Julia. The main
/// advantage of using a custom image over the default one is that it allows you to avoid much
/// of the compilation overhead often associated with Julia.
///
/// Two arguments are required to call this method compared to [`Julia::init`]:
/// `julia_bindir` and `image_path`. The first must be the absolute path to a
/// directory that contains a compatible Julia binary (e.g. `${JULIA_DIR}/bin`), the second must
/// be either an absolute or a relative path to a system image.
///
/// This method will return an error if either of the two paths doesn't exist or if Julia
/// has already been initialized. It is unsafe because it can race with another crate
/// initializing Julia.
///
/// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/
pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>(
julia_bindir: P,
image_path: Q,
) -> JlrsResult<Self> {
if INIT.swap(true, Ordering::SeqCst) {
Err(JlrsError::AlreadyInitialized)?;
}
let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string();
let image_path_str = image_path.as_ref().to_string_lossy().to_string();
if !julia_bindir.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str);
return Err(JlrsError::other(io_err))?;
}
if !image_path.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, image_path_str);
return Err(JlrsError::other(io_err))?;
}
let bindir = CString::new(julia_bindir_str).unwrap();
let im_rel_path = CString::new(image_path_str).unwrap();
jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr());
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// Enable or disable colored error messages originating from Julia. If this is enabled the
/// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is
/// disabled by default.
pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> {
self.scope(|global, _frame| unsafe {
let enable = if enable {
Value::true_v(global)
} else {
Value::false_v(global)
};
Module::main(global)
.submodule_ref("Jlrs")?
.wrapper_unchecked()
.global_ref("color")?
.value_unchecked()
.set_field_unchecked("x", enable)?;
Ok(())
})?;
Ok(())
}
/// Calls `include` in the `Main` module in Julia, which executes the file's contents in that
/// module. This has the same effect as calling `include` in the Julia REPL.
///
/// Example:
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.include("Path/To/MyJuliaCode.jl").unwrap();
/// # }
/// ```
pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> {
if path.as_ref().exists() {
return self.scope_with_slots(2, |global, frame| unsafe {
let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?;
let include_func = Module::main(global)
.function_ref("include")?
.wrapper_unchecked();
let res = include_func.call1(frame, path_jl_str)?;
return match res {
Ok(_) => Ok(()),
Err(e) => Err(JlrsError::IncludeError {
path: path.as_ref().to_string_lossy().into(),
msg: e.display_string_or(CANNOT_DISPLAY_VALUE),
})?,
};
});
}
Err(JlrsError::IncludeNotFound {
path: path.as_ref().to_string_lossy().into(),
})?
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope(|_global, frame| {
/// let _i = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results. The frame will preallocate `slots` slots.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope_with_slots(1, |_global, frame| {
/// // Uses the preallocated slot
/// let _i = Value::new(&mut *frame, 1u64)?;
/// // Allocates a new slot, because only a single slot was preallocated
/// let _j = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
if slots + 2 > self.page.size() {
self.page = StackPage::new(slots + 2);
}
let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync);
func(global, &mut frame)
}
}
/// Provides access to global information.
pub fn info(&self) -> Info {
Info::new()
}
}
impl Drop for Julia {
fn drop(&mut self) {
unsafe {
jl_atexit_hook(0);
}
}
}
/// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to
/// initialize it again would cause a crash. In order to still be able to call Julia from Rust
/// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to
/// create a frame first. You can use this struct to do so. It must never be used outside
/// functions called through `ccall`, and only once for each `ccall`ed function.
///
/// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`].
/// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the
/// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at
/// all.
pub struct CCall {
page: Option<StackPage>,
}
impl CCall {
/// Create a new `CCall`. This function must never be called outside a function called through
/// `ccall` from Julia and must only be called once during that call. The stack is not
/// allocated until a [`GcFrame`] is created.
pub unsafe fn new() -> Self {
CCall { page: None }
}
/// Wake the task associated with `handle`. The handle must be the `handle` field of a
/// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from
/// Julia with ccall in another thread and wait for it to complete in Julia without blocking.
/// There's an example available in the repository: ccall_with_threads.
///
/// This method is only available if the `uv` feature is enabled.
#[cfg(feature = "uv")]
pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool {
uv_async_send(handle.cast()) == 0
}
/// Creates a [`GcFrame`], calls the given closure, and returns its result.
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
// The stack used to manage the GC is allocated lazily, the first time a frame is created.
let page = self.page.get_or_insert_with(StackPage::default);
let mut frame = GcFrame::new(page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
|
//! .z/lib/julia` must also be
//! added to `LD_LIBRARY_PATH`.
//!
//! #### Windows
//!
//! If you want to use jlrs on Windows you must use WSL. An installation guide to install WSL on
//! Windows can be found [on Microsoft's website]. After installing a Linux distribution, follow
//! the installation instructions for Linux.
//!
//!
//! # Using this crate
//!
//! The first thing you should do is `use` the [`prelude`]-module with an asterisk; this brings
//! all the structs and traits you're likely to need into scope. When embedding Julia, it
//! must be initialized before it can be used. You can do this by calling [`Julia::init`] which
//! returns an instance of [`Julia`]. Note that this method can only be called once while the
//! application is running; if you drop it you won't be able to create a new instance but will
//! have to restart the application. If you want to use a custom system image, you must call
//! [`Julia::init_with_image`] instead of `Julia::init`. If you're calling Rust from Julia,
//! everything has already been initialized and you can use `CCall` instead. If you want to use the
//! async runtime, one of the initialization methods of [`AsyncJulia`] must be used.
//!
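//! As a minimal sketch, embedding Julia looks like this (the closure-based `scope` API used
//! below is covered in the next section):
//!
//! ```no_run
//! use jlrs::prelude::*;
//!
//! fn main() {
//!     // Safety: this must be the only crate that initializes Julia in this process.
//!     let mut julia = unsafe { Julia::init().unwrap() };
//!
//!     julia.scope(|_global, frame| {
//!         let _v = Value::new(&mut *frame, 1u64)?;
//!         Ok(())
//!     }).unwrap();
//!
//!     // When `julia` is dropped, Julia is shut down and can't be initialized again.
//! }
//! ```
//!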
//!
//! ## Calling Julia from Rust
//!
//! After initialization you have an instance of [`Julia`]; [`Julia::include`] can be used to
//! include files with custom Julia code. In order to call Julia functions and create new values
//! that can be used by these functions, [`Julia::scope`] and [`Julia::scope_with_slots`] must be
//! used. These two methods take a closure with two arguments, a [`Global`] and a mutable
//! reference to a [`GcFrame`]. `Global` is a token that is used to access Julia modules, their
//! contents and other global values, while `GcFrame` is used to root local values. Rooting a
//! value in a frame prevents it from being freed by the garbage collector until that frame has
//! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped
//! when that method returns.
//!
//! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure,
//! it's possible to access the contents of modules and create new values that can be used by
//! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several
//! methods are available to create new values.
//!
//! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first
//! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia
//! code. The most important thing to know about the [`Scope`] trait for now is that it's used
//! by functions that create new values to ensure the result is rooted. Mutable references to
//! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in
//! that frame, so the result is protected from garbage collection until the frame is dropped when
//! that scope ends.
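//!
//! As a small sketch of how these pieces fit together (assuming `julia` is the instance created
//! above), evaluating a snippet of Julia code and unboxing the result looks like this:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! # fn main() {
//! # let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|_global, frame| unsafe {
//!     // The result of the evaluated expression is rooted in the current frame.
//!     let v = Value::eval_string(&mut *frame, "sqrt(2.0)")?.into_jlrs_result()?;
//!     let x = v.unbox::<f64>()?;
//!     assert_eq!(x, 2.0f64.sqrt());
//!     Ok(())
//! }).unwrap();
//! # }
//! ```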
//!
//! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple
//! function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is
//! importing installed packages by evaluating an `import` or `using` statement. A more
//! interesting method, [`Value::new`], can be used with data of any type that implements
//! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type
//! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents
//! of a Julia value.
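//!
//! For example (a minimal sketch, again assuming an initialized `julia`), a value can make a
//! round trip from Rust to Julia and back with [`Value::new`] and [`Unbox`]:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! # fn main() {
//! # let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|_global, frame| unsafe {
//!     // `u8` implements `IntoJulia`, so it can be converted to a rooted `Value`...
//!     let v = Value::new(&mut *frame, 42u8)?;
//!     // ...and `Unbox` converts it back to a Rust value.
//!     let x = v.unbox::<u8>()?;
//!     assert_eq!(x, 42);
//!     Ok(())
//! }).unwrap();
//! # }
//! ```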
//!
//! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call
//! anything that implements [`Call`] as a Julia function. `Value` implements this trait because
//! any Julia value is potentially callable as a function. Functions can be called with any number
//! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and
//! the trait methods of `Call` are unsafe. It's trivial to write a function like
//! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and
//! call it with these methods.
//!
//! As a simple example, let's convert two numbers to Julia values and add them:
//!
//! ```no_run
//! use jlrs::prelude::*;
//!
//! # fn main() {
//! // Initializing Julia is unsafe because it can race with another crate that does
//! // the same.
//! let mut julia = unsafe { Julia::init().unwrap() };
//! let res = julia.scope(|global, frame| {
//! // Create the two arguments. Note that the first argument, something that
//! // implements Scope, is taken by value and mutable references don't implement
//! // Copy, so it's necessary to mutably reborrow the frame.
//! let i = Value::new(&mut *frame, 2u64)?;
//! let j = Value::new(&mut *frame, 1u32)?;
//!
//! // The `+` function can be found in the base module.
//! let func = Module::base(global).function(&mut *frame, "+")?;
//!
//! // Call the function and unbox the result as a `u64`. The result of the function
//! // call is a nested `Result`; the outer error doesn't contain any Julia
//! // data, while the inner error contains the exception if one is thrown. Here the
//! // exception is converted to the outer error type by calling `into_jlrs_result`, this new
//! // error contains the error message Julia would have shown. Colors can be enabled by
//! // calling `Julia::error_color`.
//! unsafe {
//! func.call2(&mut *frame, i, j)?
//! .into_jlrs_result()?
//! .unbox::<u64>()
//! }
//! }).unwrap();
//!
//! assert_eq!(res, 3);
//! # }
//! ```
//!
//! Many more features are available, including creating and accessing n-dimensional Julia arrays
//! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`]
//! and [`wrappers`] modules.
//!
//!
//! ## Calling Rust from Julia
//!
//! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust; for most
//! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`: with a pointer to
//! the function or a `(:function, "library")` pair.
//!
//! A function can be cast to a void pointer and converted to a [`Value`]:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! // This function will be provided to Julia as a pointer, so its name can be mangled.
//! unsafe extern "C" fn call_me(arg: bool) -> isize {
//! if arg {
//! 1
//! } else {
//! -1
//! }
//! }
//!
//! # fn main() {
//! let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|global, frame| unsafe {
//! // Cast the function to a void pointer
//! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?;
//!
//! // Value::eval_string can be used to create new functions.
//! let func = Value::eval_string(
//! &mut *frame,
//! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)"
//! )?.unwrap();
//!
//! // Call the function and unbox the result.
//! let output = func.call1(&mut *frame, call_me_val)?
//! .into_jlrs_result()?
//! .unbox::<isize>()?;
//!
//! assert_eq!(output, 1);
//!
//! Ok(())
//! }).unwrap();
//! # }
//! ```
//!
//! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such
//! a library you need to add
//!
//! ```toml
//! [lib]
//! crate-type = ["dylib"]
//! ```
//!
//! or
//!
//! ```toml
//! [lib]
//! crate-type = ["cdylib"]
//! ```
//!
//! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other
//! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better
//! choice. On Linux, such a crate is compiled to `lib<crate_name>.so`.
//!
//! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C
//! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find
//! libraries in directories that are either on the default library search path or included by
//! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not
//! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers
//! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous
//! example.
//!
//! If the library is visible to Julia you can access it with the library name. If `call_me` is
//! defined in a crate called `foo`, the following should work if the function is annotated with
//! `#[no_mangle]`:
//!
//! ```julia
//! ccall((:call_me, "libfoo"), Int, (Bool,), false)
//! ```
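//!
//! On the Rust side, that corresponds to an exported function along these lines (a sketch; the
//! crate is assumed to be called `foo`):
//!
//! ```
//! // Exported with the C ABI and an unmangled symbol name so Julia can find it.
//! #[no_mangle]
//! pub extern "C" fn call_me(arg: bool) -> isize {
//!     if arg {
//!         1
//!     } else {
//!         -1
//!     }
//! }
//! # fn main() {}
//! ```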
//!
//! One important aspect of calling Rust from other languages in general is that panicking across
//! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it
//! with `std::panic::catch_unwind`.
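//!
//! For example, a `ccall`able function could be guarded like this (a sketch; returning `0` on a
//! panic is an arbitrary choice):
//!
//! ```
//! use std::panic::catch_unwind;
//!
//! #[no_mangle]
//! pub extern "C" fn checked_div(a: i64, b: i64) -> i64 {
//!     // Division by zero panics in Rust; catch the panic instead of unwinding into Julia.
//!     catch_unwind(|| a / b).unwrap_or(0)
//! }
//! # fn main() {}
//! ```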
//!
//! Most features provided by jlrs including accessing modules, calling functions, and borrowing
//! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`]
//! first. Another method provided by [`CCall`] is [`CCall::uv_async_send`]; this method can be
//! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able
//! function that does its actual work on another thread and returns early, while Julia `wait`s on
//! the async condition, which is woken when [`CCall::uv_async_send`] is called after that work is
//! finished. The advantage of this is that the long-running function will not block the Julia
//! runtime. There's an example available on GitHub that shows how to do this.
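//!
//! A sketch of that pattern (the `uv` feature is required for [`CCall::uv_async_send`], and the
//! handle is assumed to be the `handle` field of a `Base.AsyncCondition` passed in from Julia):
//!
//! ```ignore
//! use std::ffi::c_void;
//! use std::thread;
//!
//! use jlrs::CCall;
//!
//! #[no_mangle]
//! pub unsafe extern "C" fn start_work(handle: *mut c_void) {
//!     // Send the pointer as an integer so the closure is `Send`; the caller must guarantee
//!     // that the async condition outlives the spawned thread.
//!     let handle = handle as usize;
//!     thread::spawn(move || {
//!         // ... do the long-running work here ...
//!         // Wake the Julia task that is `wait`ing on the async condition.
//!         unsafe { CCall::uv_async_send(handle as *mut c_void) };
//!     });
//! }
//! ```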
//!
//!
//! ## Async runtime
//!
//! The async runtime runs Julia in a separate thread and returns a handle that can be shared
//! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can
//! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task`
//! has not completed, the runtime can switch to another task. To use this feature you must enable
//! the `async` feature flag:
//!
//! ```toml
//! [dependencies]
//! jlrs = { version = "0.12", features = ["async"] }
//! ```
//!
//! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in
//! two ways, either as a blocking task or as a thread. The first way should be used if you want
//! to integrate the async runtime into a larger project that uses `async_std`.
//!
//! The easiest way to interact with Julia when using the async runtime is by using
//! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first
//! example and call it. While this closure has not completed, the runtime is blocked and the
//! methods that schedule a function call as a new Julia `Task` can't be used.
//!
//! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or
//! [`GeneratorTask`] trait. An `AsyncTask` can be called once; its async `run` method replaces
//! the closure. This method takes a `Global` and a mutable reference to an [`AsyncGcFrame`]. The
//! `AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to
//! call the methods of the [`CallAsync`] trait. These methods schedule the function call on
//! another thread and return a `Future`. While awaiting the result the runtime can handle another
//! task.
//!
//! A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async
//! `init` method. This method is called when the `GeneratorTask` is created and can be used to
//! prepare the initial state of the task. The frame provided to `init` is not dropped after this
//! method returns, which means this initial state can contain Julia data. Whenever a
//! `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be
//! used to call the `GeneratorTask`, which calls its `run` method once. A `GeneratorHandle` can
//! be cloned and shared across threads.
//!
//! You can find basic examples that show how to implement these traits in
//! [the examples directory of the repo].
//!
//!
//! # Testing
//!
//! The restriction that Julia can only be initialized once must be taken into account when running
//! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`:
//!
//! ```no_run
//! use jlrs::prelude::*;
//! use std::cell::RefCell;
//! thread_local! {
//! pub static JULIA: RefCell<Julia> = {
//! let julia = RefCell::new(unsafe { Julia::init().unwrap() });
//! julia.borrow_mut().scope(|_global, _frame| {
//! /* include everything you need to use */
//! Ok(())
//! }).unwrap();
//! julia
//! };
//! }
//! ```
//!
//! Tests that use this construct can only use one thread for testing, so you must use
//! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test
//! tries to call `Julia::init` a second time from another thread.
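//!
//! A test can then borrow the instance from that thread-local (a minimal sketch):
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! # use std::cell::RefCell;
//! # thread_local! {
//! #     pub static JULIA: RefCell<Julia> = RefCell::new(unsafe { Julia::init().unwrap() });
//! # }
//! #[test]
//! fn create_a_value() {
//!     JULIA.with(|j| {
//!         let mut julia = j.borrow_mut();
//!         julia.scope(|_global, frame| {
//!             let _v = Value::new(&mut *frame, 1u64)?;
//!             Ok(())
//!         }).unwrap();
//!     });
//! }
//! # fn main() {}
//! ```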
//!
//! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment
//! variable must be set to a value larger than 2.
//!
//! If you want to run jlrs's tests, both these requirements must be taken into account:
//! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1`
//!
//!
//! # Custom types
//!
//! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`],
//! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can
//! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`].
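//!
//! As a rough sketch of what such a mapping looks like (in practice this code, including any
//! extra attributes the derives need, is generated by JlrsReflect.jl):
//!
//! ```ignore
//! use jlrs::prelude::*;
//!
//! // Assumed Julia counterpart: an immutable struct with two `Float64` fields.
//! #[repr(C)]
//! #[derive(Clone, Copy, Debug, Unbox, ValidLayout, Typecheck, IntoJulia)]
//! struct Point {
//!     x: f64,
//!     y: f64,
//! }
//! ```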
//!
//! You should normally not need to write these structs or implement these traits manually. The JlrsReflect.jl
//! package can generate the correct Rust struct and automatically derive the supported traits for
//! types that have no tuple or union fields with type parameters. The reason for this restriction
//! is that the layout of tuple and union fields can be very different depending on these
//! parameters in a way that can't be expressed in Rust.
//!
//! These custom types can also be used when you call Rust from Julia with `ccall`.
//!
//! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html
//! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10
//! [the examples directory of the repo]: https://github.com/Taaitaaiger/jlrs/tree/master/examples
//! [`IntoJulia`]: crate::convert::into_julia::IntoJulia
//! [`Typecheck`]: crate::layout::typecheck::Typecheck
//! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout
//! [`Unbox`]: crate::convert::unbox::Unbox
//! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync
//! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame
//! [`Frame`]: crate::memory::frame::Frame
//! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask
//! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask
//! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle
//! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia
//! [`CallAsync`]: crate::extensions::multitask::call_async::CallAsync
//! [`DataType`]: crate::wrappers::ptr::datatype::DataType
//! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray
//! [`Output`]: crate::memory::output::Output
//! [`OutputScope`]: crate::memory::output::OutputScope
//! [`ScopeExt`]: crate::memory::scope::ScopeExt
//! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope
//! [`Scope`]: crate::memory::scope::Scope
//! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope
//! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope
#![forbid(rustdoc::broken_intra_doc_links)]
pub mod convert;
pub mod error;
pub mod extensions;
pub mod info;
pub mod layout;
pub mod memory;
pub mod prelude;
pub(crate) mod private;
#[doc(hidden)]
pub mod util;
pub mod wrappers;
use convert::into_jlrs_result::IntoJlrsResult;
use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE};
use info::Info;
#[cfg(feature = "uv")]
use jl_sys::uv_async_send;
use jl_sys::{
jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image,
jl_is_initialized,
};
use memory::frame::{GcFrame, NullFrame};
use memory::global::Global;
use memory::mode::Sync;
use memory::stack_page::StackPage;
use prelude::Wrapper;
use private::Private;
use std::ffi::CString;
use std::io::{Error as IOError, ErrorKind};
use std::mem::{self, MaybeUninit};
use std::path::Path;
use std::ptr::null_mut;
use std::slice;
use std::sync::atomic::{AtomicBool, Ordering};
use wrappers::ptr::module::Module;
use wrappers::ptr::string::JuliaString;
use wrappers::ptr::value::Value;
use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _};
pub(crate) static INIT: AtomicBool = AtomicBool::new(false);
pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl");
/// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`]
/// before you can do anything related to Julia. While this struct exists Julia is active;
/// dropping it causes the shutdown code to be called, but this doesn't leave Julia in a state
/// from which it can be reinitialized.
pub struct Julia {
page: StackPage,
}
impl Julia {
/// Initialize Julia; this method can only be called once. If it's called a second time it
/// will return an error. If this struct is dropped, you will need to restart your program to
/// be able to call Julia code again.
///
/// This method is unsafe because it can race with another crate initializing Julia.
pub unsafe fn init() -> JlrsResult<Self> {
if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) {
return Err(JlrsError::AlreadyInitialized.into());
}
jl_init();
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// This method is similar to [`Julia::init`] except that it loads a custom system image. A
/// custom image can be generated with the [`PackageCompiler`] package for Julia. The main
/// advantage of using a custom image over the default one is that it allows you to avoid much
/// of the compilation overhead often associated with Julia.
///
/// Compared to [`Julia::init`], two arguments are required to call this method:
/// `julia_bindir` and `image_path`. The first must be the absolute path to a
/// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must
/// be either an absolute or a relative path to a system image.
///
/// This method will return an error if either of the two paths doesn't exist or if Julia
/// has already been initialized. It is unsafe because it can race with another crate
/// initializing Julia.
///
/// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/
pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>(
julia_bindir: P,
image_path: Q,
) -> JlrsResult<Self> {
if INIT.swap(true, Ordering::SeqCst) {
Err(JlrsError::AlreadyInitialized)?;
}
let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string();
let image_path_str = image_path.as_ref().to_string_lossy().to_string();
if !julia_bindir.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str);
return Err(JlrsError::other(io_err))?;
}
if !image_path.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, image_path_str);
return Err(JlrsError::other(io_err))?;
}
let bindir = CString::new(julia_bindir_str).unwrap();
let im_rel_path = CString::new(image_path_str).unwrap();
jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr());
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// Enable or disable colored error messages originating from Julia. If this is enabled the
/// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is
/// disabled by default.
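///
/// Example (a minimal sketch):
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.error_color(true).unwrap();
/// # }
/// ```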
pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> {
self.scope(|global, _frame| unsafe {
let enable = if enable {
Value::true_v(global)
} else {
Value::false_v(global)
};
Module::main(global)
.submodule_ref("Jlrs")?
.wrapper_unchecked()
.global_ref("color")?
.value_unchecked()
.set_field_unchecked("x", enable)?;
Ok(())
})?;
Ok(())
}
/// Calls `include` in the `Main` module in Julia, which executes the file's contents in that
/// module. This has the same effect as calling `include` in the Julia REPL.
///
/// Example:
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.include("Path/To/MyJuliaCode.jl").unwrap();
/// # }
/// ```
pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> {
if path.as_ref().exists() {
return self.scope_with_slots(2, |global, frame| unsafe {
let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?;
let include_func = Module::main(global)
.function_ref("include")?
.wrapper_unchecked();
let res = include_func.call1(frame, path_jl_str)?;
return match res {
Ok(_) => Ok(()),
Err(e) => Err(JlrsError::IncludeError {
path: path.as_ref().to_string_lossy().into(),
msg: e.display_string_or(CANNOT_DISPLAY_VALUE),
})?,
};
});
}
Err(JlrsError::IncludeNotFound {
path: path.as_ref().to_string_lossy().into(),
})?
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope(|_global, frame| {
/// let _i = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary | /// results. The frame will preallocate `slots` slots.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope_with_slots(1, |_global, frame| {
/// // Uses the preallocated slot
/// let _i = Value::new(&mut *frame, 1u64)?;
/// // Allocates a new slot, because only a single slot was preallocated
/// let _j = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
if slots + 2 > self.page.size() {
self.page = StackPage::new(slots + 2);
}
let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync);
func(global, &mut frame)
}
}
/// Provides access to global information.
pub fn info(&self) -> Info {
Info::new()
}
}
impl Drop for Julia {
fn drop(&mut self) {
unsafe {
jl_atexit_hook(0);
}
}
}
lib.rs | `GcFrame` is used to root local values. Rooting a
//! value in a frame prevents it from being freed by the garbage collector until that frame has
//! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped
//! when that method returns.
//!
//! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure,
//! it's possible to access the contents of modules and create new values that can be used by
//! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several
//! methods are available to create new values.
//!
//! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first
//! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia
//! code. The most important thing to know about the [`Scope`] trait for now is that it's used
//! by functions that create new values to ensure the result is rooted. Mutable references to
//! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in
//! that frame, so the result is protected from garbage collection until the frame is dropped when
//! that scope ends.
//!
//! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple
//! function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is
//! importing installed packages by evaluating an `import` or `using` statement. A more
//! interesting method, [`Value::new`], can be used with data of any type that implements
//! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type
//! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents
//! of a Julia value.
//!
//! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call
//! anything that implements [`Call`] as a Julia function, `Value` implements this trait because
//! any Julia value is potentially callable as a function. Functions can be called with any number
//! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and
//! the trait methods of `Call` are all unsafe. It's trivial to write a function like
//! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and
//! call it with these methods.
//!
//! As a simple example, let's convert two numbers to Julia values and add them:
//!
//! ```no_run
//! use jlrs::prelude::*;
//!
//! # fn main() {
//! // Initializing Julia is unsafe because it can race with another crate that does
//! // the same.
//! let mut julia = unsafe { Julia::init().unwrap() };
//! let res = julia.scope(|global, frame| {
//! // Create the two arguments. Note that the first argument, something that
//! // implements Scope, is taken by value and mutable references don't implement
//! // Copy, so it's necessary to mutably reborrow the frame.
//! let i = Value::new(&mut *frame, 2u64)?;
//! let j = Value::new(&mut *frame, 1u32)?;
//!
//! // The `+` function can be found in the base module.
//! let func = Module::base(global).function(&mut *frame, "+")?;
//!
//! // Call the function and unbox the result as a `u64`. The result of the function
//! // call is a nested `Result`; the outer error doesn't contain to any Julia
//! // data, while the inner error contains the exception if one is thrown. Here the
//! // exception is converted to the outer error type by calling `into_jlrs_result`, this new
//! // error contains the error message Julia would have shown. Colors can be enabled by
//! // calling `Julia::error_color`.
//! unsafe {
//! func.call2(&mut *frame, i, j)?
//! .into_jlrs_result()?
//! .unbox::<u64>()
//! }
//! }).unwrap();
//!
//! assert_eq!(res, 3);
//! # }
//! ```
//!
//! Many more features are available, including creating and accessing n-dimensional Julia arrays
//! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`]
//! and [`wrappers`] modules.
//!
//!
//! ## Calling Rust from Julia
//!
//! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust, for most
//! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`, with a pointer to
//! the function or a `(:function, "library")` pair.
//!
//! A function can be cast to a void pointer and converted to a [`Value`]:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! // This function will be provided to Julia as a pointer, so its name can be mangled.
//! unsafe extern "C" fn call_me(arg: bool) -> isize {
//! if arg {
//! 1
//! } else {
//! -1
//! }
//! }
//!
//! # fn main() {
//! let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|global, frame| unsafe {
//! // Cast the function to a void pointer
//! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?;
//!
//! // Value::eval_string can be used to create new functions.
//! let func = Value::eval_string(
//! &mut *frame,
//! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)"
//! )?.unwrap();
//!
//! // Call the function and unbox the result.
//! let output = func.call1(&mut *frame, call_me_val)?
//! .into_jlrs_result()?
//! .unbox::<isize>()?;
//!
//! assert_eq!(output, 1);
//!
//! Ok(())
//! }).unwrap();
//! # }
//! ```
//!
//! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such
//! a library you need to add
//!
//! ```toml
//! [lib]
//! crate-type = ["dylib"]
//! ```
//!
//! or
//!
//! ```toml
//! [lib]
//! crate-type = ["cdylib"]
//! ```
//!
//! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other
//! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better
//! choice. On Linux, compiling such a crate will be compiled to `lib<crate_name>.so`.
//!
//! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C
//! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find
//! libraries in directories that are either on the default library search path or included by
//! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not
//! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers
//! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous
//! example.
//!
//! If the library is visible to Julia you can access it with the library name. If `call_me` is
//! defined in a crate called `foo`, the following should work if the function is annotated with
//! `#[no_mangle]`:
//!
//! ```julia
//! ccall((:call_me, "libfoo"), Int, (Bool,), false)
//! ```
//!
//! One important aspect of calling Rust from other languages in general is that panicking across
//! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it
//! with `std::panic::catch_unwind`.
//!
//! Most features provided by jlrs including accessing modules, calling functions, and borrowing
//! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`]
//! first. Another method provided by [`CCall`] is [`CCall::uv_async_send`], this method can be
//! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able
//! function that does its actual work on another thread, return early and `wait` on the async
//! condition, which happens when [`CCall::uv_async_send`] is called when that work is finished.
//! The advantage of this is that the long-running function will not block the Julia runtime,
//! There's an example available on GitHub that shows how to do this.
//!
//!
//! ## Async runtime
//!
//! The async runtime runs Julia in a separate thread and returns a handle that can be shared
//! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can
//! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task`
//! has not completed, the runtime can switch to another task. To use this feature you must enable
//! the `async` feature flag:
//!
//! ```toml
//! [dependencies]
//! jlrs = { version = "0.12", features = ["async"] }
//! ```
//!
//! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in
//! two ways, either as a blocking task or as a thread. The first way should be used if you want
//! to integrate the async runtime into a larger project that uses `async_std`.
//!
//! The easiest way to interact with Julia when using the async runtime is by using
//! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first
//! example and call it. While this closure has not completed the runtime is blocked and the
//! methods that schedule a function call as a new Julia `Task` can't be used.
//!
//! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or
//! [`GeneratorTask`] trait. An `AsyncTask` can be called once; its async `run` method replaces
//! the closure. This method takes a `Global` and a mutable reference to an [`AsyncGcFrame`]. The
//! `AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to
//! call the methods of the [`CallAsync`] trait. These methods schedule the function call on
//! another thread and return a `Future`. While awaiting the result the runtime can handle another
//! task.
//!
//! A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async
//! `init` method. This method is called when the `GeneratorTask` is created and can be used to
//! prepare the initial state of the task. The frame provided to `init` is not dropped after this
//! method returns, which means this initial state can contain Julia data. Whenever a
//! `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be
//! used to call the `GeneratorTask`, which calls its `run` method once. A `GeneratorHandle` can
//! be cloned and shared across threads.
//!
//! You can find basic examples that show how to implement these traits in
//! [the examples directory of the repo].
//!
//!
//! # Testing
//!
//! The restriction that Julia can be initialized once must be taken into account when running
//! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`:
//!
//! ```no_run
//! use jlrs::prelude::*;
//! use std::cell::RefCell;
//! thread_local! {
//! pub static JULIA: RefCell<Julia> = {
//! let julia = RefCell::new(unsafe { Julia::init().unwrap() });
//! julia.borrow_mut().scope(|_global, _frame| {
//! /* include everything you need to use */
//! Ok(())
//! }).unwrap();
//! julia
//! };
//! }
//! ```
//!
//! Tests that use this construct can only use one thread for testing, so you must use
//! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test
//! tries to call `Julia::init` a second time from another thread.
//!
//! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment
//! variable must be set to a value larger than 2.
//!
//! If you want to run jlrs's tests, both these requirements must be taken into account:
//! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1`
//!
//!
//! # Custom types
//!
//! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`],
//! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can
//! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`].
//!
//! You should normally not need to write these structs or implement these traits manually. The
//! JlrsReflect.jl package can generate the correct Rust struct and automatically derive the
//! supported traits for types that have no tuple or union fields with type parameters. The
//! reason for this restriction
//! is that the layout of tuple and union fields can be very different depending on these
//! parameters in a way that can't be expressed in Rust.
//!
//! These custom types can also be used when you call Rust from Julia with `ccall`.
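//!
//! As a purely illustrative sketch, a binding generated for a simple Julia struct might look
//! roughly like this (the exact derives and attributes come from JlrsReflect.jl's output; the
//! `Point` type and its fields are hypothetical):
//!
//! ```ignore
//! #[repr(C)]
//! #[derive(Clone, Debug, Unbox, ValidLayout, Typecheck, IntoJulia)]
//! struct Point {
//!     x: f64,
//!     y: f64,
//! }
//! ```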
//!
//! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html
//! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10
//! [the examples directory of the repo]: https://github.com/Taaitaaiger/jlrs/tree/master/examples
//! [`IntoJulia`]: crate::convert::into_julia::IntoJulia
//! [`Typecheck`]: crate::layout::typecheck::Typecheck
//! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout
//! [`Unbox`]: crate::convert::unbox::Unbox
//! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync
//! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame
//! [`Frame`]: crate::memory::frame::Frame
//! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask
//! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask
//! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle
//! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia
//! [`CallAsync`]: crate::extensions::multitask::call_async::CallAsync
//! [`DataType`]: crate::wrappers::ptr::datatype::DataType
//! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray
//! [`Output`]: crate::memory::output::Output
//! [`OutputScope`]: crate::memory::output::OutputScope
//! [`ScopeExt`]: crate::memory::scope::ScopeExt
//! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope
//! [`Scope`]: crate::memory::scope::Scope
//! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope
//! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope
#![forbid(rustdoc::broken_intra_doc_links)]
pub mod convert;
pub mod error;
pub mod extensions;
pub mod info;
pub mod layout;
pub mod memory;
pub mod prelude;
pub(crate) mod private;
#[doc(hidden)]
pub mod util;
pub mod wrappers;
use convert::into_jlrs_result::IntoJlrsResult;
use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE};
use info::Info;
#[cfg(feature = "uv")]
use jl_sys::uv_async_send;
use jl_sys::{
jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image,
jl_is_initialized,
};
use memory::frame::{GcFrame, NullFrame};
use memory::global::Global;
use memory::mode::Sync;
use memory::stack_page::StackPage;
use prelude::Wrapper;
use private::Private;
use std::ffi::CString;
use std::io::{Error as IOError, ErrorKind};
use std::mem::{self, MaybeUninit};
use std::path::Path;
use std::ptr::null_mut;
use std::slice;
use std::sync::atomic::{AtomicBool, Ordering};
use wrappers::ptr::module::Module;
use wrappers::ptr::string::JuliaString;
use wrappers::ptr::value::Value;
use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _};
pub(crate) static INIT: AtomicBool = AtomicBool::new(false);
pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl");
/// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`]
/// before you can do anything related to Julia. While this struct exists Julia is active;
/// dropping it causes the shutdown code to be called, but this doesn't leave Julia in a state
/// from which it can be reinitialized.
pub struct Julia {
page: StackPage,
}
impl Julia {
/// Initialize Julia, this method can only be called once. If it's called a second time it
/// will return an error. If this struct is dropped, you will need to restart your program to
/// be able to call Julia code again.
///
/// This method is unsafe because it can race with another crate initializing Julia.
pub unsafe fn init() -> JlrsResult<Self> {
if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) {
return Err(JlrsError::AlreadyInitialized.into());
}
jl_init();
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// This method is similar to [`Julia::init`] except that it loads a custom system image. A
/// custom image can be generated with the [`PackageCompiler`] package for Julia. The main
/// advantage of using a custom image over the default one is that it allows you to avoid much
/// of the compilation overhead often associated with Julia.
///
/// Compared to [`Julia::init`], two arguments are required to call this method:
/// `julia_bindir` and `image_path`. The first must be the absolute path to a
/// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must
/// be either an absolute or a relative path to a system image.
///
/// This method will return an error if either of the two paths doesn't exist or if Julia
/// has already been initialized. It is unsafe because it can race with another crate
/// initializing Julia.
///
/// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/
pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>(
julia_bindir: P,
image_path: Q,
) -> JlrsResult<Self> {
if INIT.swap(true, Ordering::SeqCst) {
Err(JlrsError::AlreadyInitialized)?;
}
let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string();
let image_path_str = image_path.as_ref().to_string_lossy().to_string();
if !julia_bindir.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str);
return Err(JlrsError::other(io_err))?;
}
if !image_path.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, image_path_str);
return Err(JlrsError::other(io_err))?;
}
let bindir = CString::new(julia_bindir_str).unwrap();
let im_rel_path = CString::new(image_path_str).unwrap();
jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr());
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// Enable or disable colored error messages originating from Julia. If this is enabled the
/// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is
/// disabled by default.
pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> {
self.scope(|global, _frame| unsafe {
let enable = if enable {
Value::true_v(global)
} else {
Value::false_v(global)
};
Module::main(global)
.submodule_ref("Jlrs")?
.wrapper_unchecked()
.global_ref("color")?
.value_unchecked()
.set_field_unchecked("x", enable)?;
Ok(())
})?;
Ok(())
}
/// Calls `include` in the `Main` module in Julia, which executes the file's contents in that
/// module. This has the same effect as calling `include` in the Julia REPL.
///
/// Example:
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.include("Path/To/MyJuliaCode.jl").unwrap();
/// # }
/// ```
pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> {
if path.as_ref().exists() {
return self.scope_with_slots(2, |global, frame| unsafe {
let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?;
let include_func = Module::main(global)
.function_ref("include")?
.wrapper_unchecked();
let res = include_func.call1(frame, path_jl_str)?;
return match res {
Ok(_) => Ok(()),
Err(e) => Err(JlrsError::IncludeError {
path: path.as_ref().to_string_lossy().into(),
msg: e.display_string_or(CANNOT_DISPLAY_VALUE),
})?,
};
});
}
Err(JlrsError::IncludeNotFound {
path: path.as_ref().to_string_lossy().into(),
})?
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope(|_global, frame| {
/// let _i = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results. The frame will preallocate `slots` slots.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope_with_slots(1, |_global, frame| {
/// // Uses the preallocated slot
/// let _i = Value::new(&mut *frame, 1u64)?;
/// // Allocates a new slot, because only a single slot was preallocated
/// let _j = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
if slots + 2 > self.page.size() {
self.page = StackPage::new(slots + 2);
}
let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync);
func(global, &mut frame)
}
}
/// Provides access to global information.
pub fn info(&self) -> Info {
Info::new()
}
}
impl Drop for Julia {
fn drop(&mut self) {
unsafe {
jl_atexit_hook(0);
}
}
}
/// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to
/// initialize it again would cause a crash. In order to still be able to call Julia from Rust
/// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to
/// create a frame first. You can use this struct to do so. It must never be used outside
/// functions called through `ccall`, and only once for each `ccall`ed function.
///
/// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`].
/// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the
/// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at
/// all.
pub struct CCall {
page: Option<StackPage>,
}
impl CCall {
/// Create a new `CCall`. This function must never be called outside a function called through
/// `ccall` from Julia and must only be called once during that call. The stack is not
/// allocated until a [`GcFrame`] is created.
pub unsafe fn new() -> Self {
CCall { page: None }
}
/// Wake the task associated with `handle`. The handle must be the `handle` field of a
/// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from
/// Julia with `ccall` in another thread and wait for it to complete in Julia without blocking;
/// there's an example available in the repository: ccall_with_threads.
///
/// This method is only available if the `uv` feature is enabled.
#[cfg(feature = "uv")]
pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool | {
uv_async_send(handle.cast()) == 0
} | identifier_body |
|
round_trip.rs | use std::fmt::Debug;
use tree_buf::prelude::*;
mod common;
use common::*;
use std::collections::HashMap;
use tree_buf::encode_options;
use tree_buf::options;
// Create this namespace to hide the prelude. This is a check that the macros are hygienic and do not require any types from tree_buf to be imported
mod hide_namespace {
use tree_buf::{Read, Write};
#[derive(Read, Write, PartialEq, Debug, Clone)]
pub struct Bits {
pub f: f64,
pub obj_array: Vec<Bobs>,
pub extra: Option<Bobs>,
pub s: Box<String>,
}
#[derive(Read, Write, PartialEq, Debug, Clone)]
pub struct Bobs {
pub one: Vec<u64>,
pub tup: (f64, f64),
}
}
use hide_namespace::{Bits, Bobs};
// TODO: Compare to Avro - https://github.com/flavray/avro-rs
fn make_item() -> Bits {
Bits {
f: 5.0,
extra: Some(Bobs {
one: vec![99],
tup: (9999.99, 200.1),
}),
s: Box::new("abc".to_owned()),
obj_array: vec![
Bobs {
one: vec![3, 2, 1, 0],
tup: (10.0, 200.2),
},
Bobs { one: vec![], tup: (2.2, 200.3) },
Bobs {
one: vec![20, 20, 20, 20, 20, 20, 20],
tup: (0.0, 200.4),
},
],
}
}
#[test]
fn broken_int() {
round_trip(&75339u64, 4, 10);
}
#[test]
fn bools_root() {
round_trip(&true, 1, 5);
round_trip(&false, 1, 5);
}
#[test]
fn opts_root() {
round_trip(&Some(true), 1, 9);
round_trip(&Option::<bool>::None, 1, 3);
}
#[test]
fn bool_array() {
round_trip(&vec![false, true, true, false, true, true, true, false, false, true, false, true], 6, 9);
}
#[test]
fn ints_root() {
round_trip(&0u32, 1, 5);
round_trip(&1u32, 1, 5);
for i in 2..=127u32 {
round_trip(&i, 2, 6);
}
for i in 128..=255u32 {
round_trip(&i, 2, 6);
}
for i in 256..1024u32 {
round_trip(&i, 3, 8);
}
}
// Special case for 1 element array encodes root object
#[test]
fn array1() {
round_trip(&vec![99u64], 3, 8);
round_trip(&vec![1u64], 2, 7);
}
#[test]
fn int_vec() {
round_trip(&vec![99u64, 100], 6, 10);
}
#[test]
fn float64_vec() {
round_trip(&vec![0.99], 10, 16);
round_trip(&vec![0.01, 0.02, 0.03, 0.04], 36, 65);
}
#[test]
fn float32_vec() {
round_trip(&vec![0.99f32], 6, 14);
round_trip(&vec![0.01f32, 0.02, 0.03, 0.04], 20, 38);
}
#[test]
fn lossy_f64_vec() {
let mut data = Vec::new();
for i in 0..50 {
data.push(0.01 * i as f64);
}
let tolerance = -10;
let options = encode_options! { options::LossyFloatTolerance(tolerance) };
let binary = tree_buf::write_with_options(&data, &options);
assert_eq!(binary.len(), 104);
let decoded = read::<Vec<f64>>(&binary).unwrap();
assert_eq!(data.len(), decoded.len());
for (e, d) in data.iter().zip(decoded.iter()) {
assert!((e - d).abs() <= 0.001);
}
// Show how much smaller this is than lossless
let options = encode_options! { options::LosslessFloat };
let binary = tree_buf::write_with_options(&data, &options);
assert_eq!(binary.len(), 376);
// Show that this is much better than fixed, since this would be a minimum for exactly 0 schema overhead.
assert_eq!(std::mem::size_of::<f64>() * data.len(), 400);
}
#[test]
fn nested_float_vec() {
round_trip(&vec![vec![10.0, 11.0], vec![], vec![99.0]], 24, 32);
}
#[test]
fn array_tuple() {
round_trip(&vec![vec![(1u32, 2u32), (3, 4), (5, 6)]], 14, 19);
}
#[test]
fn item() {
let item = make_item();
round_trip(&item, 144, 221);
}
#[test]
fn item_vec() {
let item = make_item();
let item = vec![item; 5];
round_trip(&item, 379, 646);
}
#[test]
fn nullable_array() {
round_trip(&vec![Some(1u32), Some(2)], 10, 14);
}
#[test]
fn visibility_modifiers() {
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
struct Inherited {
a: u64,
}
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
pub(crate) struct Crate {
a: u64,
}
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
pub struct Public {
a: u64,
}
round_trip_default::<Inherited>(4, 8);
round_trip_default::<Crate>(4, 8);
round_trip_default::<Public>(4, 8);
}
#[test]
fn ignores() {
use tree_buf::Ignore;
round_trip(&Ignore, 1, 3);
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
struct X {
i: Ignore,
}
let x = X { i: Ignore };
round_trip(&x, 4, 6);
#[derive(Read, Write, Debug, PartialEq, Clone)]
enum E {
A(Ignore),
B(Ignore),
}
let e = E::A(Ignore);
round_trip(&e, 4, 10);
#[derive(Read, Write, Debug, PartialEq, Clone)]
struct N {
e: E,
}
let o = vec![N { e: E::A(Ignore) }, N { e: E::B(Ignore) }];
round_trip(&o, 16, 18);
}
// TODO: Using Quickcheck and Arbitrary with quickcheck_derive.
#[test]
fn various_types() {
round_trip_default::<u64>(1, 5);
round_trip_default::<u32>(1, 5);
round_trip_default::<u16>(1, 5);
round_trip_default::<u8>(1, 5);
round_trip_default::<(u64, u64)>(3, 9);
round_trip_default::<(u64, u32)>(3, 9);
round_trip_default::<f64>(1, 14);
// See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9
round_trip_default::<Vec<u32>>(1, 5);
round_trip_default::<Option<Vec<u32>>>(1, 3);
round_trip_default::<Option<u32>>(1, 3);
round_trip_default::<Vec<Option<u32>>>(1, 5);
round_trip_default::<String>(1, 6);
}
#[test]
fn conversions() {
// TODO: f32
//serialize_eq(1.0f64, 1.0f32, 0);
//serialize_eq(1.0f32, 1.0f64, 0);
//serialize_eq(9.0f32, 9.0f64, 0);
// TODO: A bunch more of these
}
#[test]
fn small_structs() {
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _1 {
a: u64,
}
round_trip_default::<_1>(4, 8);
}
#[test]
fn large_structs() {
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _14 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _15 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _16 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
p: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _17 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
p: f64,
q: f64,
}
round_trip_default::<_14>(44, 200);
round_trip_default::<_15>(47, 214);
round_trip_default::<_16>(50, 228);
round_trip_default::<_17>(53, 242);
}
#[test]
fn map_0_root() |
#[test]
fn map_1_root() {
let mut data = HashMap::new();
data.insert("test".to_owned(), 5u32);
round_trip(&data, 10, 22);
}
#[test]
fn map_n_root() {
let mut data = HashMap::new();
data.insert("test3".to_owned(), 5u32);
data.insert("test2".to_owned(), 5);
data.insert("test1".to_owned(), 0);
round_trip(&data, None, None);
}
#[test]
fn maps_array() {
let mut data = Vec::new();
for i in 0..5u32 {
let mut h = HashMap::new();
h.insert(i, Vec::<u32>::new());
h.insert(10, vec![10, 9, 8, 7]);
data.push(h);
}
// Interestingly, the output size is not deterministic in this case.
// It depends on whether the last key or value from iterating the HashMap is Default
round_trip(&data, None, None);
}
#[test]
fn maps_void() {
let mut data = Vec::new();
for _ in 0..5 {
let h = HashMap::<String, String>::new();
data.push(h);
}
round_trip(&data, 10, 13);
}
#[test]
fn fixed_arrays() {
round_trip(&[0u32, 1, 2, 3], 8, 10);
round_trip(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 6, 8);
}
// This failed to compile at one point when moving generics for WriterArray out of associated type.
#[test]
fn enum_with_vec() {
#[derive(Write, Read, Debug, PartialEq, Clone)]
enum X {
X(Vec<u64>),
}
round_trip(&X::X(vec![25, 30, 0, 0, 0]), 11, 21);
}
fn owned_vec(strs: Vec<&'static str>) -> Vec<String> {
strs.iter().map(|s| String::from(*s)).collect()
}
#[test]
fn strings_using_dictionary() {
let data = vec!["abcd", "abcd", "def", "abcd", "abcd", "abcd", ""];
round_trip(&owned_vec(data), 21, 23);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd"];
round_trip(&owned_vec(data), 13, 15);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "def", "def"];
round_trip(&owned_vec(data), 17, 20);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "abcd", "def"];
round_trip(&owned_vec(data), 17, 20);
}
#[test]
fn nested_strings_using_rle() {
let data = (
//owned_vec(vec![]),
owned_vec(vec!["abc", "abc", "abc"]),
owned_vec(vec!["def", "def", "def"]),
1u32,
);
//let data = owned_vec(vec!["abc", "abc", "abc"]);
// TODO: Add sizes
round_trip(&data, 26, 30);
}
#[test]
fn long_bool_runs() {
let mut data = Vec::new();
for i in 560..570 {
for _ in 0..i {
data.push(true);
}
data.push(false);
}
round_trip(&data, 36, 68);
}
#[test]
fn int_to_bool_nested() {
let data = (
vec![0u32,0,1,1,0],
vec![0u32,0,0,1,1,1,1],
);
round_trip(&data, 11, 15);
let data = vec![
vec![0u32, 0, 1, 1,0],
vec![1u32, 1, 1, 1, 1, 1, 0],
vec![1u32, 0, 0, 0, 0, 0, 1],
];
round_trip(&data, 13, 18);
}
// TODO: Use coverage marks to ensure all types are used
// https://ferrous-systems.com/blog/coverage-marks/
// This was useful for narrowing down a subset of a broken compressor.
// It may be useful in the future
/*
#[test]
fn broken_gorilla() {
use rand::Rng;
use std::convert::TryInto as _;
use tree_buf::internal::encodings::gorilla;
let broken = [-75.01536474599993, -75.00911189799993, 114.37647545700004];
let mut bytes = Vec::new();
gorilla::compress((&broken[..]).iter().copied(), &mut bytes).unwrap();
let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap();
assert_eq!(&broken[..], &out[..]);
// 356301 - 356304
// 457009 - 457012
let data = std::fs::read("C:\\git\\floats.dat").unwrap();
let mut offset = 0;
let mut values = Vec::new();
while offset < data.len() {
let val = (&data[offset..(offset + 8)]).try_into().unwrap();
offset += 8;
let f = f64::from_le_bytes(val);
values.push(f);
}
return;
fn attempt(values: &[f64], min: usize, max: usize) -> bool {
let values = &values[min..max];
std::panic::catch_unwind(|| {
let mut bytes = Vec::new();
gorilla::compress(values.iter().copied(), &mut bytes).unwrap();
let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap();
assert_eq!(values, &out[..]);
})
.is_ok()
}
let mut min = 0;
let mut max = values.len();
let mut rng = rand::thread_rng();
for _ in 0..100000 {
let try_min = rng.gen_range(min, max);
let try_max = rng.gen_range(try_min + 1, max + 1);
if try_min == min && try_max == max {
continue;
}
if!attempt(&values[..], try_min, try_max) {
min = try_min;
max = try_max;
}
}
}
*/
| {
// See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9
let data = HashMap::<u32, u32>::new();
round_trip(&data, 2, 8);
} | identifier_body |
round_trip.rs | use std::fmt::Debug;
use tree_buf::prelude::*;
mod common;
use common::*;
use std::collections::HashMap;
use tree_buf::encode_options;
use tree_buf::options;
// Create this namespace to hide the prelude. This is a check that the macros are hygienic and do not require any types from tree_buf to be imported
mod hide_namespace {
use tree_buf::{Read, Write};
#[derive(Read, Write, PartialEq, Debug, Clone)]
pub struct Bits {
pub f: f64,
pub obj_array: Vec<Bobs>,
pub extra: Option<Bobs>,
pub s: Box<String>,
}
#[derive(Read, Write, PartialEq, Debug, Clone)]
pub struct Bobs {
pub one: Vec<u64>,
pub tup: (f64, f64),
}
}
use hide_namespace::{Bits, Bobs};
// TODO: Compare to Avro - https://github.com/flavray/avro-rs
fn make_item() -> Bits {
Bits {
f: 5.0,
extra: Some(Bobs {
one: vec![99],
tup: (9999.99, 200.1),
}),
s: Box::new("abc".to_owned()),
obj_array: vec![
Bobs {
one: vec![3, 2, 1, 0],
tup: (10.0, 200.2),
},
Bobs { one: vec![], tup: (2.2, 200.3) },
Bobs {
one: vec![20, 20, 20, 20, 20, 20, 20],
tup: (0.0, 200.4),
},
],
}
}
#[test]
fn broken_int() {
round_trip(&75339u64, 4, 10);
}
#[test]
fn bools_root() {
round_trip(&true, 1, 5);
round_trip(&false, 1, 5);
}
#[test]
fn opts_root() {
round_trip(&Some(true), 1, 9);
round_trip(&Option::<bool>::None, 1, 3);
}
#[test]
fn bool_array() {
round_trip(&vec![false, true, true, false, true, true, true, false, false, true, false, true], 6, 9);
}
#[test]
fn ints_root() {
round_trip(&0u32, 1, 5);
round_trip(&1u32, 1, 5);
for i in 2..=127u32 {
round_trip(&i, 2, 6);
}
for i in 128..=255u32 {
round_trip(&i, 2, 6);
}
for i in 256..1024u32 {
round_trip(&i, 3, 8);
}
}
// Special case for 1 element array encodes root object
#[test]
fn array1() {
round_trip(&vec![99u64], 3, 8);
round_trip(&vec![1u64], 2, 7);
}
#[test]
fn int_vec() {
round_trip(&vec![99u64, 100], 6, 10);
}
#[test]
fn float64_vec() {
round_trip(&vec![0.99], 10, 16);
round_trip(&vec![0.01, 0.02, 0.03, 0.04], 36, 65);
}
#[test]
fn float32_vec() {
round_trip(&vec![0.99f32], 6, 14);
round_trip(&vec![0.01f32, 0.02, 0.03, 0.04], 20, 38);
}
#[test]
fn lossy_f64_vec() {
let mut data = Vec::new();
for i in 0..50 {
data.push(0.01 * i as f64);
}
let tolerance = -10;
let options = encode_options! { options::LossyFloatTolerance(tolerance) };
let binary = tree_buf::write_with_options(&data, &options);
assert_eq!(binary.len(), 104);
let decoded = read::<Vec<f64>>(&binary).unwrap();
assert_eq!(data.len(), decoded.len());
for (e, d) in data.iter().zip(decoded.iter()) {
assert!((e - d).abs() <= 0.001);
}
// Show how much smaller this is than lossless
let options = encode_options! { options::LosslessFloat };
let binary = tree_buf::write_with_options(&data, &options);
assert_eq!(binary.len(), 376);
// Show that this is much better than fixed, since this would be a minimum for exactly 0 schema overhead.
assert_eq!(std::mem::size_of::<f64>() * data.len(), 400);
}
#[test]
fn nested_float_vec() {
round_trip(&vec![vec![10.0, 11.0], vec![], vec![99.0]], 24, 32);
}
#[test]
fn array_tuple() {
round_trip(&vec![vec![(1u32, 2u32), (3, 4), (5, 6)]], 14, 19);
}
#[test]
fn item() {
let item = make_item();
round_trip(&item, 144, 221);
}
#[test]
fn item_vec() {
let item = make_item();
let item = vec![item; 5];
round_trip(&item, 379, 646);
}
#[test]
fn nullable_array() {
round_trip(&vec![Some(1u32), Some(2)], 10, 14);
}
#[test]
fn visibility_modifiers() {
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
struct Inherited {
a: u64,
}
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
pub(crate) struct Crate {
a: u64,
}
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
pub struct Public {
a: u64,
}
round_trip_default::<Inherited>(4, 8);
round_trip_default::<Crate>(4, 8);
round_trip_default::<Public>(4, 8);
}
#[test]
fn ignores() {
use tree_buf::Ignore;
round_trip(&Ignore, 1, 3);
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
struct X {
i: Ignore,
}
let x = X { i: Ignore };
round_trip(&x, 4, 6);
#[derive(Read, Write, Debug, PartialEq, Clone)]
enum E {
A(Ignore),
B(Ignore),
}
let e = E::A(Ignore);
round_trip(&e, 4, 10);
#[derive(Read, Write, Debug, PartialEq, Clone)]
struct N {
e: E,
}
let o = vec![N { e: E::A(Ignore) }, N { e: E::B(Ignore) }];
round_trip(&o, 16, 18);
}
// TODO: Using Quickcheck and Arbitrary with quickcheck_derive.
#[test]
fn various_types() {
round_trip_default::<u64>(1, 5);
round_trip_default::<u32>(1, 5);
round_trip_default::<u16>(1, 5);
round_trip_default::<u8>(1, 5);
round_trip_default::<(u64, u64)>(3, 9);
round_trip_default::<(u64, u32)>(3, 9);
round_trip_default::<f64>(1, 14);
// See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9
round_trip_default::<Vec<u32>>(1, 5);
round_trip_default::<Option<Vec<u32>>>(1, 3);
round_trip_default::<Option<u32>>(1, 3);
round_trip_default::<Vec<Option<u32>>>(1, 5);
round_trip_default::<String>(1, 6);
}
#[test]
fn conversions() {
// TODO: f32
//serialize_eq(1.0f64, 1.0f32, 0);
//serialize_eq(1.0f32, 1.0f64, 0);
//serialize_eq(9.0f32, 9.0f64, 0);
// TODO: A bunch more of these
}
#[test]
fn small_structs() {
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _1 {
a: u64,
}
round_trip_default::<_1>(4, 8);
}
#[test]
fn large_structs() {
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _14 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _15 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _16 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
p: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _17 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
p: f64,
q: f64,
}
round_trip_default::<_14>(44, 200);
round_trip_default::<_15>(47, 214);
round_trip_default::<_16>(50, 228);
round_trip_default::<_17>(53, 242);
}
#[test]
fn map_0_root() {
// See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9
let data = HashMap::<u32, u32>::new();
round_trip(&data, 2, 8);
}
#[test]
fn map_1_root() {
let mut data = HashMap::new();
data.insert("test".to_owned(), 5u32);
round_trip(&data, 10, 22);
}
#[test]
fn | () {
let mut data = HashMap::new();
data.insert("test3".to_owned(), 5u32);
data.insert("test2".to_owned(), 5);
data.insert("test1".to_owned(), 0);
round_trip(&data, None, None);
}
#[test]
fn maps_array() {
let mut data = Vec::new();
for i in 0..5u32 {
let mut h = HashMap::new();
h.insert(i, Vec::<u32>::new());
h.insert(10, vec![10, 9, 8, 7]);
data.push(h);
}
// Interestingly, the output size is not deterministic in this case.
// It depends on whether the last key or value from iterating the HashMap is Default
round_trip(&data, None, None);
}
#[test]
fn maps_void() {
let mut data = Vec::new();
for _ in 0..5 {
let h = HashMap::<String, String>::new();
data.push(h);
}
round_trip(&data, 10, 13);
}
#[test]
fn fixed_arrays() {
round_trip(&[0u32, 1, 2, 3], 8, 10);
round_trip(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 6, 8);
}
// This failed to compile at one point when moving generics for WriterArray out of associated type.
#[test]
fn enum_with_vec() {
#[derive(Write, Read, Debug, PartialEq, Clone)]
enum X {
X(Vec<u64>),
}
round_trip(&X::X(vec![25, 30, 0, 0, 0]), 11, 21);
}
fn owned_vec(strs: Vec<&'static str>) -> Vec<String> {
strs.iter().map(|s| String::from(*s)).collect()
}
#[test]
fn strings_using_dictionary() {
let data = vec!["abcd", "abcd", "def", "abcd", "abcd", "abcd", ""];
round_trip(&owned_vec(data), 21, 23);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd"];
round_trip(&owned_vec(data), 13, 15);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "def", "def"];
round_trip(&owned_vec(data), 17, 20);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "abcd", "def"];
round_trip(&owned_vec(data), 17, 20);
}
#[test]
fn nested_strings_using_rle() {
let data = (
//owned_vec(vec![]),
owned_vec(vec!["abc", "abc", "abc"]),
owned_vec(vec!["def", "def", "def"]),
1u32,
);
//let data = owned_vec(vec!["abc", "abc", "abc"]);
// TODO: Add sizes
round_trip(&data, 26, 30);
}
#[test]
fn long_bool_runs() {
let mut data = Vec::new();
for i in 560..570 {
for _ in 0..i {
data.push(true);
}
data.push(false);
}
round_trip(&data, 36, 68);
}
#[test]
fn int_to_bool_nested() {
let data = (
vec![0u32,0,1,1,0],
vec![0u32,0,0,1,1,1,1],
);
round_trip(&data, 11, 15);
let data = vec![
vec![0u32, 0, 1, 1,0],
vec![1u32, 1, 1, 1, 1, 1, 0],
vec![1u32, 0, 0, 0, 0, 0, 1],
];
round_trip(&data, 13, 18);
}
// TODO: Use coverage marks to ensure all types are used
// https://ferrous-systems.com/blog/coverage-marks/
// This was useful for narrowing down a subset of a broken compressor.
// It may be useful in the future
/*
#[test]
fn broken_gorilla() {
use rand::Rng;
use std::convert::TryInto as _;
use tree_buf::internal::encodings::gorilla;
let broken = [-75.01536474599993, -75.00911189799993, 114.37647545700004];
let mut bytes = Vec::new();
gorilla::compress((&broken[..]).iter().copied(), &mut bytes).unwrap();
let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap();
assert_eq!(&broken[..], &out[..]);
// 356301 - 356304
// 457009 - 457012
let data = std::fs::read("C:\\git\\floats.dat").unwrap();
let mut offset = 0;
let mut values = Vec::new();
while offset < data.len() {
let val = (&data[offset..(offset + 8)]).try_into().unwrap();
offset += 8;
let f = f64::from_le_bytes(val);
values.push(f);
}
return;
fn attempt(values: &[f64], min: usize, max: usize) -> bool {
let values = &values[min..max];
std::panic::catch_unwind(|| {
let mut bytes = Vec::new();
gorilla::compress(values.iter().copied(), &mut bytes).unwrap();
let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap();
assert_eq!(values, &out[..]);
})
.is_ok()
}
let mut min = 0;
let mut max = values.len();
let mut rng = rand::thread_rng();
for _ in 0..100000 {
let try_min = rng.gen_range(min, max);
let try_max = rng.gen_range(try_min + 1, max + 1);
if try_min == min && try_max == max {
continue;
}
if!attempt(&values[..], try_min, try_max) {
min = try_min;
max = try_max;
}
}
}
*/
| map_n_root | identifier_name |
round_trip.rs | use std::fmt::Debug;
use tree_buf::prelude::*;
mod common;
use common::*;
use std::collections::HashMap;
use tree_buf::encode_options;
use tree_buf::options;
// Create this namespace to hide the prelude. This is a check that the macros are hygienic and do not require any types from tree_buf to be imported
mod hide_namespace {
use tree_buf::{Read, Write};
#[derive(Read, Write, PartialEq, Debug, Clone)]
pub struct Bits {
pub f: f64,
pub obj_array: Vec<Bobs>,
pub extra: Option<Bobs>,
pub s: Box<String>,
}
#[derive(Read, Write, PartialEq, Debug, Clone)]
pub struct Bobs {
pub one: Vec<u64>,
pub tup: (f64, f64),
}
}
use hide_namespace::{Bits, Bobs};
// TODO: Compare to Avro - https://github.com/flavray/avro-rs
fn make_item() -> Bits {
Bits {
f: 5.0,
extra: Some(Bobs {
one: vec![99],
tup: (9999.99, 200.1),
}),
s: Box::new("abc".to_owned()),
obj_array: vec![
Bobs {
one: vec![3, 2, 1, 0],
tup: (10.0, 200.2),
},
Bobs { one: vec![], tup: (2.2, 200.3) },
Bobs {
one: vec![20, 20, 20, 20, 20, 20, 20],
tup: (0.0, 200.4),
},
],
}
}
#[test]
fn broken_int() {
round_trip(&75339u64, 4, 10);
}
#[test]
fn bools_root() {
round_trip(&true, 1, 5);
round_trip(&false, 1, 5);
}
#[test]
fn opts_root() {
round_trip(&Some(true), 1, 9);
round_trip(&Option::<bool>::None, 1, 3);
}
#[test]
fn bool_array() {
round_trip(&vec![false, true, true, false, true, true, true, false, false, true, false, true], 6, 9);
}
#[test]
fn ints_root() {
round_trip(&0u32, 1, 5);
round_trip(&1u32, 1, 5);
for i in 2..=127u32 {
round_trip(&i, 2, 6);
}
for i in 128..=255u32 {
round_trip(&i, 2, 6);
}
for i in 256..1024u32 {
round_trip(&i, 3, 8);
}
}
// Special case for 1 element array encodes root object
#[test]
fn array1() {
round_trip(&vec![99u64], 3, 8);
round_trip(&vec![1u64], 2, 7);
}
#[test]
fn int_vec() {
round_trip(&vec![99u64, 100], 6, 10);
}
#[test]
fn float64_vec() {
round_trip(&vec![0.99], 10, 16);
round_trip(&vec![0.01, 0.02, 0.03, 0.04], 36, 65);
}
#[test]
fn float32_vec() {
round_trip(&vec![0.99f32], 6, 14);
round_trip(&vec![0.01f32, 0.02, 0.03, 0.04], 20, 38);
}
#[test]
fn lossy_f64_vec() {
let mut data = Vec::new();
for i in 0..50 {
data.push(0.01 * i as f64);
}
let tolerance = -10;
let options = encode_options! { options::LossyFloatTolerance(tolerance) };
let binary = tree_buf::write_with_options(&data, &options);
assert_eq!(binary.len(), 104);
let decoded = read::<Vec<f64>>(&binary).unwrap();
assert_eq!(data.len(), decoded.len());
for (e, d) in data.iter().zip(decoded.iter()) {
assert!((e - d).abs() <= 0.001);
}
// Show how much smaller this is than lossless | // Show that this is much better than fixed, since this would be a minimum for exactly 0 schema overhead.
assert_eq!(std::mem::size_of::<f64>() * data.len(), 400);
}
#[test]
fn nested_float_vec() {
round_trip(&vec![vec![10.0, 11.0], vec![], vec![99.0]], 24, 32);
}
#[test]
fn array_tuple() {
round_trip(&vec![vec![(1u32, 2u32), (3, 4), (5, 6)]], 14, 19);
}
#[test]
fn item() {
let item = make_item();
round_trip(&item, 144, 221);
}
#[test]
fn item_vec() {
let item = make_item();
let item = vec![item; 5];
round_trip(&item, 379, 646);
}
#[test]
fn nullable_array() {
round_trip(&vec![Some(1u32), Some(2)], 10, 14);
}
#[test]
fn visibility_modifiers() {
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
struct Inherited {
a: u64,
}
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
pub(crate) struct Crate {
a: u64,
}
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
pub struct Public {
a: u64,
}
round_trip_default::<Inherited>(4, 8);
round_trip_default::<Crate>(4, 8);
round_trip_default::<Public>(4, 8);
}
#[test]
fn ignores() {
use tree_buf::Ignore;
round_trip(&Ignore, 1, 3);
#[derive(Default, Read, Write, Debug, PartialEq, Clone)]
struct X {
i: Ignore,
}
let x = X { i: Ignore };
round_trip(&x, 4, 6);
#[derive(Read, Write, Debug, PartialEq, Clone)]
enum E {
A(Ignore),
B(Ignore),
}
let e = E::A(Ignore);
round_trip(&e, 4, 10);
#[derive(Read, Write, Debug, PartialEq, Clone)]
struct N {
e: E,
}
let o = vec![N { e: E::A(Ignore) }, N { e: E::B(Ignore) }];
round_trip(&o, 16, 18);
}
// TODO: Using Quickcheck and Arbitrary with quickcheck_derive.
#[test]
fn various_types() {
round_trip_default::<u64>(1, 5);
round_trip_default::<u32>(1, 5);
round_trip_default::<u16>(1, 5);
round_trip_default::<u8>(1, 5);
round_trip_default::<(u64, u64)>(3, 9);
round_trip_default::<(u64, u32)>(3, 9);
round_trip_default::<f64>(1, 14);
// See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9
round_trip_default::<Vec<u32>>(1, 5);
round_trip_default::<Option<Vec<u32>>>(1, 3);
round_trip_default::<Option<u32>>(1, 3);
round_trip_default::<Vec<Option<u32>>>(1, 5);
round_trip_default::<String>(1, 6);
}
#[test]
fn conversions() {
// TODO: f32
//serialize_eq(1.0f64, 1.0f32, 0);
//serialize_eq(1.0f32, 1.0f64, 0);
//serialize_eq(9.0f32, 9.0f64, 0);
// TODO: A bunch more of these
}
#[test]
fn small_structs() {
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _1 {
a: u64,
}
round_trip_default::<_1>(4, 8);
}
#[test]
fn large_structs() {
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _14 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _15 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _16 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
p: f64,
}
#[derive(Read, Write, Default, Debug, PartialEq, Clone)]
struct _17 {
a: f64,
b: f64,
c: f64,
d: f64,
e: f64,
f: f64,
g: f64,
h: f64,
i: f64,
j: f64,
k: f64,
l: f64,
m: f64,
n: f64,
o: f64,
p: f64,
q: f64,
}
round_trip_default::<_14>(44, 200);
round_trip_default::<_15>(47, 214);
round_trip_default::<_16>(50, 228);
round_trip_default::<_17>(53, 242);
}
#[test]
fn map_0_root() {
// See also: 84d15459-35e4-4f04-896f-0f4ea9ce52a9
let data = HashMap::<u32, u32>::new();
round_trip(&data, 2, 8);
}
#[test]
fn map_1_root() {
let mut data = HashMap::new();
data.insert("test".to_owned(), 5u32);
round_trip(&data, 10, 22);
}
#[test]
fn map_n_root() {
let mut data = HashMap::new();
data.insert("test3".to_owned(), 5u32);
data.insert("test2".to_owned(), 5);
data.insert("test1".to_owned(), 0);
round_trip(&data, None, None);
}
#[test]
fn maps_array() {
let mut data = Vec::new();
for i in 0..5u32 {
let mut h = HashMap::new();
h.insert(i, Vec::<u32>::new());
h.insert(10, vec![10, 9, 8, 7]);
data.push(h);
}
// Interestingly, the output size is not deterministic in this case.
// It depends on whether the last key or value from iterating the HashMap is Default
round_trip(&data, None, None);
}
#[test]
fn maps_void() {
let mut data = Vec::new();
for _ in 0..5 {
let h = HashMap::<String, String>::new();
data.push(h);
}
round_trip(&data, 10, 13);
}
#[test]
fn fixed_arrays() {
round_trip(&[0u32, 1, 2, 3], 8, 10);
round_trip(&[0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 6, 8);
}
// This failed to compile at one point when moving generics for WriterArray out of associated type.
#[test]
fn enum_with_vec() {
#[derive(Write, Read, Debug, PartialEq, Clone)]
enum X {
X(Vec<u64>),
}
round_trip(&X::X(vec![25, 30, 0, 0, 0]), 11, 21);
}
fn owned_vec(strs: Vec<&'static str>) -> Vec<String> {
strs.iter().map(|s| String::from(*s)).collect()
}
#[test]
fn strings_using_dictionary() {
let data = vec!["abcd", "abcd", "def", "abcd", "abcd", "abcd", ""];
round_trip(&owned_vec(data), 21, 23);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd"];
round_trip(&owned_vec(data), 13, 15);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "def", "def"];
round_trip(&owned_vec(data), 17, 20);
let data = vec!["abcd", "abcd", "abcd", "abcd", "abcd", "abcd", "def"];
round_trip(&owned_vec(data), 17, 20);
}
#[test]
fn nested_strings_using_rle() {
let data = (
//owned_vec(vec![]),
owned_vec(vec!["abc", "abc", "abc"]),
owned_vec(vec!["def", "def", "def"]),
1u32,
);
//let data = owned_vec(vec!["abc", "abc", "abc"]);
// TODO: Add sizes
round_trip(&data, 26, 30);
}
#[test]
fn long_bool_runs() {
let mut data = Vec::new();
for i in 560..570 {
for _ in 0..i {
data.push(true);
}
data.push(false);
}
round_trip(&data, 36, 68);
}
#[test]
fn int_to_bool_nested() {
let data = (
vec![0u32,0,1,1,0],
vec![0u32,0,0,1,1,1,1],
);
round_trip(&data, 11, 15);
let data = vec![
vec![0u32, 0, 1, 1,0],
vec![1u32, 1, 1, 1, 1, 1, 0],
vec![1u32, 0, 0, 0, 0, 0, 1],
];
round_trip(&data, 13, 18);
}
// TODO: Use coverage marks to ensure all types are used
// https://ferrous-systems.com/blog/coverage-marks/
// This was useful for narrowing down a subset of a broken compressor.
// It may be useful in the future
/*
#[test]
fn broken_gorilla() {
use rand::Rng;
use std::convert::TryInto as _;
use tree_buf::internal::encodings::gorilla;
let broken = [-75.01536474599993, -75.00911189799993, 114.37647545700004];
let mut bytes = Vec::new();
gorilla::compress((&broken[..]).iter().copied(), &mut bytes).unwrap();
let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap();
assert_eq!(&broken[..], &out[..]);
// 356301 - 356304
// 457009 - 457012
let data = std::fs::read("C:\\git\\floats.dat").unwrap();
let mut offset = 0;
let mut values = Vec::new();
while offset < data.len() {
let val = (&data[offset..(offset + 8)]).try_into().unwrap();
offset += 8;
let f = f64::from_le_bytes(val);
values.push(f);
}
return;
fn attempt(values: &[f64], min: usize, max: usize) -> bool {
let values = &values[min..max];
std::panic::catch_unwind(|| {
let mut bytes = Vec::new();
gorilla::compress(values.iter().copied(), &mut bytes).unwrap();
let out: Vec<f64> = gorilla::decompress(&bytes[..]).unwrap();
assert_eq!(values, &out[..]);
})
.is_ok()
}
let mut min = 0;
let mut max = values.len();
let mut rng = rand::thread_rng();
for _ in 0..100000 {
let try_min = rng.gen_range(min, max);
let try_max = rng.gen_range(try_min + 1, max + 1);
if try_min == min && try_max == max {
continue;
}
if!attempt(&values[..], try_min, try_max) {
min = try_min;
max = try_max;
}
}
}
*/ | let options = encode_options! { options::LosslessFloat };
let binary = tree_buf::write_with_options(&data, &options);
assert_eq!(binary.len(), 376);
| random_line_split |
chip8.rs |
use rand::{Rng, thread_rng};
pub const PIXEL_W: u16 = 64; // width of CHIP-8 screen
pub const PIXEL_H: u16 = 32; // height of CHIP-8 screen
pub const FONT_LOCATION: u16 = 0x80; // location of font set in system RAM
pub const CARTRIDGE_LOCATION: u16 = 0x200; // location in system RAM where game data should be loaded on boot
pub struct | {
pub memory: [u8; 4096], // RAM
pub reg: [u8; 16], // registers
pub gfx: [u8; (PIXEL_W * PIXEL_H) as usize], // pixels
stack: [u16; 16], // subroutine stack
pub key: [u8; 16], // keypad
idx: u16, // index register
pc: u16, // program counter
sp: u16, // stack pointer
pub delay_timer: u8,
pub sound_timer: u8,
pub draw_flag: bool, // set when clear screen or draw opcodes are called
}
impl Chip8 {
pub fn new() -> Chip8 {
let mut chip = Chip8 {
memory: [0;4096],
reg: [0;16],
gfx: [0; (PIXEL_W * PIXEL_H) as usize],
stack: [0; 16],
key: [0; 16],
idx: 0,
pc: CARTRIDGE_LOCATION,
sp: 0,
delay_timer: 0,
sound_timer: 0,
draw_flag: false,
};
// load font set
for (i, v) in FONT_SET.iter().enumerate() {
chip.memory[FONT_LOCATION as usize + i] = *v;
}
chip
}
pub fn cycle(&mut self) {
// all opcodes are two bytes.
// get the byte at memory[program counter] and memory[program counter + 1],
// split them into nibbles for convenience.
let w = self.memory[self.pc as usize] >> 4;
let x = self.memory[self.pc as usize] & 0xF;
let y = self.memory[(self.pc+1) as usize] >> 4;
let z = self.memory[(self.pc+1) as usize] & 0xF;
let yz = y << 4 | z;
let xyz: u16 = (x as u16) << 8 | (y as u16) << 4 | (z as u16);
let (_x, _y, _z) = (x as usize, y as usize, z as usize);
let opcode = (w, x, y, z);
if super::DEBUG {
println!("=================\nregisters: {:02x?}", self.reg);
println!("pc: 0x{:02x}, idx: 0x{:02x}, bytes at idx: {:02x?}",
self.pc, self.idx,
&self.memory[self.idx as usize..(self.idx+8) as usize]);
println!("executing opcode {:02x?}", opcode);
}
match opcode {
// skipping instruction 0XYZ
// clear screen.
(0x0, 0x0, 0xE, 0x0) => {
self.draw_flag = true;
self.gfx.iter_mut().for_each(|b| *b = 0);
self.pc += 2;
},
// return from subroutine.
(0x0, 0x0, 0xE, 0xE) => {
self.sp -= 1;
self.pc = self.stack[self.sp as usize];
},
// go to xyz.
(0x1, _, _, _) => self.pc = xyz,
// call subroutine at xyz.
(0x2, _, _, _) => {
self.stack[self.sp as usize] = self.pc + 2; // put next instruction on stack
self.sp += 1; // increase stack pointer
self.pc = xyz; // jump to subroutine
},
// skip next instruction if register x equals yz.
(0x3, _, _, _) => {
if self.reg[_x] == yz {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if register x doesn't equal yz.
(0x4, _, _, _) => {
if self.reg[_x] != yz {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if reg x == reg y.
(0x5, _, _, 0x0) => {
if self.reg[_x] == self.reg[_y] {
self.pc += 2;
}
self.pc += 2;
},
// set reg x to yz.
(0x6, _, _, _) => {
self.reg[_x] = yz;
self.pc += 2;
},
// add yz to reg x.
(0x7, _, _, _) => {
self.reg[_x] = self.reg[_x].wrapping_add(yz);
self.pc += 2;
},
// set reg x to value of reg y.
(0x8, _, _, 0x0) => {
self.reg[_x] = self.reg[_y];
self.pc += 2;
},
// set reg x to reg x | reg y.
(0x8, _, _, 0x1) => {
self.reg[_x] |= self.reg[_y];
self.pc += 2;
},
// set reg x to reg x & reg y.
(0x8, _, _, 0x2) => {
self.reg[_x] &= self.reg[_y];
self.pc += 2;
},
// UNDOCUMENTED. set reg x to reg x ^ reg y.
(0x8, _, _, 0x3) => {
self.reg[_x] ^= self.reg[_y];
self.pc += 2;
},
// add reg y to reg x. reg f is set to 1 when there's a carry, and to 0 when there isn't.
(0x8, _, _, 0x4) => {
let old_x = self.reg[_x];
self.reg[_x] = self.reg[_x].wrapping_add(self.reg[_y]);
self.reg[0xF] = if self.reg[_x] < old_x { 1 } else { 0 };
self.pc += 2;
},
// reg y is subtracted from reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't.
(0x8, _, _, 0x5) => {
self.reg[0xF] = if self.reg[_x] < self.reg[_y] { 0 } else { 1 };
self.reg[_x] = self.reg[_x].wrapping_sub(self.reg[_y]);
self.pc += 2;
},
// WEIRD UNDOCUMENTED LEGACY ONE. TODO: add legacy mode?
(0x8, _, _, 0x6) => {
// first attempt. newer version?
self.reg[0xF] = self.reg[_x] & 0x1;
self.reg[_x] >>= 1;
// legacy? according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set
// self.reg[0xF] = self.reg[_y] & 0x1;
// self.reg[_x] = self.reg[_y] >> 1;
self.pc += 2;
},
// UNDOCUMENTED. sets reg x to reg y minus reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't.
(0x8, _, _, 0x7) => {
self.reg[0xF] = if self.reg[_y] < self.reg[_x] { 0 } else { 1 };
self.reg[_x] = self.reg[_y].wrapping_sub(self.reg[_x]);
self.pc += 2;
},
// UNDOCUMENTED. store the most significant bit of reg x in reg f and left-shift reg x by 1.
(0x8, _, _, 0xE) => {
// according to https://en.wikipedia.org/wiki/CHIP-8#Opcode_table
self.reg[0xF] = (self.reg[_x] & (1 << 7)) >> 7;
self.reg[_x] <<= 1;
// according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set
// self.reg[0xF] = (self.reg[_y] & (1 << 7)) >> 7;
// self.reg[_x] = self.reg[_y] << 1;
self.pc += 2;
},
// skips the next instruction if reg x doesn't equal reg y.
(0x9, _, _, 0x0) => {
if self.reg[_x] != self.reg[_y] {
self.pc += 2;
}
self.pc += 2;
},
// Sets idx to the address xyz.
(0xA, _, _, _) => {
self.idx = xyz;
self.pc += 2;
},
// jump to xyz plus reg 0.
(0xB, _, _, _) => {
self.pc = xyz + (self.reg[0x0] as u16);
},
// set reg x to the result of a bitwise and operation on a random number (Typically: 0 to 255) and yz.
(0xC, _, _, _) => {
let rand_val: u8 = thread_rng().gen();
self.reg[_x] = yz & rand_val;
self.pc += 2;
},
// draw a sprite at coordinate (reg x, reg y) (NOT X AND Y AS I ORIGINALLY DID) with a width of 8 and a height of z.
// get z bytes (rows) of sprite data from memory starting at location idx.
(0xD, _, _, _) => {
self.draw_flag = true;
let mut pixel_unset = false;
let sprites = &self.memory[self.idx as usize.. (self.idx + (z as u16)) as usize];
for i in 0.._z { // for each row of 8 pixels (sprite)
// x is columns, y is rows. gfx is a flat array. starting coordinate is ((y + row number) * PIXEL_W) + x.
// every 8 bytes, we have to skip to next row, which means adding another PIXEL_W.
if super::DEBUG {
println!("drawing byte: 0b{:08b}", sprites[i]);
}
for j in 0..8 {
let current_coordinate = self.reg[_x] as usize + ((self.reg[_y] as usize + i) * (PIXEL_W as usize)) + j;
let current_sprite_bit = (sprites[i] & (1 << (7-j))) >> (7-j);
if super::DEBUG {
println!("drawing pixel 0b{:b} at {}, {}",
current_sprite_bit,
current_coordinate % PIXEL_W as usize,
current_coordinate / PIXEL_W as usize
);
}
if self.gfx[current_coordinate % self.gfx.len()] & current_sprite_bit != 0 { // if the current byte/pixel is 1, and the sprite bit is 1,
pixel_unset = true; // then the xor operation will flip an on bit to off, meaning we need to record and set reg f.
}
self.gfx[current_coordinate % self.gfx.len()] ^= current_sprite_bit; // xor with sprite bit to draw
}
}
self.reg[0xF] = if pixel_unset { 1 } else { 0 };
self.pc += 2;
if super::DEBUG {
println!("screen:");
for i in 0..PIXEL_H {
for j in 0..PIXEL_W {
print!("{} ", self.gfx[((PIXEL_W * i) + j) as usize])
}
println!();
}
}
},
// skip next instruction if key corresponding to reg x is pressed.
(0xE, _, 0x9, 0xE) => {
if self.key[self.reg[_x] as usize] != 0 {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if key corresponding to reg x isn't pressed.
(0xE, _, 0xA, 0x1) => {
if self.key[self.reg[_x] as usize] == 0 {
self.pc += 2;
}
self.pc += 2;
},
// set reg x to value of delay timer.
(0xF, _, 0x0, 0x7) => {
self.reg[_x] = self.delay_timer;
self.pc += 2;
},
// wait for key press and store in reg x.
(0xF, _, 0x0, 0xA) => {
// we don't check for input in the middle of a cycle, so we should just pass, not incrementing program counter,
// and let the program come back to here until a key is registered.
if self.key != [0; 16] {
'key_checking: for (i, k) in self.key.iter().enumerate() { // including lifetime so we can break after only one key is stored to reg x
if *k != 0 {
self.reg[_x] = i as u8;
self.pc += 2;
break 'key_checking;
}
}
}
},
// set delay timer to value of reg x.
(0xF, _, 0x1, 0x5) => {
self.delay_timer = self.reg[_x];
self.pc += 2;
},
// set sound timer to value of reg x.
(0xF, _, 0x1, 0x8) => {
self.sound_timer = self.reg[_x];
self.pc += 2;
},
// add value of reg x to idx.
(0xF, _, 0x1, 0xE) => {
self.idx += self.reg[_x] as u16;
self.pc += 2;
},
// set idx to location of font char IN REGISTER X (not x).
(0xF, _, 0x2, 0x9) => {
self.idx = FONT_LOCATION + (self.reg[_x] as u16 * 5);
self.pc += 2;
},
// store the binary-coded decimal representation of reg x in memory[idx..=idx+2] (three bytes).
(0xF, _, 0x3, 0x3) => {
self.memory[self.idx as usize] = self.reg[_x] / 100;
self.memory[self.idx as usize + 1] = (self.reg[_x] % 100) / 10;
self.memory[self.idx as usize + 2] = self.reg[_x] % 10;
self.pc += 2;
},
// store reg 0.. reg x (inclusive) in memory[idx..]. don't modify idx.
(0xF, _, 0x5, 0x5) => {
for i in 0..= _x {
self.memory[self.idx as usize + i] = self.reg[i];
}
self.pc += 2;
},
// load reg 0.. reg x (inclusive) from memory[idx..]. don't modify idx.
(0xF, _, 0x6, 0x5) => {
for i in 0..= _x {
self.reg[i] = self.memory[self.idx as usize + i];
}
self.pc += 2;
},
oopsie => {
println!("illegal instruction: {:02x?}", oopsie);
self.pc += 2;
},
};
}
}
const FONT_SET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
| Chip8 | identifier_name |
chip8.rs |
use rand::{Rng, thread_rng};
pub const PIXEL_W: u16 = 64; // width of CHIP-8 screen
pub const PIXEL_H: u16 = 32; // height of CHIP-8 screen
pub const FONT_LOCATION: u16 = 0x80; // location of font set in system RAM
pub const CARTRIDGE_LOCATION: u16 = 0x200; // location in system RAM where game data should be loaded on boot
pub struct Chip8 {
pub memory: [u8; 4096], // RAM
pub reg: [u8; 16], // registers
pub gfx: [u8; (PIXEL_W * PIXEL_H) as usize], // pixels
stack: [u16; 16], // subroutine stack
pub key: [u8; 16], // keypad
idx: u16, // index register
pc: u16, // program counter
sp: u16, // stack pointer
pub delay_timer: u8,
pub sound_timer: u8,
pub draw_flag: bool, // set when clear screen or draw opcodes are called
}
impl Chip8 {
pub fn new() -> Chip8 {
let mut chip = Chip8 {
memory: [0;4096],
reg: [0;16],
gfx: [0; (PIXEL_W * PIXEL_H) as usize],
stack: [0; 16],
key: [0; 16],
idx: 0,
pc: CARTRIDGE_LOCATION,
sp: 0,
delay_timer: 0,
sound_timer: 0,
draw_flag: false,
};
// load font set
for (i, v) in FONT_SET.iter().enumerate() {
chip.memory[FONT_LOCATION as usize + i] = *v;
}
chip
}
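// Illustrative usage sketch (the `rom` byte buffer here is hypothetical; loading it is left to the caller):
// let mut chip = Chip8::new();
// for (i, byte) in rom.iter().enumerate() {
//     chip.memory[CARTRIDGE_LOCATION as usize + i] = *byte;
// }
// loop { chip.cycle(); /* then render if chip.draw_flag is set, tick the timers, update chip.key */ }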
pub fn cycle(&mut self) {
// all opcodes are two bytes.
// get the byte at memory[program counter] and memory[program counter + 1],
// split them into nibbles for convenience.
let w = self.memory[self.pc as usize] >> 4;
let x = self.memory[self.pc as usize] & 0xF;
let y = self.memory[(self.pc+1) as usize] >> 4;
let z = self.memory[(self.pc+1) as usize] & 0xF;
let yz = y << 4 | z;
let xyz: u16 = (x as u16) << 8 | (y as u16) << 4 | (z as u16);
let (_x, _y, _z) = (x as usize, y as usize, z as usize);
let opcode = (w, x, y, z);
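// e.g. the two bytes 0xA2 0xF0 decode to w=0xA, x=0x2, y=0xF, z=0x0,
// yz=0xF0 and xyz=0x2F0 -- i.e. an ANNN instruction that sets idx to 0x2F0.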
if super::DEBUG {
println!("=================\nregisters: {:02x?}", self.reg);
println!("pc: 0x{:02x}, idx: 0x{:02x}, bytes at idx: {:02x?}",
self.pc, self.idx,
&self.memory[self.idx as usize..(self.idx+8) as usize]);
println!("executing opcode {:02x?}", opcode);
}
match opcode {
// skipping instruction 0XYZ
// clear screen.
(0x0, 0x0, 0xE, 0x0) => {
self.draw_flag = true;
self.gfx.iter_mut().for_each(|b| *b = 0);
self.pc += 2;
},
// return from subroutine.
(0x0, 0x0, 0xE, 0xE) => {
self.sp -= 1;
self.pc = self.stack[self.sp as usize];
},
// go to xyz.
(0x1, _, _, _) => self.pc = xyz,
// call subroutine at xyz.
(0x2, _, _, _) => {
self.stack[self.sp as usize] = self.pc + 2; // put next instruction on stack
self.sp += 1; // increase stack pointer
self.pc = xyz; // jump to subroutine
},
// skip next instruction if register x equals yz.
(0x3, _, _, _) => {
if self.reg[_x] == yz {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if register x doesn't equal yz.
(0x4, _, _, _) => {
if self.reg[_x] != yz {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if reg x == reg y.
(0x5, _, _, 0x0) => {
if self.reg[_x] == self.reg[_y] {
self.pc += 2;
}
self.pc += 2;
},
// set reg x to yz.
(0x6, _, _, _) => {
self.reg[_x] = yz;
self.pc += 2;
},
// add yz to reg x.
(0x7, _, _, _) => {
self.reg[_x] = self.reg[_x].wrapping_add(yz);
self.pc += 2;
},
// set reg x to value of reg y.
(0x8, _, _, 0x0) => {
self.reg[_x] = self.reg[_y];
self.pc += 2;
},
// set reg x to reg x | reg y.
(0x8, _, _, 0x1) => {
self.reg[_x] |= self.reg[_y];
self.pc += 2;
},
// set reg x to reg x & reg y.
(0x8, _, _, 0x2) => {
self.reg[_x] &= self.reg[_y];
self.pc += 2;
},
// UNDOCUMENTED. set reg x to reg x ^ reg y.
(0x8, _, _, 0x3) => {
self.reg[_x] ^= self.reg[_y];
self.pc += 2;
},
// add reg y to reg x. reg f is set to 1 when there's a carry, and to 0 when there isn't.
(0x8, _, _, 0x4) => {
let old_x = self.reg[_x];
self.reg[_x] = self.reg[_x].wrapping_add(self.reg[_y]);
self.reg[0xF] = if self.reg[_x] < old_x { 1 } else { 0 };
self.pc += 2;
},
// reg y is subtracted from reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't.
(0x8, _, _, 0x5) => {
self.reg[0xF] = if self.reg[_x] < self.reg[_y] { 0 } else { 1 };
self.reg[_x] = self.reg[_x].wrapping_sub(self.reg[_y]);
self.pc += 2;
},
// WEIRD UNDOCUMENTED LEGACY ONE. TODO: add legacy mode?
(0x8, _, _, 0x6) => {
// first attempt. newer version?
self.reg[0xF] = self.reg[_x] & 0x1;
self.reg[_x] >>= 1;
// legacy? according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set
// self.reg[0xF] = self.reg[_y] & 0x1;
// self.reg[_x] = self.reg[_y] >> 1;
self.pc += 2;
},
// UNDOCUMENTED. sets reg x to reg y minus reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't.
(0x8, _, _, 0x7) => {
self.reg[0xF] = if self.reg[_y] < self.reg[_x] { 0 } else { 1 };
self.reg[_x] = self.reg[_y].wrapping_sub(self.reg[_x]);
self.pc += 2;
},
// UNDOCUMENTED. store the most significant bit of reg x in reg f and left-shift reg x by 1.
(0x8, _, _, 0xE) => {
// according to https://en.wikipedia.org/wiki/CHIP-8#Opcode_table
self.reg[0xF] = (self.reg[_x] & (1 << 7)) >> 7;
self.reg[_x] <<= 1;
// according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set
// self.reg[0xF] = (self.reg[_y] & (1 << 7)) >> 7;
// self.reg[_x] = self.reg[_y] << 1;
self.pc += 2;
},
// skips the next instruction if reg x doesn't equal reg y.
(0x9, _, _, 0x0) => {
if self.reg[_x] != self.reg[_y] {
self.pc += 2;
}
self.pc += 2;
},
// Sets idx to the address xyz.
(0xA, _, _, _) => {
self.idx = xyz;
self.pc += 2;
},
// jump to xyz plus reg 0.
(0xB, _, _, _) => {
self.pc = xyz + (self.reg[0x0] as u16);
},
// set reg x to the result of a bitwise and operation on a random number (Typically: 0 to 255) and yz.
(0xC, _, _, _) => | ,
// draw sprites at coordinate reg x, reg y (NOT X AND Y AS I ORIGINALLY DID) with a width of 8 and a height of z.
// get z sprites from memory starting at location idx.
(0xD, _, _, _) => {
self.draw_flag = true;
let mut pixel_unset = false;
let sprites = &self.memory[self.idx as usize..(self.idx + (z as u16)) as usize];
for i in 0.._z { // for each row of 8 pixels (sprite)
// x is columns, y is rows. gfx is a flat array. starting coordinate is ((y + row number) * PIXEL_W) + x.
// every 8 bytes, we have to skip to next row, which means adding another PIXEL_W.
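// e.g. with reg x = 10, reg y = 5, row i = 2, bit j = 0: coordinate = 10 + (5 + 2) * 64 = 458,
// i.e. column 10 (458 % 64) of row 7 (458 / 64).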
if super::DEBUG {
println!("drawing byte: 0b{:08b}", sprites[i]);
}
for j in 0..8 {
let current_coordinate = self.reg[_x] as usize + ((self.reg[_y] as usize + i) * (PIXEL_W as usize)) + j;
let current_sprite_bit = (sprites[i] & (1 << (7-j))) >> (7-j);
if super::DEBUG {
println!("drawing pixel 0b{:b} at {}, {}",
current_sprite_bit,
current_coordinate % PIXEL_W as usize,
current_coordinate / PIXEL_W as usize
);
}
if self.gfx[current_coordinate % self.gfx.len()] & current_sprite_bit != 0 { // if the current byte/pixel is 1, and the sprite bit is 1,
pixel_unset = true; // then the xor operation will flip an on bit to off, meaning we need to record and set reg f.
}
self.gfx[current_coordinate % self.gfx.len()] ^= current_sprite_bit; // xor with sprite bit to draw
}
}
self.reg[0xF] = if pixel_unset { 1 } else { 0 };
self.pc += 2;
if super::DEBUG {
println!("screen:");
for i in 0..PIXEL_H {
for j in 0..PIXEL_W {
print!("{} ", self.gfx[((PIXEL_W * i) + j) as usize])
}
println!();
}
}
},
// skip next instruction if key corresponding to reg x is pressed.
(0xE, _, 0x9, 0xE) => {
if self.key[self.reg[_x] as usize] != 0 {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if key corresponding to reg x isn't pressed.
(0xE, _, 0xA, 0x1) => {
if self.key[self.reg[_x] as usize] == 0 {
self.pc += 2;
}
self.pc += 2;
},
// set reg x to value of delay timer.
(0xF, _, 0x0, 0x7) => {
self.reg[_x] = self.delay_timer;
self.pc += 2;
},
// wait for key press and store in reg x.
(0xF, _, 0x0, 0xA) => {
// we don't check for input in the middle of a cycle, so we should just pass, not incrementing program counter,
// and let the program come back to here until a key is registered.
if self.key != [0; 16] {
'key_checking: for (i, k) in self.key.iter().enumerate() { // including lifetime so we can break after only one key is stored to reg x
if *k != 0 {
self.reg[_x] = i as u8;
self.pc += 2;
break 'key_checking;
}
}
}
},
// set delay timer to value of reg x.
(0xF, _, 0x1, 0x5) => {
self.delay_timer = self.reg[_x];
self.pc += 2;
},
// set sound timer to value of reg x.
(0xF, _, 0x1, 0x8) => {
self.sound_timer = self.reg[_x];
self.pc += 2;
},
// add value of reg x to idx.
(0xF, _, 0x1, 0xE) => {
self.idx += self.reg[_x] as u16;
self.pc += 2;
},
// set idx to location of font char IN REGISTER X (not x).
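// e.g. if reg x holds 0xA, idx becomes FONT_LOCATION + 50, the start of the 5-byte sprite for 'A'.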
(0xF, _, 0x2, 0x9) => {
self.idx = FONT_LOCATION + (self.reg[_x] as u16 * 5);
self.pc += 2;
},
// store the binary-coded decimal representation of reg x in memory[idx..=idx+2].
(0xF, _, 0x3, 0x3) => {
self.memory[self.idx as usize] = self.reg[_x] / 100;
self.memory[self.idx as usize + 1] = (self.reg[_x] % 100) / 10;
self.memory[self.idx as usize + 2] = self.reg[_x] % 10;
self.pc += 2;
},
// store reg 0.. reg x (inclusive) in memory[idx..]. don't modify idx.
(0xF, _, 0x5, 0x5) => {
for i in 0..=_x {
self.memory[self.idx as usize + i] = self.reg[i];
}
self.pc += 2;
},
// load reg 0.. reg x (inclusive) from memory[idx..]. don't modify idx.
(0xF, _, 0x6, 0x5) => {
for i in 0..=_x {
self.reg[i] = self.memory[self.idx as usize + i];
}
self.pc += 2;
},
oopsie => {
println!("illegal instruction: {:02x?}", oopsie);
self.pc += 2;
},
};
}
}
const FONT_SET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
];
| {
let rand_val: u8 = thread_rng().gen();
self.reg[_x] = yz & rand_val;
self.pc += 2;
} | conditional_block |
chip8.rs | use rand::{Rng, thread_rng};
pub const PIXEL_W: u16 = 64; // width of CHIP-8 screen
pub const PIXEL_H: u16 = 32; // height of CHIP-8 screen
pub const FONT_LOCATION: u16 = 0x80; // location of font set in system RAM
pub const CARTRIDGE_LOCATION: u16 = 0x200; // location in system RAM where game data should be loaded on boot
pub struct Chip8 {
pub memory: [u8; 4096], // RAM
pub reg: [u8; 16], // registers
pub gfx: [u8; (PIXEL_W * PIXEL_H) as usize], // pixels
stack: [u16; 16], // subroutine stack
pub key: [u8; 16], // keypad
idx: u16, // index register
pc: u16, // program counter
sp: u16, // stack pointer
pub delay_timer: u8,
pub sound_timer: u8,
pub draw_flag: bool, // set when clear screen or draw opcodes are called
}
impl Chip8 {
pub fn new() -> Chip8 {
let mut chip = Chip8 {
memory: [0;4096],
reg: [0;16],
gfx: [0; (PIXEL_W * PIXEL_H) as usize],
stack: [0; 16],
key: [0; 16],
idx: 0,
pc: CARTRIDGE_LOCATION,
sp: 0,
delay_timer: 0,
sound_timer: 0,
draw_flag: false,
};
// load font set
for (i, v) in FONT_SET.iter().enumerate() {
chip.memory[FONT_LOCATION as usize + i] = *v;
}
chip
}
pub fn cycle(&mut self) {
// all opcodes are two bytes.
// get the byte at memory[program counter] and memory[program counter + 1],
// split them into nibbles for convenience.
let w = self.memory[self.pc as usize] >> 4;
let x = self.memory[self.pc as usize] & 0xF;
let y = self.memory[(self.pc+1) as usize] >> 4;
let z = self.memory[(self.pc+1) as usize] & 0xF;
let yz = y << 4 | z;
let xyz: u16 = (x as u16) << 8 | (y as u16) << 4 | (z as u16);
let (_x, _y, _z) = (x as usize, y as usize, z as usize);
let opcode = (w, x, y, z);
if super::DEBUG {
println!("=================\nregisters: {:02x?}", self.reg);
println!("pc: 0x{:02x}, idx: 0x{:02x}, bytes at idx: {:02x?}",
self.pc, self.idx,
&self.memory[self.idx as usize..(self.idx+8) as usize]);
println!("executing opcode {:02x?}", opcode);
}
match opcode {
// skipping instruction 0XYZ
// clear screen.
(0x0, 0x0, 0xE, 0x0) => {
self.draw_flag = true;
self.gfx.iter_mut().for_each(|b| *b = 0);
self.pc += 2;
},
// return from subroutine.
(0x0, 0x0, 0xE, 0xE) => {
self.sp -= 1;
self.pc = self.stack[self.sp as usize];
},
// go to xyz.
(0x1, _, _, _) => self.pc = xyz,
// call subroutine at xyz.
(0x2, _, _, _) => {
self.stack[self.sp as usize] = self.pc + 2; // put next instruction on stack
self.sp += 1; // increase stack pointer
self.pc = xyz; // jump to subroutine
},
// skip next instruction if register x equals yz.
(0x3, _, _, _) => {
if self.reg[_x] == yz {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if register x doesn't equal yz.
(0x4, _, _, _) => {
if self.reg[_x] != yz {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if reg x == reg y.
(0x5, _, _, 0x0) => {
if self.reg[_x] == self.reg[_y] {
self.pc += 2;
}
self.pc += 2;
},
// set reg x to yz.
(0x6, _, _, _) => {
self.reg[_x] = yz;
self.pc += 2;
},
// add yz to reg x.
(0x7, _, _, _) => {
self.reg[_x] = self.reg[_x].wrapping_add(yz);
self.pc += 2;
},
// set reg x to value of reg y.
(0x8, _, _, 0x0) => {
self.reg[_x] = self.reg[_y];
self.pc += 2;
},
// set reg x to reg x | reg y.
(0x8, _, _, 0x1) => {
self.reg[_x] |= self.reg[_y];
self.pc += 2;
},
// set reg x to reg x & reg y.
(0x8, _, _, 0x2) => {
self.reg[_x] &= self.reg[_y];
self.pc += 2;
},
// UNDOCUMENTED. set reg x to reg x ^ reg y.
(0x8, _, _, 0x3) => {
self.reg[_x] ^= self.reg[_y];
self.pc += 2;
},
// add reg y to reg x. reg f is set to 1 when there's a carry, and to 0 when there isn't.
(0x8, _, _, 0x4) => {
let old_x = self.reg[_x];
self.reg[_x] = self.reg[_x].wrapping_add(self.reg[_y]);
self.reg[0xF] = if self.reg[_x] < old_x { 1 } else { 0 };
self.pc += 2;
},
// reg y is subtracted from reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't.
(0x8, _, _, 0x5) => {
self.reg[0xF] = if self.reg[_x] < self.reg[_y] { 0 } else { 1 };
self.reg[_x] = self.reg[_x].wrapping_sub(self.reg[_y]);
self.pc += 2;
},
// WEIRD UNDOCUMENTED LEGACY ONE. TODO: add legacy mode?
(0x8, _, _, 0x6) => {
// first attempt. newer version?
self.reg[0xF] = self.reg[_x] & 0x1;
self.reg[_x] >>= 1;
// legacy? according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set
// self.reg[0xF] = self.reg[_y] & 0x1;
// self.reg[_x] = self.reg[_y] >> 1;
self.pc += 2;
},
// UNDOCUMENTED. sets reg x to reg y minus reg x. reg f is set to 0 when there's a borrow, and 1 when there isn't.
(0x8, _, _, 0x7) => {
self.reg[0xF] = if self.reg[_y] < self.reg[_x] { 0 } else { 1 };
self.reg[_x] = self.reg[_y].wrapping_sub(self.reg[_x]);
self.pc += 2;
},
// UNDOCUMENTED. store the most significant bit of reg x in reg f and left-shift reg x by 1.
(0x8, _, _, 0xE) => {
// according to https://en.wikipedia.org/wiki/CHIP-8#Opcode_table
self.reg[0xF] = (self.reg[_x] & (1 << 7)) >> 7;
self.reg[_x] <<= 1;
// according to https://github.com/mattmikolay/chip-8/wiki/CHIP%E2%80%908-Instruction-Set
// self.reg[0xF] = (self.reg[_y] & (1 << 7)) >> 7;
// self.reg[_x] = self.reg[_y] << 1;
self.pc += 2;
},
// skips the next instruction if reg x doesn't equal reg y.
(0x9, _, _, 0x0) => {
if self.reg[_x] != self.reg[_y] {
self.pc += 2;
}
self.pc += 2;
},
// Sets idx to the address xyz.
(0xA, _, _, _) => {
self.idx = xyz;
self.pc += 2;
},
// jump to xyz plus reg 0.
(0xB, _, _, _) => {
self.pc = xyz + (self.reg[0x0] as u16);
},
// set reg x to the result of a bitwise and operation on a random number (Typically: 0 to 255) and yz.
(0xC, _, _, _) => {
let rand_val: u8 = thread_rng().gen();
self.reg[_x] = yz & rand_val;
self.pc += 2;
},
// draw sprites at coordinate reg x, reg y (NOT X AND Y AS I ORIGINALLY DID) with a width of 8 and a height of z.
// get z sprites from memory starting at location idx.
(0xD, _, _, _) => {
self.draw_flag = true;
let mut pixel_unset = false;
let sprites = &self.memory[self.idx as usize..(self.idx + (z as u16)) as usize];
for i in 0.._z { // for each row of 8 pixels (sprite)
// x is columns, y is rows. gfx is a flat array. starting coordinate is ((y + row number) * PIXEL_W) + x.
// every 8 bytes, we have to skip to next row, which means adding another PIXEL_W.
if super::DEBUG {
println!("drawing byte: 0b{:08b}", sprites[i]);
}
for j in 0..8 {
let current_coordinate = self.reg[_x] as usize + ((self.reg[_y] as usize + i) * (PIXEL_W as usize)) + j;
let current_sprite_bit = (sprites[i] & (1 << (7-j))) >> (7-j); | current_sprite_bit,
current_coordinate % PIXEL_W as usize,
current_coordinate / PIXEL_W as usize
);
}
if self.gfx[current_coordinate % self.gfx.len()] & current_sprite_bit != 0 { // if the current byte/pixel is 1, and the sprite bit is 1,
pixel_unset = true; // then the xor operation will flip an on bit to off, meaning we need to record and set reg f.
}
self.gfx[current_coordinate % self.gfx.len()] ^= current_sprite_bit; // xor with sprite bit to draw
}
}
self.reg[0xF] = if pixel_unset { 1 } else { 0 };
self.pc += 2;
if super::DEBUG {
println!("screen:");
for i in 0..PIXEL_H {
for j in 0..PIXEL_W {
print!("{} ", self.gfx[((PIXEL_W * i) + j) as usize])
}
println!();
}
}
},
// skip next instruction if key corresponding to reg x is pressed.
(0xE, _, 0x9, 0xE) => {
if self.key[self.reg[_x] as usize] != 0 {
self.pc += 2;
}
self.pc += 2;
},
// skip next instruction if key corresponding to reg x isn't pressed.
(0xE, _, 0xA, 0x1) => {
if self.key[self.reg[_x] as usize] == 0 {
self.pc += 2;
}
self.pc += 2;
},
// set reg x to value of delay timer.
(0xF, _, 0x0, 0x7) => {
self.reg[_x] = self.delay_timer;
self.pc += 2;
},
// wait for key press and store in reg x.
(0xF, _, 0x0, 0xA) => {
// we don't check for input in the middle of a cycle, so we should just pass, not incrementing program counter,
// and let the program come back to here until a key is registered.
if self.key != [0; 16] {
'key_checking: for (i, k) in self.key.iter().enumerate() { // including lifetime so we can break after only one key is stored to reg x
if *k != 0 {
self.reg[_x] = i as u8;
self.pc += 2;
break 'key_checking;
}
}
}
},
// set delay timer to value of reg x.
(0xF, _, 0x1, 0x5) => {
self.delay_timer = self.reg[_x];
self.pc += 2;
},
// set sound timer to value of reg x.
(0xF, _, 0x1, 0x8) => {
self.sound_timer = self.reg[_x];
self.pc += 2;
},
// add value of reg x to idx.
(0xF, _, 0x1, 0xE) => {
self.idx += self.reg[_x] as u16;
self.pc += 2;
},
// set idx to location of font char IN REGISTER X (not x).
(0xF, _, 0x2, 0x9) => {
self.idx = FONT_LOCATION + (self.reg[_x] as u16 * 5);
self.pc += 2;
},
// store the binary-coded decimal representation of reg x in memory[idx..=idx+2].
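// e.g. if reg x holds 156, memory[idx..=idx+2] becomes [1, 5, 6].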
(0xF, _, 0x3, 0x3) => {
self.memory[self.idx as usize] = self.reg[_x] / 100;
self.memory[self.idx as usize + 1] = (self.reg[_x] % 100) / 10;
self.memory[self.idx as usize + 2] = self.reg[_x] % 10;
self.pc += 2;
},
// store reg 0.. reg x (inclusive) in memory[idx..]. don't modify idx.
(0xF, _, 0x5, 0x5) => {
for i in 0..=_x {
self.memory[self.idx as usize + i] = self.reg[i];
}
self.pc += 2;
},
// load reg 0.. reg x (inclusive) from memory[idx..]. don't modify idx.
(0xF, _, 0x6, 0x5) => {
for i in 0..=_x {
self.reg[i] = self.memory[self.idx as usize + i];
}
self.pc += 2;
},
oopsie => {
println!("illegal instruction: {:02x?}", oopsie);
self.pc += 2;
},
};
}
}
const FONT_SET: [u8; 80] = [
0xF0, 0x90, 0x90, 0x90, 0xF0, // 0
0x20, 0x60, 0x20, 0x20, 0x70, // 1
0xF0, 0x10, 0xF0, 0x80, 0xF0, // 2
0xF0, 0x10, 0xF0, 0x10, 0xF0, // 3
0x90, 0x90, 0xF0, 0x10, 0x10, // 4
0xF0, 0x80, 0xF0, 0x10, 0xF0, // 5
0xF0, 0x80, 0xF0, 0x90, 0xF0, // 6
0xF0, 0x10, 0x20, 0x40, 0x40, // 7
0xF0, 0x90, 0xF0, 0x90, 0xF0, // 8
0xF0, 0x90, 0xF0, 0x10, 0xF0, // 9
0xF0, 0x90, 0xF0, 0x90, 0x90, // A
0xE0, 0x90, 0xE0, 0x90, 0xE0, // B
0xF0, 0x80, 0x80, 0x80, 0xF0, // C
0xE0, 0x90, 0x90, 0x90, 0xE0, // D
0xF0, 0x80, 0xF0, 0x80, 0xF0, // E
0xF0, 0x80, 0xF0, 0x80, 0x80 // F
]; |
if super::DEBUG {
println!("drawing pixel 0b{:b} at {}, {}", | random_line_split |
mod.rs | //! Boa's implementation of ECMAScript's global `BigInt` object.
//!
//! `BigInt` is a built-in object that provides a way to represent whole numbers larger
//! than the largest number JavaScript can reliably represent with the Number primitive
//! and represented by the `Number.MAX_SAFE_INTEGER` constant.
//! `BigInt` can be used for arbitrarily large integers.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-bigint-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt
use crate::{
builtins::BuiltInObject,
context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors},
error::JsNativeError,
object::JsObject,
property::Attribute,
realm::Realm,
symbol::JsSymbol,
value::{IntegerOrInfinity, PreferredType},
Context, JsArgs, JsBigInt, JsResult, JsValue,
};
use boa_profiler::Profiler;
use num_bigint::ToBigInt;
use super::{BuiltInBuilder, BuiltInConstructor, IntrinsicObject};
#[cfg(test)]
mod tests;
/// `BigInt` implementation.
#[derive(Debug, Clone, Copy)]
pub struct BigInt;
| impl IntrinsicObject for BigInt {
fn init(realm: &Realm) {
let _timer = Profiler::global().start_event(Self::NAME, "init");
BuiltInBuilder::from_standard_constructor::<Self>(realm)
.method(Self::to_string, "toString", 0)
.method(Self::value_of, "valueOf", 0)
.static_method(Self::as_int_n, "asIntN", 2)
.static_method(Self::as_uint_n, "asUintN", 2)
.property(
JsSymbol::to_string_tag(),
Self::NAME,
Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE,
)
.build();
}
fn get(intrinsics: &Intrinsics) -> JsObject {
Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor()
}
}
impl BuiltInObject for BigInt {
const NAME: &'static str = "BigInt";
}
impl BuiltInConstructor for BigInt {
const LENGTH: usize = 1;
const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor =
StandardConstructors::bigint;
/// `BigInt()`
///
/// The `BigInt()` constructor is used to create `BigInt` objects.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint-objects
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/BigInt
fn constructor(
new_target: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
// 1. If NewTarget is not undefined, throw a TypeError exception.
if !new_target.is_undefined() {
return Err(JsNativeError::typ()
.with_message("BigInt is not a constructor")
.into());
}
let value = args.get_or_undefined(0);
// 2. Let prim be? ToPrimitive(value, number).
let prim = value.to_primitive(context, PreferredType::Number)?;
// 3. If Type(prim) is Number, return? NumberToBigInt(prim).
if let Some(number) = prim.as_number() {
return Self::number_to_bigint(number);
}
// 4. Otherwise, return? ToBigInt(prim).
Ok(prim.to_bigint(context)?.into())
}
}
impl BigInt {
/// `NumberToBigInt ( number )`
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-numbertobigint
fn number_to_bigint(number: f64) -> JsResult<JsValue> {
// 1. If IsIntegralNumber(number) is false, throw a RangeError exception.
if number.is_nan() || number.is_infinite() || number.fract() != 0.0 {
return Err(JsNativeError::range()
.with_message(format!("cannot convert {number} to a BigInt"))
.into());
}
// 2. Return the BigInt value that represents ℝ(number).
Ok(JsBigInt::from(number.to_bigint().expect("This conversion must be safe")).into())
}
/// The abstract operation `thisBigIntValue` takes argument value.
///
/// The phrase “this `BigInt` value” within the specification of a method refers to the
/// result returned by calling the abstract operation `thisBigIntValue` with the `this` value
/// of the method invocation passed as the argument.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-thisbigintvalue
fn this_bigint_value(value: &JsValue) -> JsResult<JsBigInt> {
value
// 1. If Type(value) is BigInt, return value.
.as_bigint()
.cloned()
// 2. If Type(value) is Object and value has a [[BigIntData]] internal slot, then
// a. Assert: Type(value.[[BigIntData]]) is BigInt.
// b. Return value.[[BigIntData]].
.or_else(|| {
value
.as_object()
.and_then(|obj| obj.borrow().as_bigint().cloned())
})
// 3. Throw a TypeError exception.
.ok_or_else(|| {
JsNativeError::typ()
.with_message("'this' is not a BigInt")
.into()
})
}
/// `BigInt.prototype.toString( [radix] )`
///
/// The `toString()` method returns a string representing the specified `BigInt` object.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.tostring
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/toString
#[allow(clippy::wrong_self_convention)]
pub(crate) fn to_string(
this: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
// 1. Let x be? thisBigIntValue(this value).
let x = Self::this_bigint_value(this)?;
let radix = args.get_or_undefined(0);
// 2. If radix is undefined, let radixMV be 10.
let radix_mv = if radix.is_undefined() {
// 5. If radixMV = 10, return! ToString(x).
// Note: early return optimization.
return Ok(x.to_string().into());
// 3. Else, let radixMV be? ToIntegerOrInfinity(radix).
} else {
radix.to_integer_or_infinity(context)?
};
// 4. If radixMV < 2 or radixMV > 36, throw a RangeError exception.
let radix_mv = match radix_mv {
IntegerOrInfinity::Integer(i) if (2..=36).contains(&i) => i,
_ => {
return Err(JsNativeError::range()
.with_message("radix must be an integer at least 2 and no greater than 36")
.into())
}
};
// 5. If radixMV = 10, return! ToString(x).
if radix_mv == 10 {
return Ok(x.to_string().into());
}
// 1. Let x be? thisBigIntValue(this value).
// 6. Return the String representation of this Number value using the radix specified by radixMV.
// Letters a-z are used for digits with values 10 through 35.
// The precise algorithm is implementation-defined, however the algorithm should be a generalization of that specified in 6.1.6.2.23.
Ok(JsValue::new(x.to_string_radix(radix_mv as u32)))
}
/// `BigInt.prototype.valueOf()`
///
/// The `valueOf()` method returns the wrapped primitive value of a Number object.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.valueof
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/valueOf
pub(crate) fn value_of(
this: &JsValue,
_: &[JsValue],
_: &mut Context<'_>,
) -> JsResult<JsValue> {
Ok(JsValue::new(Self::this_bigint_value(this)?))
}
/// `BigInt.asIntN()`
///
/// The `BigInt.asIntN()` method wraps the value of a `BigInt` to a signed integer between `-2**(width - 1)` and `2**(width-1) - 1`.
///
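/// For example, `BigInt.asIntN(8, 200n)` evaluates to `-56n`: 200 mod 2**8 is 200, which is
/// at least 2**7, so it wraps around to 200 - 256 = -56.
///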
/// [spec]: https://tc39.es/ecma262/#sec-bigint.asintn
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asIntN
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_int_n(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let (modulo, bits) = Self::calculate_as_uint_n(args, context)?;
if bits > 0
&& modulo >= JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits) - 1))?
{
Ok(JsValue::new(JsBigInt::sub(
&modulo,
&JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?,
)))
} else {
Ok(JsValue::new(modulo))
}
}
/// `BigInt.asUintN()`
///
/// The `BigInt.asUintN()` method wraps the value of a `BigInt` to an unsigned integer between `0` and `2**(width) - 1`.
///
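/// For example, `BigInt.asUintN(8, 300n)` evaluates to `44n`, since 300 mod 2**8 = 44.
///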
/// [spec]: https://tc39.es/ecma262/#sec-bigint.asuintn
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asUintN
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_uint_n(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let (modulo, _) = Self::calculate_as_uint_n(args, context)?;
Ok(JsValue::new(modulo))
}
/// Helper function to wrap the value of a `BigInt` to an unsigned integer.
///
/// This function expects the same arguments as `as_uint_n` and wraps the value of a `BigInt`.
/// Additionally to the wrapped unsigned value it returns the converted `bits` argument, so it
/// can be reused from the `as_int_n` method.
fn calculate_as_uint_n(
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<(JsBigInt, u32)> {
let bits_arg = args.get_or_undefined(0);
let bigint_arg = args.get_or_undefined(1);
let bits = bits_arg.to_index(context)?;
let bits = u32::try_from(bits).unwrap_or(u32::MAX);
let bigint = bigint_arg.to_bigint(context)?;
Ok((
JsBigInt::mod_floor(
&bigint,
&JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?,
),
bits,
))
}
} | random_line_split |
|
mod.rs | //! Boa's implementation of ECMAScript's global `BigInt` object.
//!
//! `BigInt` is a built-in object that provides a way to represent whole numbers larger
//! than the largest number JavaScript can reliably represent with the Number primitive
//! and represented by the `Number.MAX_SAFE_INTEGER` constant.
//! `BigInt` can be used for arbitrarily large integers.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-bigint-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt
use crate::{
builtins::BuiltInObject,
context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors},
error::JsNativeError,
object::JsObject,
property::Attribute,
realm::Realm,
symbol::JsSymbol,
value::{IntegerOrInfinity, PreferredType},
Context, JsArgs, JsBigInt, JsResult, JsValue,
};
use boa_profiler::Profiler;
use num_bigint::ToBigInt;
use super::{BuiltInBuilder, BuiltInConstructor, IntrinsicObject};
#[cfg(test)]
mod tests;
/// `BigInt` implementation.
#[derive(Debug, Clone, Copy)]
pub struct BigInt;
impl IntrinsicObject for BigInt {
fn | (realm: &Realm) {
let _timer = Profiler::global().start_event(Self::NAME, "init");
BuiltInBuilder::from_standard_constructor::<Self>(realm)
.method(Self::to_string, "toString", 0)
.method(Self::value_of, "valueOf", 0)
.static_method(Self::as_int_n, "asIntN", 2)
.static_method(Self::as_uint_n, "asUintN", 2)
.property(
JsSymbol::to_string_tag(),
Self::NAME,
Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE,
)
.build();
}
fn get(intrinsics: &Intrinsics) -> JsObject {
Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor()
}
}
impl BuiltInObject for BigInt {
const NAME: &'static str = "BigInt";
}
impl BuiltInConstructor for BigInt {
const LENGTH: usize = 1;
const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor =
StandardConstructors::bigint;
/// `BigInt()`
///
/// The `BigInt()` constructor is used to create `BigInt` objects.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint-objects
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/BigInt
fn constructor(
new_target: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
// 1. If NewTarget is not undefined, throw a TypeError exception.
if !new_target.is_undefined() {
return Err(JsNativeError::typ()
.with_message("BigInt is not a constructor")
.into());
}
let value = args.get_or_undefined(0);
// 2. Let prim be? ToPrimitive(value, number).
let prim = value.to_primitive(context, PreferredType::Number)?;
// 3. If Type(prim) is Number, return? NumberToBigInt(prim).
if let Some(number) = prim.as_number() {
return Self::number_to_bigint(number);
}
// 4. Otherwise, return? ToBigInt(prim).
Ok(prim.to_bigint(context)?.into())
}
}
impl BigInt {
/// `NumberToBigInt ( number )`
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-numbertobigint
fn number_to_bigint(number: f64) -> JsResult<JsValue> {
// 1. If IsIntegralNumber(number) is false, throw a RangeError exception.
if number.is_nan() || number.is_infinite() || number.fract() != 0.0 {
return Err(JsNativeError::range()
.with_message(format!("cannot convert {number} to a BigInt"))
.into());
}
// 2. Return the BigInt value that represents ℝ(number).
Ok(JsBigInt::from(number.to_bigint().expect("This conversion must be safe")).into())
}
/// The abstract operation `thisBigIntValue` takes argument value.
///
/// The phrase “this `BigInt` value” within the specification of a method refers to the
/// result returned by calling the abstract operation `thisBigIntValue` with the `this` value
/// of the method invocation passed as the argument.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-thisbigintvalue
fn this_bigint_value(value: &JsValue) -> JsResult<JsBigInt> {
value
// 1. If Type(value) is BigInt, return value.
.as_bigint()
.cloned()
// 2. If Type(value) is Object and value has a [[BigIntData]] internal slot, then
// a. Assert: Type(value.[[BigIntData]]) is BigInt.
// b. Return value.[[BigIntData]].
.or_else(|| {
value
.as_object()
.and_then(|obj| obj.borrow().as_bigint().cloned())
})
// 3. Throw a TypeError exception.
.ok_or_else(|| {
JsNativeError::typ()
.with_message("'this' is not a BigInt")
.into()
})
}
/// `BigInt.prototype.toString( [radix] )`
///
/// The `toString()` method returns a string representing the specified `BigInt` object.
///
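/// For example, `(255n).toString(16)` yields `"ff"` and `(255n).toString(2)` yields `"11111111"`.
///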
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.tostring
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/toString
#[allow(clippy::wrong_self_convention)]
pub(crate) fn to_string(
this: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
// 1. Let x be? thisBigIntValue(this value).
let x = Self::this_bigint_value(this)?;
let radix = args.get_or_undefined(0);
// 2. If radix is undefined, let radixMV be 10.
let radix_mv = if radix.is_undefined() {
// 5. If radixMV = 10, return! ToString(x).
// Note: early return optimization.
return Ok(x.to_string().into());
// 3. Else, let radixMV be? ToIntegerOrInfinity(radix).
} else {
radix.to_integer_or_infinity(context)?
};
// 4. If radixMV < 2 or radixMV > 36, throw a RangeError exception.
let radix_mv = match radix_mv {
IntegerOrInfinity::Integer(i) if (2..=36).contains(&i) => i,
_ => {
return Err(JsNativeError::range()
.with_message("radix must be an integer at least 2 and no greater than 36")
.into())
}
};
// 5. If radixMV = 10, return! ToString(x).
if radix_mv == 10 {
return Ok(x.to_string().into());
}
// 1. Let x be? thisBigIntValue(this value).
// 6. Return the String representation of this Number value using the radix specified by radixMV.
// Letters a-z are used for digits with values 10 through 35.
// The precise algorithm is implementation-defined, however the algorithm should be a generalization of that specified in 6.1.6.2.23.
Ok(JsValue::new(x.to_string_radix(radix_mv as u32)))
}
/// `BigInt.prototype.valueOf()`
///
/// The `valueOf()` method returns the wrapped primitive value of a Number object.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.valueof
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/valueOf
pub(crate) fn value_of(
this: &JsValue,
_: &[JsValue],
_: &mut Context<'_>,
) -> JsResult<JsValue> {
Ok(JsValue::new(Self::this_bigint_value(this)?))
}
/// `BigInt.asIntN()`
///
/// The `BigInt.asIntN()` method wraps the value of a `BigInt` to a signed integer between `-2**(width - 1)` and `2**(width-1) - 1`.
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.asintn
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asIntN
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_int_n(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let (modulo, bits) = Self::calculate_as_uint_n(args, context)?;
if bits > 0
&& modulo >= JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits) - 1))?
{
Ok(JsValue::new(JsBigInt::sub(
&modulo,
&JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?,
)))
} else {
Ok(JsValue::new(modulo))
}
}
/// `BigInt.asUintN()`
///
/// The `BigInt.asUintN()` method wraps the value of a `BigInt` to an unsigned integer between `0` and `2**(width) - 1`.
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.asuintn
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asUintN
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_uint_n(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let (modulo, _) = Self::calculate_as_uint_n(args, context)?;
Ok(JsValue::new(modulo))
}
/// Helper function to wrap the value of a `BigInt` to an unsigned integer.
///
/// This function expects the same arguments as `as_uint_n` and wraps the value of a `BigInt`.
/// Additionally to the wrapped unsigned value it returns the converted `bits` argument, so it
/// can be reused from the `as_int_n` method.
fn calculate_as_uint_n(
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<(JsBigInt, u32)> {
let bits_arg = args.get_or_undefined(0);
let bigint_arg = args.get_or_undefined(1);
let bits = bits_arg.to_index(context)?;
let bits = u32::try_from(bits).unwrap_or(u32::MAX);
let bigint = bigint_arg.to_bigint(context)?;
Ok((
JsBigInt::mod_floor(
&bigint,
&JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?,
),
bits,
))
}
}
| init | identifier_name |
mod.rs | //! Boa's implementation of ECMAScript's global `BigInt` object.
//!
//! `BigInt` is a built-in object that provides a way to represent whole numbers larger
//! than the largest number JavaScript can reliably represent with the Number primitive
//! and represented by the `Number.MAX_SAFE_INTEGER` constant.
//! `BigInt` can be used for arbitrarily large integers.
//!
//! More information:
//! - [ECMAScript reference][spec]
//! - [MDN documentation][mdn]
//!
//! [spec]: https://tc39.es/ecma262/#sec-bigint-objects
//! [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt
use crate::{
builtins::BuiltInObject,
context::intrinsics::{Intrinsics, StandardConstructor, StandardConstructors},
error::JsNativeError,
object::JsObject,
property::Attribute,
realm::Realm,
symbol::JsSymbol,
value::{IntegerOrInfinity, PreferredType},
Context, JsArgs, JsBigInt, JsResult, JsValue,
};
use boa_profiler::Profiler;
use num_bigint::ToBigInt;
use super::{BuiltInBuilder, BuiltInConstructor, IntrinsicObject};
#[cfg(test)]
mod tests;
/// `BigInt` implementation.
#[derive(Debug, Clone, Copy)]
pub struct BigInt;
impl IntrinsicObject for BigInt {
fn init(realm: &Realm) {
let _timer = Profiler::global().start_event(Self::NAME, "init");
BuiltInBuilder::from_standard_constructor::<Self>(realm)
.method(Self::to_string, "toString", 0)
.method(Self::value_of, "valueOf", 0)
.static_method(Self::as_int_n, "asIntN", 2)
.static_method(Self::as_uint_n, "asUintN", 2)
.property(
JsSymbol::to_string_tag(),
Self::NAME,
Attribute::READONLY | Attribute::NON_ENUMERABLE | Attribute::CONFIGURABLE,
)
.build();
}
fn get(intrinsics: &Intrinsics) -> JsObject {
Self::STANDARD_CONSTRUCTOR(intrinsics.constructors()).constructor()
}
}
impl BuiltInObject for BigInt {
const NAME: &'static str = "BigInt";
}
impl BuiltInConstructor for BigInt {
const LENGTH: usize = 1;
const STANDARD_CONSTRUCTOR: fn(&StandardConstructors) -> &StandardConstructor =
StandardConstructors::bigint;
/// `BigInt()`
///
/// The `BigInt()` constructor is used to create `BigInt` objects.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint-objects
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/BigInt
fn constructor(
new_target: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
// 1. If NewTarget is not undefined, throw a TypeError exception.
if !new_target.is_undefined() {
return Err(JsNativeError::typ()
.with_message("BigInt is not a constructor")
.into());
}
let value = args.get_or_undefined(0);
// 2. Let prim be? ToPrimitive(value, number).
let prim = value.to_primitive(context, PreferredType::Number)?;
// 3. If Type(prim) is Number, return? NumberToBigInt(prim).
if let Some(number) = prim.as_number() {
return Self::number_to_bigint(number);
}
// 4. Otherwise, return? ToBigInt(prim).
Ok(prim.to_bigint(context)?.into())
}
}
impl BigInt {
/// `NumberToBigInt ( number )`
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-numbertobigint
fn number_to_bigint(number: f64) -> JsResult<JsValue> {
// 1. If IsIntegralNumber(number) is false, throw a RangeError exception.
if number.is_nan() || number.is_infinite() || number.fract() != 0.0 {
return Err(JsNativeError::range()
.with_message(format!("cannot convert {number} to a BigInt"))
.into());
}
// 2. Return the BigInt value that represents ℝ(number).
Ok(JsBigInt::from(number.to_bigint().expect("This conversion must be safe")).into())
}
/// The abstract operation `thisBigIntValue` takes argument value.
///
/// The phrase “this `BigInt` value” within the specification of a method refers to the
/// result returned by calling the abstract operation `thisBigIntValue` with the `this` value
/// of the method invocation passed as the argument.
///
/// More information:
/// - [ECMAScript reference][spec]
///
/// [spec]: https://tc39.es/ecma262/#sec-thisbigintvalue
fn this_bigint_value(value: &JsValue) -> JsResult<JsBigInt> {
value
// 1. If Type(value) is BigInt, return value.
.as_bigint()
.cloned()
// 2. If Type(value) is Object and value has a [[BigIntData]] internal slot, then
// a. Assert: Type(value.[[BigIntData]]) is BigInt.
// b. Return value.[[BigIntData]].
.or_else(|| {
value
.as_object()
.and_then(|obj| obj.borrow().as_bigint().cloned())
})
// 3. Throw a TypeError exception.
.ok_or_else(|| {
JsNativeError::typ()
.with_message("'this' is not a BigInt")
.into()
})
}
/// `BigInt.prototype.toString( [radix] )`
///
/// The `toString()` method returns a string representing the specified `BigInt` object.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.tostring
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/toString
#[allow(clippy::wrong_self_convention)]
pub(crate) fn to_string(
this: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
// 1. Let x be? thisBigIntValue(this value).
let x = Self::this_bigint_value(this)?;
let radix = args.get_or_undefined(0);
// 2. If radix is undefined, let radixMV be 10.
let radix_mv = if radix.is_undefined() {
// 5. If radixMV = 10, return! ToString(x).
// Note: early return optimization.
return Ok(x.to_string().into());
// 3. Else, let radixMV be? ToIntegerOrInfinity(radix).
} else {
radix.to_integer_or_infinity(context)?
};
// 4. If radixMV < 2 or radixMV > 36, throw a RangeError exception.
let radix_mv = match radix_mv {
IntegerOrInfinity::Integer(i) if (2..=36).contains(&i) => i,
_ => {
return Err(JsNativeError::range()
.with_message("radix must be an integer at least 2 and no greater than 36")
.into())
}
};
// 5. If radixMV = 10, return! ToString(x).
if radix_mv == 10 {
return Ok(x.to_string().into());
}
// 1. Let x be? thisBigIntValue(this value).
// 6. Return the String representation of this Number value using the radix specified by radixMV.
// Letters a-z are used for digits with values 10 through 35.
// The precise algorithm is implementation-defined, however the algorithm should be a generalization of that specified in 6.1.6.2.23.
Ok(JsValue::new(x.to_string_radix(radix_mv as u32)))
}
/// `BigInt.prototype.valueOf()`
///
/// The `valueOf()` method returns the wrapped primitive value of a Number object.
///
/// More information:
/// - [ECMAScript reference][spec]
/// - [MDN documentation][mdn]
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.prototype.valueof
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/valueOf
pub(crate) fn value_of(
this: &JsValue,
_: &[JsValue],
_: &mut Context<'_>,
) -> JsResult<JsValue> {
Ok(JsValue::new(Self::this_bigint_value(this)?))
}
/// `BigInt.asIntN()`
///
/// The `BigInt.asIntN()` method wraps the value of a `BigInt` to a signed integer between `-2**(width - 1)` and `2**(width-1) - 1`.
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.asintn
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asIntN
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_int_n(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let (modulo, bits) = Self::calculate_as_uint_n(args, context)?;
if bits > 0
&& modulo >= JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits) - 1))?
{
| {
Ok(JsValue::new(modulo))
}
}
/// `BigInt.asUintN()`
///
/// The `BigInt.asUintN()` method wraps the value of a `BigInt` to an unsigned integer between `0` and `2**(width) - 1`.
///
/// [spec]: https://tc39.es/ecma262/#sec-bigint.asuintn
/// [mdn]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/BigInt/asUintN
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_uint_n(
_: &JsValue,
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<JsValue> {
let (modulo, _) = Self::calculate_as_uint_n(args, context)?;
Ok(JsValue::new(modulo))
}
/// Helper function to wrap the value of a `BigInt` to an unsigned integer.
///
/// This function expects the same arguments as `as_uint_n` and wraps the value of a `BigInt`.
/// Additionally to the wrapped unsigned value it returns the converted `bits` argument, so it
/// can be reused from the `as_int_n` method.
fn calculate_as_uint_n(
args: &[JsValue],
context: &mut Context<'_>,
) -> JsResult<(JsBigInt, u32)> {
let bits_arg = args.get_or_undefined(0);
let bigint_arg = args.get_or_undefined(1);
let bits = bits_arg.to_index(context)?;
let bits = u32::try_from(bits).unwrap_or(u32::MAX);
let bigint = bigint_arg.to_bigint(context)?;
Ok((
JsBigInt::mod_floor(
&bigint,
&JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?,
),
bits,
))
}
}
| Ok(JsValue::new(JsBigInt::sub(
&modulo,
&JsBigInt::pow(&JsBigInt::new(2), &JsBigInt::new(i64::from(bits)))?,
)))
} else | conditional_block |
thread_pool.rs | use std::marker::PhantomData;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::{iter, mem};
use crossbeam::atomic::AtomicCell;
use crossbeam::deque::{Injector, Steal, Stealer, Worker};
use crossbeam::sync::{Parker, Unparker, WaitGroup};
use futures::task::{Context, Poll, Waker};
use futures::Future;
use std::pin::Pin;
/// A chunk of work with some metadata
struct Task {
_group_id: TaskGroupId,
task_fn: Box<dyn TaskFn>,
}
type TaskGroupId = usize;
pub trait TaskFn: FnOnce() + Send {}
pub trait StaticTaskFn: TaskFn + 'static {}
impl<T> TaskFn for T where T: FnOnce() + Send {}
impl<T> StaticTaskFn for T where T: TaskFn + 'static {}
impl Task {
/// Create a new task to be executed at some point
fn new<F>(group_id: TaskGroupId, f: F) -> Self
where
F: StaticTaskFn,
{
Self {
_group_id: group_id,
task_fn: Box::new(f),
}
}
/// Executes the task
/// TODO: use `FnTraits` once stable
fn call_once(self) {
(self.task_fn)()
}
}
/// A worker thread pool for compute-heavy tasks
///
/// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough?
/// TODO: schedule tasks based on group context?
///
#[derive(Debug)]
pub struct ThreadPool {
target_thread_count: usize,
global_queue: Arc<Injector<Task>>,
stealers: Vec<Stealer<Task>>,
threads: Vec<JoinHandle<()>>,
next_group_id: AtomicCell<usize>,
parked_threads: Arc<Injector<Unparker>>,
}
impl ThreadPool {
/// Creates a new thread pool with `number_of_threads` threads.
///
/// # Panics
/// Panics if `number_of_threads` is 0.
///
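/// # Example
///
/// A minimal sketch using only items from this module:
///
/// ```ignore
/// let pool = ThreadPool::new(4);
/// let ctx = pool.create_context();
/// ctx.scope(|scope| scope.compute(|| println!("hello from the pool")));
/// ```
///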
pub fn new(number_of_threads: usize) -> Self {
assert!(
number_of_threads > 0,
"There must be at least one thread for the thread pool"
);
let worker_deques: Vec<Worker<Task>> =
(0..number_of_threads).map(|_| Worker::new_fifo()).collect();
let mut thread_pool = Self {
target_thread_count: number_of_threads,
global_queue: Arc::new(Injector::new()),
stealers: worker_deques.iter().map(Worker::stealer).collect(),
threads: Vec::with_capacity(number_of_threads),
next_group_id: AtomicCell::new(0),
parked_threads: Arc::new(Injector::new()),
};
for worker_deque in worker_deques {
let global_queue = thread_pool.global_queue.clone();
let stealers = thread_pool.stealers.clone();
let parked_threads = thread_pool.parked_threads.clone();
thread_pool.threads.push(std::thread::spawn(move || {
Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads);
}))
}
thread_pool
}
fn await_work(
local: &Worker<Task>,
global: &Injector<Task>,
stealers: &[Stealer<Task>],
parked_threads: &Injector<Unparker>,
) {
let parker = Parker::new();
let unparker = parker.unparker();
loop {
// Pop a task from the local queue, if not empty.
let task = local.pop().or_else(|| {
// Otherwise, we need to look for a task elsewhere.
iter::repeat_with(|| {
// Try stealing a batch of tasks from the global queue.
global
.steal_batch_and_pop(local)
// Or try stealing a task from one of the other threads.
.or_else(|| stealers.iter().map(Stealer::steal).collect())
})
// Loop while no task was stolen and any steal operation needs to be retried.
.find(|s| !s.is_retry())
// Extract the stolen task, if there is one.
.and_then(Steal::success)
});
if let Some(task) = task {
// TODO: recover panics
task.call_once();
} else {
parked_threads.push(unparker.clone());
parker.park();
}
}
}
pub fn create_context(&self) -> ThreadPoolContext {
ThreadPoolContext::new(self, self.next_group_id.fetch_add(1))
}
fn compute(&self, task: Task) {
self.global_queue.push(task);
// un-park a thread since there is new work
if let Steal::Success(unparker) = self.parked_threads.steal() {
unparker.unpark();
}
}
}
impl Default for ThreadPool {
fn default() -> Self {
Self::new(num_cpus::get())
}
}
/// A computation context for a group that spawns tasks in a `ThreadPool`
#[derive(Copy, Clone, Debug)]
pub struct ThreadPoolContext<'pool> {
thread_pool: &'pool ThreadPool,
task_group_id: TaskGroupId,
}
impl<'pool> ThreadPoolContext<'pool> {
/// Create a new `ThreadPoolContext`
fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self {
Self {
thread_pool,
task_group_id,
}
}
/// What is the degree of parallelism that the `ThreadPool` aims for?
/// This is helpful to determine how to split the work into tasks.
pub fn degree_of_parallelism(&self) -> usize |
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: StaticTaskFn,
{
self.thread_pool
.compute(Task::new(self.task_group_id, task));
}
/// Execute a bunch of tasks in a scope that blocks until all tasks are finished.
/// Provides a lifetime for that scope.
/// TODO: provide an async version so that async workflows can do something in the meantime?
/// TODO: handle panics: if a thread panics, this function will block forever
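///
/// A typical use (see the `scoped_vec` test below) is to split a buffer into disjoint chunks,
/// fill each chunk from its own task, and rely on the scope to block until every task is done.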
pub fn scope<'scope, S>(&'pool self, scope_fn: S)
where
S: FnOnce(&Scope<'pool, 'scope>) + 'scope,
{
let scope = Scope::<'pool, 'scope> {
thread_pool_context: &self,
wait_group: WaitGroup::new(),
_scope_marker: PhantomData,
};
scope_fn(&scope);
scope.wait_group.wait();
}
}
/// A scope in which you can execute tasks and it blocks until all tasks are finished
#[derive(Debug)]
pub struct Scope<'pool, 'scope> {
thread_pool_context: &'pool ThreadPoolContext<'pool>,
wait_group: WaitGroup,
// needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183
_scope_marker: PhantomData<&'scope mut &'scope ()>,
}
impl<'pool, 'scope> Scope<'pool, 'scope> {
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: TaskFn + 'scope,
{
let wait_group = self.wait_group.clone();
// Allocate the `task` on the heap and erase the `'scope` bound.
let task: Box<dyn TaskFn + 'scope> = Box::new(task);
let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) };
self.thread_pool_context.compute(move || {
task();
// decrement `WaitGroup` counter
drop(wait_group);
});
}
/// Compute a task in the `ThreadPool` and return a `Future` of a result
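///
/// The returned `TaskResult` future can be awaited (or driven with `futures::executor::block_on`,
/// as the `compute_results` test below does) once the surrounding scope has finished.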
pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R>
where
F: FnOnce() -> R + Send + 'scope,
R: Clone + Send + 'static,
{
let future = TaskResult::default();
let future_ref = future.clone();
self.compute(move || {
future_ref.set(task());
});
future
}
}
/// A future that provides the task result
pub struct TaskResult<R> {
option: Arc<AtomicCell<TaskResultOption<R>>>,
}
// we can't derive `Clone` since it requires `R` to be `Clone` as well
impl<R> Clone for TaskResult<R> {
fn clone(&self) -> Self {
Self {
option: self.option.clone(),
}
}
}
/// The state of the `TaskResult` future
#[derive(Debug)]
enum TaskResultOption<R> {
None,
Result(R),
Waiting(Waker),
}
impl<R> Default for TaskResultOption<R> {
fn default() -> Self {
TaskResultOption::None
}
}
impl<R> TaskResult<R> {
fn set(&self, result: R) {
match self.option.swap(TaskResultOption::Result(result)) {
TaskResultOption::None => {} // do nothing
TaskResultOption::Result(_) => {
unreachable!("There must not be a second computation of the result")
}
TaskResultOption::Waiting(waker) => waker.wake(),
};
}
}
impl<R> Default for TaskResult<R> {
fn default() -> Self {
Self {
option: Default::default(),
}
}
}
impl<R> Future for TaskResult<R> {
type Output = R;
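// Polling installs (or refreshes) a waker; `set` swaps the result in and wakes the last
// registered waker, so the future resolves on its next poll.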
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self
.option
.swap(TaskResultOption::Waiting(cx.waker().clone()))
{
TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending,
TaskResultOption::Result(r) => Poll::Ready(r),
}
}
}
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use futures::future;
use super::*;
use crossbeam::utils::Backoff;
#[test]
#[allow(clippy::blacklisted_name)]
fn one_task() {
let thread_pool = ThreadPool::new(1);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(42, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_one_thread() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_two_threads() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn lots_of_tasks() {
let thread_pool = ThreadPool::new(2);
let number_of_tasks = 1_000_000;
let tasks_completed = Arc::new(AtomicI32::new(0));
for _ in 0..number_of_tasks {
let tasks_completed = tasks_completed.clone();
thread_pool.compute(Task::new(0, move || {
tasks_completed.fetch_add(1, Ordering::SeqCst);
}));
}
let backoff = Backoff::new();
while tasks_completed.load(Ordering::SeqCst) != number_of_tasks {
backoff.snooze();
}
}
#[test]
fn context() {
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = Arc::new(AtomicI32::new(0));
let result_clone = result.clone();
context.compute(move || {
result_clone.fetch_add(42, Ordering::SeqCst);
});
let backoff = Backoff::new();
while result.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn scoped() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = AtomicUsize::new(0);
context.scope(|scope| {
for _ in 0..NUMBER_OF_TASKS {
scope.compute(|| {
result.fetch_add(1, Ordering::SeqCst);
});
}
});
assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS);
}
#[test]
fn scoped_vec() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut result = vec![0; NUMBER_OF_TASKS];
context.scope(|scope| {
for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) {
scope.compute(move || chunk[0] = i);
}
});
assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result);
}
#[test]
fn compute_results() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut futures = Vec::with_capacity(NUMBER_OF_TASKS);
context.scope(|scope| {
for i in 0..NUMBER_OF_TASKS {
futures.push(scope.compute_result(move || i));
}
});
let result = futures::executor::block_on(future::join_all(futures));
assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>());
}
#[test]
fn parking() {
let thread_pool = ThreadPool::new(1);
let context = thread_pool.create_context();
// wait for the thread to be parked
let backoff = Backoff::new();
while thread_pool.parked_threads.len() == 0 {
backoff.snooze();
}
let mut unparked = false;
context.scope(|scope| scope.compute(|| unparked = true));
assert!(unparked)
}
}
| {
self.thread_pool.target_thread_count
} | identifier_body |
thread_pool.rs | use std::marker::PhantomData;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::{iter, mem};
use crossbeam::atomic::AtomicCell;
use crossbeam::deque::{Injector, Steal, Stealer, Worker};
use crossbeam::sync::{Parker, Unparker, WaitGroup};
use futures::task::{Context, Poll, Waker};
use futures::Future;
use std::pin::Pin;
/// A chunk of work with some metadata
struct Task {
_group_id: TaskGroupId,
task_fn: Box<dyn TaskFn>,
}
type TaskGroupId = usize;
pub trait TaskFn: FnOnce() + Send {}
pub trait StaticTaskFn: TaskFn + 'static {}
impl<T> TaskFn for T where T: FnOnce() + Send {}
impl<T> StaticTaskFn for T where T: TaskFn + 'static {}
impl Task {
/// Create a new task to be executed at some point
fn new<F>(group_id: TaskGroupId, f: F) -> Self
where
F: StaticTaskFn,
{
Self {
_group_id: group_id,
task_fn: Box::new(f),
}
}
/// Executes the task
/// TODO: use `FnTraits` once stable
fn call_once(self) {
(self.task_fn)()
}
}
/// A worker thread pool for compute-heavy tasks
///
/// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough?
/// TODO: schedule tasks based on group context?
///
#[derive(Debug)]
pub struct ThreadPool {
target_thread_count: usize,
global_queue: Arc<Injector<Task>>,
stealers: Vec<Stealer<Task>>,
threads: Vec<JoinHandle<()>>,
next_group_id: AtomicCell<usize>,
parked_threads: Arc<Injector<Unparker>>,
}
impl ThreadPool {
/// Creates a new thread pool with `number_of_threads` threads.
///
/// # Panics
/// Panics if `number_of_threads` is 0.
///
pub fn new(number_of_threads: usize) -> Self {
assert!(
number_of_threads > 0,
"There must be at least one thread for the thread pool"
);
let worker_deques: Vec<Worker<Task>> =
(0..number_of_threads).map(|_| Worker::new_fifo()).collect();
let mut thread_pool = Self {
target_thread_count: number_of_threads,
global_queue: Arc::new(Injector::new()),
stealers: worker_deques.iter().map(Worker::stealer).collect(),
threads: Vec::with_capacity(number_of_threads),
next_group_id: AtomicCell::new(0),
parked_threads: Arc::new(Injector::new()),
};
for worker_deque in worker_deques {
let global_queue = thread_pool.global_queue.clone();
let stealers = thread_pool.stealers.clone();
let parked_threads = thread_pool.parked_threads.clone();
thread_pool.threads.push(std::thread::spawn(move || {
Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads);
}))
}
thread_pool
}
fn await_work(
local: &Worker<Task>,
global: &Injector<Task>,
stealers: &[Stealer<Task>],
parked_threads: &Injector<Unparker>,
) {
let parker = Parker::new();
let unparker = parker.unparker();
loop {
// Pop a task from the local queue, if not empty.
let task = local.pop().or_else(|| {
// Otherwise, we need to look for a task elsewhere.
iter::repeat_with(|| {
// Try stealing a batch of tasks from the global queue.
global
.steal_batch_and_pop(local)
// Or try stealing a task from one of the other threads.
.or_else(|| stealers.iter().map(Stealer::steal).collect())
})
// Loop while no task was stolen and any steal operation needs to be retried.
.find(|s| !s.is_retry())
// Extract the stolen task, if there is one.
.and_then(Steal::success)
});
if let Some(task) = task {
// TODO: recover panics
task.call_once();
} else {
parked_threads.push(unparker.clone());
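// Block here until ThreadPool::compute steals this Unparker and wakes the thread up.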
parker.park();
}
}
}
pub fn create_context(&self) -> ThreadPoolContext {
ThreadPoolContext::new(self, self.next_group_id.fetch_add(1))
}
fn compute(&self, task: Task) {
self.global_queue.push(task);
// un-park a thread since there is new work
if let Steal::Success(unparker) = self.parked_threads.steal() {
unparker.unpark();
}
}
}
impl Default for ThreadPool {
fn default() -> Self {
Self::new(num_cpus::get())
}
}
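// Construction sketch (illustrative): `ThreadPool::new(4)` fixes the worker count, while
// `ThreadPool::default()` sizes the pool to the logical CPUs reported by `num_cpus`.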
/// A computation context for a group that spawns tasks in a `ThreadPool`
#[derive(Copy, Clone, Debug)]
pub struct ThreadPoolContext<'pool> {
thread_pool: &'pool ThreadPool,
task_group_id: TaskGroupId,
}
impl<'pool> ThreadPoolContext<'pool> {
/// Create a new `ThreadPoolContext`
fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self {
Self {
thread_pool,
task_group_id,
}
}
/// What is the degree of parallelism that the `ThreadPool` aims for?
/// This is helpful to determine how to split the work into tasks.
pub fn | (&self) -> usize {
self.thread_pool.target_thread_count
}
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: StaticTaskFn,
{
self.thread_pool
.compute(Task::new(self.task_group_id, task));
}
/// Execute a bunch of tasks in a scope that blocks until all tasks are finished.
/// Provides a lifetime for that scope.
/// TODO: provide an async version so that async workflows can do something in the meantime?
/// TODO: handle panics: if a thread panics, this function will block forever
pub fn scope<'scope, S>(&'pool self, scope_fn: S)
where
S: FnOnce(&Scope<'pool, 'scope>) + 'scope,
{
let scope = Scope::<'pool, 'scope> {
thread_pool_context: &self,
wait_group: WaitGroup::new(),
_scope_marker: PhantomData,
};
scope_fn(&scope);
scope.wait_group.wait();
}
}
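// Usage sketch (illustrative, not part of the original source):
//     let pool = ThreadPool::new(4);
//     let ctx = pool.create_context();
//     ctx.scope(|s| s.compute(|| println!("runs on a pool thread")));
// `scope` only returns after every closure spawned inside it has finished.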
/// A scope in which you can execute tasks; it blocks until all tasks are finished
#[derive(Debug)]
pub struct Scope<'pool, 'scope> {
thread_pool_context: &'pool ThreadPoolContext<'pool>,
wait_group: WaitGroup,
// needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183
_scope_marker: PhantomData<&'scope mut &'scope ()>,
}
impl<'pool, 'scope> Scope<'pool, 'scope> {
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: TaskFn + 'scope,
{
let wait_group = self.wait_group.clone();
// Allocate the `task` on the heap and erase the `'scope` bound.
let task: Box<dyn TaskFn + 'scope> = Box::new(task);
let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) };
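// Rationale (added note): erasing `'scope` is relied upon to be sound because `scope()`
// blocks on the WaitGroup until this closure has run, so the borrowed data outlives the task.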
self.thread_pool_context.compute(move || {
task();
// decrement `WaitGroup` counter
drop(wait_group);
});
}
/// Compute a task in the `ThreadPool` and return a `Future` of a result
pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R>
where
F: FnOnce() -> R + Send + 'scope,
R: Clone + Send + 'static,
{
let future = TaskResult::default();
let future_ref = future.clone();
self.compute(move || {
future_ref.set(task());
});
future
}
}
/// A future that provides the task result
pub struct TaskResult<R> {
option: Arc<AtomicCell<TaskResultOption<R>>>,
}
// we can't derive `Clone` since it requires `R` to be `Clone` as well
impl<R> Clone for TaskResult<R> {
fn clone(&self) -> Self {
Self {
option: self.option.clone(),
}
}
}
/// The state of the `TaskResult` future
#[derive(Debug)]
enum TaskResultOption<R> {
None,
Result(R),
Waiting(Waker),
}
impl<R> Default for TaskResultOption<R> {
fn default() -> Self {
TaskResultOption::None
}
}
impl<R> TaskResult<R> {
fn set(&self, result: R) {
match self.option.swap(TaskResultOption::Result(result)) {
TaskResultOption::None => {} // do nothing
TaskResultOption::Result(_) => {
unreachable!("There must not be a second computation of the result")
}
TaskResultOption::Waiting(waker) => waker.wake(),
};
}
}
impl<R> Default for TaskResult<R> {
fn default() -> Self {
Self {
option: Default::default(),
}
}
}
impl<R> Future for TaskResult<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self
.option
.swap(TaskResultOption::Waiting(cx.waker().clone()))
{
TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending,
TaskResultOption::Result(r) => Poll::Ready(r),
}
}
}
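// Await sketch (illustrative): `compute_result` returns this future, e.g.
//     let four = futures::executor::block_on(scope.compute_result(|| 2 + 2));
// Each poll parks a fresh Waker in the cell; `set` wakes it once the worker stores the result.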
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use futures::future;
use super::*;
use crossbeam::utils::Backoff;
#[test]
#[allow(clippy::blacklisted_name)]
fn one_task() {
let thread_pool = ThreadPool::new(1);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(42, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_one_thread() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_two_threads() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn lots_of_tasks() {
let thread_pool = ThreadPool::new(2);
let number_of_tasks = 1_000_000;
let tasks_completed = Arc::new(AtomicI32::new(0));
for _ in 0..number_of_tasks {
let tasks_completed = tasks_completed.clone();
thread_pool.compute(Task::new(0, move || {
tasks_completed.fetch_add(1, Ordering::SeqCst);
}));
}
let backoff = Backoff::new();
while tasks_completed.load(Ordering::SeqCst) != number_of_tasks {
backoff.snooze();
}
}
#[test]
fn context() {
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = Arc::new(AtomicI32::new(0));
let result_clone = result.clone();
context.compute(move || {
result_clone.fetch_add(42, Ordering::SeqCst);
});
let backoff = Backoff::new();
while result.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn scoped() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = AtomicUsize::new(0);
context.scope(|scope| {
for _ in 0..NUMBER_OF_TASKS {
scope.compute(|| {
result.fetch_add(1, Ordering::SeqCst);
});
}
});
assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS);
}
#[test]
fn scoped_vec() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut result = vec![0; NUMBER_OF_TASKS];
context.scope(|scope| {
for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) {
scope.compute(move || chunk[0] = i);
}
});
assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result);
}
#[test]
fn compute_results() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut futures = Vec::with_capacity(NUMBER_OF_TASKS);
context.scope(|scope| {
for i in 0..NUMBER_OF_TASKS {
futures.push(scope.compute_result(move || i));
}
});
let result = futures::executor::block_on(future::join_all(futures));
assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>());
}
#[test]
fn parking() {
let thread_pool = ThreadPool::new(1);
let context = thread_pool.create_context();
// wait for the thread to be parked
let backoff = Backoff::new();
while thread_pool.parked_threads.len() == 0 {
backoff.snooze();
}
let mut unparked = false;
context.scope(|scope| scope.compute(|| unparked = true));
assert!(unparked)
}
}
| degree_of_parallelism | identifier_name |
thread_pool.rs | use std::marker::PhantomData;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::{iter, mem};
use crossbeam::atomic::AtomicCell;
use crossbeam::deque::{Injector, Steal, Stealer, Worker};
use crossbeam::sync::{Parker, Unparker, WaitGroup};
use futures::task::{Context, Poll, Waker};
use futures::Future;
use std::pin::Pin;
/// A chunk of work with some metadata
struct Task {
_group_id: TaskGroupId,
task_fn: Box<dyn TaskFn>,
}
type TaskGroupId = usize;
pub trait TaskFn: FnOnce() + Send {}
pub trait StaticTaskFn: TaskFn + 'static {}
impl<T> TaskFn for T where T: FnOnce() + Send {}
impl<T> StaticTaskFn for T where T: TaskFn + 'static {}
impl Task {
/// Create a new task to be executed at some point
fn new<F>(group_id: TaskGroupId, f: F) -> Self
where
F: StaticTaskFn,
{
Self {
_group_id: group_id,
task_fn: Box::new(f),
}
}
/// Executes the task
/// TODO: use `FnTraits` once stable
fn call_once(self) {
(self.task_fn)()
}
}
/// A worker thread pool for compute-heavy tasks
///
/// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough?
/// TODO: schedule tasks based on group context?
///
#[derive(Debug)]
pub struct ThreadPool {
target_thread_count: usize,
global_queue: Arc<Injector<Task>>,
stealers: Vec<Stealer<Task>>,
threads: Vec<JoinHandle<()>>,
next_group_id: AtomicCell<usize>,
parked_threads: Arc<Injector<Unparker>>,
}
impl ThreadPool {
/// Creates a new thread pool with `number_of_threads` threads.
///
/// # Panics
/// Panics if `number_of_threads` is 0.
///
pub fn new(number_of_threads: usize) -> Self {
assert!(
number_of_threads > 0,
"There must be at least one thread for the thread pool"
);
let worker_deques: Vec<Worker<Task>> =
(0..number_of_threads).map(|_| Worker::new_fifo()).collect();
let mut thread_pool = Self {
target_thread_count: number_of_threads,
global_queue: Arc::new(Injector::new()),
stealers: worker_deques.iter().map(Worker::stealer).collect(),
threads: Vec::with_capacity(number_of_threads),
next_group_id: AtomicCell::new(0),
parked_threads: Arc::new(Injector::new()),
};
for worker_deque in worker_deques {
let global_queue = thread_pool.global_queue.clone();
let stealers = thread_pool.stealers.clone();
let parked_threads = thread_pool.parked_threads.clone();
thread_pool.threads.push(std::thread::spawn(move || {
Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads);
}))
}
thread_pool
}
fn await_work(
local: &Worker<Task>,
global: &Injector<Task>,
stealers: &[Stealer<Task>],
parked_threads: &Injector<Unparker>,
) {
let parker = Parker::new();
let unparker = parker.unparker();
loop {
// Pop a task from the local queue, if not empty.
let task = local.pop().or_else(|| {
// Otherwise, we need to look for a task elsewhere.
iter::repeat_with(|| {
// Try stealing a batch of tasks from the global queue.
global
.steal_batch_and_pop(local)
// Or try stealing a task from one of the other threads.
.or_else(|| stealers.iter().map(Stealer::steal).collect())
})
// Loop while no task was stolen and any steal operation needs to be retried.
.find(|s| !s.is_retry())
// Extract the stolen task, if there is one.
.and_then(Steal::success)
});
if let Some(task) = task | else {
parked_threads.push(unparker.clone());
parker.park();
}
}
}
pub fn create_context(&self) -> ThreadPoolContext {
ThreadPoolContext::new(self, self.next_group_id.fetch_add(1))
}
fn compute(&self, task: Task) {
self.global_queue.push(task);
// un-park a thread since there is new work
if let Steal::Success(unparker) = self.parked_threads.steal() {
unparker.unpark();
}
}
}
impl Default for ThreadPool {
fn default() -> Self {
Self::new(num_cpus::get())
}
}
/// A computation context for a group that spawns tasks in a `ThreadPool`
#[derive(Copy, Clone, Debug)]
pub struct ThreadPoolContext<'pool> {
thread_pool: &'pool ThreadPool,
task_group_id: TaskGroupId,
}
impl<'pool> ThreadPoolContext<'pool> {
/// Create a new `ThreadPoolContext`
fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self {
Self {
thread_pool,
task_group_id,
}
}
/// What is the degree of parallelism that the `ThreadPool` aims for?
/// This is helpful to determine how to split the work into tasks.
pub fn degree_of_parallelism(&self) -> usize {
self.thread_pool.target_thread_count
}
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: StaticTaskFn,
{
self.thread_pool
.compute(Task::new(self.task_group_id, task));
}
/// Execute a bunch of tasks in a scope that blocks until all tasks are finished.
/// Provides a lifetime for that scope.
/// TODO: provide an async version so that async workflows can do something in the meantime?
/// TODO: handle panics: if a thread panics, this function will block forever
pub fn scope<'scope, S>(&'pool self, scope_fn: S)
where
S: FnOnce(&Scope<'pool, 'scope>) + 'scope,
{
let scope = Scope::<'pool, 'scope> {
thread_pool_context: &self,
wait_group: WaitGroup::new(),
_scope_marker: PhantomData,
};
scope_fn(&scope);
scope.wait_group.wait();
}
}
/// A scope in which you can execute tasks; it blocks until all tasks are finished
#[derive(Debug)]
pub struct Scope<'pool, 'scope> {
thread_pool_context: &'pool ThreadPoolContext<'pool>,
wait_group: WaitGroup,
// needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183
_scope_marker: PhantomData<&'scope mut &'scope ()>,
}
impl<'pool, 'scope> Scope<'pool, 'scope> {
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: TaskFn + 'scope,
{
let wait_group = self.wait_group.clone();
// Allocate the `task` on the heap and erase the `'scope` bound.
let task: Box<dyn TaskFn + 'scope> = Box::new(task);
let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) };
self.thread_pool_context.compute(move || {
task();
// decrement `WaitGroup` counter
drop(wait_group);
});
}
/// Compute a task in the `ThreadPool` and return a `Future` of a result
pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R>
where
F: FnOnce() -> R + Send + 'scope,
R: Clone + Send + 'static,
{
let future = TaskResult::default();
let future_ref = future.clone();
self.compute(move || {
future_ref.set(task());
});
future
}
}
/// A future that provides the task result
pub struct TaskResult<R> {
option: Arc<AtomicCell<TaskResultOption<R>>>,
}
// we can't derive `Clone` since it requires `R` to be `Clone` as well
impl<R> Clone for TaskResult<R> {
fn clone(&self) -> Self {
Self {
option: self.option.clone(),
}
}
}
/// The state of the `TaskResult` future
#[derive(Debug)]
enum TaskResultOption<R> {
None,
Result(R),
Waiting(Waker),
}
impl<R> Default for TaskResultOption<R> {
fn default() -> Self {
TaskResultOption::None
}
}
impl<R> TaskResult<R> {
fn set(&self, result: R) {
match self.option.swap(TaskResultOption::Result(result)) {
TaskResultOption::None => {} // do nothing
TaskResultOption::Result(_) => {
unreachable!("There must not be a second computation of the result")
}
TaskResultOption::Waiting(waker) => waker.wake(),
};
}
}
impl<R> Default for TaskResult<R> {
fn default() -> Self {
Self {
option: Default::default(),
}
}
}
impl<R> Future for TaskResult<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self
.option
.swap(TaskResultOption::Waiting(cx.waker().clone()))
{
TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending,
TaskResultOption::Result(r) => Poll::Ready(r),
}
}
}
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use futures::future;
use super::*;
use crossbeam::utils::Backoff;
#[test]
#[allow(clippy::blacklisted_name)]
fn one_task() {
let thread_pool = ThreadPool::new(1);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(42, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_one_thread() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_two_threads() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn lots_of_tasks() {
let thread_pool = ThreadPool::new(2);
let number_of_tasks = 1_000_000;
let tasks_completed = Arc::new(AtomicI32::new(0));
for _ in 0..number_of_tasks {
let tasks_completed = tasks_completed.clone();
thread_pool.compute(Task::new(0, move || {
tasks_completed.fetch_add(1, Ordering::SeqCst);
}));
}
let backoff = Backoff::new();
while tasks_completed.load(Ordering::SeqCst) != number_of_tasks {
backoff.snooze();
}
}
#[test]
fn context() {
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = Arc::new(AtomicI32::new(0));
let result_clone = result.clone();
context.compute(move || {
result_clone.fetch_add(42, Ordering::SeqCst);
});
let backoff = Backoff::new();
while result.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn scoped() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = AtomicUsize::new(0);
context.scope(|scope| {
for _ in 0..NUMBER_OF_TASKS {
scope.compute(|| {
result.fetch_add(1, Ordering::SeqCst);
});
}
});
assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS);
}
#[test]
fn scoped_vec() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut result = vec![0; NUMBER_OF_TASKS];
context.scope(|scope| {
for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) {
scope.compute(move || chunk[0] = i);
}
});
assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result);
}
#[test]
fn compute_results() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut futures = Vec::with_capacity(NUMBER_OF_TASKS);
context.scope(|scope| {
for i in 0..NUMBER_OF_TASKS {
futures.push(scope.compute_result(move || i));
}
});
let result = futures::executor::block_on(future::join_all(futures));
assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>());
}
#[test]
fn parking() {
let thread_pool = ThreadPool::new(1);
let context = thread_pool.create_context();
// wait for the thread to be parked
let backoff = Backoff::new();
while thread_pool.parked_threads.len() == 0 {
backoff.snooze();
}
let mut unparked = false;
context.scope(|scope| scope.compute(|| unparked = true));
assert!(unparked)
}
}
| {
// TODO: recover panics
task.call_once();
} | conditional_block |
thread_pool.rs | use std::marker::PhantomData;
use std::sync::Arc;
use std::thread::JoinHandle;
use std::{iter, mem};
use crossbeam::atomic::AtomicCell;
use crossbeam::deque::{Injector, Steal, Stealer, Worker};
use crossbeam::sync::{Parker, Unparker, WaitGroup};
use futures::task::{Context, Poll, Waker};
use futures::Future;
use std::pin::Pin;
/// A chunk of work with some metadata
struct Task {
_group_id: TaskGroupId,
task_fn: Box<dyn TaskFn>,
}
type TaskGroupId = usize;
pub trait TaskFn: FnOnce() + Send {}
pub trait StaticTaskFn: TaskFn + 'static {}
impl<T> TaskFn for T where T: FnOnce() + Send {}
impl<T> StaticTaskFn for T where T: TaskFn + 'static {}
impl Task {
/// Create a new task to be executed at some point
fn new<F>(group_id: TaskGroupId, f: F) -> Self
where
F: StaticTaskFn,
{
Self {
_group_id: group_id,
task_fn: Box::new(f),
}
}
/// Executes the task
/// TODO: use `FnTraits` once stable
fn call_once(self) {
(self.task_fn)()
}
}
/// A worker thread pool for compute-heavy tasks
///
/// TODO: increase threads to a certain maximum number if current threads don't produce results fast enough?
/// TODO: schedule tasks based on group context?
///
#[derive(Debug)]
pub struct ThreadPool {
target_thread_count: usize,
global_queue: Arc<Injector<Task>>,
stealers: Vec<Stealer<Task>>,
threads: Vec<JoinHandle<()>>,
next_group_id: AtomicCell<usize>,
parked_threads: Arc<Injector<Unparker>>,
}
impl ThreadPool {
/// Creates a new thread pool with `number_of_threads` threads.
///
/// # Panics
/// Panics if `number_of_threads` is 0.
///
pub fn new(number_of_threads: usize) -> Self {
assert!(
number_of_threads > 0,
"There must be at least one thread for the thread pool"
);
let worker_deques: Vec<Worker<Task>> =
(0..number_of_threads).map(|_| Worker::new_fifo()).collect();
let mut thread_pool = Self {
target_thread_count: number_of_threads,
global_queue: Arc::new(Injector::new()),
stealers: worker_deques.iter().map(Worker::stealer).collect(),
threads: Vec::with_capacity(number_of_threads),
next_group_id: AtomicCell::new(0),
parked_threads: Arc::new(Injector::new()),
};
for worker_deque in worker_deques {
let global_queue = thread_pool.global_queue.clone();
let stealers = thread_pool.stealers.clone();
let parked_threads = thread_pool.parked_threads.clone();
thread_pool.threads.push(std::thread::spawn(move || {
Self::await_work(&worker_deque, &global_queue, &stealers, &parked_threads);
}))
}
thread_pool
}
fn await_work(
local: &Worker<Task>,
global: &Injector<Task>,
stealers: &[Stealer<Task>],
parked_threads: &Injector<Unparker>,
) {
let parker = Parker::new();
let unparker = parker.unparker();
loop {
// Pop a task from the local queue, if not empty.
let task = local.pop().or_else(|| {
// Otherwise, we need to look for a task elsewhere.
iter::repeat_with(|| {
// Try stealing a batch of tasks from the global queue.
global
.steal_batch_and_pop(local)
// Or try stealing a task from one of the other threads.
.or_else(|| stealers.iter().map(Stealer::steal).collect())
})
// Loop while no task was stolen and any steal operation needs to be retried.
.find(|s| !s.is_retry())
// Extract the stolen task, if there is one.
.and_then(Steal::success)
});
if let Some(task) = task {
// TODO: recover panics
task.call_once();
} else {
parked_threads.push(unparker.clone());
parker.park();
}
}
}
pub fn create_context(&self) -> ThreadPoolContext {
ThreadPoolContext::new(self, self.next_group_id.fetch_add(1))
}
fn compute(&self, task: Task) {
self.global_queue.push(task);
// un-park a thread since there is new work
if let Steal::Success(unparker) = self.parked_threads.steal() {
unparker.unpark();
}
}
}
impl Default for ThreadPool {
fn default() -> Self {
Self::new(num_cpus::get())
}
}
/// A computation context for a group that spawns tasks in a `ThreadPool`
#[derive(Copy, Clone, Debug)]
pub struct ThreadPoolContext<'pool> {
thread_pool: &'pool ThreadPool,
task_group_id: TaskGroupId,
}
impl<'pool> ThreadPoolContext<'pool> {
/// Create a new `ThreadPoolContext`
fn new(thread_pool: &'pool ThreadPool, task_group_id: TaskGroupId) -> Self {
Self {
thread_pool,
task_group_id,
}
}
/// What is the degree of parallelism that the `ThreadPool` aims for?
/// This is helpful to determine how to split the work into tasks.
pub fn degree_of_parallelism(&self) -> usize {
self.thread_pool.target_thread_count
}
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: StaticTaskFn,
{
self.thread_pool
.compute(Task::new(self.task_group_id, task));
}
/// Execute a bunch of tasks in a scope that blocks until all tasks are finished.
/// Provides a lifetime for that scope.
/// TODO: provide an async version so that async workflows can do something in the meantime?
/// TODO: handle panics: if a thread panics, this function will block forever
pub fn scope<'scope, S>(&'pool self, scope_fn: S)
where
S: FnOnce(&Scope<'pool, 'scope>) + 'scope,
{
let scope = Scope::<'pool, 'scope> {
thread_pool_context: &self,
wait_group: WaitGroup::new(),
_scope_marker: PhantomData,
};
scope_fn(&scope);
scope.wait_group.wait();
}
}
/// A scope in which you can execute tasks; it blocks until all tasks are finished
#[derive(Debug)]
pub struct Scope<'pool, 'scope> {
thread_pool_context: &'pool ThreadPoolContext<'pool>,
wait_group: WaitGroup,
// needs to be invariant to `'scope`, cf. https://github.com/crossbeam-rs/crossbeam/pull/226/files#r232721183
_scope_marker: PhantomData<&'scope mut &'scope ()>,
}
impl<'pool, 'scope> Scope<'pool, 'scope> {
/// Compute a task in the `ThreadPool`
pub fn compute<F>(&self, task: F)
where
F: TaskFn + 'scope,
{
let wait_group = self.wait_group.clone();
// Allocate the `task` on the heap and erase the `'scope` bound.
let task: Box<dyn TaskFn + 'scope> = Box::new(task);
let task: Box<dyn StaticTaskFn> = unsafe { mem::transmute(task) };
self.thread_pool_context.compute(move || {
task();
// decrement `WaitGroup` counter
drop(wait_group);
});
}
/// Compute a task in the `ThreadPool` and return a `Future` of a result
pub fn compute_result<F, R>(&self, task: F) -> TaskResult<R>
where
F: FnOnce() -> R + Send + 'scope,
R: Clone + Send + 'static,
{
let future = TaskResult::default();
let future_ref = future.clone();
self.compute(move || {
future_ref.set(task());
});
future
}
}
/// A future that provides the task result
pub struct TaskResult<R> {
option: Arc<AtomicCell<TaskResultOption<R>>>,
}
// we can't derive `Clone` since it requires `R` to be `Clone` as well
impl<R> Clone for TaskResult<R> {
fn clone(&self) -> Self {
Self {
option: self.option.clone(),
}
}
}
/// The state of the `TaskResult` future
#[derive(Debug)]
enum TaskResultOption<R> {
None,
Result(R),
Waiting(Waker),
}
impl<R> Default for TaskResultOption<R> {
fn default() -> Self {
TaskResultOption::None
}
}
impl<R> TaskResult<R> {
fn set(&self, result: R) {
match self.option.swap(TaskResultOption::Result(result)) {
TaskResultOption::None => {} // do nothing
TaskResultOption::Result(_) => {
unreachable!("There must not be a second computation of the result")
}
TaskResultOption::Waiting(waker) => waker.wake(),
};
}
}
impl<R> Default for TaskResult<R> {
fn default() -> Self {
Self {
option: Default::default(),
}
}
}
impl<R> Future for TaskResult<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self
.option
.swap(TaskResultOption::Waiting(cx.waker().clone()))
{
TaskResultOption::None | TaskResultOption::Waiting(_) => Poll::Pending,
TaskResultOption::Result(r) => Poll::Ready(r),
}
}
}
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering};
use futures::future;
use super::*;
use crossbeam::utils::Backoff;
#[test]
#[allow(clippy::blacklisted_name)]
fn one_task() {
let thread_pool = ThreadPool::new(1);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(42, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_one_thread() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
#[allow(clippy::blacklisted_name)]
fn two_task_two_threads() {
let thread_pool = ThreadPool::new(2);
let foo = Arc::new(AtomicI32::new(0));
let bar = foo.clone();
thread_pool.compute(Task::new(0, move || {
bar.fetch_add(20, Ordering::SeqCst);
}));
let baz = foo.clone();
thread_pool.compute(Task::new(0, move || {
baz.fetch_add(22, Ordering::SeqCst);
}));
let backoff = Backoff::new();
while foo.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn lots_of_tasks() {
let thread_pool = ThreadPool::new(2);
let number_of_tasks = 1_000_000;
let tasks_completed = Arc::new(AtomicI32::new(0));
for _ in 0..number_of_tasks {
let tasks_completed = tasks_completed.clone();
thread_pool.compute(Task::new(0, move || {
tasks_completed.fetch_add(1, Ordering::SeqCst);
}));
}
let backoff = Backoff::new();
while tasks_completed.load(Ordering::SeqCst) != number_of_tasks {
backoff.snooze();
}
}
#[test]
fn context() {
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = Arc::new(AtomicI32::new(0));
let result_clone = result.clone();
context.compute(move || {
result_clone.fetch_add(42, Ordering::SeqCst);
});
let backoff = Backoff::new();
while result.load(Ordering::SeqCst) != 42 {
backoff.snooze();
}
}
#[test]
fn scoped() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let result = AtomicUsize::new(0);
context.scope(|scope| {
for _ in 0..NUMBER_OF_TASKS {
scope.compute(|| {
result.fetch_add(1, Ordering::SeqCst);
}); |
#[test]
fn scoped_vec() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut result = vec![0; NUMBER_OF_TASKS];
context.scope(|scope| {
for (chunk, i) in result.chunks_exact_mut(1).zip(0..NUMBER_OF_TASKS) {
scope.compute(move || chunk[0] = i);
}
});
assert_eq!((0..NUMBER_OF_TASKS).collect::<Vec<_>>(), result);
}
#[test]
fn compute_results() {
const NUMBER_OF_TASKS: usize = 42;
let thread_pool = ThreadPool::new(2);
let context = thread_pool.create_context();
let mut futures = Vec::with_capacity(NUMBER_OF_TASKS);
context.scope(|scope| {
for i in 0..NUMBER_OF_TASKS {
futures.push(scope.compute_result(move || i));
}
});
let result = futures::executor::block_on(future::join_all(futures));
assert_eq!(result, (0..NUMBER_OF_TASKS).collect::<Vec<_>>());
}
#[test]
fn parking() {
let thread_pool = ThreadPool::new(1);
let context = thread_pool.create_context();
// wait for the thread to be parked
let backoff = Backoff::new();
while thread_pool.parked_threads.len() == 0 {
backoff.snooze();
}
let mut unparked = false;
context.scope(|scope| scope.compute(|| unparked = true));
assert!(unparked)
}
} | }
});
assert_eq!(result.load(Ordering::SeqCst), NUMBER_OF_TASKS);
} | random_line_split |
main.rs | extern crate gl;
extern crate imgui;
extern crate imgui_opengl_renderer;
extern crate imgui_sdl2;
extern crate sdl2;
/*
@TODO:
- Show line numbers!
- Use traits to Send, Parse and Draw
- Create a checkbox to enable debugging the parser, queries, etc;
- Write a logger to use an imgui window
*/
use imgui::im_str;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use std::collections::HashSet;
use std::io::{BufRead, BufReader, Error, Write};
use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
use std::sync::mpsc::SendError;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::Instant;
use std::{
io::{self, Read},
process,
time::Duration,
};
mod debugger;
mod graphics;
mod parser;
mod ui;
use graphics::build_text;
use std::cmp::max;
use ui::is_window_docked;
fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) {
for command in commands {
send_command(command, &sender).unwrap();
sleep(Duration::from_millis(time));
}
}
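// The sleep between commands paces the batch (presumably so gdb can process one MI
// request before the next arrives); `time` is the gap in milliseconds.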
pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> {
sender.send(String::from(command))?;
Ok(())
}
pub fn is_split(id: u32) -> bool {
unsafe {
let node = imgui::sys::igDockBuilderGetNode(id);
if std::ptr::null() == node {
false
} else {
imgui::sys::ImGuiDockNode_IsSplitNode(node)
}
}
}
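// GDB/MI batch sent on every step: advance one source line, then refresh the register,
// local-variable, disassembly ($pc window) and raw-memory views shown in the UI.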
const STEP_COMMANDS: [&str; 5] = [
"step\n",
"-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n",
"-stack-list-locals 1\n",
r#" -data-disassemble -s $pc -e "$pc + 20" -- 0
"#,
r#" -data-read-memory &arr x 1 1 128
"#,
];
const STARTUP_COMMANDS: [&str; 3] = [
"start\n",
"target record-full\n",
"-data-list-register-names\n",
];
fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>)
where
F: Fn(),
| let _gl_context = window
.gl_create_context()
.expect("Couldn't create GL context");
gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _);
let mut imgui = imgui::Context::create();
imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE;
let mut path = std::path::PathBuf::new();
path.push("imgui");
path.set_extension("ini");
//imgui.set_ini_filename(Some(path));
imgui.set_ini_filename(None);
let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window);
let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| {
video_subsystem.gl_get_proc_address(s) as _
});
let mut last_frame = Instant::now();
let mut event_pump = sdl_context.event_pump().unwrap();
let mut prev_keys = HashSet::new();
let mut file_txt = String::from("no file loaded");
let mut input_buf = imgui::ImString::new("type something here");
'running: loop {
for event in event_pump.poll_iter() {
imgui_sdl2.handle_event(&mut imgui, &event);
if imgui_sdl2.ignore_event(&event) {
continue;
}
match event {
Event::Quit { .. }
| Event::KeyDown {
keycode: Some(Keycode::Escape),
..
} => break 'running,
_ => {}
}
}
let keys = event_pump
.keyboard_state()
.pressed_scancodes()
.filter_map(Keycode::from_scancode)
.collect();
// Get the difference between the new and old sets.
let new_keys = &keys - &prev_keys;
// Call step commands
if new_keys.contains(&Keycode::Right) {
send_commands(sender, &STEP_COMMANDS, 50);
}
if new_keys.contains(&Keycode::Left) {
send_command("reverse-step\n", sender).unwrap();
}
prev_keys = keys;
imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state());
let now = Instant::now();
let delta = now - last_frame;
let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0;
last_frame = now;
imgui.io_mut().delta_time = delta_s;
let ui = imgui.frame();
let mut left_dock: u32 = 0;
let mut left_top: u32 = 0;
let mut left_down: u32 = 0;
let mut right_dock: u32 = 0;
let mut right_top: u32 = 0;
let mut right_down: u32 = 0;
let mut main_dock: u32 = 0;
unsafe {
main_dock = imgui::sys::igDockSpaceOverViewport(
imgui::sys::igGetMainViewport(),
0,
::std::ptr::null::<imgui::sys::ImGuiWindowClass>(),
);
}
if !is_split(main_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
main_dock,
imgui::Direction::Right as i32,
0.3f32,
&mut right_dock,
&mut left_dock,
);
}
}
if right_dock != 0 && !is_split(right_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
right_dock,
imgui::Direction::Up as i32,
0.5f32,
&mut right_top,
&mut right_down,
);
}
}
if left_dock != 0 && !is_split(left_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
left_dock,
imgui::Direction::Up as i32,
0.65f32,
&mut left_top,
&mut left_down,
);
}
}
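// Resulting dock layout: source code top-left, asm/console bottom-left,
// registers top-right, variables/memory bottom-right.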
let mut gdb = gdb_mutex.lock().unwrap();
if let Some(str) = gdb.get_file() {
file_txt = str;
}
ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| {
let mut x = 1.0f32;
for (i, l) in file_txt.lines().enumerate() {
if (i + 1) == gdb.line as usize {
ui.text_colored([x, 0f32, 0f32, 1.0f32], &l);
x -= 0.5f32;
} else {
ui.text_colored([x, x, x, 1.0f32], &l);
}
}
});
ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.variables {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.registers_ordered() {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| {
{
imgui::TabBar::new(im_str!("test"))
.reorderable(true)
.build(&ui, || {
for (k, v) in &gdb.asm {
let s: &imgui::ImStr;
let c_str: std::ffi::CString;
unsafe {
c_str = std::ffi::CString::new(k.as_str()).unwrap();
s = imgui::ImStr::from_utf8_with_nul_unchecked(
c_str.as_bytes_with_nul(),
);
}
let pc_addr = gdb.pc_addr.get(k).unwrap();
imgui::TabItem::new(s).build(&ui, || {
ui.text_colored(
[0.8f32, 0.8f32, 0.2f32, 1f32],
format!("{:#x}", pc_addr),
);
ui.separator();
ui.columns(2, im_str!("asm_col"), true);
for (addr, line) in v {
if line.len() > 0 {
if addr == pc_addr {
ui.text_colored(
[1f32, 0f32, 0f32, 1f32],
format!("{:#x}", addr),
);
} else {
ui.text_colored(
[1f32, 1f32, 1f32, 1f32],
format!("{:#x}", addr),
);
}
ui.next_column();
ui.text_colored([1f32, 1f32, 1f32, 1f32], line);
ui.next_column();
}
}
})
}
})
}
});
ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| {
ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output);
if imgui::InputText::new(ui, im_str!(""), &mut input_buf)
.enter_returns_true(true)
.build()
{
let mut cmd = String::from(input_buf.to_str());
cmd.push('\n');
send_command(&cmd, &sender).unwrap();
input_buf.clear();
}
});
ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| {
let (addr, mem) = &gdb.memory;
let mut addr = *addr;
let mut s = format!("{:#08x} ", addr);
let mut col = 0.2f32;
for (i, val) in mem.iter().enumerate() {
if *val != 0u64 {
col = 1f32;
}
s.push_str(&format!("{:02x}", val));
s.push(' ');
addr += 1;
if (i + 1) % 8 == 0 {
ui.text_colored([col, col, col, 1f32], &s);
// cleaning the string for the next line
s = format!("{:#08x} ", addr);
col = 0.2f32;
}
}
//@Error maybe some values won't be rendered here
});
//ui.show_demo_window(&mut true);
unsafe {
gl::ClearColor(0.2, 0.2, 0.2, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
imgui_sdl2.prepare_render(&ui, &window);
renderer.render(ui);
window.gl_swap_window();
}
}
fn start_process_thread(
child: &mut Child,
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) {
let mut stdin = child.stdin.take().unwrap();
let stdout = child.stdout.take().unwrap();
use crate::debugger::DebuggerState;
// Receiving commands and sending them to GDB's stdin
thread::spawn(move || {
for line in receiver {
stdin.write_all(line.as_bytes()).unwrap();
}
});
// Reading and processing GDB stdout
thread::spawn(move || {
let mut f = BufReader::new(stdout);
loop {
let mut line = String::new();
f.read_line(&mut line).unwrap();
print!("[LINE] {}", line);
let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap();
let vals = parser::parse(&line, gdb);
println!("[PARSER] {:#?}", &vals);
if let Ok(v) = vals {
// Here we try to limit the scope where we hold the mutex
gdb.update(&v);
}
}
});
}
fn start_process(
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) -> Child {
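// `--interpreter=mi3` starts gdb in machine-interface mode so the reader thread can parse
// structured records; stdin/stdout are piped and handed off in start_process_thread.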
let mut child = Command::new("gdb")
.arg("--interpreter=mi3")
.arg("./examples/a.exe")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.expect("Failed to start process");
start_process_thread(&mut child, receiver, gdb_mutex);
println!("Started process: {}", child.id());
child
}
fn main() -> Result<(), Error> {
let (tx, rx) = channel();
let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new()));
let mut child = start_process(rx, Arc::clone(&gdb_mutex));
send_commands(&tx, &STARTUP_COMMANDS, 100);
start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx);
child.kill()?;
Ok(())
}
| {
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let ttf_context = sdl2::ttf::init().unwrap();
{
let gl_attr = video_subsystem.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(3, 0);
}
let window = video_subsystem
.window("rust-sdl2 demo", 1000, 950)
.position_centered()
.resizable()
.allow_highdpi()
.opengl()
.build()
.unwrap();
| identifier_body |
main.rs | extern crate gl;
extern crate imgui;
extern crate imgui_opengl_renderer;
extern crate imgui_sdl2;
extern crate sdl2;
/*
@TODO:
- Show line numbers!
- Use traits to Send, Parse and Draw
- Create a checkbox to enable debugging the parser, queries, etc;
- Write a logger to use an imgui window
*/
use imgui::im_str;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use std::collections::HashSet;
use std::io::{BufRead, BufReader, Error, Write};
use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
use std::sync::mpsc::SendError;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::Instant;
use std::{
io::{self, Read},
process,
time::Duration,
};
mod debugger;
mod graphics;
mod parser;
mod ui;
use graphics::build_text;
use std::cmp::max;
use ui::is_window_docked;
fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) {
for command in commands {
send_command(command, &sender).unwrap();
sleep(Duration::from_millis(time));
}
}
pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> {
sender.send(String::from(command))?;
Ok(())
}
pub fn is_split(id: u32) -> bool {
unsafe {
let node = imgui::sys::igDockBuilderGetNode(id);
if std::ptr::null() == node | else {
imgui::sys::ImGuiDockNode_IsSplitNode(node)
}
}
}
const STEP_COMMANDS: [&str; 5] = [
"step\n",
"-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n",
"-stack-list-locals 1\n",
r#" -data-disassemble -s $pc -e "$pc + 20" -- 0
"#,
r#" -data-read-memory &arr x 1 1 128
"#,
];
const STARTUP_COMMANDS: [&str; 3] = [
"start\n",
"target record-full\n",
"-data-list-register-names\n",
];
fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>)
where
F: Fn(),
{
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let ttf_context = sdl2::ttf::init().unwrap();
{
let gl_attr = video_subsystem.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(3, 0);
}
let window = video_subsystem
.window("rust-sdl2 demo", 1000, 950)
.position_centered()
.resizable()
.allow_highdpi()
.opengl()
.build()
.unwrap();
let _gl_context = window
.gl_create_context()
.expect("Couldn't create GL context");
gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _);
let mut imgui = imgui::Context::create();
imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE;
let mut path = std::path::PathBuf::new();
path.push("imgui");
path.set_extension("ini");
//imgui.set_ini_filename(Some(path));
imgui.set_ini_filename(None);
let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window);
let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| {
video_subsystem.gl_get_proc_address(s) as _
});
let mut last_frame = Instant::now();
let mut event_pump = sdl_context.event_pump().unwrap();
let mut prev_keys = HashSet::new();
let mut file_txt = String::from("no file loaded");
let mut input_buf = imgui::ImString::new("type something here");
'running: loop {
for event in event_pump.poll_iter() {
imgui_sdl2.handle_event(&mut imgui, &event);
if imgui_sdl2.ignore_event(&event) {
continue;
}
match event {
Event::Quit { .. }
| Event::KeyDown {
keycode: Some(Keycode::Escape),
..
} => break 'running,
_ => {}
}
}
let keys = event_pump
.keyboard_state()
.pressed_scancodes()
.filter_map(Keycode::from_scancode)
.collect();
// Get the difference between the new and old sets.
let new_keys = &keys - &prev_keys;
// Call step commands
if new_keys.contains(&Keycode::Right) {
send_commands(sender, &STEP_COMMANDS, 50);
}
if new_keys.contains(&Keycode::Left) {
send_command("reverse-step\n", sender).unwrap();
}
prev_keys = keys;
imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state());
let now = Instant::now();
let delta = now - last_frame;
let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0;
last_frame = now;
imgui.io_mut().delta_time = delta_s;
let ui = imgui.frame();
let mut left_dock: u32 = 0;
let mut left_top: u32 = 0;
let mut left_down: u32 = 0;
let mut right_dock: u32 = 0;
let mut right_top: u32 = 0;
let mut right_down: u32 = 0;
let mut main_dock: u32 = 0;
unsafe {
main_dock = imgui::sys::igDockSpaceOverViewport(
imgui::sys::igGetMainViewport(),
0,
::std::ptr::null::<imgui::sys::ImGuiWindowClass>(),
);
}
if !is_split(main_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
main_dock,
imgui::Direction::Right as i32,
0.3f32,
&mut right_dock,
&mut left_dock,
);
}
}
if right_dock != 0 && !is_split(right_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
right_dock,
imgui::Direction::Up as i32,
0.5f32,
&mut right_top,
&mut right_down,
);
}
}
if left_dock != 0 && !is_split(left_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
left_dock,
imgui::Direction::Up as i32,
0.65f32,
&mut left_top,
&mut left_down,
);
}
}
let mut gdb = gdb_mutex.lock().unwrap();
if let Some(str) = gdb.get_file() {
file_txt = str;
}
ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| {
let mut x = 1.0f32;
for (i, l) in file_txt.lines().enumerate() {
if (i + 1) == gdb.line as usize {
ui.text_colored([x, 0f32, 0f32, 1.0f32], &l);
x -= 0.5f32;
} else {
ui.text_colored([x, x, x, 1.0f32], &l);
}
}
});
ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.variables {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.registers_ordered() {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| {
{
imgui::TabBar::new(im_str!("test"))
.reorderable(true)
.build(&ui, || {
for (k, v) in &gdb.asm {
let s: &imgui::ImStr;
let c_str: std::ffi::CString;
unsafe {
c_str = std::ffi::CString::new(k.as_str()).unwrap();
s = imgui::ImStr::from_utf8_with_nul_unchecked(
c_str.as_bytes_with_nul(),
);
}
let pc_addr = gdb.pc_addr.get(k).unwrap();
imgui::TabItem::new(s).build(&ui, || {
ui.text_colored(
[0.8f32, 0.8f32, 0.2f32, 1f32],
format!("{:#x}", pc_addr),
);
ui.separator();
ui.columns(2, im_str!("asm_col"), true);
for (addr, line) in v {
if line.len() > 0 {
if addr == pc_addr {
ui.text_colored(
[1f32, 0f32, 0f32, 1f32],
format!("{:#x}", addr),
);
} else {
ui.text_colored(
[1f32, 1f32, 1f32, 1f32],
format!("{:#x}", addr),
);
}
ui.next_column();
ui.text_colored([1f32, 1f32, 1f32, 1f32], line);
ui.next_column();
}
}
})
}
})
}
});
ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| {
ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output);
if imgui::InputText::new(ui, im_str!(""), &mut input_buf)
.enter_returns_true(true)
.build()
{
let mut cmd = String::from(input_buf.to_str());
cmd.push('\n');
send_command(&cmd, &sender).unwrap();
input_buf.clear();
}
});
ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| {
let (addr, mem) = &gdb.memory;
let mut addr = *addr;
let mut s = format!("{:#08x} ", addr);
let mut col = 0.2f32;
for (i, val) in mem.iter().enumerate() {
if *val != 0u64 {
col = 1f32;
}
s.push_str(&format!("{:02x}", val));
s.push(' ');
addr += 1;
if (i + 1) % 8 == 0 {
ui.text_colored([col, col, col, 1f32], &s);
// cleaning the string for the next line
s = format!("{:#08x} ", addr);
col = 0.2f32;
}
}
//@Error maybe some values won't be rendered here
});
//ui.show_demo_window(&mut true);
unsafe {
gl::ClearColor(0.2, 0.2, 0.2, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
imgui_sdl2.prepare_render(&ui, &window);
renderer.render(ui);
window.gl_swap_window();
}
}
fn start_process_thread(
child: &mut Child,
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) {
let mut stdin = child.stdin.take().unwrap();
let stdout = child.stdout.take().unwrap();
use crate::debugger::DebuggerState;
// Receiving commands and sending them to GDB's stdin
thread::spawn(move || {
for line in receiver {
stdin.write_all(line.as_bytes()).unwrap();
}
});
// Reading and processing GDB stdout
thread::spawn(move || {
let mut f = BufReader::new(stdout);
loop {
let mut line = String::new();
f.read_line(&mut line).unwrap();
print!("[LINE] {}", line);
let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap();
let vals = parser::parse(&line, gdb);
println!("[PARSER] {:#?}", &vals);
if let Ok(v) = vals {
// Here we try to limit the scope where we hold the mutex
gdb.update(&v);
}
}
});
}
fn start_process(
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) -> Child {
let mut child = Command::new("gdb")
.arg("--interpreter=mi3")
.arg("./examples/a.exe")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.expect("Failed to start process");
start_process_thread(&mut child, receiver, gdb_mutex);
println!("Started process: {}", child.id());
child
}
fn main() -> Result<(), Error> {
let (tx, rx) = channel();
let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new()));
let mut child = start_process(rx, Arc::clone(&gdb_mutex));
send_commands(&tx, &STARTUP_COMMANDS, 100);
start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx);
child.kill()?;
Ok(())
}
| {
false
} | conditional_block |
main.rs | extern crate gl;
extern crate imgui;
extern crate imgui_opengl_renderer;
extern crate imgui_sdl2;
extern crate sdl2;
/*
@TODO:
- Show line numbers!
- Use traits to Send, Parse and Draw
- Create a checkbox to enable debugging the parser, queries, etc;
- Write a logger to use an imgui window
*/
use imgui::im_str;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use std::collections::HashSet;
use std::io::{BufRead, BufReader, Error, Write};
use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
use std::sync::mpsc::SendError;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::Instant;
use std::{
io::{self, Read},
process,
time::Duration,
};
mod debugger;
mod graphics;
mod parser;
mod ui;
use graphics::build_text;
use std::cmp::max;
use ui::is_window_docked;
fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) {
for command in commands {
send_command(command, &sender).unwrap();
sleep(Duration::from_millis(time));
}
}
pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> {
sender.send(String::from(command))?;
Ok(())
}
pub fn is_split(id: u32) -> bool {
unsafe {
let node = imgui::sys::igDockBuilderGetNode(id);
if std::ptr::null() == node {
false
} else {
imgui::sys::ImGuiDockNode_IsSplitNode(node)
}
}
}
const STEP_COMMANDS: [&str; 5] = [
"step\n",
"-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n",
"-stack-list-locals 1\n",
r#" -data-disassemble -s $pc -e "$pc + 20" -- 0
"#,
r#" -data-read-memory &arr x 1 1 128
"#,
];
const STARTUP_COMMANDS: [&str; 3] = [
"start\n",
"target record-full\n",
"-data-list-register-names\n",
];
fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>)
where
F: Fn(),
{
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let ttf_context = sdl2::ttf::init().unwrap();
{
let gl_attr = video_subsystem.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(3, 0);
}
let window = video_subsystem
.window("rust-sdl2 demo", 1000, 950)
.position_centered()
.resizable()
.allow_highdpi()
.opengl()
.build()
.unwrap();
let _gl_context = window
.gl_create_context()
.expect("Couldn't create GL context");
gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _);
let mut imgui = imgui::Context::create();
imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE;
let mut path = std::path::PathBuf::new();
path.push("imgui");
path.set_extension("ini");
//imgui.set_ini_filename(Some(path));
imgui.set_ini_filename(None);
let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window);
let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| {
video_subsystem.gl_get_proc_address(s) as _
});
let mut last_frame = Instant::now();
let mut event_pump = sdl_context.event_pump().unwrap();
let mut prev_keys = HashSet::new();
let mut file_txt = String::from("no file loaded");
let mut input_buf = imgui::ImString::new("type something here");
'running: loop {
for event in event_pump.poll_iter() {
imgui_sdl2.handle_event(&mut imgui, &event);
if imgui_sdl2.ignore_event(&event) {
continue;
}
match event {
Event::Quit { .. }
| Event::KeyDown {
keycode: Some(Keycode::Escape),
..
} => break 'running,
_ => {}
}
}
let keys = event_pump
.keyboard_state()
.pressed_scancodes()
.filter_map(Keycode::from_scancode)
.collect();
// Get the difference between the new and old sets.
let new_keys = &keys - &prev_keys;
// Call step commands
if new_keys.contains(&Keycode::Right) {
send_commands(sender, &STEP_COMMANDS, 50);
}
if new_keys.contains(&Keycode::Left) {
send_command("reverse-step\n", sender).unwrap();
}
prev_keys = keys;
imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state());
let now = Instant::now();
let delta = now - last_frame;
let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0;
last_frame = now;
imgui.io_mut().delta_time = delta_s;
let ui = imgui.frame();
let mut left_dock: u32 = 0;
let mut left_top: u32 = 0;
let mut left_down: u32 = 0;
let mut right_dock: u32 = 0;
let mut right_top: u32 = 0;
let mut right_down: u32 = 0;
let mut main_dock: u32 = 0;
unsafe {
main_dock = imgui::sys::igDockSpaceOverViewport(
imgui::sys::igGetMainViewport(),
0,
::std::ptr::null::<imgui::sys::ImGuiWindowClass>(),
);
}
if !is_split(main_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
main_dock,
imgui::Direction::Right as i32,
0.3f32,
&mut right_dock,
&mut left_dock,
);
}
}
if right_dock != 0 && !is_split(right_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
right_dock,
imgui::Direction::Up as i32,
0.5f32,
&mut right_top,
&mut right_down,
);
}
}
if left_dock != 0 && !is_split(left_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
left_dock,
imgui::Direction::Up as i32,
0.65f32,
&mut left_top,
&mut left_down,
);
}
}
let mut gdb = gdb_mutex.lock().unwrap();
if let Some(str) = gdb.get_file() {
file_txt = str;
}
ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| {
let mut x = 1.0f32;
for (i, l) in file_txt.lines().enumerate() {
if (i + 1) == gdb.line as usize {
ui.text_colored([x, 0f32, 0f32, 1.0f32], &l);
x -= 0.5f32; |
ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.variables {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.registers_ordered() {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| {
{
imgui::TabBar::new(im_str!("test"))
.reorderable(true)
.build(&ui, || {
for (k, v) in &gdb.asm {
let s: &imgui::ImStr;
let c_str: std::ffi::CString;
unsafe {
c_str = std::ffi::CString::new(k.as_str()).unwrap();
s = imgui::ImStr::from_utf8_with_nul_unchecked(
c_str.as_bytes_with_nul(),
);
}
let pc_addr = gdb.pc_addr.get(k).unwrap();
imgui::TabItem::new(s).build(&ui, || {
ui.text_colored(
[0.8f32, 0.8f32, 0.2f32, 1f32],
format!("{:#x}", pc_addr),
);
ui.separator();
ui.columns(2, im_str!("asm_col"), true);
for (addr, line) in v {
if line.len() > 0 {
if addr == pc_addr {
ui.text_colored(
[1f32, 0f32, 0f32, 1f32],
format!("{:#x}", addr),
);
} else {
ui.text_colored(
[1f32, 1f32, 1f32, 1f32],
format!("{:#x}", addr),
);
}
ui.next_column();
ui.text_colored([1f32, 1f32, 1f32, 1f32], line);
ui.next_column();
}
}
})
}
})
}
});
ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| {
ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output);
if imgui::InputText::new(ui, im_str!(""), &mut input_buf)
.enter_returns_true(true)
.build()
{
let mut cmd = String::from(input_buf.to_str());
cmd.push('\n');
send_command(&cmd, &sender).unwrap();
input_buf.clear();
}
});
ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| {
let (addr, mem) = &gdb.memory;
let mut addr = *addr;
let mut s = format!("{:#08x} ", addr);
let mut col = 0.2f32;
for (i, val) in mem.iter().enumerate() {
if *val != 0u64 {
col = 1f32;
}
s.push_str(&format!("{:02x}", val));
s.push(' ');
addr += 1;
if (i + 1) % 8 == 0 {
ui.text_colored([col, col, col, 1f32], &s);
// cleaning the string for the next line
s = format!("{:#08x} ", addr);
col = 0.2f32;
}
}
//@Error maybe some values won't be rendered here
});
//ui.show_demo_window(&mut true);
unsafe {
gl::ClearColor(0.2, 0.2, 0.2, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
imgui_sdl2.prepare_render(&ui, &window);
renderer.render(ui);
window.gl_swap_window();
}
}
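// Spawns two worker threads: one forwards queued command strings to GDB's stdin, the other reads GDB's MI output line by line, parses it, and applies the result to the shared DebuggerState.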
fn start_process_thread(
child: &mut Child,
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) {
let mut stdin = child.stdin.take().unwrap();
let stdout = child.stdout.take().unwrap();
use crate::debugger::DebuggerState;
// Receiving commands and sending them to GDB's stdin
thread::spawn(move || {
for line in receiver {
stdin.write_all(line.as_bytes()).unwrap();
}
});
// Reading and processing GDB stdout
thread::spawn(move || {
let mut f = BufReader::new(stdout);
loop {
let mut line = String::new();
f.read_line(&mut line).unwrap();
print!("[LINE] {}", line);
let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap();
let vals = parser::parse(&line, gdb);
println!("[PARSER] {:#?}", &vals);
if let Ok(v) = vals {
// Here we try to limit the scope where we hold the mutex
gdb.update(&v);
}
}
});
}
fn start_process(
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) -> Child {
let mut child = Command::new("gdb")
.arg("--interpreter=mi3")
.arg("./examples/a.exe")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.expect("Failed to start process");
start_process_thread(&mut child, receiver, gdb_mutex);
println!("Started process: {}", child.id());
child
}
fn main() -> Result<(), Error> {
let (tx, rx) = channel();
let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new()));
let mut child = start_process(rx, Arc::clone(&gdb_mutex));
send_commands(&tx, &STARTUP_COMMANDS, 100);
start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx);
child.kill()?;
Ok(())
} | } else {
ui.text_colored([x, x, x, 1.0f32], &l);
}
}
}); | random_line_split |
main.rs | extern crate gl;
extern crate imgui;
extern crate imgui_opengl_renderer;
extern crate imgui_sdl2;
extern crate sdl2;
/*
@TODO:
- Show line numbers!
- Use traits to Send, Parse and Draw
- Create a checkbox to enable debugging the parser, queries, etc;
- Write a logger to use a imgui window
*/
use imgui::im_str;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use std::collections::HashSet;
use std::io::{BufRead, BufReader, Error, Write};
use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
use std::sync::mpsc::SendError;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc, Mutex};
use std::thread;
use std::thread::sleep;
use std::time::Instant;
use std::{
io::{self, Read},
process,
time::Duration,
};
mod debugger;
mod graphics;
mod parser;
mod ui;
use graphics::build_text;
use std::cmp::max;
use ui::is_window_docked;
fn send_commands(sender: &Sender<String>, commands: &[&str], time: u64) {
for command in commands {
send_command(command, &sender).unwrap();
sleep(Duration::from_millis(time));
}
}
pub fn send_command(command: &str, sender: &Sender<String>) -> Result<(), SendError<String>> {
sender.send(String::from(command))?;
Ok(())
}
pub fn is_split(id: u32) -> bool {
unsafe {
let node = imgui::sys::igDockBuilderGetNode(id);
if std::ptr::null() == node {
false
} else {
imgui::sys::ImGuiDockNode_IsSplitNode(node)
}
}
}
const STEP_COMMANDS: [&str; 5] = [
"step\n",
"-data-list-register-values x 0 1 2 3 4 5 6 7 8 9 10\n",
"-stack-list-locals 1\n",
r#" -data-disassemble -s $pc -e "$pc + 20" -- 0
"#,
r#" -data-read-memory &arr x 1 1 128
"#,
];
const STARTUP_COMMANDS: [&str; 3] = [
"start\n",
"target record-full\n",
"-data-list-register-names\n",
];
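// Sets up SDL2, OpenGL and Dear ImGui (with docking enabled) and runs the main input/render loop; the Right/Left arrow keys trigger the step and reverse-step commands.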
fn start_graphics<F>(gdb_mutex: Arc<Mutex<debugger::DebuggerState>>, f: F, sender: &Sender<String>)
where
F: Fn(),
{
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let ttf_context = sdl2::ttf::init().unwrap();
{
let gl_attr = video_subsystem.gl_attr();
gl_attr.set_context_profile(sdl2::video::GLProfile::Core);
gl_attr.set_context_version(3, 0);
}
let window = video_subsystem
.window("rust-sdl2 demo", 1000, 950)
.position_centered()
.resizable()
.allow_highdpi()
.opengl()
.build()
.unwrap();
let _gl_context = window
.gl_create_context()
.expect("Couldn't create GL context");
gl::load_with(|s| video_subsystem.gl_get_proc_address(s) as _);
let mut imgui = imgui::Context::create();
imgui.io_mut().config_flags |= imgui::ConfigFlags::DOCKING_ENABLE;
let mut path = std::path::PathBuf::new();
path.push("imgui");
path.set_extension("ini");
//imgui.set_ini_filename(Some(path));
imgui.set_ini_filename(None);
let mut imgui_sdl2 = imgui_sdl2::ImguiSdl2::new(&mut imgui, &window);
let renderer = imgui_opengl_renderer::Renderer::new(&mut imgui, |s| {
video_subsystem.gl_get_proc_address(s) as _
});
let mut last_frame = Instant::now();
let mut event_pump = sdl_context.event_pump().unwrap();
let mut prev_keys = HashSet::new();
let mut file_txt = String::from("no file loaded");
let mut input_buf = imgui::ImString::new("type something here");
'running: loop {
for event in event_pump.poll_iter() {
imgui_sdl2.handle_event(&mut imgui, &event);
if imgui_sdl2.ignore_event(&event) {
continue;
}
match event {
Event::Quit { .. }
| Event::KeyDown {
keycode: Some(Keycode::Escape),
..
} => break 'running,
_ => {}
}
}
let keys = event_pump
.keyboard_state()
.pressed_scancodes()
.filter_map(Keycode::from_scancode)
.collect();
// Get the difference between the new and old sets.
let new_keys = &keys - &prev_keys;
// Call step commands
if new_keys.contains(&Keycode::Right) {
send_commands(sender, &STEP_COMMANDS, 50);
}
if new_keys.contains(&Keycode::Left) {
send_command("reverse-step\n", sender).unwrap();
}
prev_keys = keys;
imgui_sdl2.prepare_frame(imgui.io_mut(), &window, &event_pump.mouse_state());
let now = Instant::now();
let delta = now - last_frame;
let delta_s = delta.as_secs() as f32 + delta.subsec_nanos() as f32 / 1_000_000_000.0;
last_frame = now;
imgui.io_mut().delta_time = delta_s;
let ui = imgui.frame();
let mut left_dock: u32 = 0;
let mut left_top: u32 = 0;
let mut left_down: u32 = 0;
let mut right_dock: u32 = 0;
let mut right_top: u32 = 0;
let mut right_down: u32 = 0;
let mut main_dock: u32 = 0;
unsafe {
main_dock = imgui::sys::igDockSpaceOverViewport(
imgui::sys::igGetMainViewport(),
0,
::std::ptr::null::<imgui::sys::ImGuiWindowClass>(),
);
}
if !is_split(main_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
main_dock,
imgui::Direction::Right as i32,
0.3f32,
&mut right_dock,
&mut left_dock,
);
}
}
if right_dock != 0 && !is_split(right_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
right_dock,
imgui::Direction::Up as i32,
0.5f32,
&mut right_top,
&mut right_down,
);
}
}
if left_dock != 0 && !is_split(left_dock) {
unsafe {
imgui::sys::igDockBuilderSplitNode(
left_dock,
imgui::Direction::Up as i32,
0.65f32,
&mut left_top,
&mut left_down,
);
}
}
let mut gdb = gdb_mutex.lock().unwrap();
if let Some(str) = gdb.get_file() {
file_txt = str;
}
ui::docked_window(&ui, &mut gdb, "Code", left_top, |ui, gdb| {
let mut x = 1.0f32;
for (i, l) in file_txt.lines().enumerate() {
if (i + 1) == gdb.line as usize {
ui.text_colored([x, 0f32, 0f32, 1.0f32], &l);
x -= 0.5f32;
} else {
ui.text_colored([x, x, x, 1.0f32], &l);
}
}
});
ui::docked_window(&ui, &mut gdb, "Vars", right_down, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.variables {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Regs", right_top, |ui, gdb| {
ui.columns(2, im_str!("A"), true);
for (k, v) in &gdb.registers_ordered() {
ui.text(k);
ui.next_column();
ui.text(v);
ui.next_column();
}
});
ui::docked_window(&ui, &mut gdb, "Asm", left_down, |ui, gdb| {
{
imgui::TabBar::new(im_str!("test"))
.reorderable(true)
.build(&ui, || {
for (k, v) in &gdb.asm {
let s: &imgui::ImStr;
let c_str: std::ffi::CString;
unsafe {
c_str = std::ffi::CString::new(k.as_str()).unwrap();
s = imgui::ImStr::from_utf8_with_nul_unchecked(
c_str.as_bytes_with_nul(),
);
}
let pc_addr = gdb.pc_addr.get(k).unwrap();
imgui::TabItem::new(s).build(&ui, || {
ui.text_colored(
[0.8f32, 0.8f32, 0.2f32, 1f32],
format!("{:#x}", pc_addr),
);
ui.separator();
ui.columns(2, im_str!("asm_col"), true);
for (addr, line) in v {
if line.len() > 0 {
if addr == pc_addr {
ui.text_colored(
[1f32, 0f32, 0f32, 1f32],
format!("{:#x}", addr),
);
} else {
ui.text_colored(
[1f32, 1f32, 1f32, 1f32],
format!("{:#x}", addr),
);
}
ui.next_column();
ui.text_colored([1f32, 1f32, 1f32, 1f32], line);
ui.next_column();
}
}
})
}
})
}
});
ui::docked_window(&ui, &gdb, "Console", left_down, |ui, gdb| {
ui.text_colored([1f32, 1f32, 1f32, 1f32], &gdb.console_output);
if imgui::InputText::new(ui, im_str!(""), &mut input_buf)
.enter_returns_true(true)
.build()
{
let mut cmd = String::from(input_buf.to_str());
cmd.push('\n');
send_command(&cmd, &sender).unwrap();
input_buf.clear();
}
});
ui::docked_window(&ui, &gdb, "memory", right_down, |ui, gdb| {
let (addr, mem) = &gdb.memory;
let mut addr = *addr;
let mut s = format!("{:#08x} ", addr);
let mut col = 0.2f32;
for (i, val) in mem.iter().enumerate() {
if *val != 0u64 {
col = 1f32;
}
s.push_str(&format!("{:02x}", val));
s.push(' ');
addr += 1;
if (i + 1) % 8 == 0 {
ui.text_colored([col, col, col, 1f32], &s);
// cleaning the string for the next line
s = format!("{:#08x} ", addr);
col = 0.2f32;
}
}
//@Error maybe some values won't be rendered here
});
//ui.show_demo_window(&mut true);
unsafe {
gl::ClearColor(0.2, 0.2, 0.2, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT);
}
imgui_sdl2.prepare_render(&ui, &window);
renderer.render(ui);
window.gl_swap_window();
}
}
fn start_process_thread(
child: &mut Child,
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) {
let mut stdin = child.stdin.take().unwrap();
let stdout = child.stdout.take().unwrap();
use crate::debugger::DebuggerState;
// Receiving commands and sending them to GDB's stdin
thread::spawn(move || {
for line in receiver {
stdin.write_all(line.as_bytes()).unwrap();
}
});
// Reading and processing GDB stdout
thread::spawn(move || {
let mut f = BufReader::new(stdout);
loop {
let mut line = String::new();
f.read_line(&mut line).unwrap();
print!("[LINE] {}", line);
let gdb: &mut DebuggerState = &mut *gdb_mutex.lock().unwrap();
let vals = parser::parse(&line, gdb);
println!("[PARSER] {:#?}", &vals);
if let Ok(v) = vals {
// Here we try to limit the scope where we hold the mutex
gdb.update(&v);
}
}
});
}
fn start_process(
receiver: Receiver<String>,
gdb_mutex: Arc<Mutex<debugger::DebuggerState>>,
) -> Child {
let mut child = Command::new("gdb")
.arg("--interpreter=mi3")
.arg("./examples/a.exe")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.expect("Failed to start process");
start_process_thread(&mut child, receiver, gdb_mutex);
println!("Started process: {}", child.id());
child
}
fn | () -> Result<(), Error> {
let (tx, rx) = channel();
let gdb_mutex = Arc::new(Mutex::new(debugger::DebuggerState::new()));
let mut child = start_process(rx, Arc::clone(&gdb_mutex));
send_commands(&tx, &STARTUP_COMMANDS, 100);
start_graphics(Arc::clone(&gdb_mutex), move || {}, &tx);
child.kill()?;
Ok(())
}
| main | identifier_name |
zhtta.rs | return true;
}
}
return false;
}
}
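// Ordering used by the request priority queue: a request compares as "less than" another when its file is smaller; otherwise the comparison falls back to the clients' priorities (getPriority on the peer name).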
impl std::cmp::Ord for HTTP_Request {
fn lt(&self, other: &HTTP_Request) -> bool {
//First get the file sizes for the Http_Request
let sizeSelf = fs::stat(self.path).size;
let sizeOther = fs::stat(other.path).size;
if sizeOther > sizeSelf {
return true;
}
else {
return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone());
}
}
}
struct WebServer {
ip: ~str,
port: uint,
www_dir_path: ~Path,
request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>,
stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>,
cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>,
notify_port: Port<()>,
shared_notify_chan: SharedChan<()>
}
impl WebServer {
fn new(ip: &str, port: uint, www_dir: &str) -> WebServer {
let (notify_port, shared_notify_chan) = SharedChan::new();
let www_dir_path = ~Path::new(www_dir);
os::change_dir(www_dir_path.clone());
WebServer {
ip: ip.to_owned(),
port: port,
www_dir_path: www_dir_path,
request_queue_arc: MutexArc::new(PriorityQueue::new()),
stream_map_arc: MutexArc::new(HashMap::new()),
cache: MutexArc::new(MutexArc::new(LruCache::new(10))),
notify_port: notify_port,
shared_notify_chan: shared_notify_chan
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error.");
let www_dir_path_str = self.www_dir_path.as_str().expect("invalid www path?").to_owned();
let request_queue_arc = self.request_queue_arc.clone();
let shared_notify_chan = self.shared_notify_chan.clone();
let stream_map_arc = self.stream_map_arc.clone();
spawn(proc() {
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println!("{:s} listening on {:s} (serving from: {:s}).",
SERVER_NAME, addr.to_str(), www_dir_path_str);
//Visitor counter
let num_visitor : uint = 0;
//Arc for visitor counter.
let visitor_arc_mut = RWArc::new(num_visitor);
for stream in acceptor.incoming() {
let (queue_port, queue_chan) = Chan::new();
queue_chan.send(request_queue_arc.clone());
let notify_chan = shared_notify_chan.clone();
let stream_map_arc = stream_map_arc.clone();
let(portMut, chanMut) = Chan::new();
chanMut.send(visitor_arc_mut.clone());
// Spawn a task to handle the connection.
spawn(proc() {
let request_queue_arc = queue_port.recv();
//This updates counter by adding one to it.
let local_arc_mut = portMut.recv();
local_arc_mut.write(|value| {
*value += 1
});
//This sets a local variable to current count.
let mut visitor_count_local : uint = 0;
local_arc_mut.read(|value| {
//println(value.to_str());
visitor_count_local = *value;
});
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
ipToFile(peer_name.clone());
let mut buf = [0,..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
debug!("Request:\n{:s}", request_str);
let req_group : ~[&str] = request_str.splitn(' ', 3).collect();
if req_group.len() > 2 {
let path_str = "." + req_group[1].to_owned();
let mut path_obj = ~os::getcwd();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{:s}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{:s}]", path_str);
if path_str == ~"./" {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream, &visitor_count_local);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan);
}
}
});
}
});
}
fn respond_with_error_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
let mut stream = stream;
let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned());
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// TODO: Safe visitor counter.
fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) {
let mut stream = stream;
let visitor_count_other : uint = visitor_count_local.clone();
let response: ~str =
format!("{:s}{:s}<h1>Greetings, Krusty!</h1>
<h2>Visitor count: {:u}</h2></body></html>\r\n",
HTTP_OK, COUNTER_STYLE,
visitor_count_other);
debug!("Responding to counter request");
stream.write(response.as_bytes());
}
// TODO: Streaming file.
// TODO: Application-layer file caching.
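// Serves a static file through the shared LRU cache: on a hit the cached bytes are streamed to the client; on a miss the file is read, streamed, and then inserted into the cache for later requests.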
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) {
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
let mut check : bool = true;
cache.access(|local_cache| {
let bytes = local_cache.get(path);
match(bytes) {
Some(bytes) => {
// in cache
debug!("File found in cache: {}", path.display());
let size = bytes.len();
let iterations = size %100000;
if(iterations < 100000) {
stream.write(bytes.to_owned());
}
else {
for i in range(0, iterations) {
let start = i * 100000;
let tempByte = bytes.slice(start,start+100000-1);
stream.write(tempByte);
}
let left = size - (iterations*100000);
stream.write(bytes.slice_from(left));
}
check = false;
}
None => {}
}
});
if(check) {
cache.access(|local_cache| {
// not in cache
//let mut stream = stream;
debug!("File not found in cache: {}", path.display());
let mut file_reader = File::open(path).expect("Invalid file!");
let fileSize = fs::stat(path).size;
let iterations = fileSize&100000;
let mut byteArray: ~[u8] = ~[];
if(iterations < 100000) {
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
else {
for i in range(0, iterations) {
let tempArray = file_reader.read_bytes(100000);
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
//add to cache!
//automatically handles removing other elements if necessary
//if(fileSize < 10000000) {
debug!("File added to cache: {}", path.display());
local_cache.put(path.clone(), byteArray);
//}
});
}
}
// TODO: Server-side gashing.
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
//for now, just serve as static file
let shtml_file = File::open(path);
let mut rwStream = BufferedStream::new(shtml_file);
let mut newFile : ~[~str] = ~[];
let mut checkIfLastIsCmd : bool = false;
for line in rwStream.lines() {
let mut check : bool = false;
let mut newLine : ~[~str] = ~[];
for split in line.split(' ') {
if(check) {
let cmdSplit : ~[&str] = split.split('=').collect();
let command : ~str = cmdSplit[1].to_owned();
let finalCommand = command.slice(1,command.len()-1).to_owned();
let output : ~str = gash::run_cmdline(finalCommand);
newLine.push(output);
check = false;
checkIfLastIsCmd = true;
}
else if(split == "<!--#exec") {
check = true;
}
else if(split == "-->") {
}
else {
if(checkIfLastIsCmd && split.slice(0, 3) == "-->") {
newLine.push(split.slice_from(3).to_owned());
newLine.push(" ".to_owned());
checkIfLastIsCmd = false;
}
else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") {
newLine.push(split.slice(0, split.len()-9).to_owned());
check = true; }
else {
newLine.push(split.to_owned());
newLine.push(" ".to_owned());
}
}
}
let mut fullLine : ~str = ~"";
for s in newLine.iter() {
fullLine = fullLine + s.clone();
}
newFile.push(fullLine);
}
let mut fullPage : ~str = ~"";
for s in newFile.iter() {
fullPage = fullPage + s.clone();
}
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
stream.write(fullPage.as_bytes());
}
// TODO: Smarter Scheduling.
fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_port, stream_chan) = Chan::new();
stream_chan.send(stream);
unsafe {
// Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
stream_map_arc.unsafe_access(|local_stream_map| {
let stream = stream_port.recv();
local_stream_map.swap(peer_name.clone(), stream);
});
}
// Enqueue the HTTP request.
let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() };
let (req_port, req_chan) = Chan::new();
req_chan.send(req);
debug!("Waiting for queue mutex lock.");
req_queue_arc.access(|local_req_queue| {
debug!("Got queue mutex lock.");
let req: HTTP_Request = req_port.recv();
local_req_queue.push(req);
//debug!("Priority of new request is {:d}", getPriority(name.clone()));
debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len()); | }
// TODO: Smarter Scheduling.
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
let cacheArc = self.cache.clone();
//Semaphore for counting tasks
let s = Semaphore::new(4);
// Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port.
let (request_port, request_chan) = Chan::new();
loop {
self.notify_port.recv(); // waiting for new request enqueued.
req_queue_get.access( |req_queue| {
match req_queue.maybe_pop() { // Priority queue.
None => { /* do nothing */ }
Some(req) => {
request_chan.send(req);
debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len());
}
}
});
let request = request_port.recv();
// Get stream from hashmap.
// Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
let (stream_port, stream_chan) = Chan::new();
let (request_port_local, request_chan_local) = Chan::new();
unsafe {
stream_map_get.unsafe_access(|local_stream_map| {
let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream");
stream_chan.send(stream);
request_chan_local.send(request.path.clone());
});
}
if(fs::stat(request.path).size < 1000000) {
let mut file_reader = File::open(request.path).expect("Invalid file!");
let mut stream = stream_port.recv();
stream.write(HTTP_OK.as_bytes());
stream.write(file_reader.read_to_end());
}
| });
notify_chan.send(()); // Send incoming notification to responder task.
| random_line_split |
zhtta.rs | return true;
}
}
return false;
}
}
impl std::cmp::Ord for HTTP_Request {
fn lt(&self, other: &HTTP_Request) -> bool {
//First get the file sizes for the Http_Request
let sizeSelf = fs::stat(self.path).size;
let sizeOther = fs::stat(other.path).size;
if sizeOther > sizeSelf {
return true;
}
else {
return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone());
}
}
}
struct WebServer {
ip: ~str,
port: uint,
www_dir_path: ~Path,
request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>,
stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>,
cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>,
notify_port: Port<()>,
shared_notify_chan: SharedChan<()>
}
impl WebServer {
fn new(ip: &str, port: uint, www_dir: &str) -> WebServer {
let (notify_port, shared_notify_chan) = SharedChan::new();
let www_dir_path = ~Path::new(www_dir);
os::change_dir(www_dir_path.clone());
WebServer {
ip: ip.to_owned(),
port: port,
www_dir_path: www_dir_path,
request_queue_arc: MutexArc::new(PriorityQueue::new()),
stream_map_arc: MutexArc::new(HashMap::new()),
cache: MutexArc::new(MutexArc::new(LruCache::new(10))),
notify_port: notify_port,
shared_notify_chan: shared_notify_chan
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error.");
let www_dir_path_str = self.www_dir_path.as_str().expect("invalid www path?").to_owned();
let request_queue_arc = self.request_queue_arc.clone();
let shared_notify_chan = self.shared_notify_chan.clone();
let stream_map_arc = self.stream_map_arc.clone();
spawn(proc() {
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println!("{:s} listening on {:s} (serving from: {:s}).",
SERVER_NAME, addr.to_str(), www_dir_path_str);
//Visitor counter
let num_visitor : uint = 0;
//Arc for visitor counter.
let visitor_arc_mut = RWArc::new(num_visitor);
for stream in acceptor.incoming() {
let (queue_port, queue_chan) = Chan::new();
queue_chan.send(request_queue_arc.clone());
let notify_chan = shared_notify_chan.clone();
let stream_map_arc = stream_map_arc.clone();
let(portMut, chanMut) = Chan::new();
chanMut.send(visitor_arc_mut.clone());
// Spawn a task to handle the connection.
spawn(proc() {
let request_queue_arc = queue_port.recv();
//This updates counter by adding one to it.
let local_arc_mut = portMut.recv();
local_arc_mut.write(|value| {
*value += 1
});
//This sets a local variable to current count.
let mut visitor_count_local : uint = 0;
local_arc_mut.read(|value| {
//println(value.to_str());
visitor_count_local = *value;
});
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
ipToFile(peer_name.clone());
let mut buf = [0,..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
debug!("Request:\n{:s}", request_str);
let req_group : ~[&str] = request_str.splitn(' ', 3).collect();
if req_group.len() > 2 {
let path_str = "." + req_group[1].to_owned();
let mut path_obj = ~os::getcwd();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{:s}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{:s}]", path_str);
if path_str == ~"./" {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream, &visitor_count_local);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan);
}
}
});
}
});
}
fn respond_with_error_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
let mut stream = stream;
let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned());
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// TODO: Safe visitor counter.
fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) {
let mut stream = stream;
let visitor_count_other : uint = visitor_count_local.clone();
let response: ~str =
format!("{:s}{:s}<h1>Greetings, Krusty!</h1>
<h2>Visitor count: {:u}</h2></body></html>\r\n",
HTTP_OK, COUNTER_STYLE,
visitor_count_other);
debug!("Responding to counter request");
stream.write(response.as_bytes());
}
// TODO: Streaming file.
// TODO: Application-layer file caching.
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) {
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
let mut check : bool = true;
cache.access(|local_cache| {
let bytes = local_cache.get(path);
match(bytes) {
Some(bytes) => {
// in cache
debug!("File found in cache: {}", path.display());
let size = bytes.len();
let iterations = size %100000;
if(iterations < 100000) {
stream.write(bytes.to_owned());
}
else {
for i in range(0, iterations) {
let start = i * 100000;
let tempByte = bytes.slice(start,start+100000-1);
stream.write(tempByte);
}
let left = size - (iterations*100000);
stream.write(bytes.slice_from(left));
}
check = false;
}
None => {}
}
});
if(check) {
cache.access(|local_cache| {
// not in cache
//let mut stream = stream;
debug!("File not found in cache: {}", path.display());
let mut file_reader = File::open(path).expect("Invalid file!");
let fileSize = fs::stat(path).size;
let iterations = fileSize&100000;
let mut byteArray: ~[u8] = ~[];
if(iterations < 100000) {
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
else {
for i in range(0, iterations) {
let tempArray = file_reader.read_bytes(100000);
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
//add to cache!
//automatically handles removing other elements if necessary
//if(fileSize < 10000000) {
debug!("File added to cache: {}", path.display());
local_cache.put(path.clone(), byteArray);
//}
});
}
}
// TODO: Server-side gashing.
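// Server-side includes: each line of the .shtml file is scanned for <!--#exec cmd="..." --> directives, the embedded command is run through gash::run_cmdline, and its output is spliced into the page before it is sent.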
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
//for now, just serve as static file
let shtml_file = File::open(path);
let mut rwStream = BufferedStream::new(shtml_file);
let mut newFile : ~[~str] = ~[];
let mut checkIfLastIsCmd : bool = false;
for line in rwStream.lines() {
let mut check : bool = false;
let mut newLine : ~[~str] = ~[];
for split in line.split(' ') {
if(check) {
let cmdSplit : ~[&str] = split.split('=').collect();
let command : ~str = cmdSplit[1].to_owned();
let finalCommand = command.slice(1,command.len()-1).to_owned();
let output : ~str = gash::run_cmdline(finalCommand);
newLine.push(output);
check = false;
checkIfLastIsCmd = true;
}
else if(split == "<!--#exec") {
check = true;
}
else if(split == "-->") {
}
else {
if(checkIfLastIsCmd && split.slice(0, 3) == "-->") {
newLine.push(split.slice_from(3).to_owned());
newLine.push(" ".to_owned());
checkIfLastIsCmd = false;
}
else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") {
newLine.push(split.slice(0, split.len()-9).to_owned());
check = true; }
else {
newLine.push(split.to_owned());
newLine.push(" ".to_owned());
}
}
}
let mut fullLine : ~str = ~"";
for s in newLine.iter() {
fullLine = fullLine + s.clone();
}
newFile.push(fullLine);
}
let mut fullPage : ~str = ~"";
for s in newFile.iter() {
fullPage = fullPage + s.clone();
}
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
stream.write(fullPage.as_bytes());
}
// TODO: Smarter Scheduling.
fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) | req_queue_arc.access(|local_req_queue| {
debug!("Got queue mutex lock.");
let req: HTTP_Request = req_port.recv();
local_req_queue.push(req);
//debug!("Priority of new request is {:d}", getPriority(name.clone()));
debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len());
});
notify_chan.send(()); // Send incoming notification to responder task.
}
// TODO: Smarter Scheduling.
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
let cacheArc = self.cache.clone();
//Semaphore for counting tasks
let s = Semaphore::new(4);
// Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port.
let (request_port, request_chan) = Chan::new();
loop {
self.notify_port.recv(); // waiting for new request enqueued.
req_queue_get.access( |req_queue| {
match req_queue.maybe_pop() { // Priority queue.
None => { /* do nothing */ }
Some(req) => {
request_chan.send(req);
debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len());
}
}
});
let request = request_port.recv();
// Get stream from hashmap.
// Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
let (stream_port, stream_chan) = Chan::new();
let (request_port_local, request_chan_local) = Chan::new();
unsafe {
stream_map_get.unsafe_access(|local_stream_map| {
let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream");
stream_chan.send(stream);
request_chan_local.send(request.path.clone());
});
}
if(fs::stat(request.path).size < 1000000) {
let mut file_reader = File::open(request.path).expect("Invalid file!");
let mut stream = stream_port.recv();
stream.write(HTTP_OK.as_bytes());
stream.write(file_reader.read_to_end());
}
| {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_port, stream_chan) = Chan::new();
stream_chan.send(stream);
unsafe {
// Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
stream_map_arc.unsafe_access(|local_stream_map| {
let stream = stream_port.recv();
local_stream_map.swap(peer_name.clone(), stream);
});
}
// Enqueue the HTTP request.
let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() };
let (req_port, req_chan) = Chan::new();
req_chan.send(req);
debug!("Waiting for queue mutex lock."); | identifier_body |
zhtta.rs | return true;
}
}
return false;
}
}
impl std::cmp::Ord for HTTP_Request {
fn lt(&self, other: &HTTP_Request) -> bool {
//First get the file sizes for the Http_Request
let sizeSelf = fs::stat(self.path).size;
let sizeOther = fs::stat(other.path).size;
if sizeOther > sizeSelf {
return true;
}
else {
return getPriority(self.peer_name.clone()) < getPriority(other.peer_name.clone());
}
}
}
struct WebServer {
ip: ~str,
port: uint,
www_dir_path: ~Path,
request_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>,
stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>,
cache: MutexArc<MutexArc<LruCache<Path,~[u8]>>>,
notify_port: Port<()>,
shared_notify_chan: SharedChan<()>
}
impl WebServer {
fn new(ip: &str, port: uint, www_dir: &str) -> WebServer {
let (notify_port, shared_notify_chan) = SharedChan::new();
let www_dir_path = ~Path::new(www_dir);
os::change_dir(www_dir_path.clone());
WebServer {
ip: ip.to_owned(),
port: port,
www_dir_path: www_dir_path,
request_queue_arc: MutexArc::new(PriorityQueue::new()),
stream_map_arc: MutexArc::new(HashMap::new()),
cache: MutexArc::new(MutexArc::new(LruCache::new(10))),
notify_port: notify_port,
shared_notify_chan: shared_notify_chan
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error.");
let www_dir_path_str = self.www_dir_path.as_str().expect("invalid www path?").to_owned();
let request_queue_arc = self.request_queue_arc.clone();
let shared_notify_chan = self.shared_notify_chan.clone();
let stream_map_arc = self.stream_map_arc.clone();
spawn(proc() {
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println!("{:s} listening on {:s} (serving from: {:s}).",
SERVER_NAME, addr.to_str(), www_dir_path_str);
//Visitor counter
let num_visitor : uint = 0;
//Arc for visitor counter.
let visitor_arc_mut = RWArc::new(num_visitor);
for stream in acceptor.incoming() {
let (queue_port, queue_chan) = Chan::new();
queue_chan.send(request_queue_arc.clone());
let notify_chan = shared_notify_chan.clone();
let stream_map_arc = stream_map_arc.clone();
let(portMut, chanMut) = Chan::new();
chanMut.send(visitor_arc_mut.clone());
// Spawn a task to handle the connection.
spawn(proc() {
let request_queue_arc = queue_port.recv();
//This updates counter by adding one to it.
let local_arc_mut = portMut.recv();
local_arc_mut.write(|value| {
*value += 1
});
//This sets a local variable to current count.
let mut visitor_count_local : uint = 0;
local_arc_mut.read(|value| {
//println(value.to_str());
visitor_count_local = *value;
});
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
ipToFile(peer_name.clone());
let mut buf = [0,..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
debug!("Request:\n{:s}", request_str);
let req_group : ~[&str] = request_str.splitn(' ', 3).collect();
if req_group.len() > 2 {
let path_str = "." + req_group[1].to_owned();
let mut path_obj = ~os::getcwd();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{:s}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{:s}]", path_str);
if path_str == ~"./" {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream, &visitor_count_local);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan);
}
}
});
}
});
}
fn | (stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
let mut stream = stream;
let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned());
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// TODO: Safe visitor counter.
fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) {
let mut stream = stream;
let visitor_count_other : uint = visitor_count_local.clone();
let response: ~str =
format!("{:s}{:s}<h1>Greetings, Krusty!</h1>
<h2>Visitor count: {:u}</h2></body></html>\r\n",
HTTP_OK, COUNTER_STYLE,
visitor_count_other);
debug!("Responding to counter request");
stream.write(response.as_bytes());
}
// TODO: Streaming file.
// TODO: Application-layer file caching.
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) {
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
let mut check : bool = true;
cache.access(|local_cache| {
let bytes = local_cache.get(path);
match(bytes) {
Some(bytes) => {
// in cache
debug!("File found in cache: {}", path.display());
let size = bytes.len();
let iterations = size %100000;
if(iterations < 100000) {
stream.write(bytes.to_owned());
}
else {
for i in range(0, iterations) {
let start = i * 100000;
let tempByte = bytes.slice(start,start+100000-1);
stream.write(tempByte);
}
let left = size - (iterations*100000);
stream.write(bytes.slice_from(left));
}
check = false;
}
None => {}
}
});
if(check) {
cache.access(|local_cache| {
// not in cache
//let mut stream = stream;
debug!("File not found in cache: {}", path.display());
let mut file_reader = File::open(path).expect("Invalid file!");
let fileSize = fs::stat(path).size;
let iterations = fileSize&100000;
let mut byteArray: ~[u8] = ~[];
if(iterations < 100000) {
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
else {
for i in range(0, iterations) {
let tempArray = file_reader.read_bytes(100000);
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
//add to cache!
//automatically handles removing other elements if necessary
//if(fileSize < 10000000) {
debug!("File added to cache: {}", path.display());
local_cache.put(path.clone(), byteArray);
//}
});
}
}
// TODO: Server-side gashing.
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
//for now, just serve as static file
let shtml_file = File::open(path);
let mut rwStream = BufferedStream::new(shtml_file);
let mut newFile : ~[~str] = ~[];
let mut checkIfLastIsCmd : bool = false;
for line in rwStream.lines() {
let mut check : bool = false;
let mut newLine : ~[~str] = ~[];
for split in line.split(' ') {
if(check) {
let cmdSplit : ~[&str] = split.split('=').collect();
let command : ~str = cmdSplit[1].to_owned();
let finalCommand = command.slice(1,command.len()-1).to_owned();
let output : ~str = gash::run_cmdline(finalCommand);
newLine.push(output);
check = false;
checkIfLastIsCmd = true;
}
else if(split == "<!--#exec") {
check = true;
}
else if(split == "-->") {
}
else {
if(checkIfLastIsCmd && split.slice(0, 3) == "-->") {
newLine.push(split.slice_from(3).to_owned());
newLine.push(" ".to_owned());
checkIfLastIsCmd = false;
}
else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") {
newLine.push(split.slice(0, split.len()-9).to_owned());
check = true; }
else {
newLine.push(split.to_owned());
newLine.push(" ".to_owned());
}
}
}
let mut fullLine : ~str = ~"";
for s in newLine.iter() {
fullLine = fullLine + s.clone();
}
newFile.push(fullLine);
}
let mut fullPage : ~str = ~"";
for s in newFile.iter() {
fullPage = fullPage + s.clone();
}
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
stream.write(fullPage.as_bytes());
}
// TODO: Smarter Scheduling.
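// Saves the client's TcpStream in the shared map keyed by peer name, pushes an HTTP_Request onto the shared priority queue, and notifies the responder task through notify_chan.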
fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_port, stream_chan) = Chan::new();
stream_chan.send(stream);
unsafe {
// Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
stream_map_arc.unsafe_access(|local_stream_map| {
let stream = stream_port.recv();
local_stream_map.swap(peer_name.clone(), stream);
});
}
// Enqueue the HTTP request.
let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() };
let (req_port, req_chan) = Chan::new();
req_chan.send(req);
debug!("Waiting for queue mutex lock.");
req_queue_arc.access(|local_req_queue| {
debug!("Got queue mutex lock.");
let req: HTTP_Request = req_port.recv();
local_req_queue.push(req);
//debug!("Priority of new request is {:d}", getPriority(name.clone()));
debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len());
});
notify_chan.send(()); // Send incoming notification to responder task.
}
// TODO: Smarter Scheduling.
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
let cacheArc = self.cache.clone();
//Semaphore for counting tasks
let s = Semaphore::new(4);
// Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port.
let (request_port, request_chan) = Chan::new();
loop {
self.notify_port.recv(); // waiting for new request enqueued.
req_queue_get.access( |req_queue| {
match req_queue.maybe_pop() { // Priority queue.
None => { /* do nothing */ }
Some(req) => {
request_chan.send(req);
debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len());
}
}
});
let request = request_port.recv();
// Get stream from hashmap.
// Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
let (stream_port, stream_chan) = Chan::new();
let (request_port_local, request_chan_local) = Chan::new();
unsafe {
stream_map_get.unsafe_access(|local_stream_map| {
let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream");
stream_chan.send(stream);
request_chan_local.send(request.path.clone());
});
}
if(fs::stat(request.path).size < 1000000) {
let mut file_reader = File::open(request.path).expect("Invalid file!");
let mut stream = stream_port.recv();
stream.write(HTTP_OK.as_bytes());
stream.write(file_reader.read_to_end());
}
| respond_with_error_page | identifier_name |
zhtta.rs | notify_chan: SharedChan<()>
}
impl WebServer {
fn new(ip: &str, port: uint, www_dir: &str) -> WebServer {
let (notify_port, shared_notify_chan) = SharedChan::new();
let www_dir_path = ~Path::new(www_dir);
os::change_dir(www_dir_path.clone());
WebServer {
ip: ip.to_owned(),
port: port,
www_dir_path: www_dir_path,
request_queue_arc: MutexArc::new(PriorityQueue::new()),
stream_map_arc: MutexArc::new(HashMap::new()),
cache: MutexArc::new(MutexArc::new(LruCache::new(10))),
notify_port: notify_port,
shared_notify_chan: shared_notify_chan
}
}
fn run(&mut self) {
self.listen();
self.dequeue_static_file_request();
}
fn listen(&mut self) {
let addr = from_str::<SocketAddr>(format!("{:s}:{:u}", self.ip, self.port)).expect("Address error.");
let www_dir_path_str = self.www_dir_path.as_str().expect("invalid www path?").to_owned();
let request_queue_arc = self.request_queue_arc.clone();
let shared_notify_chan = self.shared_notify_chan.clone();
let stream_map_arc = self.stream_map_arc.clone();
spawn(proc() {
let mut acceptor = net::tcp::TcpListener::bind(addr).listen();
println!("{:s} listening on {:s} (serving from: {:s}).",
SERVER_NAME, addr.to_str(), www_dir_path_str);
//Visitor counter
let num_visitor : uint = 0;
//Arc for visitor counter.
let visitor_arc_mut = RWArc::new(num_visitor);
for stream in acceptor.incoming() {
let (queue_port, queue_chan) = Chan::new();
queue_chan.send(request_queue_arc.clone());
let notify_chan = shared_notify_chan.clone();
let stream_map_arc = stream_map_arc.clone();
let(portMut, chanMut) = Chan::new();
chanMut.send(visitor_arc_mut.clone());
// Spawn a task to handle the connection.
spawn(proc() {
let request_queue_arc = queue_port.recv();
//This updates counter by adding one to it.
let local_arc_mut = portMut.recv();
local_arc_mut.write(|value| {
*value += 1
});
//This sets a local variable to current count.
let mut visitor_count_local : uint = 0;
local_arc_mut.read(|value| {
//println(value.to_str());
visitor_count_local = *value;
});
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
ipToFile(peer_name.clone());
let mut buf = [0,..500];
stream.read(buf);
let request_str = str::from_utf8(buf);
debug!("Request:\n{:s}", request_str);
let req_group : ~[&str] = request_str.splitn(' ', 3).collect();
if req_group.len() > 2 {
let path_str = "." + req_group[1].to_owned();
let mut path_obj = ~os::getcwd();
path_obj.push(path_str.clone());
let ext_str = match path_obj.extension_str() {
Some(e) => e,
None => "",
};
debug!("Requested path: [{:s}]", path_obj.as_str().expect("error"));
debug!("Requested path: [{:s}]", path_str);
if path_str == ~"./" {
debug!("===== Counter Page request =====");
WebServer::respond_with_counter_page(stream, &visitor_count_local);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if !path_obj.exists() || path_obj.is_dir() {
debug!("===== Error page request =====");
WebServer::respond_with_error_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else if ext_str == "shtml" { // Dynamic web pages.
debug!("===== Dynamic Page request =====");
WebServer::respond_with_dynamic_page(stream, path_obj);
debug!("=====Terminated connection from [{:s}].=====", peer_name);
} else {
debug!("===== Static Page request =====");
WebServer::enqueue_static_file_request(stream, path_obj, stream_map_arc, request_queue_arc, notify_chan);
}
}
});
}
});
}
fn respond_with_error_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
let mut stream = stream;
let msg: ~str = format!("Cannot open: {:s}", path.as_str().expect("invalid path").to_owned());
stream.write(HTTP_BAD.as_bytes());
stream.write(msg.as_bytes());
}
// TODO: Safe visitor counter.
fn respond_with_counter_page(stream: Option<std::io::net::tcp::TcpStream>, visitor_count_local: &uint) {
let mut stream = stream;
let visitor_count_other : uint = visitor_count_local.clone();
let response: ~str =
format!("{:s}{:s}<h1>Greetings, Krusty!</h1>
<h2>Visitor count: {:u}</h2></body></html>\r\n",
HTTP_OK, COUNTER_STYLE,
visitor_count_other);
debug!("Responding to counter request");
stream.write(response.as_bytes());
}
// TODO: Streaming file.
// TODO: Application-layer file caching.
fn respond_with_static_file(stream: Option<std::io::net::tcp::TcpStream>, path: &Path, cache: MutexArc<LruCache<Path, ~[u8]>>) {
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
let mut check : bool = true;
cache.access(|local_cache| {
let bytes = local_cache.get(path);
match(bytes) {
Some(bytes) => {
// in cache
debug!("File found in cache: {}", path.display());
let size = bytes.len();
let iterations = size %100000;
if(iterations < 100000) {
stream.write(bytes.to_owned());
}
else {
for i in range(0, iterations) {
let start = i * 100000;
let tempByte = bytes.slice(start,start+100000-1);
stream.write(tempByte);
}
let left = size - (iterations*100000);
stream.write(bytes.slice_from(left));
}
check = false;
}
None => {}
}
});
if(check) {
cache.access(|local_cache| {
// not in cache
//let mut stream = stream;
debug!("File not found in cache: {}", path.display());
let mut file_reader = File::open(path).expect("Invalid file!");
let fileSize = fs::stat(path).size;
let iterations = fileSize&100000;
let mut byteArray: ~[u8] = ~[];
if(iterations < 100000) {
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
else {
for i in range(0, iterations) {
let tempArray = file_reader.read_bytes(100000);
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
let tempArray = file_reader.read_to_end();
stream.write(tempArray);
byteArray.push_all_move(tempArray);
}
//add to cache!
//automatically handles removing other elements if necessary
//if(fileSize < 10000000) {
debug!("File added to cache: {}", path.display());
local_cache.put(path.clone(), byteArray);
//}
});
}
}
// TODO: Server-side gashing.
fn respond_with_dynamic_page(stream: Option<std::io::net::tcp::TcpStream>, path: &Path) {
//for now, just serve as static file
let shtml_file = File::open(path);
let mut rwStream = BufferedStream::new(shtml_file);
let mut newFile : ~[~str] = ~[];
let mut checkIfLastIsCmd : bool = false;
for line in rwStream.lines() {
let mut check : bool = false;
let mut newLine : ~[~str] = ~[];
for split in line.split(' ') {
if(check) {
let cmdSplit : ~[&str] = split.split('=').collect();
let command : ~str = cmdSplit[1].to_owned();
let finalCommand = command.slice(1,command.len()-1).to_owned();
let output : ~str = gash::run_cmdline(finalCommand);
newLine.push(output);
check = false;
checkIfLastIsCmd = true;
}
else if(split == "<!--#exec") {
check = true;
}
else if(split == "-->") {
}
else {
if(checkIfLastIsCmd && split.slice(0, 3) == "-->") {
newLine.push(split.slice_from(3).to_owned());
newLine.push(" ".to_owned());
checkIfLastIsCmd = false;
}
else if(split.len() > 9 && split.slice_from(split.len() - 9) == "<!--#exec") {
newLine.push(split.slice(0, split.len()-9).to_owned());
check = true; }
else {
newLine.push(split.to_owned());
newLine.push(" ".to_owned());
}
}
}
let mut fullLine : ~str = ~"";
for s in newLine.iter() {
fullLine = fullLine + s.clone();
}
newFile.push(fullLine);
}
let mut fullPage : ~str = ~"";
for s in newFile.iter() {
fullPage = fullPage + s.clone();
}
let mut stream = stream;
stream.write(HTTP_OK.as_bytes());
stream.write(fullPage.as_bytes());
}
// TODO: Smarter Scheduling.
fn enqueue_static_file_request(stream: Option<std::io::net::tcp::TcpStream>, path_obj: &Path, stream_map_arc: MutexArc<HashMap<~str, Option<std::io::net::tcp::TcpStream>>>, req_queue_arc: MutexArc<PriorityQueue<HTTP_Request>>, notify_chan: SharedChan<()>) {
// Save stream in hashmap for later response.
let mut stream = stream;
let peer_name = WebServer::get_peer_name(&mut stream);
let (stream_port, stream_chan) = Chan::new();
stream_chan.send(stream);
unsafe {
// Use an unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
stream_map_arc.unsafe_access(|local_stream_map| {
let stream = stream_port.recv();
local_stream_map.swap(peer_name.clone(), stream);
});
}
// Enqueue the HTTP request.
let req = HTTP_Request { peer_name: peer_name.clone(), path: ~path_obj.clone() };
let (req_port, req_chan) = Chan::new();
req_chan.send(req);
debug!("Waiting for queue mutex lock.");
req_queue_arc.access(|local_req_queue| {
debug!("Got queue mutex lock.");
let req: HTTP_Request = req_port.recv();
local_req_queue.push(req);
//debug!("Priority of new request is {:d}", getPriority(name.clone()));
debug!("A new request enqueued, now the length of queue is {:u}.", local_req_queue.len());
});
notify_chan.send(()); // Send incoming notification to responder task.
}
// TODO: Smarter Scheduling.
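// Responder loop: waits on notify_port, pops the highest-priority request from the queue, looks up its TcpStream, and serves it; files under 1 MB are written inline, larger ones are handled under a counting semaphore on separate tasks.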
fn dequeue_static_file_request(&mut self) {
let req_queue_get = self.request_queue_arc.clone();
let stream_map_get = self.stream_map_arc.clone();
let cacheArc = self.cache.clone();
//Semaphore for counting tasks
let s = Semaphore::new(4);
// Port<> cannot be sent to another task. So we have to make this task as the main task that can access self.notify_port.
let (request_port, request_chan) = Chan::new();
loop {
self.notify_port.recv(); // waiting for new request enqueued.
req_queue_get.access( |req_queue| {
match req_queue.maybe_pop() { // Priority queue.
None => { /* do nothing */ }
Some(req) => {
request_chan.send(req);
debug!("A new request dequeued, now the length of queue is {:u}.", req_queue.len());
}
}
});
let request = request_port.recv();
// Get stream from hashmap.
// Use unsafe method, because TcpStream in Rust 0.9 doesn't have "Freeze" bound.
let (stream_port, stream_chan) = Chan::new();
let (request_port_local, request_chan_local) = Chan::new();
unsafe {
stream_map_get.unsafe_access(|local_stream_map| {
let stream = local_stream_map.pop(&request.peer_name).expect("no option tcpstream");
stream_chan.send(stream);
request_chan_local.send(request.path.clone());
});
}
if(fs::stat(request.path).size < 1000000) {
let mut file_reader = File::open(request.path).expect("Invalid file!");
let mut stream = stream_port.recv();
stream.write(HTTP_OK.as_bytes());
stream.write(file_reader.read_to_end());
}
else | {
let semaphore = s.clone();
semaphore.acquire();
// TODO: Spawn more tasks to respond to the dequeued requests concurrently. You may need a semaphore to control the concurrency.
semaphore.access( || {
//Sending cache into spawn
let(portCache, chanCache) = Chan::new();
chanCache.send(cacheArc.clone());
//Sending stream into spawn
let streamLocal = stream_port.recv();
let(portStream, chanStream) = Chan::new();
chanStream.send(streamLocal);
//Sending request into spawn
let portLocal = request_port_local.recv(); | conditional_block |
|
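// The responder above caps concurrency with a counting semaphore created as Semaphore::new(4).
// A minimal modern-Rust sketch of that same idea, built only from std's Mutex and Condvar
// (the type and method names below are illustrative, not from the code above):
use std::sync::{Condvar, Mutex};

struct CountingSemaphore {
    permits: Mutex<usize>,
    cond: Condvar,
}

impl CountingSemaphore {
    fn new(permits: usize) -> Self {
        CountingSemaphore { permits: Mutex::new(permits), cond: Condvar::new() }
    }

    // Block until a permit is available, then take it.
    fn acquire(&self) {
        let mut count = self.permits.lock().unwrap();
        while *count == 0 {
            count = self.cond.wait(count).unwrap();
        }
        *count -= 1;
    }

    // Return a permit and wake one waiting task.
    fn release(&self) {
        *self.permits.lock().unwrap() += 1;
        self.cond.notify_one();
    }
}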
mesh_generator.rs |
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
use crate::{
app_state::AppState,
fog::FogConfig,
mesh_fade::{FadeUniform, FADE_IN, FADE_OUT},
utilities::bevy_util::thread_local_resource::ThreadLocalResource,
voxel_map::{Voxel, VoxelMap},
};
use bevy_mod_bounding::{aabb::Aabb, obb::Obb};
use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType};
use building_blocks::{
mesh::*,
prelude::*,
storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap},
};
use bevy::{
asset::prelude::*,
ecs,
prelude::*,
render::{mesh::Indices, pipeline::PrimitiveTopology},
tasks::ComputeTaskPool,
};
use std::{cell::RefCell, collections::VecDeque};
fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize {
40 * pool.thread_num()
}
#[derive(Default)]
pub struct MeshCommandQueue {
commands: VecDeque<MeshCommand>,
}
impl MeshCommandQueue {
pub fn enqueue(&mut self, command: MeshCommand) {
self.commands.push_front(command);
}
pub fn is_empty(&self) -> bool {
self.commands.is_empty()
}
pub fn len(&self) -> usize {
self.commands.len()
}
pub fn clear(&mut self) {
self.commands.clear();
}
}
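// apply_mesh_commands below drains this queue by iterating `.iter().rev()` and then truncating,
// so commands pushed first (now sitting at the back) are handled first. A tiny stand-alone
// sketch of that push-front / drain-from-the-back pattern using a plain VecDeque of ids:
use std::collections::VecDeque;

fn demo_queue_order() {
    let mut commands: VecDeque<u32> = VecDeque::new();
    for id in 1..=4 {
        commands.push_front(id);
    }
    // The deque is now [4, 3, 2, 1]: newest at the front, oldest at the back.
    let processed: Vec<u32> = commands.iter().rev().take(2).cloned().collect();
    assert_eq!(processed, vec![1, 2]); // oldest commands are processed first
    // Drop the processed commands from the back, keeping the newer ones.
    commands.truncate(commands.len() - processed.len());
    assert_eq!(commands, VecDeque::from(vec![4, 3]));
}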
// PERF: try to eliminate the use of multiple Vecs
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MeshCommand {
Create(LodChunkKey3),
Update(LodChunkUpdate3),
}
#[derive(Default)]
pub struct ChunkMeshes {
// Map from chunk key to mesh entity.
entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
}
impl ChunkMeshes {
pub fn clear_entities(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) {
self.entities.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
self.remove_queue.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
}
pub fn remove_entity(
&mut self,
lod_chunk_key: &LodChunkKey3,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) {
clear_up_entity(&entity, &mesh, commands, meshes);
}
}
}
fn clear_up_entity(
entity: &Entity,
mesh: &Handle<Mesh>,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
commands.entity(*entity).despawn();
meshes.remove(mesh);
}
// Utility struct for building the mesh
#[derive(Debug, Clone)]
struct MeshBuf {
pub positions: Vec<[f32; 3]>,
pub normals: Vec<[f32; 3]>,
pub tex_coords: Vec<[f32; 2]>,
pub layer: Vec<u32>,
pub indices: Vec<u32>,
pub extent: Extent3i,
}
impl Default for MeshBuf {
fn default() -> Self {
Self {
positions: Vec::new(),
normals: Vec::new(),
tex_coords: Vec::new(),
layer: Vec::new(),
indices: Vec::new(),
extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])),
}
}
}
impl MeshBuf {
fn add_quad(
&mut self,
face: &OrientedCubeFace,
quad: &UnorientedQuad,
voxel_size: f32,
u_flip_face: Axis3,
layer: u32,
) {
let start_index = self.positions.len() as u32;
self.positions
.extend_from_slice(&face.quad_mesh_positions(quad, voxel_size));
self.normals.extend_from_slice(&face.quad_mesh_normals());
let flip_v = true;
self.tex_coords
.extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad));
self.layer.extend_from_slice(&[layer; 4]);
self.indices
.extend_from_slice(&face.quad_mesh_indices(start_index));
}
}
pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>);
pub struct ArrayTexturePipelines(pub RenderPipelines);
/// Generates new meshes for all dirty chunks.
pub fn mesh_generator_system(
mut commands: Commands,
pool: Res<ComputeTaskPool>,
voxel_map: Res<VoxelMap>,
local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>,
mut mesh_commands: ResMut<MeshCommandQueue>,
mut mesh_assets: ResMut<Assets<Mesh>>,
mut chunk_meshes: ResMut<ChunkMeshes>,
array_texture_pipelines: Res<ArrayTexturePipelines>,
array_texture_material: Res<ArrayTextureMaterial>,
mut state: ResMut<State<AppState>>,
) {
let first_run = chunk_meshes.entities.is_empty();
let new_chunk_meshes = apply_mesh_commands(
&*voxel_map,
&*local_mesh_buffers,
&*pool,
&mut *mesh_commands,
&mut *chunk_meshes,
&mut commands,
first_run,
);
spawn_mesh_entities(
new_chunk_meshes,
&mut commands,
&mut *mesh_assets,
&mut *chunk_meshes,
&*array_texture_pipelines,
&*array_texture_material,
);
if first_run {
println!("MESHES GENERATED!\n-> AppState::Running");
state.set(AppState::Running).unwrap();
}
}
fn apply_mesh_commands(
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
pool: &ComputeTaskPool,
mesh_commands: &mut MeshCommandQueue,
chunk_meshes: &mut ChunkMeshes,
commands: &mut Commands,
first_run: bool,
) -> Vec<(LodChunkKey3, Option<MeshBuf>)> {
let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool));
let mut num_creates = 0;
let mut num_updates = 0;
pool.scope(|s| {
let mut num_meshes_created = 0;
for command in mesh_commands.commands.iter().rev().cloned() {
match command {
MeshCommand::Create(lod_key) => {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_creates += 1;
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers),
)
});
}
}
MeshCommand::Update(update) => {
num_updates += 1;
match update {
LodChunkUpdate3::Split(split) => {
if let Some((entity, mesh)) =
chunk_meshes.entities.remove(&split.old_chunk)
{
chunk_meshes
.remove_queue
.insert(split.old_chunk, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
for &lod_key in split.new_chunks.iter() {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(
lod_key,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
LodChunkUpdate3::Merge(merge) => {
for lod_key in merge.old_chunks.iter() {
if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key)
{
chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
}
if !chunk_meshes.entities.contains_key(&merge.new_chunk) {
num_meshes_created += 1;
s.spawn(async move {
(
merge.new_chunk,
create_mesh_for_chunk(
merge.new_chunk,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
}
}
if !first_run && num_meshes_created >= num_chunks_to_mesh {
break;
}
}
let new_length = mesh_commands.len() - (num_creates + num_updates);
mesh_commands.commands.truncate(new_length);
})
}
pub fn mesh_despawn_system(
mut commands: Commands,
mut chunk_meshes: ResMut<ChunkMeshes>,
mut meshes: ResMut<Assets<Mesh>>,
query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>,
) {
for (fade, lod_chunk_key) in query.iter() {
if !fade.fade_in && fade.remaining == 0.0 {
if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) {
commands.entity(entity).despawn();
meshes.remove(&mesh);
}
}
}
}
fn create_mesh_for_chunk(
key: LodChunkKey3,
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
) -> Option<MeshBuf> {
let chunks = voxel_map.pyramid.level(key.lod);
let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key);
let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent);
// Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk.
let mesh_tls = local_mesh_buffers.get();
let mut surface_nets_buffers = mesh_tls
.get_or_create_with(|| {
RefCell::new(LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer::new(
padded_chunk_extent,
RIGHT_HANDED_Y_UP_CONFIG.quad_groups(),
),
neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY),
})
})
.borrow_mut();
let LocalSurfaceNetsBuffers {
mesh_buffer,
neighborhood_buffer,
} = &mut *surface_nets_buffers;
// While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk.
neighborhood_buffer.set_minimum(padded_chunk_extent.minimum);
// Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries.
copy_extent(&chunk_extent, chunks, neighborhood_buffer);
let voxel_size = (1 << key.lod) as f32;
greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer);
if mesh_buffer.num_quads() == 0 {
None
} else {
let mut mesh_buf = MeshBuf::default();
mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape();
for group in mesh_buffer.quad_groups.iter() {
for quad in group.quads.iter() {
let mat = neighborhood_buffer.get(quad.minimum);
mesh_buf.add_quad(
&group.face,
quad,
voxel_size,
RIGHT_HANDED_Y_UP_CONFIG.u_flip_face,
mat.0 as u32 - 1,
);
}
}
Some(mesh_buf)
}
}
// ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this
// module as a Local resource, so we know it's safe.
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>;
pub struct LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer,
neighborhood_buffer: Array3x1<Voxel>,
}
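// The ThreadLocalResource + RefCell combination above follows the same pattern as std's
// thread_local! macro: every thread keeps its own scratch buffer that is reused across calls,
// and RefCell supplies the interior mutability needed to get a &mut from a shared handle.
// A minimal sketch of that pattern, with a hypothetical fill_scratch standing in for meshing:
use std::cell::RefCell;

thread_local! {
    static SCRATCH: RefCell<Vec<i16>> = RefCell::new(Vec::new());
}

fn fill_scratch(samples: usize) -> usize {
    SCRATCH.with(|buf| {
        let mut buf = buf.borrow_mut();
        buf.clear();            // reuse the existing allocation instead of reallocating
        buf.resize(samples, 0);
        buf.len()
    })
}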
fn spawn_mesh_entities(
new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>,
commands: &mut Commands,
mesh_assets: &mut Assets<Mesh>,
chunk_meshes: &mut ChunkMeshes,
array_texture_pipelines: &ArrayTexturePipelines,
array_texture_material: &ArrayTextureMaterial,
) {
for (lod_chunk_key, item) in new_chunk_meshes.into_iter() {
let old_mesh = if let Some(mesh_buf) = item {
if mesh_buf.indices.is_empty() {
None
} else | let minimum = Vec3::new(
extent.minimum.0[0] as f32,
extent.minimum.0[1] as f32,
extent.minimum.0[2] as f32,
);
let maximum = Vec3::new(
extent.max().0[0] as f32,
extent.max().0[1] as f32,
extent.max().0[2] as f32,
);
let entity = commands
.spawn_bundle(PbrBundle {
mesh: mesh_handle.clone(),
render_pipelines: array_texture_pipelines.0.clone(),
material: array_texture_material.0.clone(),
..Default::default()
})
.insert_bundle((
FADE_IN,
lod_chunk_key,
Obb::from_aabb_orientation(
Aabb::from_extents(minimum, maximum),
Quat::IDENTITY,
),
FogConfig::default(),
))
.id();
if lod_chunk_key.lod == 0 {
let collider_vertices = positions
.iter()
.cloned()
.map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p))
.collect();
let collider_indices: Vec<[u32; 3]> =
indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect();
commands
.entity(entity)
.insert_bundle(RigidBodyBundle {
body_type: RigidBodyType::Static,
..Default::default()
| {
let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList);
let MeshBuf {
positions,
normals,
tex_coords,
layer,
indices,
extent,
} = mesh_buf;
render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone());
render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals);
render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords);
render_mesh.set_attribute("Vertex_Layer", layer);
render_mesh.set_indices(Some(Indices::U32(indices.clone())));
let mesh_handle = mesh_assets.add(render_mesh);
| conditional_block |
mesh_generator.rs |
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
use crate::{
app_state::AppState,
fog::FogConfig,
mesh_fade::{FadeUniform, FADE_IN, FADE_OUT},
utilities::bevy_util::thread_local_resource::ThreadLocalResource,
voxel_map::{Voxel, VoxelMap},
};
use bevy_mod_bounding::{aabb::Aabb, obb::Obb};
use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType};
use building_blocks::{
mesh::*,
prelude::*,
storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap},
};
use bevy::{
asset::prelude::*,
ecs,
prelude::*,
render::{mesh::Indices, pipeline::PrimitiveTopology},
tasks::ComputeTaskPool,
};
use std::{cell::RefCell, collections::VecDeque};
fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize {
40 * pool.thread_num()
}
#[derive(Default)]
pub struct MeshCommandQueue {
commands: VecDeque<MeshCommand>,
}
impl MeshCommandQueue {
pub fn enqueue(&mut self, command: MeshCommand) {
self.commands.push_front(command);
}
pub fn is_empty(&self) -> bool {
self.commands.is_empty()
}
pub fn len(&self) -> usize {
self.commands.len()
}
pub fn clear(&mut self) {
self.commands.clear();
}
}
// PERF: try to eliminate the use of multiple Vecs
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MeshCommand {
Create(LodChunkKey3),
Update(LodChunkUpdate3),
}
#[derive(Default)]
pub struct ChunkMeshes {
// Map from chunk key to mesh entity.
entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
}
impl ChunkMeshes {
pub fn | (&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) {
self.entities.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
self.remove_queue.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
}
pub fn remove_entity(
&mut self,
lod_chunk_key: &LodChunkKey3,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) {
clear_up_entity(&entity, &mesh, commands, meshes);
}
}
}
fn clear_up_entity(
entity: &Entity,
mesh: &Handle<Mesh>,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
commands.entity(*entity).despawn();
meshes.remove(mesh);
}
// Utility struct for building the mesh
#[derive(Debug, Clone)]
struct MeshBuf {
pub positions: Vec<[f32; 3]>,
pub normals: Vec<[f32; 3]>,
pub tex_coords: Vec<[f32; 2]>,
pub layer: Vec<u32>,
pub indices: Vec<u32>,
pub extent: Extent3i,
}
impl Default for MeshBuf {
fn default() -> Self {
Self {
positions: Vec::new(),
normals: Vec::new(),
tex_coords: Vec::new(),
layer: Vec::new(),
indices: Vec::new(),
extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])),
}
}
}
impl MeshBuf {
fn add_quad(
&mut self,
face: &OrientedCubeFace,
quad: &UnorientedQuad,
voxel_size: f32,
u_flip_face: Axis3,
layer: u32,
) {
let start_index = self.positions.len() as u32;
self.positions
.extend_from_slice(&face.quad_mesh_positions(quad, voxel_size));
self.normals.extend_from_slice(&face.quad_mesh_normals());
let flip_v = true;
self.tex_coords
.extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad));
self.layer.extend_from_slice(&[layer; 4]);
self.indices
.extend_from_slice(&face.quad_mesh_indices(start_index));
}
}
pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>);
pub struct ArrayTexturePipelines(pub RenderPipelines);
/// Generates new meshes for all dirty chunks.
pub fn mesh_generator_system(
mut commands: Commands,
pool: Res<ComputeTaskPool>,
voxel_map: Res<VoxelMap>,
local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>,
mut mesh_commands: ResMut<MeshCommandQueue>,
mut mesh_assets: ResMut<Assets<Mesh>>,
mut chunk_meshes: ResMut<ChunkMeshes>,
array_texture_pipelines: Res<ArrayTexturePipelines>,
array_texture_material: Res<ArrayTextureMaterial>,
mut state: ResMut<State<AppState>>,
) {
let first_run = chunk_meshes.entities.is_empty();
let new_chunk_meshes = apply_mesh_commands(
&*voxel_map,
&*local_mesh_buffers,
&*pool,
&mut *mesh_commands,
&mut *chunk_meshes,
&mut commands,
first_run,
);
spawn_mesh_entities(
new_chunk_meshes,
&mut commands,
&mut *mesh_assets,
&mut *chunk_meshes,
&*array_texture_pipelines,
&*array_texture_material,
);
if first_run {
println!("MESHES GENERATED!\n-> AppState::Running");
state.set(AppState::Running).unwrap();
}
}
fn apply_mesh_commands(
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
pool: &ComputeTaskPool,
mesh_commands: &mut MeshCommandQueue,
chunk_meshes: &mut ChunkMeshes,
commands: &mut Commands,
first_run: bool,
) -> Vec<(LodChunkKey3, Option<MeshBuf>)> {
let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool));
let mut num_creates = 0;
let mut num_updates = 0;
pool.scope(|s| {
let mut num_meshes_created = 0;
for command in mesh_commands.commands.iter().rev().cloned() {
match command {
MeshCommand::Create(lod_key) => {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_creates += 1;
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers),
)
});
}
}
MeshCommand::Update(update) => {
num_updates += 1;
match update {
LodChunkUpdate3::Split(split) => {
if let Some((entity, mesh)) =
chunk_meshes.entities.remove(&split.old_chunk)
{
chunk_meshes
.remove_queue
.insert(split.old_chunk, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
for &lod_key in split.new_chunks.iter() {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(
lod_key,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
LodChunkUpdate3::Merge(merge) => {
for lod_key in merge.old_chunks.iter() {
if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key)
{
chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
}
if !chunk_meshes.entities.contains_key(&merge.new_chunk) {
num_meshes_created += 1;
s.spawn(async move {
(
merge.new_chunk,
create_mesh_for_chunk(
merge.new_chunk,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
}
}
if !first_run && num_meshes_created >= num_chunks_to_mesh {
break;
}
}
let new_length = mesh_commands.len() - (num_creates + num_updates);
mesh_commands.commands.truncate(new_length);
})
}
pub fn mesh_despawn_system(
mut commands: Commands,
mut chunk_meshes: ResMut<ChunkMeshes>,
mut meshes: ResMut<Assets<Mesh>>,
query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>,
) {
for (fade, lod_chunk_key) in query.iter() {
if !fade.fade_in && fade.remaining == 0.0 {
if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) {
commands.entity(entity).despawn();
meshes.remove(&mesh);
}
}
}
}
fn create_mesh_for_chunk(
key: LodChunkKey3,
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
) -> Option<MeshBuf> {
let chunks = voxel_map.pyramid.level(key.lod);
let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key);
let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent);
// Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk.
let mesh_tls = local_mesh_buffers.get();
let mut surface_nets_buffers = mesh_tls
.get_or_create_with(|| {
RefCell::new(LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer::new(
padded_chunk_extent,
RIGHT_HANDED_Y_UP_CONFIG.quad_groups(),
),
neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY),
})
})
.borrow_mut();
let LocalSurfaceNetsBuffers {
mesh_buffer,
neighborhood_buffer,
} = &mut *surface_nets_buffers;
// While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk.
neighborhood_buffer.set_minimum(padded_chunk_extent.minimum);
// Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries.
copy_extent(&chunk_extent, chunks, neighborhood_buffer);
let voxel_size = (1 << key.lod) as f32;
greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer);
if mesh_buffer.num_quads() == 0 {
None
} else {
let mut mesh_buf = MeshBuf::default();
mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape();
for group in mesh_buffer.quad_groups.iter() {
for quad in group.quads.iter() {
let mat = neighborhood_buffer.get(quad.minimum);
mesh_buf.add_quad(
&group.face,
quad,
voxel_size,
RIGHT_HANDED_Y_UP_CONFIG.u_flip_face,
mat.0 as u32 - 1,
);
}
}
Some(mesh_buf)
}
}
// ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this
// module as a Local resource, so we know it's safe.
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>;
pub struct LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer,
neighborhood_buffer: Array3x1<Voxel>,
}
fn spawn_mesh_entities(
new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>,
commands: &mut Commands,
mesh_assets: &mut Assets<Mesh>,
chunk_meshes: &mut ChunkMeshes,
array_texture_pipelines: &ArrayTexturePipelines,
array_texture_material: &ArrayTextureMaterial,
) {
for (lod_chunk_key, item) in new_chunk_meshes.into_iter() {
let old_mesh = if let Some(mesh_buf) = item {
if mesh_buf.indices.is_empty() {
None
} else {
let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList);
let MeshBuf {
positions,
normals,
tex_coords,
layer,
indices,
extent,
} = mesh_buf;
render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone());
render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals);
render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords);
render_mesh.set_attribute("Vertex_Layer", layer);
render_mesh.set_indices(Some(Indices::U32(indices.clone())));
let mesh_handle = mesh_assets.add(render_mesh);
let minimum = Vec3::new(
extent.minimum.0[0] as f32,
extent.minimum.0[1] as f32,
extent.minimum.0[2] as f32,
);
let maximum = Vec3::new(
extent.max().0[0] as f32,
extent.max().0[1] as f32,
extent.max().0[2] as f32,
);
let entity = commands
.spawn_bundle(PbrBundle {
mesh: mesh_handle.clone(),
render_pipelines: array_texture_pipelines.0.clone(),
material: array_texture_material.0.clone(),
..Default::default()
})
.insert_bundle((
FADE_IN,
lod_chunk_key,
Obb::from_aabb_orientation(
Aabb::from_extents(minimum, maximum),
Quat::IDENTITY,
),
FogConfig::default(),
))
.id();
if lod_chunk_key.lod == 0 {
let collider_vertices = positions
.iter()
.cloned()
.map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p))
.collect();
let collider_indices: Vec<[u32; 3]> =
indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect();
commands
.entity(entity)
.insert_bundle(RigidBodyBundle {
body_type: RigidBodyType::Static,
..Default::default()
| clear_entities | identifier_name |
mesh_generator.rs |
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
use crate::{
app_state::AppState,
fog::FogConfig,
mesh_fade::{FadeUniform, FADE_IN, FADE_OUT},
utilities::bevy_util::thread_local_resource::ThreadLocalResource,
voxel_map::{Voxel, VoxelMap},
};
use bevy_mod_bounding::{aabb::Aabb, obb::Obb};
use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType};
use building_blocks::{
mesh::*,
prelude::*,
storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap},
};
use bevy::{
asset::prelude::*,
ecs,
prelude::*,
render::{mesh::Indices, pipeline::PrimitiveTopology},
tasks::ComputeTaskPool,
};
use std::{cell::RefCell, collections::VecDeque};
fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize {
40 * pool.thread_num()
}
#[derive(Default)]
pub struct MeshCommandQueue {
commands: VecDeque<MeshCommand>,
}
impl MeshCommandQueue {
pub fn enqueue(&mut self, command: MeshCommand) |
pub fn is_empty(&self) -> bool {
self.commands.is_empty()
}
pub fn len(&self) -> usize {
self.commands.len()
}
pub fn clear(&mut self) {
self.commands.clear();
}
}
// PERF: try to eliminate the use of multiple Vecs
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MeshCommand {
Create(LodChunkKey3),
Update(LodChunkUpdate3),
}
#[derive(Default)]
pub struct ChunkMeshes {
// Map from chunk key to mesh entity.
entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
}
impl ChunkMeshes {
pub fn clear_entities(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) {
self.entities.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
self.remove_queue.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
}
pub fn remove_entity(
&mut self,
lod_chunk_key: &LodChunkKey3,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) {
clear_up_entity(&entity, &mesh, commands, meshes);
}
}
}
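// clear_entities above relies on retain visiting every entry: the closure performs the cleanup
// and returns false, leaving the map empty afterwards. The same drain-with-cleanup idiom on a
// plain std HashMap, with a counter standing in for the despawn and asset-removal work
// (HashMap::drain would be an equally valid choice here):
use std::collections::HashMap;

fn demo_drain_with_cleanup() {
    let mut entities: HashMap<u32, &str> = HashMap::from([(1, "chunk-a"), (2, "chunk-b")]);
    let mut cleaned_up = 0;
    entities.retain(|_key, _value| {
        cleaned_up += 1; // stand-in for despawning the entity and freeing its mesh
        false            // drop every entry once it has been cleaned up
    });
    assert!(entities.is_empty());
    assert_eq!(cleaned_up, 2);
}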
fn clear_up_entity(
entity: &Entity,
mesh: &Handle<Mesh>,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
commands.entity(*entity).despawn();
meshes.remove(mesh);
}
// Utility struct for building the mesh
#[derive(Debug, Clone)]
struct MeshBuf {
pub positions: Vec<[f32; 3]>,
pub normals: Vec<[f32; 3]>,
pub tex_coords: Vec<[f32; 2]>,
pub layer: Vec<u32>,
pub indices: Vec<u32>,
pub extent: Extent3i,
}
impl Default for MeshBuf {
fn default() -> Self {
Self {
positions: Vec::new(),
normals: Vec::new(),
tex_coords: Vec::new(),
layer: Vec::new(),
indices: Vec::new(),
extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])),
}
}
}
impl MeshBuf {
fn add_quad(
&mut self,
face: &OrientedCubeFace,
quad: &UnorientedQuad,
voxel_size: f32,
u_flip_face: Axis3,
layer: u32,
) {
let start_index = self.positions.len() as u32;
self.positions
.extend_from_slice(&face.quad_mesh_positions(quad, voxel_size));
self.normals.extend_from_slice(&face.quad_mesh_normals());
let flip_v = true;
self.tex_coords
.extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad));
self.layer.extend_from_slice(&[layer; 4]);
self.indices
.extend_from_slice(&face.quad_mesh_indices(start_index));
}
}
pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>);
pub struct ArrayTexturePipelines(pub RenderPipelines);
/// Generates new meshes for all dirty chunks.
pub fn mesh_generator_system(
mut commands: Commands,
pool: Res<ComputeTaskPool>,
voxel_map: Res<VoxelMap>,
local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>,
mut mesh_commands: ResMut<MeshCommandQueue>,
mut mesh_assets: ResMut<Assets<Mesh>>,
mut chunk_meshes: ResMut<ChunkMeshes>,
array_texture_pipelines: Res<ArrayTexturePipelines>,
array_texture_material: Res<ArrayTextureMaterial>,
mut state: ResMut<State<AppState>>,
) {
let first_run = chunk_meshes.entities.is_empty();
let new_chunk_meshes = apply_mesh_commands(
&*voxel_map,
&*local_mesh_buffers,
&*pool,
&mut *mesh_commands,
&mut *chunk_meshes,
&mut commands,
first_run,
);
spawn_mesh_entities(
new_chunk_meshes,
&mut commands,
&mut *mesh_assets,
&mut *chunk_meshes,
&*array_texture_pipelines,
&*array_texture_material,
);
if first_run {
println!("MESHES GENERATED!\n-> AppState::Running");
state.set(AppState::Running).unwrap();
}
}
fn apply_mesh_commands(
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
pool: &ComputeTaskPool,
mesh_commands: &mut MeshCommandQueue,
chunk_meshes: &mut ChunkMeshes,
commands: &mut Commands,
first_run: bool,
) -> Vec<(LodChunkKey3, Option<MeshBuf>)> {
let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool));
let mut num_creates = 0;
let mut num_updates = 0;
pool.scope(|s| {
let mut num_meshes_created = 0;
for command in mesh_commands.commands.iter().rev().cloned() {
match command {
MeshCommand::Create(lod_key) => {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_creates += 1;
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers),
)
});
}
}
MeshCommand::Update(update) => {
num_updates += 1;
match update {
LodChunkUpdate3::Split(split) => {
if let Some((entity, mesh)) =
chunk_meshes.entities.remove(&split.old_chunk)
{
chunk_meshes
.remove_queue
.insert(split.old_chunk, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
for &lod_key in split.new_chunks.iter() {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(
lod_key,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
LodChunkUpdate3::Merge(merge) => {
for lod_key in merge.old_chunks.iter() {
if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key)
{
chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
}
if !chunk_meshes.entities.contains_key(&merge.new_chunk) {
num_meshes_created += 1;
s.spawn(async move {
(
merge.new_chunk,
create_mesh_for_chunk(
merge.new_chunk,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
}
}
if !first_run && num_meshes_created >= num_chunks_to_mesh {
break;
}
}
let new_length = mesh_commands.len() - (num_creates + num_updates);
mesh_commands.commands.truncate(new_length);
})
}
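// apply_mesh_commands above fans one meshing task per chunk out onto the compute task pool and
// collects the (key, mesh) pairs when the scope ends. A rough stand-alone analogy of that
// fan-out/collect shape using std::thread::scope, with a hypothetical mesh_chunk in place of
// create_mesh_for_chunk and plain u32 keys in place of LodChunkKey3:
use std::thread;

fn mesh_chunk(key: u32) -> Option<Vec<[f32; 3]>> {
    // Pretend that even-numbered chunks are empty and produce no mesh.
    if key % 2 == 0 { None } else { Some(vec![[0.0, 0.0, 0.0]; 4]) }
}

fn demo_parallel_meshing(keys: &[u32]) -> Vec<(u32, Option<Vec<[f32; 3]>>)> {
    thread::scope(|s| {
        let handles: Vec<_> = keys
            .iter()
            .map(|&key| s.spawn(move || (key, mesh_chunk(key))))
            .collect();
        handles.into_iter().map(|handle| handle.join().unwrap()).collect()
    })
}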
pub fn mesh_despawn_system(
mut commands: Commands,
mut chunk_meshes: ResMut<ChunkMeshes>,
mut meshes: ResMut<Assets<Mesh>>,
query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>,
) {
for (fade, lod_chunk_key) in query.iter() {
if !fade.fade_in && fade.remaining == 0.0 {
if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) {
commands.entity(entity).despawn();
meshes.remove(&mesh);
}
}
}
}
fn create_mesh_for_chunk(
key: LodChunkKey3,
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
) -> Option<MeshBuf> {
let chunks = voxel_map.pyramid.level(key.lod);
let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key);
let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent);
// Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk.
let mesh_tls = local_mesh_buffers.get();
let mut surface_nets_buffers = mesh_tls
.get_or_create_with(|| {
RefCell::new(LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer::new(
padded_chunk_extent,
RIGHT_HANDED_Y_UP_CONFIG.quad_groups(),
),
neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY),
})
})
.borrow_mut();
let LocalSurfaceNetsBuffers {
mesh_buffer,
neighborhood_buffer,
} = &mut *surface_nets_buffers;
// While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk.
neighborhood_buffer.set_minimum(padded_chunk_extent.minimum);
// Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries.
copy_extent(&chunk_extent, chunks, neighborhood_buffer);
let voxel_size = (1 << key.lod) as f32;
greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer);
if mesh_buffer.num_quads() == 0 {
None
} else {
let mut mesh_buf = MeshBuf::default();
mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape();
for group in mesh_buffer.quad_groups.iter() {
for quad in group.quads.iter() {
let mat = neighborhood_buffer.get(quad.minimum);
mesh_buf.add_quad(
&group.face,
quad,
voxel_size,
RIGHT_HANDED_Y_UP_CONFIG.u_flip_face,
mat.0 as u32 - 1,
);
}
}
Some(mesh_buf)
}
}
// ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this
// module as a Local resource, so we know it's safe.
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>;
pub struct LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer,
neighborhood_buffer: Array3x1<Voxel>,
}
fn spawn_mesh_entities(
new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>,
commands: &mut Commands,
mesh_assets: &mut Assets<Mesh>,
chunk_meshes: &mut ChunkMeshes,
array_texture_pipelines: &ArrayTexturePipelines,
array_texture_material: &ArrayTextureMaterial,
) {
for (lod_chunk_key, item) in new_chunk_meshes.into_iter() {
let old_mesh = if let Some(mesh_buf) = item {
if mesh_buf.indices.is_empty() {
None
} else {
let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList);
let MeshBuf {
positions,
normals,
tex_coords,
layer,
indices,
extent,
} = mesh_buf;
render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone());
render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals);
render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords);
render_mesh.set_attribute("Vertex_Layer", layer);
render_mesh.set_indices(Some(Indices::U32(indices.clone())));
let mesh_handle = mesh_assets.add(render_mesh);
let minimum = Vec3::new(
extent.minimum.0[0] as f32,
extent.minimum.0[1] as f32,
extent.minimum.0[2] as f32,
);
let maximum = Vec3::new(
extent.max().0[0] as f32,
extent.max().0[1] as f32,
extent.max().0[2] as f32,
);
let entity = commands
.spawn_bundle(PbrBundle {
mesh: mesh_handle.clone(),
render_pipelines: array_texture_pipelines.0.clone(),
material: array_texture_material.0.clone(),
..Default::default()
})
.insert_bundle((
FADE_IN,
lod_chunk_key,
Obb::from_aabb_orientation(
Aabb::from_extents(minimum, maximum),
Quat::IDENTITY,
),
FogConfig::default(),
))
.id();
if lod_chunk_key.lod == 0 {
let collider_vertices = positions
.iter()
.cloned()
.map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p))
.collect();
let collider_indices: Vec<[u32; 3]> =
indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect();
commands
.entity(entity)
.insert_bundle(RigidBodyBundle {
body_type: RigidBodyType::Static,
..Default::default()
| {
self.commands.push_front(command);
} | identifier_body |
mesh_generator.rs | all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
use crate::{
app_state::AppState,
fog::FogConfig,
mesh_fade::{FadeUniform, FADE_IN, FADE_OUT},
utilities::bevy_util::thread_local_resource::ThreadLocalResource,
voxel_map::{Voxel, VoxelMap},
};
use bevy_mod_bounding::{aabb::Aabb, obb::Obb};
use bevy_rapier3d::prelude::{ColliderBundle, ColliderShape, RigidBodyBundle, RigidBodyType};
use building_blocks::{
mesh::*,
prelude::*,
storage::{LodChunkKey3, LodChunkUpdate3, SmallKeyHashMap},
};
use bevy::{
asset::prelude::*,
ecs,
prelude::*,
render::{mesh::Indices, pipeline::PrimitiveTopology},
tasks::ComputeTaskPool,
};
use std::{cell::RefCell, collections::VecDeque};
fn max_mesh_creations_per_frame(pool: &ComputeTaskPool) -> usize {
40 * pool.thread_num()
}
#[derive(Default)]
pub struct MeshCommandQueue {
commands: VecDeque<MeshCommand>,
}
impl MeshCommandQueue {
pub fn enqueue(&mut self, command: MeshCommand) {
self.commands.push_front(command);
}
pub fn is_empty(&self) -> bool {
self.commands.is_empty()
}
pub fn len(&self) -> usize {
self.commands.len()
}
pub fn clear(&mut self) {
self.commands.clear();
}
}
// PERF: try to eliminate the use of multiple Vecs
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MeshCommand {
Create(LodChunkKey3),
Update(LodChunkUpdate3),
}
#[derive(Default)]
pub struct ChunkMeshes {
// Map from chunk key to mesh entity.
entities: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
remove_queue: SmallKeyHashMap<LodChunkKey3, (Entity, Handle<Mesh>)>,
}
impl ChunkMeshes {
pub fn clear_entities(&mut self, commands: &mut Commands, meshes: &mut Assets<Mesh>) {
self.entities.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
self.remove_queue.retain(|_, (entity, mesh)| {
clear_up_entity(entity, mesh, commands, meshes);
false
});
}
pub fn remove_entity(
&mut self,
lod_chunk_key: &LodChunkKey3,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
if let Some((entity, mesh)) = self.entities.remove(lod_chunk_key) {
clear_up_entity(&entity, &mesh, commands, meshes);
}
}
}
fn clear_up_entity(
entity: &Entity,
mesh: &Handle<Mesh>,
commands: &mut Commands,
meshes: &mut Assets<Mesh>,
) {
commands.entity(*entity).despawn();
meshes.remove(mesh);
}
// Utility struct for building the mesh
#[derive(Debug, Clone)]
struct MeshBuf {
pub positions: Vec<[f32; 3]>,
pub normals: Vec<[f32; 3]>,
pub tex_coords: Vec<[f32; 2]>,
pub layer: Vec<u32>,
pub indices: Vec<u32>,
pub extent: Extent3i,
}
impl Default for MeshBuf {
fn default() -> Self {
Self {
positions: Vec::new(),
normals: Vec::new(),
tex_coords: Vec::new(),
layer: Vec::new(),
indices: Vec::new(),
extent: Extent3i::from_min_and_shape(PointN([0, 0, 0]), PointN([0, 0, 0])),
}
}
}
impl MeshBuf {
fn add_quad(
&mut self,
face: &OrientedCubeFace,
quad: &UnorientedQuad,
voxel_size: f32,
u_flip_face: Axis3,
layer: u32,
) {
let start_index = self.positions.len() as u32;
self.positions
.extend_from_slice(&face.quad_mesh_positions(quad, voxel_size));
self.normals.extend_from_slice(&face.quad_mesh_normals());
let flip_v = true;
self.tex_coords
.extend_from_slice(&face.tex_coords(u_flip_face, flip_v, quad));
self.layer.extend_from_slice(&[layer; 4]);
self.indices
.extend_from_slice(&face.quad_mesh_indices(start_index));
}
}
pub struct ArrayTextureMaterial(pub Handle<StandardMaterial>);
pub struct ArrayTexturePipelines(pub RenderPipelines);
/// Generates new meshes for all dirty chunks.
pub fn mesh_generator_system(
mut commands: Commands,
pool: Res<ComputeTaskPool>,
voxel_map: Res<VoxelMap>,
local_mesh_buffers: ecs::system::Local<ThreadLocalMeshBuffers>,
mut mesh_commands: ResMut<MeshCommandQueue>,
mut mesh_assets: ResMut<Assets<Mesh>>,
mut chunk_meshes: ResMut<ChunkMeshes>,
array_texture_pipelines: Res<ArrayTexturePipelines>,
array_texture_material: Res<ArrayTextureMaterial>,
mut state: ResMut<State<AppState>>,
) {
let first_run = chunk_meshes.entities.is_empty();
let new_chunk_meshes = apply_mesh_commands(
&*voxel_map,
&*local_mesh_buffers,
&*pool,
&mut *mesh_commands,
&mut *chunk_meshes,
&mut commands,
first_run,
);
spawn_mesh_entities(
new_chunk_meshes,
&mut commands,
&mut *mesh_assets,
&mut *chunk_meshes,
&*array_texture_pipelines,
&*array_texture_material,
);
if first_run {
println!("MESHES GENERATED!\n-> AppState::Running");
state.set(AppState::Running).unwrap();
}
}
fn apply_mesh_commands(
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
pool: &ComputeTaskPool,
mesh_commands: &mut MeshCommandQueue,
chunk_meshes: &mut ChunkMeshes,
commands: &mut Commands,
first_run: bool,
) -> Vec<(LodChunkKey3, Option<MeshBuf>)> {
let num_chunks_to_mesh = mesh_commands.len().min(max_mesh_creations_per_frame(pool));
let mut num_creates = 0;
let mut num_updates = 0;
pool.scope(|s| {
let mut num_meshes_created = 0;
for command in mesh_commands.commands.iter().rev().cloned() {
match command {
MeshCommand::Create(lod_key) => {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_creates += 1;
num_meshes_created += 1;
s.spawn(async move {
(
lod_key,
create_mesh_for_chunk(lod_key, voxel_map, local_mesh_buffers),
)
});
}
}
MeshCommand::Update(update) => {
num_updates += 1;
match update {
LodChunkUpdate3::Split(split) => {
if let Some((entity, mesh)) =
chunk_meshes.entities.remove(&split.old_chunk)
{
chunk_meshes
.remove_queue
.insert(split.old_chunk, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
for &lod_key in split.new_chunks.iter() {
if !chunk_meshes.entities.contains_key(&lod_key) {
num_meshes_created += 1;
s.spawn(async move {
(
lod_key, | )
});
}
}
}
LodChunkUpdate3::Merge(merge) => {
for lod_key in merge.old_chunks.iter() {
if let Some((entity, mesh)) = chunk_meshes.entities.remove(lod_key)
{
chunk_meshes.remove_queue.insert(*lod_key, (entity, mesh));
commands.entity(entity).insert(FADE_OUT);
}
}
if !chunk_meshes.entities.contains_key(&merge.new_chunk) {
num_meshes_created += 1;
s.spawn(async move {
(
merge.new_chunk,
create_mesh_for_chunk(
merge.new_chunk,
voxel_map,
local_mesh_buffers,
),
)
});
}
}
}
}
}
if !first_run && num_meshes_created >= num_chunks_to_mesh {
break;
}
}
let new_length = mesh_commands.len() - (num_creates + num_updates);
mesh_commands.commands.truncate(new_length);
})
}
pub fn mesh_despawn_system(
mut commands: Commands,
mut chunk_meshes: ResMut<ChunkMeshes>,
mut meshes: ResMut<Assets<Mesh>>,
query: Query<(&FadeUniform, &LodChunkKey3), With<Handle<Mesh>>>,
) {
for (fade, lod_chunk_key) in query.iter() {
if !fade.fade_in && fade.remaining == 0.0 {
if let Some((entity, mesh)) = chunk_meshes.remove_queue.remove(lod_chunk_key) {
commands.entity(entity).despawn();
meshes.remove(&mesh);
}
}
}
}
fn create_mesh_for_chunk(
key: LodChunkKey3,
voxel_map: &VoxelMap,
local_mesh_buffers: &ThreadLocalMeshBuffers,
) -> Option<MeshBuf> {
let chunks = voxel_map.pyramid.level(key.lod);
let chunk_extent = chunks.indexer.extent_for_chunk_at_key(key.chunk_key);
let padded_chunk_extent = padded_greedy_quads_chunk_extent(&chunk_extent);
// Keep a thread-local cache of buffers to avoid expensive reallocations every time we want to mesh a chunk.
let mesh_tls = local_mesh_buffers.get();
let mut surface_nets_buffers = mesh_tls
.get_or_create_with(|| {
RefCell::new(LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer::new(
padded_chunk_extent,
RIGHT_HANDED_Y_UP_CONFIG.quad_groups(),
),
neighborhood_buffer: Array3x1::fill(padded_chunk_extent, Voxel::EMPTY),
})
})
.borrow_mut();
let LocalSurfaceNetsBuffers {
mesh_buffer,
neighborhood_buffer,
} = &mut *surface_nets_buffers;
// While the chunk shape doesn't change, we need to make sure that it's in the right position for each particular chunk.
neighborhood_buffer.set_minimum(padded_chunk_extent.minimum);
// Only copy the chunk_extent, leaving the padding empty so that we don't get holes on LOD boundaries.
copy_extent(&chunk_extent, chunks, neighborhood_buffer);
let voxel_size = (1 << key.lod) as f32;
greedy_quads(neighborhood_buffer, &padded_chunk_extent, &mut *mesh_buffer);
if mesh_buffer.num_quads() == 0 {
None
} else {
let mut mesh_buf = MeshBuf::default();
mesh_buf.extent = chunk_extent * voxel_map.pyramid.chunk_shape();
for group in mesh_buffer.quad_groups.iter() {
for quad in group.quads.iter() {
let mat = neighborhood_buffer.get(quad.minimum);
mesh_buf.add_quad(
&group.face,
quad,
voxel_size,
RIGHT_HANDED_Y_UP_CONFIG.u_flip_face,
mat.0 as u32 - 1,
);
}
}
Some(mesh_buf)
}
}
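// create_mesh_for_chunk scales each quad by (1 << lod): every LOD level doubles the voxel
// size, so LOD 0 voxels are 1.0 units across and LOD 3 voxels are 8.0. A quick check of
// that scaling in isolation:
fn voxel_size_for_lod(lod: u8) -> f32 {
    (1u32 << lod) as f32
}

fn demo_voxel_size() {
    assert_eq!(voxel_size_for_lod(0), 1.0);
    assert_eq!(voxel_size_for_lod(3), 8.0);
}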
// ThreadLocal doesn't let you get a mutable reference, so we need to use RefCell. We lock this down to only be used in this
// module as a Local resource, so we know it's safe.
type ThreadLocalMeshBuffers = ThreadLocalResource<RefCell<LocalSurfaceNetsBuffers>>;
pub struct LocalSurfaceNetsBuffers {
mesh_buffer: GreedyQuadsBuffer,
neighborhood_buffer: Array3x1<Voxel>,
}
fn spawn_mesh_entities(
new_chunk_meshes: Vec<(LodChunkKey3, Option<MeshBuf>)>,
commands: &mut Commands,
mesh_assets: &mut Assets<Mesh>,
chunk_meshes: &mut ChunkMeshes,
array_texture_pipelines: &ArrayTexturePipelines,
array_texture_material: &ArrayTextureMaterial,
) {
for (lod_chunk_key, item) in new_chunk_meshes.into_iter() {
let old_mesh = if let Some(mesh_buf) = item {
if mesh_buf.indices.is_empty() {
None
} else {
let mut render_mesh = Mesh::new(PrimitiveTopology::TriangleList);
let MeshBuf {
positions,
normals,
tex_coords,
layer,
indices,
extent,
} = mesh_buf;
render_mesh.set_attribute(Mesh::ATTRIBUTE_POSITION, positions.clone());
render_mesh.set_attribute(Mesh::ATTRIBUTE_NORMAL, normals);
render_mesh.set_attribute(Mesh::ATTRIBUTE_UV_0, tex_coords);
render_mesh.set_attribute("Vertex_Layer", layer);
render_mesh.set_indices(Some(Indices::U32(indices.clone())));
let mesh_handle = mesh_assets.add(render_mesh);
let minimum = Vec3::new(
extent.minimum.0[0] as f32,
extent.minimum.0[1] as f32,
extent.minimum.0[2] as f32,
);
let maximum = Vec3::new(
extent.max().0[0] as f32,
extent.max().0[1] as f32,
extent.max().0[2] as f32,
);
let entity = commands
.spawn_bundle(PbrBundle {
mesh: mesh_handle.clone(),
render_pipelines: array_texture_pipelines.0.clone(),
material: array_texture_material.0.clone(),
..Default::default()
})
.insert_bundle((
FADE_IN,
lod_chunk_key,
Obb::from_aabb_orientation(
Aabb::from_extents(minimum, maximum),
Quat::IDENTITY,
),
FogConfig::default(),
))
.id();
if lod_chunk_key.lod == 0 {
let collider_vertices = positions
.iter()
.cloned()
.map(|p| bevy_rapier3d::rapier::math::Point::from_slice(&p))
.collect();
let collider_indices: Vec<[u32; 3]> =
indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect();
commands
.entity(entity)
.insert_bundle(RigidBodyBundle {
body_type: RigidBodyType::Static,
..Default::default()
| create_mesh_for_chunk(
lod_key,
voxel_map,
local_mesh_buffers,
), | random_line_split |
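// The collider setup above regroups the flat index buffer into triangles with
// indices.chunks(3). The same reshaping step on its own, fed with the index layout of a
// single two-triangle quad as illustrative data:
fn triangles_from_indices(indices: &[u32]) -> Vec<[u32; 3]> {
    indices.chunks(3).map(|i| [i[0], i[1], i[2]]).collect()
}

fn demo_triangles() {
    let quad_indices = vec![0, 1, 2, 2, 1, 3]; // two triangles sharing one edge
    assert_eq!(triangles_from_indices(&quad_indices), vec![[0, 1, 2], [2, 1, 3]]);
}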
mod.rs | use crate::audio::*;
use crate::format;
use id3;
use lazy_static::lazy_static;
use liblame_sys::*;
use log::*;
use regex::bytes;
use sample;
use std::*;
mod index;
use self::index::FrameIndex;
/// This is the absolute maximum number of samples that can be contained in a single frame.
const MAX_FRAME_SIZE: usize = 1152;
const MAX_FRAME_BYTES: usize = 1348;
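// For scale: 1152 samples is the MPEG-1 Layer III frame length, so at a 44.1 kHz sample rate
// a full frame covers roughly 26 ms of audio. A quick check of that arithmetic:
fn frame_duration_ms(samples_per_frame: usize, sample_rate: u32) -> f64 {
    samples_per_frame as f64 * 1000.0 / sample_rate as f64
}

fn demo_frame_duration() {
    let ms = frame_duration_ms(MAX_FRAME_SIZE, 44_100);
    assert!((ms - 26.1).abs() < 0.1); // ≈ 26.1 ms per frame
}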
pub fn magic() -> &'static bytes::Regex {
lazy_static! {
static ref MAGIC: bytes::Regex =
bytes::Regex::new(r"(?s-u)^(?:ID3)|(:?\xff[\xe0-\xff])").unwrap();
}
&MAGIC
}
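// magic() is used to sniff whether a byte stream looks like MP3 data: it accepts either an
// ID3v2 tag header or an MPEG frame-sync byte pair (0xFF followed by a byte in 0xE0..=0xFF).
// A small usage sketch with hand-written byte prefixes (the sample bytes are illustrative):
fn demo_magic() {
    assert!(magic().is_match(b"ID3\x04\x00"));      // ID3v2-tagged file
    assert!(magic().is_match(&[0xFF, 0xFB, 0x90])); // bare MPEG frame header
    assert!(!magic().is_match(b"RIFF"));            // not MP3 (e.g. a WAV header)
}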
struct DecoderInit {
hip: hip_t,
mp3_data: mp3data_struct,
buffers: [[i16; MAX_FRAME_SIZE]; 2],
decode_count: usize,
stream_offset: u64,
tag: Option<id3::Tag>,
}
unsafe fn init_decoder<R>(mut input: &mut R) -> Result<DecoderInit, Error>
where
R: io::Read + io::Seek,
{
let tag = {
let mut buf = [0; 3];
input.read_exact(&mut buf)?;
input.seek(io::SeekFrom::Start(0))?;
if &buf == b"ID3" {
Some(id3::Tag::read_from(&mut input)?)
} else {
None
}
};
// On very rare occasions, LAME is unable to find the start of the stream.
index::find_stream(input)?;
let stream_offset = input.seek(io::SeekFrom::Current(0))?;
let hip: hip_t = hip_decode_init();
if hip.is_null() {
return Err(Error::ConstructionFailed);
}
hip_set_debugf(hip, Some(debug_cb));
hip_set_msgf(hip, Some(msg_cb));
hip_set_errorf(hip, Some(error_cb));
let mut mp3_data = mem::zeroed();
let mut enc_delay = 0;
let mut enc_padding = 0;
let mut buf_left = [0; MAX_FRAME_SIZE];
let mut buf_right = [0; MAX_FRAME_SIZE];
let mut rs = 0;
while rs == 0 {
let mut read_buf = [0; MAX_FRAME_BYTES];
let num_read = input.read(&mut read_buf)?;
rs = hip_decode1_headersB(
hip,
read_buf.as_mut_ptr(),
num_read,
buf_left.as_mut_ptr(),
buf_right.as_mut_ptr(),
&mut mp3_data,
&mut enc_delay,
&mut enc_padding,
);
}
if rs == -1 {
hip_decode_exit(hip);
return Err(Error::Lame(rs));
}
let decode_count = rs;
if mp3_data.header_parsed != 1 {
return Err(Error::NoHeader);
}
Ok(DecoderInit {
hip,
mp3_data,
buffers: [buf_left, buf_right],
decode_count: decode_count as usize,
stream_offset,
tag,
})
}
pub fn decode_metadata<R>(mut input: R) -> Result<format::Metadata, Error>
where
R: io::Read + io::Seek,
{
unsafe {
let init = init_decoder(&mut input)?;
hip_decode_exit(init.hip);
let num_samples = if init.mp3_data.nsamp != 0 {
init.mp3_data.nsamp
} else {
input.seek(io::SeekFrom::Start(init.stream_offset))?;
let frame_index = FrameIndex::read(&mut input)?;
frame_index.num_samples()
};
Ok(format::Metadata {
sample_rate: init.mp3_data.samplerate as u32,
num_samples: Some(num_samples),
tag: init.tag,
})
}
}
pub fn decode<R>(mut input: R) -> Result<(dynam::Audio, format::Metadata), Error>
where
R: io::Read + io::Seek +'static,
{
unsafe {
let init = init_decoder(&mut input)?;
let sample_rate = init.mp3_data.samplerate as u32;
let num_channels = init.mp3_data.stereo as u32;
input.seek(io::SeekFrom::Start(init.stream_offset))?;
let frame_index = FrameIndex::read(&mut input)?;
input.seek(io::SeekFrom::Start(frame_index.frames[0].offset))?;
let meta = format::Metadata {
sample_rate,
num_samples: Some(frame_index.num_samples()),
tag: init.tag,
};
macro_rules! dyn_type {
($dyn:path) => {
$dyn(Box::from(Decoder {
input,
input_buf: [0; MAX_FRAME_BYTES],
hip: init.hip,
frame_index,
sample_rate,
buffers: init.buffers,
next_frame: 0,
next_sample: 0,
samples_available: init.decode_count,
_f: marker::PhantomData,
}))
.into()
};
}
Ok((
match num_channels {
1 => dyn_type!(dynam::Seek::MonoI16),
2 => dyn_type!(dynam::Seek::StereoI16),
_ => unreachable!(), // LAME's interface does not allow this.
},
meta,
))
}
}
struct Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
input: R,
input_buf: [u8; MAX_FRAME_BYTES],
hip: hip_t,
frame_index: FrameIndex,
sample_rate: u32,
buffers: [[i16; MAX_FRAME_SIZE]; 2],
next_frame: usize,
next_sample: usize,
samples_available: usize,
_f: marker::PhantomData<F>,
}
unsafe impl<F, R> Send for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
}
impl<F, R> iter::Iterator for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
type Item = F;
fn next(&mut self) -> Option<Self::Item> {
let mut num_read = 0;
while self.next_sample >= self.samples_available {
unsafe {
let rs = hip_decode1(
self.hip,
self.input_buf.as_mut_ptr(),
num_read,
self.buffers[0].as_mut_ptr(),
self.buffers[1].as_mut_ptr(),
);
match rs {
0 => {
if self.next_frame >= self.frame_index.frames.len() {
return None;
}
let frame = &self.frame_index.frames[self.next_frame];
num_read = match self
.input
.read(&mut self.input_buf[..frame.length as usize])
{
Ok(nr) if nr == 0 => return None,
Ok(nr) => nr,
Err(err) => {
error!("{}", err);
return None;
}
};
}
code if code < 0 => {
error!("Error decoding next frame: {}", Error::Lame(code));
return None;
}
decode_count => {
self.next_frame += 1;
self.next_sample = 0;
self.samples_available = decode_count as usize;
}
};
}
}
let frame = F::from_fn(|ch| self.buffers[ch][self.next_sample]);
self.next_sample += 1;
Some(frame)
}
}
impl<F, R> Source for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
fn sample_rate(&self) -> u32 {
self.sample_rate
}
}
impl<F, R> Seekable for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
fn seek(&mut self, position: u64) -> Result<(), SeekError> {
let i = self
.frame_index
.frame_for_sample(position)
.ok_or(SeekError::OutofRange {
pos: position,
size: self.length(),
})?;
self.next_frame = i;
self.next_sample = position as usize - self.frame_index.frames[i].sample_offset as usize;
self.samples_available = 0;
assert!(self.next_frame < self.frame_index.frames.len());
assert!(self.next_sample < MAX_FRAME_SIZE);
let frame = &self.frame_index.frames[self.next_frame];
self.input
.seek(io::SeekFrom::Start(frame.offset))
.map_err(Box::from)?;
Ok(())
}
fn length(&self) -> u64 {
self.frame_index.num_samples()
}
fn current_position(&self) -> u64 {
if self.next_frame == 0 {
return 0;
}
self.frame_index.frames[self.next_frame - 1].sample_offset + self.next_sample as u64
}
}
impl<F, R> Seek for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
}
impl<F, R> Drop for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek +'static,
{
fn drop(&mut self) {
unsafe {
hip_decode_exit(self.hip);
}
}
}
unsafe extern "C" fn debug_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
debug!("{}", VaFormatter(format, ap));
}
unsafe extern "C" fn msg_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
info!("{}", VaFormatter(format, ap));
}
unsafe extern "C" fn error_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
error!("{}", VaFormatter(format, ap));
}
struct VaFormatter(*const os::raw::c_char, *mut __va_list_tag);
impl fmt::Display for VaFormatter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
let cstr = ffi::CStr::from_ptr(self.0);
// A buffer twice the length of the format string should be enough in most cases.
let mut buf = vec![0u8; cstr.to_bytes().len() * 2];
vsnprintf(buf.as_mut_ptr() as *mut i8, buf.len(), self.0, self.1);
write!(
f,
"{}",
String::from_utf8_lossy(&*buf).trim_matches(&['\0', '\n'][..])
)
}
}
}
#[derive(Debug)]
pub enum Error {
IO(io::Error),
ID3(id3::Error),
Index(index::Error),
Lame(i32),
ConstructionFailed,
NoHeader,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::IO(ref err) => write!(f, "IO: {}", err),
Error::ID3(ref err) => write!(f, "ID3: {}", err),
Error::Index(ref err) => write!(f, "Index: {}", err),
Error::Lame(code) => {
let msg = match code {
0 => "okay",
-1 => "generic error",
-10 => "no memory",
-11 => "bad bitrate",
-12 => "bad sample frequency",
-13 => "internal error",
-80 => "read error",
-81 => "write error",
-82 => "file too large",
_ => "unknown",
};
write!(f, "Lame error: {}", msg)
}
Error::ConstructionFailed => write!(f, "Failed to construct decoder"),
Error::NoHeader => write!(f, "Missing header"),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"MP3 error"
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::IO(ref err) => Some(err),
Error::ID3(ref err) => Some(err),
Error::Index(ref err) => Some(err),
_ => None,
}
}
}
impl From<io::Error> for Error {
fn | (err: io::Error) -> Error {
Error::IO(err)
}
}
impl From<id3::Error> for Error {
fn from(err: id3::Error) -> Error {
Error::ID3(err)
}
}
impl From<index::Error> for Error {
fn from(err: index::Error) -> Error {
Error::Index(err)
}
}
#[cfg(all(test, feature = "unstable"))]
mod benchmarks {
extern crate test;
use super::*;
#[bench]
fn read_metadata(b: &mut test::Bencher) {
b.iter(|| {
let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap();
decode_metadata(file).unwrap();
});
}
#[bench]
fn decoder_open(b: &mut test::Bencher) {
b.iter(|| {
let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap();
decode(file).unwrap();
});
}
}
| from | identifier_name |
mod.rs | use crate::audio::*;
use crate::format;
use id3;
use lazy_static::lazy_static;
use liblame_sys::*;
use log::*;
use regex::bytes;
use sample;
use std::*;
mod index;
use self::index::FrameIndex;
/// This is the absolute maximum number of samples that can be contained in a single frame.
const MAX_FRAME_SIZE: usize = 1152;
const MAX_FRAME_BYTES: usize = 1348;
pub fn magic() -> &'static bytes::Regex {
lazy_static! {
static ref MAGIC: bytes::Regex =
bytes::Regex::new(r"(?s-u)^(?:ID3)|(:?\xff[\xe0-\xff])").unwrap();
}
&MAGIC
}
struct DecoderInit {
hip: hip_t,
mp3_data: mp3data_struct,
buffers: [[i16; MAX_FRAME_SIZE]; 2],
decode_count: usize,
stream_offset: u64,
tag: Option<id3::Tag>,
}
unsafe fn init_decoder<R>(mut input: &mut R) -> Result<DecoderInit, Error>
where
R: io::Read + io::Seek,
{
let tag = {
let mut buf = [0; 3];
input.read_exact(&mut buf)?;
input.seek(io::SeekFrom::Start(0))?;
if &buf == b"ID3" {
Some(id3::Tag::read_from(&mut input)?)
} else {
None
}
};
// On very rare occasions, LAME is unable to find the start of the stream.
index::find_stream(input)?;
let stream_offset = input.seek(io::SeekFrom::Current(0))?;
let hip: hip_t = hip_decode_init();
if hip.is_null() {
return Err(Error::ConstructionFailed);
}
hip_set_debugf(hip, Some(debug_cb));
hip_set_msgf(hip, Some(msg_cb));
hip_set_errorf(hip, Some(error_cb));
let mut mp3_data = mem::zeroed();
let mut enc_delay = 0;
let mut enc_padding = 0;
let mut buf_left = [0; MAX_FRAME_SIZE];
let mut buf_right = [0; MAX_FRAME_SIZE];
let mut rs = 0;
while rs == 0 {
let mut read_buf = [0; MAX_FRAME_BYTES];
let num_read = input.read(&mut read_buf)?;
rs = hip_decode1_headersB(
hip,
read_buf.as_mut_ptr(),
num_read,
buf_left.as_mut_ptr(),
buf_right.as_mut_ptr(),
&mut mp3_data,
&mut enc_delay,
&mut enc_padding,
);
}
if rs == -1 {
hip_decode_exit(hip);
return Err(Error::Lame(rs));
}
let decode_count = rs;
if mp3_data.header_parsed != 1 {
return Err(Error::NoHeader);
}
Ok(DecoderInit {
hip,
mp3_data,
buffers: [buf_left, buf_right],
decode_count: decode_count as usize,
stream_offset,
tag,
})
}
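// init_decoder above detects an ID3v2 tag by peeking at the first three bytes and rewinding
// before decoding starts. The same peek-and-rewind step in isolation, using only std::io
// (Cursor stands in for the input file; the helper name is illustrative):
use std::io::{Cursor, Read, Seek, SeekFrom};

fn has_id3_header<R: Read + Seek>(input: &mut R) -> std::io::Result<bool> {
    let mut buf = [0u8; 3];
    input.read_exact(&mut buf)?;
    input.seek(SeekFrom::Start(0))?; // rewind so the decoder still sees the whole stream
    Ok(&buf == b"ID3")
}

fn demo_has_id3() -> std::io::Result<()> {
    let mut tagged = Cursor::new(b"ID3\x04\x00rest-of-file".to_vec());
    assert!(has_id3_header(&mut tagged)?);
    Ok(())
}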
pub fn decode_metadata<R>(mut input: R) -> Result<format::Metadata, Error>
where
R: io::Read + io::Seek,
{
unsafe {
let init = init_decoder(&mut input)?;
hip_decode_exit(init.hip);
let num_samples = if init.mp3_data.nsamp != 0 {
init.mp3_data.nsamp
} else {
input.seek(io::SeekFrom::Start(init.stream_offset))?;
let frame_index = FrameIndex::read(&mut input)?;
frame_index.num_samples()
};
Ok(format::Metadata {
sample_rate: init.mp3_data.samplerate as u32,
num_samples: Some(num_samples),
tag: init.tag,
})
}
}
pub fn decode<R>(mut input: R) -> Result<(dynam::Audio, format::Metadata), Error>
where
R: io::Read + io::Seek + 'static,
{
unsafe {
let init = init_decoder(&mut input)?;
let sample_rate = init.mp3_data.samplerate as u32;
let num_channels = init.mp3_data.stereo as u32;
input.seek(io::SeekFrom::Start(init.stream_offset))?;
let frame_index = FrameIndex::read(&mut input)?;
input.seek(io::SeekFrom::Start(frame_index.frames[0].offset))?;
let meta = format::Metadata {
sample_rate,
num_samples: Some(frame_index.num_samples()),
tag: init.tag,
};
macro_rules! dyn_type {
($dyn:path) => {
$dyn(Box::from(Decoder {
input,
input_buf: [0; MAX_FRAME_BYTES],
hip: init.hip,
frame_index,
sample_rate,
buffers: init.buffers,
next_frame: 0,
next_sample: 0,
samples_available: init.decode_count,
_f: marker::PhantomData,
}))
.into()
};
}
Ok((
match num_channels {
1 => dyn_type!(dynam::Seek::MonoI16),
2 => dyn_type!(dynam::Seek::StereoI16),
_ => unreachable!(), // LAME's interface does not allow this.
},
meta,
))
}
}
struct Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
input: R,
input_buf: [u8; MAX_FRAME_BYTES],
hip: hip_t,
frame_index: FrameIndex,
sample_rate: u32,
buffers: [[i16; MAX_FRAME_SIZE]; 2],
next_frame: usize,
next_sample: usize,
samples_available: usize,
_f: marker::PhantomData<F>,
}
unsafe impl<F, R> Send for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
}
impl<F, R> iter::Iterator for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
type Item = F;
fn next(&mut self) -> Option<Self::Item> {
let mut num_read = 0;
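// Drain any samples LAME has already buffered (num_read is 0 on the first pass); whenever hip_decode1 reports 0, read the next indexed frame into input_buf and feed it on the following iteration.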
while self.next_sample >= self.samples_available {
unsafe {
let rs = hip_decode1(
self.hip,
self.input_buf.as_mut_ptr(),
num_read,
self.buffers[0].as_mut_ptr(),
self.buffers[1].as_mut_ptr(),
);
match rs {
0 => {
if self.next_frame >= self.frame_index.frames.len() {
return None;
}
let frame = &self.frame_index.frames[self.next_frame];
num_read = match self
.input
.read(&mut self.input_buf[..frame.length as usize])
{
Ok(nr) if nr == 0 => return None,
Ok(nr) => nr,
Err(err) => {
error!("{}", err);
return None;
}
};
}
code if code < 0 => {
error!("Error decoding next frame: {}", Error::Lame(code));
return None;
}
decode_count => {
self.next_frame += 1;
self.next_sample = 0;
self.samples_available = decode_count as usize;
}
};
}
}
let frame = F::from_fn(|ch| self.buffers[ch][self.next_sample]);
self.next_sample += 1;
Some(frame)
}
}
impl<F, R> Source for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
fn sample_rate(&self) -> u32 |
}
impl<F, R> Seekable for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
fn seek(&mut self, position: u64) -> Result<(), SeekError> {
let i = self
.frame_index
.frame_for_sample(position)
.ok_or(SeekError::OutofRange {
pos: position,
size: self.length(),
})?;
self.next_frame = i;
self.next_sample = position as usize - self.frame_index.frames[i].sample_offset as usize;
self.samples_available = 0;
assert!(self.next_frame < self.frame_index.frames.len());
assert!(self.next_sample < MAX_FRAME_SIZE);
let frame = &self.frame_index.frames[self.next_frame];
self.input
.seek(io::SeekFrom::Start(frame.offset))
.map_err(Box::from)?;
Ok(())
}
fn length(&self) -> u64 {
self.frame_index.num_samples()
}
fn current_position(&self) -> u64 {
if self.next_frame == 0 {
return 0;
}
self.frame_index.frames[self.next_frame - 1].sample_offset + self.next_sample as u64
}
}
impl<F, R> Seek for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
}
impl<F, R> Drop for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
fn drop(&mut self) {
unsafe {
hip_decode_exit(self.hip);
}
}
}
unsafe extern "C" fn debug_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
debug!("{}", VaFormatter(format, ap));
}
unsafe extern "C" fn msg_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
info!("{}", VaFormatter(format, ap));
}
unsafe extern "C" fn error_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
error!("{}", VaFormatter(format, ap));
}
struct VaFormatter(*const os::raw::c_char, *mut __va_list_tag);
impl fmt::Display for VaFormatter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
let cstr = ffi::CStr::from_ptr(self.0);
// A buffer twice the length of the format string should be enough in most cases.
let mut buf = vec![0u8; cstr.to_bytes().len() * 2];
vsnprintf(buf.as_mut_ptr() as *mut i8, buf.len(), self.0, self.1);
write!(
f,
"{}",
String::from_utf8_lossy(&*buf).trim_matches(&['\0', '\n'][..])
)
}
}
}
#[derive(Debug)]
pub enum Error {
IO(io::Error),
ID3(id3::Error),
Index(index::Error),
Lame(i32),
ConstructionFailed,
NoHeader,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::IO(ref err) => write!(f, "IO: {}", err),
Error::ID3(ref err) => write!(f, "ID3: {}", err),
Error::Index(ref err) => write!(f, "Index: {}", err),
Error::Lame(code) => {
let msg = match code {
0 => "okay",
-1 => "generic error",
-10 => "no memory",
-11 => "bad bitrate",
-12 => "bad sample frequency",
-13 => "internal error",
-80 => "read error",
-81 => "write error",
-82 => "file too large",
_ => "unknown",
};
write!(f, "Lame error: {}", msg)
}
Error::ConstructionFailed => write!(f, "Failed to construct decoder"),
Error::NoHeader => write!(f, "Missing header"),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"MP3 error"
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::IO(ref err) => Some(err),
Error::ID3(ref err) => Some(err),
Error::Index(ref err) => Some(err),
_ => None,
}
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::IO(err)
}
}
impl From<id3::Error> for Error {
fn from(err: id3::Error) -> Error {
Error::ID3(err)
}
}
impl From<index::Error> for Error {
fn from(err: index::Error) -> Error {
Error::Index(err)
}
}
#[cfg(all(test, feature = "unstable"))]
mod benchmarks {
extern crate test;
use super::*;
#[bench]
fn read_metadata(b: &mut test::Bencher) {
b.iter(|| {
let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap();
decode_metadata(file).unwrap();
});
}
#[bench]
fn decoder_open(b: &mut test::Bencher) {
b.iter(|| {
let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap();
decode(file).unwrap();
});
}
}
| {
self.sample_rate
} | identifier_body |
mod.rs | use crate::audio::*;
use crate::format;
use id3;
use lazy_static::lazy_static;
use liblame_sys::*;
use log::*;
use regex::bytes;
use sample;
use std::*;
mod index;
use self::index::FrameIndex;
/// This is the absolute maximum number of samples that can be contained in a single frame.
const MAX_FRAME_SIZE: usize = 1152;
const MAX_FRAME_BYTES: usize = 1348;
pub fn magic() -> &'static bytes::Regex {
lazy_static! {
static ref MAGIC: bytes::Regex =
bytes::Regex::new(r"(?s-u)^(?:ID3)|(:?\xff[\xe0-\xff])").unwrap();
}
&MAGIC
}
struct DecoderInit {
hip: hip_t,
mp3_data: mp3data_struct,
buffers: [[i16; MAX_FRAME_SIZE]; 2],
decode_count: usize,
stream_offset: u64,
tag: Option<id3::Tag>,
}
unsafe fn init_decoder<R>(mut input: &mut R) -> Result<DecoderInit, Error>
where
R: io::Read + io::Seek,
{
let tag = {
let mut buf = [0; 3];
input.read_exact(&mut buf)?;
input.seek(io::SeekFrom::Start(0))?;
if &buf == b"ID3" {
Some(id3::Tag::read_from(&mut input)?)
} else {
None
}
};
// On very rare occasions, LAME is unable to find the start of the stream.
index::find_stream(input)?;
let stream_offset = input.seek(io::SeekFrom::Current(0))?;
let hip: hip_t = hip_decode_init();
if hip.is_null() {
return Err(Error::ConstructionFailed);
}
hip_set_debugf(hip, Some(debug_cb));
hip_set_msgf(hip, Some(msg_cb));
hip_set_errorf(hip, Some(error_cb));
let mut mp3_data = mem::zeroed();
let mut enc_delay = 0;
let mut enc_padding = 0;
let mut buf_left = [0; MAX_FRAME_SIZE];
let mut buf_right = [0; MAX_FRAME_SIZE];
let mut rs = 0;
while rs == 0 {
let mut read_buf = [0; MAX_FRAME_BYTES];
let num_read = input.read(&mut read_buf)?;
rs = hip_decode1_headersB(
hip,
read_buf.as_mut_ptr(),
num_read,
buf_left.as_mut_ptr(),
buf_right.as_mut_ptr(),
&mut mp3_data,
&mut enc_delay,
&mut enc_padding,
);
}
if rs == -1 {
hip_decode_exit(hip);
return Err(Error::Lame(rs));
}
let decode_count = rs;
if mp3_data.header_parsed != 1 {
return Err(Error::NoHeader);
}
Ok(DecoderInit {
hip,
mp3_data,
buffers: [buf_left, buf_right],
decode_count: decode_count as usize,
stream_offset,
tag,
})
}
pub fn decode_metadata<R>(mut input: R) -> Result<format::Metadata, Error>
where
R: io::Read + io::Seek,
{
unsafe {
let init = init_decoder(&mut input)?;
hip_decode_exit(init.hip);
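// Use the total sample count reported by LAME if it knows one; otherwise fall back to scanning the stream and building a frame index.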
let num_samples = if init.mp3_data.nsamp != 0 {
init.mp3_data.nsamp
} else {
input.seek(io::SeekFrom::Start(init.stream_offset))?;
let frame_index = FrameIndex::read(&mut input)?;
frame_index.num_samples()
};
Ok(format::Metadata {
sample_rate: init.mp3_data.samplerate as u32,
num_samples: Some(num_samples),
tag: init.tag,
})
}
}
pub fn decode<R>(mut input: R) -> Result<(dynam::Audio, format::Metadata), Error>
where
R: io::Read + io::Seek + 'static,
{
unsafe {
let init = init_decoder(&mut input)?;
let sample_rate = init.mp3_data.samplerate as u32;
let num_channels = init.mp3_data.stereo as u32;
input.seek(io::SeekFrom::Start(init.stream_offset))?;
let frame_index = FrameIndex::read(&mut input)?;
input.seek(io::SeekFrom::Start(frame_index.frames[0].offset))?;
let meta = format::Metadata {
sample_rate, | $dyn(Box::from(Decoder {
input,
input_buf: [0; MAX_FRAME_BYTES],
hip: init.hip,
frame_index,
sample_rate,
buffers: init.buffers,
next_frame: 0,
next_sample: 0,
samples_available: init.decode_count,
_f: marker::PhantomData,
}))
.into()
};
}
Ok((
match num_channels {
1 => dyn_type!(dynam::Seek::MonoI16),
2 => dyn_type!(dynam::Seek::StereoI16),
_ => unreachable!(), // LAME's interface does not allow this.
},
meta,
))
}
}
struct Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
input: R,
input_buf: [u8; MAX_FRAME_BYTES],
hip: hip_t,
frame_index: FrameIndex,
sample_rate: u32,
buffers: [[i16; MAX_FRAME_SIZE]; 2],
next_frame: usize,
next_sample: usize,
samples_available: usize,
_f: marker::PhantomData<F>,
}
unsafe impl<F, R> Send for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
}
impl<F, R> iter::Iterator for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
type Item = F;
fn next(&mut self) -> Option<Self::Item> {
let mut num_read = 0;
while self.next_sample >= self.samples_available {
unsafe {
let rs = hip_decode1(
self.hip,
self.input_buf.as_mut_ptr(),
num_read,
self.buffers[0].as_mut_ptr(),
self.buffers[1].as_mut_ptr(),
);
match rs {
0 => {
if self.next_frame >= self.frame_index.frames.len() {
return None;
}
let frame = &self.frame_index.frames[self.next_frame];
num_read = match self
.input
.read(&mut self.input_buf[..frame.length as usize])
{
Ok(nr) if nr == 0 => return None,
Ok(nr) => nr,
Err(err) => {
error!("{}", err);
return None;
}
};
}
code if code < 0 => {
error!("Error decoding next frame: {}", Error::Lame(code));
return None;
}
decode_count => {
self.next_frame += 1;
self.next_sample = 0;
self.samples_available = decode_count as usize;
}
};
}
}
let frame = F::from_fn(|ch| self.buffers[ch][self.next_sample]);
self.next_sample += 1;
Some(frame)
}
}
impl<F, R> Source for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
fn sample_rate(&self) -> u32 {
self.sample_rate
}
}
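// Seeking positions the reader at the start of the frame containing the target sample and clears samples_available so the iterator refills on the next call.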
impl<F, R> Seekable for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
fn seek(&mut self, position: u64) -> Result<(), SeekError> {
let i = self
.frame_index
.frame_for_sample(position)
.ok_or(SeekError::OutofRange {
pos: position,
size: self.length(),
})?;
self.next_frame = i;
self.next_sample = position as usize - self.frame_index.frames[i].sample_offset as usize;
self.samples_available = 0;
assert!(self.next_frame < self.frame_index.frames.len());
assert!(self.next_sample < MAX_FRAME_SIZE);
let frame = &self.frame_index.frames[self.next_frame];
self.input
.seek(io::SeekFrom::Start(frame.offset))
.map_err(Box::from)?;
Ok(())
}
fn length(&self) -> u64 {
self.frame_index.num_samples()
}
fn current_position(&self) -> u64 {
if self.next_frame == 0 {
return 0;
}
self.frame_index.frames[self.next_frame - 1].sample_offset + self.next_sample as u64
}
}
impl<F, R> Seek for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
}
impl<F, R> Drop for Decoder<F, R>
where
F: sample::Frame<Sample = i16>,
R: io::Read + io::Seek + 'static,
{
fn drop(&mut self) {
unsafe {
hip_decode_exit(self.hip);
}
}
}
unsafe extern "C" fn debug_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
debug!("{}", VaFormatter(format, ap));
}
unsafe extern "C" fn msg_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
info!("{}", VaFormatter(format, ap));
}
unsafe extern "C" fn error_cb(format: *const os::raw::c_char, ap: *mut __va_list_tag) {
error!("{}", VaFormatter(format, ap));
}
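// Adapts LAME's printf-style logging callbacks to the log crate by rendering the C format string and va_list into a Rust string.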
struct VaFormatter(*const os::raw::c_char, *mut __va_list_tag);
impl fmt::Display for VaFormatter {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
unsafe {
let cstr = ffi::CStr::from_ptr(self.0);
// A buffer twice the length of the format string should be enough in most cases.
let mut buf = vec![0u8; cstr.to_bytes().len() * 2];
vsnprintf(buf.as_mut_ptr() as *mut i8, buf.len(), self.0, self.1);
write!(
f,
"{}",
String::from_utf8_lossy(&*buf).trim_matches(&['\0', '\n'][..])
)
}
}
}
#[derive(Debug)]
pub enum Error {
IO(io::Error),
ID3(id3::Error),
Index(index::Error),
Lame(i32),
ConstructionFailed,
NoHeader,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::IO(ref err) => write!(f, "IO: {}", err),
Error::ID3(ref err) => write!(f, "ID3: {}", err),
Error::Index(ref err) => write!(f, "Index: {}", err),
Error::Lame(code) => {
let msg = match code {
0 => "okay",
-1 => "generic error",
-10 => "no memory",
-11 => "bad bitrate",
-12 => "bad sample frequency",
-13 => "internal error",
-80 => "read error",
-81 => "write error",
-82 => "file too large",
_ => "unknown",
};
write!(f, "Lame error: {}", msg)
}
Error::ConstructionFailed => write!(f, "Failed to construct decoder"),
Error::NoHeader => write!(f, "Missing header"),
}
}
}
impl error::Error for Error {
fn description(&self) -> &str {
"MP3 error"
}
fn cause(&self) -> Option<&error::Error> {
match *self {
Error::IO(ref err) => Some(err),
Error::ID3(ref err) => Some(err),
Error::Index(ref err) => Some(err),
_ => None,
}
}
}
impl From<io::Error> for Error {
fn from(err: io::Error) -> Error {
Error::IO(err)
}
}
impl From<id3::Error> for Error {
fn from(err: id3::Error) -> Error {
Error::ID3(err)
}
}
impl From<index::Error> for Error {
fn from(err: index::Error) -> Error {
Error::Index(err)
}
}
#[cfg(all(test, feature = "unstable"))]
mod benchmarks {
extern crate test;
use super::*;
#[bench]
fn read_metadata(b: &mut test::Bencher) {
b.iter(|| {
let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap();
decode_metadata(file).unwrap();
});
}
#[bench]
fn decoder_open(b: &mut test::Bencher) {
b.iter(|| {
let file = fs::File::open("testdata/10s_440hz_320cbr_stereo.mp3").unwrap();
decode(file).unwrap();
});
}
} | num_samples: Some(frame_index.num_samples()),
tag: init.tag,
};
macro_rules! dyn_type {
($dyn:path) => { | random_line_split |
day3.rs | use {
aoc_runner_derive::aoc,
re_parse::{Error as ReParseError, ReParse, Regex},
serde_derive::Deserialize,
std::{
cmp::max,
mem::replace,
ops::{Index, IndexMut},
slice::Iter as SliceIter,
str::{FromStr, Split},
},
};
struct ClaimIterator<'s> {
input: Split<'s, char>,
}
impl<'s> ClaimIterator<'s> {
pub fn new(input: &'s str) -> Self {
ClaimIterator {
input: input.split('\n'),
}
}
}
#[derive(Debug, Deserialize, ReParse)]
#[re_parse(
regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"#
)]
struct RawClaim {
id: usize,
left: usize,
top: usize,
width: usize,
height: usize,
}
#[derive(Clone, Debug)]
struct Claim {
id: usize,
left: usize,
top: usize,
right: usize,
bottom: usize,
}
impl Claim {
// FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive.
// There should be no need to call this twice!
fn contains_edge_of(&self, other: &Self) -> (bool, bool) {
let intersects_horizontally = {
let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom;
let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom;
bottom_in_horizontal_band || top_in_horizontal_band
};
let intersects_vertically = {
let left_in_vertical_band = self.left >= other.left && self.left < other.right;
let right_in_vertical_band = self.right > other.left && self.right <= other.right;
left_in_vertical_band || right_in_vertical_band
};
(intersects_horizontally, intersects_vertically)
}
pub fn intersects(&self, other: &Self) -> bool {
let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other);
let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self);
(self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert)
}
}
#[test]
fn test_intersection() {
const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3";
let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap();
for other in &[
// Close but not touching
"#0 @ 1,1: 1x1",
"#0 @ 2,1: 1x1",
"#0 @ 3,1: 1x1",
"#0 @ 4,1: 1x1",
"#0 @ 5,1: 1x1",
"#0 @ 5,2: 1x1",
"#0 @ 5,3: 1x1",
"#0 @ 5,4: 1x1",
"#0 @ 5,5: 1x1",
"#0 @ 4,5: 1x1",
"#0 @ 3,5: 1x1",
"#0 @ 2,5: 1x1",
"#0 @ 1,5: 1x1",
"#0 @ 1,4: 1x1",
"#0 @ 1,3: 1x1",
"#0 @ 1,2: 1x1",
// Way out there
] {
if claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is not supposed to intersect {:?}", other, claim);
}
}
for other in &[
// Same thing
CLAIM_TO_COMPARE_TO,
// Other encompasses first
"#0 @ 1,1: 5x5",
// First encompasses other
"#0 @ 3,3: 1x1",
// Edges
"#0 @ 1,1: 2x2",
"#0 @ 2,1: 2x2",
"#0 @ 3,1: 2x2",
"#0 @ 3,2: 2x2",
"#0 @ 3,3: 2x2",
"#0 @ 2,3: 2x2",
"#0 @ 1,3: 2x2",
"#0 @ 1,2: 2x2",
] {
if !claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is supposed to intersect {:?}", other, claim);
}
}
// Other failing cases found
fn intersects(s1: &str, s2: &str) -> bool {
s1.parse::<Claim>()
.unwrap()
.intersects(&s2.parse().unwrap())
}
//"#1236 @ ".parse().unwrap()
assert!(intersects(
"#1236 @ 420,613: 19x12",
"#344 @ 426,611: 12x21"
));
}
#[derive(Debug)]
enum ClaimParseError {
ParseFailed(ReParseError),
InvalidDimensions(usize, usize),
}
impl FromStr for Claim {
type Err = ClaimParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::ClaimParseError::*;
let RawClaim {
id,
left,
top,
width,
height,
} = RawClaim::from_str(s).map_err(ParseFailed)?;
if width == 0 || height == 0 {
return Err(InvalidDimensions(width, height));
}
Ok(Self {
id,
left,
top,
right: left.checked_add(width).unwrap(),
bottom: top.checked_add(height).unwrap(),
})
}
}
impl<'s> Iterator for ClaimIterator<'s> {
type Item = Claim;
fn next(&mut self) -> Option<Self::Item> {
match self.input.next()? {
"" => None,
other => Some(other.parse().unwrap()),
}
}
}
struct GrowOnlyGrid<T> {
inner: Vec<T>,
len_x: usize,
len_y: usize,
}
impl<T> GrowOnlyGrid<T> {
pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self {
Self {
inner: {
let len = x.checked_mul(y).unwrap();
let mut inner = Vec::with_capacity(len);
// OPT: Use the soon-to-be-stable `resize_with` instead.
while inner.len() < len {
inner.push(f());
}
inner
},
len_x: x,
len_y: y,
}
}
pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F)
where
T: Default,
{
let old_len_x = self.len_x;
let old_len_y = self.len_y;
let old = replace(
self,
Self::new_with(max(x, old_len_x), max(y, old_len_y), f),
);
let mut old_values = old.inner.into_iter();
for y in 0..old_len_y {
// OPT: We could probably just copy slices here directly
for x in 0..old_len_x {
let idx = unsafe { self.index_from_coords_unchecked(x, y) };
self.inner[idx] = old_values.next().unwrap();
}
}
}
pub fn dimensions(&self) -> (usize, usize) {
(self.len_x, self.len_y)
}
fn index_from_coords(&self, x: usize, y: usize) -> usize {
if x >= self.len_x || y >= self.len_y {
panic!(
"coordinates {:?} exceed current dimensions of {:?}",
(x, y),
self.dimensions()
);
}
unsafe { self.index_from_coords_unchecked(x, y) }
}
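// Row-major layout: each row of len_x cells is stored contiguously, so (x, y) maps to y * len_x + x.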
unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize {
y * self.len_x + x
}
}
impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> {
type Output = T;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
let idx = self.index_from_coords(x, y);
&self.inner[idx]
}
}
impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
let idx = self.index_from_coords(x, y);
&mut self.inner[idx]
}
}
impl<T> GrowOnlyGrid<T> {
pub fn iter_flat(&self) -> SliceIter<T> {
self.inner[..].iter()
}
}
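// Part 1: stamp every claim onto a grid of per-cell counters, then count the cells claimed more than once.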
#[aoc(day3, part1)]
pub fn day3_part1(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
for claim in ClaimIterator::new(input) {
let Claim {
id: _,
left,
top,
right,
bottom,
} = claim;
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in top..bottom {
for x in left..right {
let blarg = &mut grid[(x, y)];
*blarg = blarg.checked_add(1).unwrap();
}
}
}
grid.iter_flat().filter(|x| x > &&1).count()
}
#[cfg(test)]
const INPUT: &'static str = include_str!("../input/2018/day3.txt");
#[cfg(test)]
const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4
#2 @ 3,1: 4x4
#3 @ 5,5: 2x2
"#;
#[cfg(test)]
const HINT_EXPECTED_PART1_OUTPUT: usize = 4;
#[cfg(test)]
const HINT_EXPECTED_PART2_OUTPUT: usize = 3;
#[cfg(test)]
const EXPECTED_PART2_OUTPUT: usize = 603;
#[test]
fn test_day3_part1_hint() {
assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT);
}
#[aoc(day3, part2, square_iteration)]
pub fn day3_part2_square_iteration(input: &str) -> usize {
// OPT: Use ArrayVec for even more performance? Depends on max size.
// OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char
// splits.
let mut claims = ClaimIterator::new(input)
.map(|c| (c, true))
.collect::<Vec<_>>();
for i in 0..claims.len() {
for j in i + 1..claims.len() {
if claims[i].0.intersects(&claims[j].0) |
}
}
let uncontested = claims
.into_iter()
.filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None })
.collect::<Vec<_>>();
if uncontested.len() != 1 {
panic!("Expected single remaining claim, got {:?}", uncontested);
}
uncontested[0].id
}
#[test]
fn test_day3_part2_square_iteration_hint() {
assert_eq!(
day3_part2_square_iteration(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_square_iteration_answer() {
assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT);
}
#[aoc(day3, part2, grid_again)]
pub fn day3_part2_grid_again(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
let claims = ClaimIterator::new(input).collect::<Vec<_>>();
for Claim {
id: _,
left,
top,
right,
bottom,
} in claims.iter()
{
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in *top..*bottom {
for x in *left..*right {
*(&mut grid[(x, y)]) += 1;
}
}
}
let uncontested = claims
.into_iter()
.filter(
|Claim {
left,
top,
bottom,
right,
..
}| {
for y in *top..*bottom {
for x in *left..*right {
let count = grid[(x, y)];
assert!(count != 0);
if count > 1 {
return false;
}
}
}
true
},
)
.collect::<Vec<_>>();
assert_eq!(uncontested.len(), 1);
uncontested[0].id
}
#[test]
fn test_day3_part2_grid_again_hint() {
assert_eq!(
day3_part2_grid_again(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_grid_again_answer() {
assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT);
}
| {
(&mut claims[i]).1 = false;
(&mut claims[j]).1 = false;
} | conditional_block |
day3.rs | use {
aoc_runner_derive::aoc,
re_parse::{Error as ReParseError, ReParse, Regex},
serde_derive::Deserialize,
std::{
cmp::max,
mem::replace,
ops::{Index, IndexMut},
slice::Iter as SliceIter,
str::{FromStr, Split},
},
};
struct ClaimIterator<'s> {
input: Split<'s, char>,
}
impl<'s> ClaimIterator<'s> {
pub fn new(input: &'s str) -> Self {
ClaimIterator {
input: input.split('\n'),
}
}
}
#[derive(Debug, Deserialize, ReParse)]
#[re_parse(
regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"#
)]
struct RawClaim {
id: usize,
left: usize,
top: usize,
width: usize,
height: usize,
}
#[derive(Clone, Debug)]
struct Claim {
id: usize,
left: usize,
top: usize,
right: usize,
bottom: usize,
}
impl Claim {
// FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive.
// There should be no need to call this twice!
fn contains_edge_of(&self, other: &Self) -> (bool, bool) {
let intersects_horizontally = {
let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom;
let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom;
bottom_in_horizontal_band || top_in_horizontal_band
};
let intersects_vertically = {
let left_in_vertical_band = self.left >= other.left && self.left < other.right;
let right_in_vertical_band = self.right > other.left && self.right <= other.right;
left_in_vertical_band || right_in_vertical_band
};
(intersects_horizontally, intersects_vertically)
}
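// Symmetric overlap test: combines the directed edge-containment check in both directions (see the FIXME above).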
pub fn intersects(&self, other: &Self) -> bool {
let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other);
let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self);
(self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert)
}
}
#[test]
fn | () {
const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3";
let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap();
for other in &[
// Close but not touching
"#0 @ 1,1: 1x1",
"#0 @ 2,1: 1x1",
"#0 @ 3,1: 1x1",
"#0 @ 4,1: 1x1",
"#0 @ 5,1: 1x1",
"#0 @ 5,2: 1x1",
"#0 @ 5,3: 1x1",
"#0 @ 5,4: 1x1",
"#0 @ 5,5: 1x1",
"#0 @ 4,5: 1x1",
"#0 @ 3,5: 1x1",
"#0 @ 2,5: 1x1",
"#0 @ 1,5: 1x1",
"#0 @ 1,4: 1x1",
"#0 @ 1,3: 1x1",
"#0 @ 1,2: 1x1",
// Way out there
] {
if claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is not supposed to intersect {:?}", other, claim);
}
}
for other in &[
// Same thing
CLAIM_TO_COMPARE_TO,
// Other encompasses first
"#0 @ 1,1: 5x5",
// First encompasses other
"#0 @ 3,3: 1x1",
// Edges
"#0 @ 1,1: 2x2",
"#0 @ 2,1: 2x2",
"#0 @ 3,1: 2x2",
"#0 @ 3,2: 2x2",
"#0 @ 3,3: 2x2",
"#0 @ 2,3: 2x2",
"#0 @ 1,3: 2x2",
"#0 @ 1,2: 2x2",
] {
if !claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is supposed to intersect {:?}", other, claim);
}
}
// Other failing cases found
fn intersects(s1: &str, s2: &str) -> bool {
s1.parse::<Claim>()
.unwrap()
.intersects(&s2.parse().unwrap())
}
//"#1236 @ ".parse().unwrap()
assert!(intersects(
"#1236 @ 420,613: 19x12",
"#344 @ 426,611: 12x21"
));
}
#[derive(Debug)]
enum ClaimParseError {
ParseFailed(ReParseError),
InvalidDimensions(usize, usize),
}
impl FromStr for Claim {
type Err = ClaimParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::ClaimParseError::*;
let RawClaim {
id,
left,
top,
width,
height,
} = RawClaim::from_str(s).map_err(ParseFailed)?;
if width == 0 || height == 0 {
return Err(InvalidDimensions(width, height));
}
Ok(Self {
id,
left,
top,
right: left.checked_add(width).unwrap(),
bottom: top.checked_add(height).unwrap(),
})
}
}
impl<'s> Iterator for ClaimIterator<'s> {
type Item = Claim;
fn next(&mut self) -> Option<Self::Item> {
match self.input.next()? {
"" => None,
other => Some(other.parse().unwrap()),
}
}
}
struct GrowOnlyGrid<T> {
inner: Vec<T>,
len_x: usize,
len_y: usize,
}
impl<T> GrowOnlyGrid<T> {
pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self {
Self {
inner: {
let len = x.checked_mul(y).unwrap();
let mut inner = Vec::with_capacity(len);
// OPT: Use the soon-to-be-stable `resize_with` instead.
while inner.len() < len {
inner.push(f());
}
inner
},
len_x: x,
len_y: y,
}
}
pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F)
where
T: Default,
{
let old_len_x = self.len_x;
let old_len_y = self.len_y;
let old = replace(
self,
Self::new_with(max(x, old_len_x), max(y, old_len_y), f),
);
let mut old_values = old.inner.into_iter();
for y in 0..old_len_y {
// OPT: We could probably just copy slices here directly
for x in 0..old_len_x {
let idx = unsafe { self.index_from_coords_unchecked(x, y) };
self.inner[idx] = old_values.next().unwrap();
}
}
}
pub fn dimensions(&self) -> (usize, usize) {
(self.len_x, self.len_y)
}
fn index_from_coords(&self, x: usize, y: usize) -> usize {
if x >= self.len_x || y >= self.len_y {
panic!(
"coordinates {:?} exceed current dimensions of {:?}",
(x, y),
self.dimensions()
);
}
unsafe { self.index_from_coords_unchecked(x, y) }
}
unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize {
y * self.len_x + x
}
}
impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> {
type Output = T;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
let idx = self.index_from_coords(x, y);
&self.inner[idx]
}
}
impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
let idx = self.index_from_coords(x, y);
&mut self.inner[idx]
}
}
impl<T> GrowOnlyGrid<T> {
pub fn iter_flat(&self) -> SliceIter<T> {
self.inner[..].iter()
}
}
#[aoc(day3, part1)]
pub fn day3_part1(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
for claim in ClaimIterator::new(input) {
let Claim {
id: _,
left,
top,
right,
bottom,
} = claim;
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in top..bottom {
for x in left..right {
let blarg = &mut grid[(x, y)];
*blarg = blarg.checked_add(1).unwrap();
}
}
}
grid.iter_flat().filter(|x| x > &&1).count()
}
#[cfg(test)]
const INPUT: &'static str = include_str!("../input/2018/day3.txt");
#[cfg(test)]
const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4
#2 @ 3,1: 4x4
#3 @ 5,5: 2x2
"#;
#[cfg(test)]
const HINT_EXPECTED_PART1_OUTPUT: usize = 4;
#[cfg(test)]
const HINT_EXPECTED_PART2_OUTPUT: usize = 3;
#[cfg(test)]
const EXPECTED_PART2_OUTPUT: usize = 603;
#[test]
fn test_day3_part1_hint() {
assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT);
}
#[aoc(day3, part2, square_iteration)]
pub fn day3_part2_square_iteration(input: &str) -> usize {
// OPT: Use ArrayVec for even more performance? Depends on max size.
// OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char
// splits.
let mut claims = ClaimIterator::new(input)
.map(|c| (c, true))
.collect::<Vec<_>>();
for i in 0..claims.len() {
for j in i + 1..claims.len() {
if claims[i].0.intersects(&claims[j].0) {
(&mut claims[i]).1 = false;
(&mut claims[j]).1 = false;
}
}
}
let uncontested = claims
.into_iter()
.filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None })
.collect::<Vec<_>>();
if uncontested.len() != 1 {
panic!("Expected single remaining claim, got {:?}", uncontested);
}
uncontested[0].id
}
#[test]
fn test_day3_part2_square_iteration_hint() {
assert_eq!(
day3_part2_square_iteration(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_square_iteration_answer() {
assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT);
}
#[aoc(day3, part2, grid_again)]
pub fn day3_part2_grid_again(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
let claims = ClaimIterator::new(input).collect::<Vec<_>>();
for Claim {
id: _,
left,
top,
right,
bottom,
} in claims.iter()
{
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in *top..*bottom {
for x in *left..*right {
*(&mut grid[(x, y)]) += 1;
}
}
}
let uncontested = claims
.into_iter()
.filter(
|Claim {
left,
top,
bottom,
right,
..
}| {
for y in *top..*bottom {
for x in *left..*right {
let count = grid[(x, y)];
assert!(count != 0);
if count > 1 {
return false;
}
}
}
true
},
)
.collect::<Vec<_>>();
assert_eq!(uncontested.len(), 1);
uncontested[0].id
}
#[test]
fn test_day3_part2_grid_again_hint() {
assert_eq!(
day3_part2_grid_again(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_grid_again_answer() {
assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT);
}
| test_intersection | identifier_name |
day3.rs | use {
aoc_runner_derive::aoc,
re_parse::{Error as ReParseError, ReParse, Regex},
serde_derive::Deserialize,
std::{
cmp::max,
mem::replace,
ops::{Index, IndexMut},
slice::Iter as SliceIter,
str::{FromStr, Split},
},
};
struct ClaimIterator<'s> {
input: Split<'s, char>,
}
impl<'s> ClaimIterator<'s> {
pub fn new(input: &'s str) -> Self {
ClaimIterator {
input: input.split('\n'),
}
}
}
#[derive(Debug, Deserialize, ReParse)]
#[re_parse(
regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"#
)]
struct RawClaim {
id: usize,
left: usize,
top: usize,
width: usize,
height: usize,
}
#[derive(Clone, Debug)]
struct Claim {
id: usize,
left: usize,
top: usize,
right: usize,
bottom: usize,
}
impl Claim {
// FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive.
// There should be no need to call this twice!
fn contains_edge_of(&self, other: &Self) -> (bool, bool) {
let intersects_horizontally = {
let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom;
let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom;
bottom_in_horizontal_band || top_in_horizontal_band
};
let intersects_vertically = {
let left_in_vertical_band = self.left >= other.left && self.left < other.right;
let right_in_vertical_band = self.right > other.left && self.right <= other.right;
left_in_vertical_band || right_in_vertical_band
};
(intersects_horizontally, intersects_vertically)
}
pub fn intersects(&self, other: &Self) -> bool {
let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other);
let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self);
(self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert)
}
}
#[test]
fn test_intersection() {
const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3";
let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap();
for other in &[
// Close but not touching
"#0 @ 1,1: 1x1",
"#0 @ 2,1: 1x1",
"#0 @ 3,1: 1x1",
"#0 @ 4,1: 1x1",
"#0 @ 5,1: 1x1",
"#0 @ 5,2: 1x1",
"#0 @ 5,3: 1x1",
"#0 @ 5,4: 1x1",
"#0 @ 5,5: 1x1",
"#0 @ 4,5: 1x1",
"#0 @ 3,5: 1x1",
"#0 @ 2,5: 1x1",
"#0 @ 1,5: 1x1",
"#0 @ 1,4: 1x1",
"#0 @ 1,3: 1x1",
"#0 @ 1,2: 1x1",
// Way out there
] {
if claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is not supposed to intersect {:?}", other, claim);
}
}
for other in &[
// Same thing
CLAIM_TO_COMPARE_TO,
// Other encompasses first
"#0 @ 1,1: 5x5",
// First encompasses other
"#0 @ 3,3: 1x1",
// Edges
"#0 @ 1,1: 2x2",
"#0 @ 2,1: 2x2",
"#0 @ 3,1: 2x2",
"#0 @ 3,2: 2x2",
"#0 @ 3,3: 2x2",
"#0 @ 2,3: 2x2",
"#0 @ 1,3: 2x2",
"#0 @ 1,2: 2x2",
] {
if !claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is supposed to intersect {:?}", other, claim);
}
}
// Other failing cases found
fn intersects(s1: &str, s2: &str) -> bool {
s1.parse::<Claim>()
.unwrap()
.intersects(&s2.parse().unwrap())
}
//"#1236 @ ".parse().unwrap()
assert!(intersects(
"#1236 @ 420,613: 19x12",
"#344 @ 426,611: 12x21"
));
}
#[derive(Debug)]
enum ClaimParseError {
ParseFailed(ReParseError),
InvalidDimensions(usize, usize),
}
impl FromStr for Claim {
type Err = ClaimParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::ClaimParseError::*;
let RawClaim {
id,
left,
top,
width,
height,
} = RawClaim::from_str(s).map_err(ParseFailed)?;
if width == 0 || height == 0 {
return Err(InvalidDimensions(width, height));
}
Ok(Self {
id,
left,
top,
right: left.checked_add(width).unwrap(),
bottom: top.checked_add(height).unwrap(),
})
}
}
impl<'s> Iterator for ClaimIterator<'s> {
type Item = Claim;
fn next(&mut self) -> Option<Self::Item> {
match self.input.next()? {
"" => None,
other => Some(other.parse().unwrap()),
}
}
}
struct GrowOnlyGrid<T> {
inner: Vec<T>,
len_x: usize,
len_y: usize,
}
impl<T> GrowOnlyGrid<T> {
pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self {
Self {
inner: {
let len = x.checked_mul(y).unwrap();
let mut inner = Vec::with_capacity(len);
// OPT: Use the soon-to-be-stable `resize_with` instead.
while inner.len() < len {
inner.push(f());
}
inner
},
len_x: x,
len_y: y,
}
}
pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F)
where
T: Default,
{
let old_len_x = self.len_x;
let old_len_y = self.len_y;
let old = replace(
self,
Self::new_with(max(x, old_len_x), max(y, old_len_y), f),
);
let mut old_values = old.inner.into_iter();
for y in 0..old_len_y {
// OPT: We could probably just copy slices here directly
for x in 0..old_len_x {
let idx = unsafe { self.index_from_coords_unchecked(x, y) };
self.inner[idx] = old_values.next().unwrap();
}
}
}
pub fn dimensions(&self) -> (usize, usize) {
(self.len_x, self.len_y)
}
fn index_from_coords(&self, x: usize, y: usize) -> usize {
if x >= self.len_x || y >= self.len_y {
panic!(
"coordinates {:?} exceed current dimensions of {:?}",
(x, y),
self.dimensions()
);
}
unsafe { self.index_from_coords_unchecked(x, y) }
}
unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize {
y * self.len_x + x
}
}
impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> {
type Output = T;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
let idx = self.index_from_coords(x, y);
&self.inner[idx]
}
}
impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
let idx = self.index_from_coords(x, y);
&mut self.inner[idx]
}
}
impl<T> GrowOnlyGrid<T> {
pub fn iter_flat(&self) -> SliceIter<T> {
self.inner[..].iter()
}
}
#[aoc(day3, part1)]
pub fn day3_part1(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
for claim in ClaimIterator::new(input) {
let Claim {
id: _,
left,
top,
right,
bottom,
} = claim;
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in top..bottom {
for x in left..right {
let blarg = &mut grid[(x, y)];
*blarg = blarg.checked_add(1).unwrap();
}
}
}
grid.iter_flat().filter(|x| x > &&1).count()
}
#[cfg(test)]
const INPUT: &'static str = include_str!("../input/2018/day3.txt");
#[cfg(test)]
const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4
#2 @ 3,1: 4x4
#3 @ 5,5: 2x2
"#;
#[cfg(test)]
const HINT_EXPECTED_PART1_OUTPUT: usize = 4;
#[cfg(test)]
const HINT_EXPECTED_PART2_OUTPUT: usize = 3;
#[cfg(test)]
const EXPECTED_PART2_OUTPUT: usize = 603;
#[test]
fn test_day3_part1_hint() {
assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT);
}
#[aoc(day3, part2, square_iteration)]
pub fn day3_part2_square_iteration(input: &str) -> usize {
// OPT: Use ArrayVec for even more performance? Depends on max size.
// OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char
// splits.
let mut claims = ClaimIterator::new(input)
.map(|c| (c, true))
.collect::<Vec<_>>();
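// O(n^2) pairwise pass: mark both members of every intersecting pair as contested; exactly one claim should survive.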
for i in 0..claims.len() {
for j in i + 1..claims.len() {
if claims[i].0.intersects(&claims[j].0) {
(&mut claims[i]).1 = false;
(&mut claims[j]).1 = false;
}
}
}
let uncontested = claims
.into_iter()
.filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None })
.collect::<Vec<_>>();
if uncontested.len() != 1 {
panic!("Expected single remaining claim, got {:?}", uncontested);
}
uncontested[0].id
}
#[test]
fn test_day3_part2_square_iteration_hint() {
assert_eq!(
day3_part2_square_iteration(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_square_iteration_answer() {
assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT);
}
#[aoc(day3, part2, grid_again)]
pub fn day3_part2_grid_again(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
let claims = ClaimIterator::new(input).collect::<Vec<_>>();
for Claim {
id: _,
left,
top,
right,
bottom,
} in claims.iter()
{
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in *top..*bottom {
for x in *left..*right {
*(&mut grid[(x, y)]) += 1;
}
}
}
let uncontested = claims
.into_iter()
.filter(
|Claim {
left,
top,
bottom, | let count = grid[(x, y)];
assert!(count != 0);
if count > 1 {
return false;
}
}
}
true
},
)
.collect::<Vec<_>>();
assert_eq!(uncontested.len(), 1);
uncontested[0].id
}
#[test]
fn test_day3_part2_grid_again_hint() {
assert_eq!(
day3_part2_grid_again(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_grid_again_answer() {
assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT);
} | right,
..
}| {
for y in *top..*bottom {
for x in *left..*right { | random_line_split |
day3.rs | use {
aoc_runner_derive::aoc,
re_parse::{Error as ReParseError, ReParse, Regex},
serde_derive::Deserialize,
std::{
cmp::max,
mem::replace,
ops::{Index, IndexMut},
slice::Iter as SliceIter,
str::{FromStr, Split},
},
};
struct ClaimIterator<'s> {
input: Split<'s, char>,
}
impl<'s> ClaimIterator<'s> {
pub fn new(input: &'s str) -> Self {
ClaimIterator {
input: input.split('\n'),
}
}
}
#[derive(Debug, Deserialize, ReParse)]
#[re_parse(
regex = r#"#(?P<id>\d{1,4}) @ (?P<left>\d{1,3}),(?P<top>\d{1,3}): (?P<width>\d{1,2})x(?P<height>\d{1,2})"#
)]
struct RawClaim {
id: usize,
left: usize,
top: usize,
width: usize,
height: usize,
}
#[derive(Clone, Debug)]
struct Claim {
id: usize,
left: usize,
top: usize,
right: usize,
bottom: usize,
}
impl Claim {
// FIXME: This is actually wrong, and I've just compensated by making `intersects` inclusive.
// There should be no need to call this twice!
fn contains_edge_of(&self, other: &Self) -> (bool, bool) {
let intersects_horizontally = {
let bottom_in_horizontal_band = self.bottom > other.top && self.bottom <= other.bottom;
let top_in_horizontal_band = self.top >= other.top && self.top < other.bottom;
bottom_in_horizontal_band || top_in_horizontal_band
};
let intersects_vertically = {
let left_in_vertical_band = self.left >= other.left && self.left < other.right;
let right_in_vertical_band = self.right > other.left && self.right <= other.right;
left_in_vertical_band || right_in_vertical_band
};
(intersects_horizontally, intersects_vertically)
}
pub fn intersects(&self, other: &Self) -> bool {
let (self_contains_horiz, self_contains_vert) = self.contains_edge_of(other);
let (other_contains_horiz, other_contains_vert) = other.contains_edge_of(self);
(self_contains_horiz || other_contains_horiz) && (self_contains_vert || other_contains_vert)
}
}
#[test]
fn test_intersection() {
const CLAIM_TO_COMPARE_TO: &'static str = "#0 @ 2,2: 3x3";
let claim: Claim = CLAIM_TO_COMPARE_TO.parse().unwrap();
for other in &[
// Close but not touching
"#0 @ 1,1: 1x1",
"#0 @ 2,1: 1x1",
"#0 @ 3,1: 1x1",
"#0 @ 4,1: 1x1",
"#0 @ 5,1: 1x1",
"#0 @ 5,2: 1x1",
"#0 @ 5,3: 1x1",
"#0 @ 5,4: 1x1",
"#0 @ 5,5: 1x1",
"#0 @ 4,5: 1x1",
"#0 @ 3,5: 1x1",
"#0 @ 2,5: 1x1",
"#0 @ 1,5: 1x1",
"#0 @ 1,4: 1x1",
"#0 @ 1,3: 1x1",
"#0 @ 1,2: 1x1",
// Way out there
] {
if claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is not supposed to intersect {:?}", other, claim);
}
}
for other in &[
// Same thing
CLAIM_TO_COMPARE_TO,
// Other encompasses first
"#0 @ 1,1: 5x5",
// First encompasses other
"#0 @ 3,3: 1x1",
// Edges
"#0 @ 1,1: 2x2",
"#0 @ 2,1: 2x2",
"#0 @ 3,1: 2x2",
"#0 @ 3,2: 2x2",
"#0 @ 3,3: 2x2",
"#0 @ 2,3: 2x2",
"#0 @ 1,3: 2x2",
"#0 @ 1,2: 2x2",
] {
if !claim.intersects(&other.parse().unwrap()) {
panic!("{:?} is supposed to intersect {:?}", other, claim);
}
}
// Other failing cases found
fn intersects(s1: &str, s2: &str) -> bool {
s1.parse::<Claim>()
.unwrap()
.intersects(&s2.parse().unwrap())
}
//"#1236 @ ".parse().unwrap()
assert!(intersects(
"#1236 @ 420,613: 19x12",
"#344 @ 426,611: 12x21"
));
}
#[derive(Debug)]
enum ClaimParseError {
ParseFailed(ReParseError),
InvalidDimensions(usize, usize),
}
impl FromStr for Claim {
type Err = ClaimParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use self::ClaimParseError::*;
let RawClaim {
id,
left,
top,
width,
height,
} = RawClaim::from_str(s).map_err(ParseFailed)?;
if width == 0 || height == 0 {
return Err(InvalidDimensions(width, height));
}
Ok(Self {
id,
left,
top,
right: left.checked_add(width).unwrap(),
bottom: top.checked_add(height).unwrap(),
})
}
}
impl<'s> Iterator for ClaimIterator<'s> {
type Item = Claim;
fn next(&mut self) -> Option<Self::Item> {
match self.input.next()? {
"" => None,
other => Some(other.parse().unwrap()),
}
}
}
struct GrowOnlyGrid<T> {
inner: Vec<T>,
len_x: usize,
len_y: usize,
}
impl<T> GrowOnlyGrid<T> {
pub fn new_with<F: FnMut() -> T>(x: usize, y: usize, mut f: F) -> Self {
Self {
inner: {
let len = x.checked_mul(y).unwrap();
let mut inner = Vec::with_capacity(len);
// OPT: Use the soon-to-be-stable `resize_with` instead.
while inner.len() < len {
inner.push(f());
}
inner
},
len_x: x,
len_y: y,
}
}
pub fn grow_with<F: FnMut() -> T>(&mut self, x: usize, y: usize, f: F)
where
T: Default,
{
let old_len_x = self.len_x;
let old_len_y = self.len_y;
let old = replace(
self,
Self::new_with(max(x, old_len_x), max(y, old_len_y), f),
);
let mut old_values = old.inner.into_iter();
for y in 0..old_len_y {
// OPT: We could probably just copy slices here directly
for x in 0..old_len_x {
let idx = unsafe { self.index_from_coords_unchecked(x, y) };
self.inner[idx] = old_values.next().unwrap();
}
}
}
pub fn dimensions(&self) -> (usize, usize) {
(self.len_x, self.len_y)
}
fn index_from_coords(&self, x: usize, y: usize) -> usize {
if x >= self.len_x || y >= self.len_y {
panic!(
"coordinates {:?} exceed current dimensions of {:?}",
(x, y),
self.dimensions()
);
}
unsafe { self.index_from_coords_unchecked(x, y) }
}
unsafe fn index_from_coords_unchecked(&self, x: usize, y: usize) -> usize {
y * self.len_x + x
}
}
impl<T> Index<(usize, usize)> for GrowOnlyGrid<T> {
type Output = T;
fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
let idx = self.index_from_coords(x, y);
&self.inner[idx]
}
}
impl<T> IndexMut<(usize, usize)> for GrowOnlyGrid<T> {
fn index_mut(&mut self, (x, y): (usize, usize)) -> &mut Self::Output {
let idx = self.index_from_coords(x, y);
&mut self.inner[idx]
}
}
impl<T> GrowOnlyGrid<T> {
pub fn iter_flat(&self) -> SliceIter<T> {
self.inner[..].iter()
}
}
#[aoc(day3, part1)]
pub fn day3_part1(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
for claim in ClaimIterator::new(input) {
let Claim {
id: _,
left,
top,
right,
bottom,
} = claim;
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in top..bottom {
for x in left..right {
let blarg = &mut grid[(x, y)];
*blarg = blarg.checked_add(1).unwrap();
}
}
}
grid.iter_flat().filter(|x| x > &&1).count()
}
#[cfg(test)]
const INPUT: &'static str = include_str!("../input/2018/day3.txt");
#[cfg(test)]
const HINT_INPUT: &'static str = r#"#1 @ 1,3: 4x4
#2 @ 3,1: 4x4
#3 @ 5,5: 2x2
"#;
#[cfg(test)]
const HINT_EXPECTED_PART1_OUTPUT: usize = 4;
#[cfg(test)]
const HINT_EXPECTED_PART2_OUTPUT: usize = 3;
#[cfg(test)]
const EXPECTED_PART2_OUTPUT: usize = 603;
#[test]
fn test_day3_part1_hint() |
#[aoc(day3, part2, square_iteration)]
pub fn day3_part2_square_iteration(input: &str) -> usize {
// OPT: Use ArrayVec for even more performance? Depends on max size.
// OR OPT: Pre-allocating might be beneficial here, not sure how `size_hint` works for char
// splits.
let mut claims = ClaimIterator::new(input)
.map(|c| (c, true))
.collect::<Vec<_>>();
for i in 0..claims.len() {
for j in i + 1..claims.len() {
if claims[i].0.intersects(&claims[j].0) {
(&mut claims[i]).1 = false;
(&mut claims[j]).1 = false;
}
}
}
let uncontested = claims
.into_iter()
.filter_map(|(c, uncontested)| if uncontested { Some(c) } else { None })
.collect::<Vec<_>>();
if uncontested.len() != 1 {
panic!("Expected single remaining claim, got {:?}", uncontested);
}
uncontested[0].id
}
#[test]
fn test_day3_part2_square_iteration_hint() {
assert_eq!(
day3_part2_square_iteration(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_square_iteration_answer() {
assert_eq!(day3_part2_square_iteration(INPUT), EXPECTED_PART2_OUTPUT);
}
#[aoc(day3, part2, grid_again)]
pub fn day3_part2_grid_again(input: &str) -> usize {
let mut grid = GrowOnlyGrid::<u8>::new_with(1000, 1000, Default::default);
let claims = ClaimIterator::new(input).collect::<Vec<_>>();
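// Same counter-grid idea as part 1: stamp all claims first, then keep the claim whose cells were all stamped exactly once.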
for Claim {
id: _,
left,
top,
right,
bottom,
} in claims.iter()
{
grid.grow_with(
right.checked_add(1).unwrap(),
bottom.checked_add(1).unwrap(),
Default::default,
);
for y in *top..*bottom {
for x in *left..*right {
*(&mut grid[(x, y)]) += 1;
}
}
}
let uncontested = claims
.into_iter()
.filter(
|Claim {
left,
top,
bottom,
right,
..
}| {
for y in *top..*bottom {
for x in *left..*right {
let count = grid[(x, y)];
assert!(count != 0);
if count > 1 {
return false;
}
}
}
true
},
)
.collect::<Vec<_>>();
assert_eq!(uncontested.len(), 1);
uncontested[0].id
}
#[test]
fn test_day3_part2_grid_again_hint() {
assert_eq!(
day3_part2_grid_again(HINT_INPUT),
HINT_EXPECTED_PART2_OUTPUT
);
}
#[test]
fn test_day3_part2_grid_again_answer() {
assert_eq!(day3_part2_grid_again(INPUT), EXPECTED_PART2_OUTPUT);
}
| {
assert_eq!(day3_part1(HINT_INPUT), HINT_EXPECTED_PART1_OUTPUT);
} | identifier_body |
livestream.rs | use std::collections::{HashMap, BTreeMap};
use tokio::sync::mpsc::*;
use std::{thread, fs};
use crate::camera::CameraProvider;
use std::sync::Arc;
use std::cell::RefCell;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::error::ErrorKind;
use std::io::Write;
use std::sync::mpsc as bchan;
pub type VideoFrame=(Vec<u8>, usize, usize, usize);
use crate::inference_engine::{start_inference_service, InfererHandler};
use crate::time_now;
// 10 Frames as a batch.
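// data holds the ten encoded frames back to back, sizes gives each frame's byte length, and capture_timestamps records when each frame was grabbed; the infer_* fields are presumably filled in by the inference service.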
pub struct VideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10],
pub infer_timestamps: [usize; 10],
pub infer_results: [usize; 10]
}
pub struct MutableVideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10]
}
pub type VideoBatch=Arc<VideoBatchContent>;
pub type MutableVideoBatch=Box<MutableVideoBatchContent>;
pub enum IncomingMessage{
CameraShot(VideoBatch),
FrameReq(usize, usize),
ClientJoin(Sender<OutcomingMessage>),
ClientQuit(usize),
QueryInfo(usize)
}
pub struct StreamInfo{
pub current_range: (usize, usize),
pub h264_header: Arc<Vec<u8>>
}
pub enum OutcomingMessage{
FrameArrive(Result<VideoBatch, (usize, usize)>),
ClientID(usize),
CurrentInfo(StreamInfo)
}
// A simple single-threaded ring buffer.
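// Because start == end is treated as empty, the buffer retains at most size - 1 items: pushing a fourth value into a RingBuffer::new(3) evicts the oldest and leaves the readable range at [2, 4).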
pub struct RingBuffer<T: Clone>{
data: Vec<Option<T>>,
size: usize,
start: usize,
end: usize,
offset: usize, // logical index of the oldest retained element (maps to `start`)
next_index: usize // logical index of the next element to be pushed (maps to `end`)
}
impl<T:Clone> RingBuffer<T>{
pub fn new(size: usize)->RingBuffer<T>{
assert!(size>1);
let mut v=Vec::new();
for i in 0..size{
v.push(None);
}
RingBuffer{
data: v,
size,
start: 0,
end: 0,
offset: 0,
next_index: 0
}
}
pub fn info(&self){
println!("<RingBuffer size={}, start={}, end={}, offset={}, next_index={}>", self.size, self.start, self.end, self.offset, self.next_index);
}
pub fn fetch(&mut self, index: usize)->Option<T>{
//println!("fetching frame {} from [{}, {})", index, self.offset, self.next_index);
if index<self.offset || index>=self.next_index{
return None;
}
let mut idx=index-self.offset+self.start;
if idx>=self.size{
idx-=self.size;
}
Some(self.data[idx].as_ref().unwrap().clone())
}
pub fn push(&mut self, value: T){
let index=self.next_index;
self.next_index=index+1;
self.data[self.end]=Some(value);
self.end+=1;
if self.end>=self.size{
self.end-=self.size;
}
if self.end==self.start{ // The ring-buffer is full. Push start ahead.
self.start+=1;
if self.start>=self.size{
self.start-=self.size;
}
self.offset+=1;
}
}
pub fn current_range(&self)->(usize, usize){
(self.offset, self.next_index)
}
pub fn fetch_with_err(&mut self, index: usize)->Result<T, (usize, usize)>{
match self.fetch(index){
Some(x)=>Ok(x),
None=>Err(self.current_range())
}
}
}
pub struct LiveStream{
next_client_id: usize,
clients: BTreeMap<usize, Sender<OutcomingMessage>>,
cached_frames: RingBuffer<VideoBatch>,
channel: (Sender<IncomingMessage>, Receiver<IncomingMessage>),
first_frame: Option<Arc<Vec<u8>>>
}
impl LiveStream{
pub fn | ()->Self{
LiveStream{
next_client_id: 0,
clients: BTreeMap::new(),
cached_frames: RingBuffer::new(20),
channel: channel(5),
first_frame: None
}
}
pub fn get_sender(&self)->Sender<IncomingMessage>{
self.channel.0.clone()
}
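/// Wires the pipeline together: a dedicated thread captures 10-frame batches and hands them to the inference service, while a tokio task owns the stream state and serves messages arriving on its channel.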
pub fn start(mut self, mut camera: Box<CameraProvider>, mut inferer: Box<InfererHandler>, runtime: &mut tokio::runtime::Runtime)->Sender<IncomingMessage>{
let mut sender=self.get_sender();
let ret=sender.clone();
println!("Taking first frame");
//let mut camera=camera.take().unwrap();
self.first_frame=Some(camera.h264_header());
//let mut inferer=inferer.take().unwrap();
// Start camera thread.
std::thread::spawn(move ||{
let mut i:usize=0;
use std::time::Instant;
let mut now = Instant::now();
loop {
//println!("camera {}", i);
i=i+1;
let msg=Box::new({
let mut buffer=Vec::new();
buffer.reserve(640*480*3*10);
let mut timestamps=[0 as usize; 10];
let mut old_size=0;
let mut sizes=[0; 10];
for i in 0..=9{
camera.capture_zerocopy(&mut buffer).unwrap();
timestamps[i]=time_now();
sizes[i]=buffer.len()-old_size;
old_size=buffer.len();
}
MutableVideoBatchContent{data: buffer, sizes, capture_timestamps: timestamps}
});
/*
let mut msg= ({
let mut data: [std::mem::MaybeUninit<Option<(Vec<u8>, usize)>>; 10] = unsafe {
std::mem::MaybeUninit::uninit().assume_init()
};
for elem in &mut data[..] {
unsafe { std::ptr::write(elem.as_mut_ptr(), Some({
let pic=camera.capture().unwrap();
let stamp=time_now();
(pic, stamp)
})); }
}
let batch=unsafe { std::mem::transmute::<_, [Option<(Vec<u8>, usize)>; 10]>(data) };
//let mut file = fs::File::create(&format!("frame-{}.264", i)).unwrap();
//for i in batch.iter(){
// file.write_all(&i.0).unwrap();
//}
batch
});
*/
//println!("sending to inferer");
inferer.send(msg).unwrap();
//println!("sent");
/*
loop {
let ret=sender.try_send(msg);
match ret{
Ok(())=>{
break;
}
Err(TrySendError{kind: ErrorKind::NoCapacity, value:p})=>{
msg=p;
}
Err(TrySendError{kind: ErrorKind::Closed, value:p})=>{
panic!("Closed!");
}
}
}
*/
if i%2==0{
let elapsed = now.elapsed();
let sec = (elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1000_000_000.0);
println!("i={} sec={} FPS={}", i*10, sec, 20.0/sec);
now = Instant::now();
}
//std::thread::sleep(std::time::Duration::new(1, 0));
}
});
// Start tokio coroutine
runtime.spawn (async move {
loop{
let msg=self.channel.1.recv().await.unwrap();
self.handle_message(msg).await;
}
});
return ret;
}
pub async fn handle_message(&mut self, msg: IncomingMessage){
match msg{
IncomingMessage::CameraShot(video)=>{
self.cached_frames.push(video);
//self.cached_frames.info();
}
IncomingMessage::FrameReq(client_id, frame_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::FrameArrive(self.cached_frames.fetch_with_err(frame_id))).await.ok().unwrap();
}
IncomingMessage::ClientJoin(sender)=>{
let id=self.next_client_id;
self.next_client_id+=1;
sender.clone().send(OutcomingMessage::ClientID(id)).await.ok().unwrap();
self.clients.insert(id, sender.clone());
}
IncomingMessage::ClientQuit(client_id)=>{
self.clients.remove(&client_id);
}
IncomingMessage::QueryInfo(client_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::CurrentInfo(StreamInfo{
current_range: self.cached_frames.current_range(),
h264_header: Arc::clone(&self.first_frame.as_ref().unwrap())
})).await.ok().unwrap();
}
}
}
}
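/// Async client handle: connect registers with the stream and obtains an id, after which each request/response pair travels over the client's private channel.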
pub struct LiveStreamClient{
index: usize,
stream: Sender<IncomingMessage>,
receiver: Receiver<OutcomingMessage>
}
impl LiveStreamClient{
pub async fn connect(stream: Sender<IncomingMessage>)->LiveStreamClient{
let (tx, mut rx)=channel(5);
stream.clone().send(IncomingMessage::ClientJoin(tx)).await.ok().unwrap();
match rx.recv().await.unwrap() {
OutcomingMessage::ClientID(index)=>{
LiveStreamClient{
index,
stream,
receiver: rx
}
}
_=>unreachable!()
}
}
pub async fn stream_info(&mut self)->StreamInfo{
self.stream.clone().send(IncomingMessage::QueryInfo(self.index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::CurrentInfo(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn request_batch(&mut self, index: usize)->Result<VideoBatch, (usize, usize)>{
self.stream.clone().send(IncomingMessage::FrameReq(self.index, index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::FrameArrive(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn destroy(&mut self){
self.stream.clone().send(IncomingMessage::ClientQuit(self.index)).await.ok().unwrap();
}
} | new | identifier_name |
livestream.rs | use std::collections::{HashMap, BTreeMap};
use tokio::sync::mpsc::*;
use std::{thread, fs};
use crate::camera::CameraProvider;
use std::sync::Arc;
use std::cell::RefCell;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::error::ErrorKind;
use std::io::Write;
use std::sync::mpsc as bchan;
pub type VideoFrame=(Vec<u8>, usize, usize, usize);
use crate::inference_engine::{start_inference_service, InfererHandler};
use crate::time_now;
// 10 Frames as a batch.
pub struct VideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10],
pub infer_timestamps: [usize; 10],
pub infer_results: [usize; 10]
}
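// `data` holds the ten encoded frames back to back; `sizes[i]` is the byte
// length of frame i, so individual frame boundaries can be recovered by
// walking `sizes`. The timestamp/result arrays are indexed the same way,
// one entry per frame; the infer_* fields are presumably filled in by the
// inference service before the batch is published.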
pub struct MutableVideoBatchContent{
pub data: Vec<u8>,
pub sizes: [usize; 10],
pub capture_timestamps: [usize; 10]
}
pub type VideoBatch=Arc<VideoBatchContent>;
pub type MutableVideoBatch=Box<MutableVideoBatchContent>;
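// Illustrative sketch: recovering the individual frame slices from a batch
// by walking the per-frame sizes. `batch_frames` is a hypothetical helper,
// not part of the streaming API above.
#[allow(dead_code)]
fn batch_frames(batch: &VideoBatchContent) -> Vec<&[u8]> {
    let mut frames = Vec::with_capacity(batch.sizes.len());
    let mut offset = 0;
    for &len in batch.sizes.iter() {
        // Each frame occupies the next `len` bytes of the concatenated buffer.
        frames.push(&batch.data[offset..offset + len]);
        offset += len;
    }
    frames
}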
pub enum IncomingMessage{
CameraShot(VideoBatch),
FrameReq(usize, usize),
ClientJoin(Sender<OutcomingMessage>),
ClientQuit(usize),
QueryInfo(usize)
}
pub struct StreamInfo{
pub current_range: (usize, usize),
pub h264_header: Arc<Vec<u8>>
}
pub enum OutcomingMessage{
FrameArrive(Result<VideoBatch, (usize, usize)>),
ClientID(usize),
CurrentInfo(StreamInfo)
}
// A simple single-threaded ring buffer.
pub struct RingBuffer<T: Clone>{
data: Vec<Option<T>>,
size: usize,
start: usize,
end: usize,
offset: usize, // logical (monotonic) index of the element currently stored at `start`
next_index: usize // logical index of the next element to be pushed (corresponds to `end`)
}
impl<T:Clone> RingBuffer<T>{
pub fn new(size: usize)->RingBuffer<T>{
assert!(size>1);
let mut v=Vec::new();
for i in 0..size{
| data: v,
size,
start: 0,
end: 0,
offset: 0,
next_index: 0
}
}
pub fn info(&self){
println!("<RingBuffer size={}, start={}, end={}, offset={}, next_index={}>", self.size, self.start, self.end, self.offset, self.next_index);
}
pub fn fetch(&mut self, index: usize)->Option<T>{
//println!("fetching frame {} from [{}, {})", index, self.offset, self.next_index);
if index<self.offset || index>=self.next_index{
return None;
}
let mut idx=index-self.offset+self.start;
if idx>=self.size{
idx-=self.size;
}
Some(self.data[idx].as_ref().unwrap().clone())
}
pub fn push(&mut self, value: T){
let index=self.next_index;
self.next_index=index+1;
self.data[self.end]=Some(value);
self.end+=1;
if self.end>=self.size{
self.end-=self.size;
}
if self.end==self.start{ // The ring-buffer is full. Push start ahead.
self.start+=1;
if self.start>=self.size{
self.start-=self.size;
}
self.offset+=1;
}
}
pub fn current_range(&self)->(usize, usize){
(self.offset, self.next_index)
}
pub fn fetch_with_err(&mut self, index: usize)->Result<T, (usize, usize)>{
match self.fetch(index){
Some(x)=>Ok(x),
None=>Err(self.current_range())
}
}
}
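// Illustrative sketch: minimal usage example for the RingBuffer above. As
// written, a buffer created with `new(n)` keeps at most `n - 1` items: when
// `end` catches up with `start`, the oldest entry is evicted by advancing
// `start`/`offset`.
#[cfg(test)]
mod ring_buffer_example {
    use super::RingBuffer;
    #[test]
    fn wraps_and_evicts_oldest() {
        let mut rb: RingBuffer<u32> = RingBuffer::new(3); // holds at most 2 items
        rb.push(10); // logical index 0
        rb.push(20); // logical index 1
        rb.push(30); // logical index 2; index 0 is evicted
        assert_eq!(rb.current_range(), (1, 3));
        assert_eq!(rb.fetch(0), None); // already evicted
        assert_eq!(rb.fetch(1), Some(20));
        assert_eq!(rb.fetch_with_err(0), Err((1, 3)));
    }
}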
pub struct LiveStream{
next_client_id: usize,
clients: BTreeMap<usize, Sender<OutcomingMessage>>,
cached_frames: RingBuffer<VideoBatch>,
channel: (Sender<IncomingMessage>, Receiver<IncomingMessage>),
first_frame: Option<Arc<Vec<u8>>>
}
impl LiveStream{
pub fn new()->Self{
LiveStream{
next_client_id: 0,
clients: BTreeMap::new(),
cached_frames: RingBuffer::new(20),
channel: channel(5),
first_frame: None
}
}
pub fn get_sender(&self)->Sender<IncomingMessage>{
self.channel.0.clone()
}
pub fn start(mut self, mut camera: Box<CameraProvider>, mut inferer: Box<InfererHandler>, runtime: &mut tokio::runtime::Runtime)->Sender<IncomingMessage>{
let mut sender=self.get_sender();
let ret=sender.clone();
println!("Taking first frame");
//let mut camera=camera.take().unwrap();
self.first_frame=Some(camera.h264_header());
//let mut inferer=inferer.take().unwrap();
// Start camera thread.
std::thread::spawn(move ||{
let mut i:usize=0;
use std::time::Instant;
let mut now = Instant::now();
loop {
//println!("camera {}", i);
i=i+1;
let msg=Box::new({
let mut buffer=Vec::new();
buffer.reserve(640*480*3*10);
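// Rough upper bound: ten 640x480 frames at 3 bytes per pixel. The encoded
// frames appended by capture_zerocopy are typically much smaller than this.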
let mut timestamps=[0 as usize; 10];
let mut old_size=0;
let mut sizes=[0; 10];
for i in 0..=9{
camera.capture_zerocopy(&mut buffer).unwrap();
timestamps[i]=time_now();
sizes[i]=buffer.len()-old_size;
old_size=buffer.len();
}
MutableVideoBatchContent{data: buffer, sizes, capture_timestamps: timestamps}
});
/*
let mut msg= ({
let mut data: [std::mem::MaybeUninit<Option<(Vec<u8>, usize)>>; 10] = unsafe {
std::mem::MaybeUninit::uninit().assume_init()
};
for elem in &mut data[..] {
unsafe { std::ptr::write(elem.as_mut_ptr(), Some({
let pic=camera.capture().unwrap();
let stamp=time_now();
(pic, stamp)
})); }
}
let batch=unsafe { std::mem::transmute::<_, [Option<(Vec<u8>, usize)>; 10]>(data) };
//let mut file = fs::File::create(&format!("frame-{}.264", i)).unwrap();
//for i in batch.iter(){
// file.write_all(&i.0).unwrap();
//}
batch
});
*/
//println!("sending to inferer");
inferer.send(msg).unwrap();
//println!("sent");
/*
loop {
let ret=sender.try_send(msg);
match ret{
Ok(())=>{
break;
}
Err(TrySendError{kind: ErrorKind::NoCapacity, value:p})=>{
msg=p;
}
Err(TrySendError{kind: ErrorKind::Closed, value:p})=>{
panic!("Closed!");
}
}
}
*/
if i%2==0{
let elapsed = now.elapsed();
let sec = (elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1_000_000_000.0);
println!("i={} sec={} FPS={}", i*10, sec, 20.0/sec);
now = Instant::now();
}
//std::thread::sleep(std::time::Duration::new(1, 0));
}
});
// Start tokio coroutine
runtime.spawn (async move {
loop{
let msg=self.channel.1.recv().await.unwrap();
self.handle_message(msg).await;
}
});
return ret;
}
pub async fn handle_message(&mut self, msg: IncomingMessage){
match msg{
IncomingMessage::CameraShot(video)=>{
self.cached_frames.push(video);
//self.cached_frames.info();
}
IncomingMessage::FrameReq(client_id, frame_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::FrameArrive(self.cached_frames.fetch_with_err(frame_id))).await.ok().unwrap();
}
IncomingMessage::ClientJoin(sender)=>{
let id=self.next_client_id;
self.next_client_id+=1;
sender.clone().send(OutcomingMessage::ClientID(id)).await.ok().unwrap();
self.clients.insert(id, sender.clone());
}
IncomingMessage::ClientQuit(client_id)=>{
self.clients.remove(&client_id);
}
IncomingMessage::QueryInfo(client_id)=>{
let sender=self.clients.get(&client_id).unwrap();
sender.clone().send(OutcomingMessage::CurrentInfo(StreamInfo{
current_range: self.cached_frames.current_range(),
h264_header: Arc::clone(&self.first_frame.as_ref().unwrap())
})).await.ok().unwrap();
}
}
}
}
pub struct LiveStreamClient{
index: usize,
stream: Sender<IncomingMessage>,
receiver: Receiver<OutcomingMessage>
}
impl LiveStreamClient{
pub async fn connect(stream: Sender<IncomingMessage>)->LiveStreamClient{
let (tx, mut rx)=channel(5);
stream.clone().send(IncomingMessage::ClientJoin(tx)).await.ok().unwrap();
match rx.recv().await.unwrap() {
OutcomingMessage::ClientID(index)=>{
LiveStreamClient{
index,
stream,
receiver: rx
}
}
_=>unreachable!()
}
}
pub async fn stream_info(&mut self)->StreamInfo{
self.stream.clone().send(IncomingMessage::QueryInfo(self.index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::CurrentInfo(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn request_batch(&mut self, index: usize)->Result<VideoBatch, (usize, usize)>{
self.stream.clone().send(IncomingMessage::FrameReq(self.index, index)).await.ok().unwrap();
match self.receiver.recv().await.unwrap(){
OutcomingMessage::FrameArrive(info)=>{
info
}
_=>unreachable!()
}
}
pub async fn destroy(&mut self){
self.stream.clone().send(IncomingMessage::ClientQuit(self.index)).await.ok().unwrap();
}
} | v.push(None);
}
RingBuffer{
| random_line_split |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn | (
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
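// A C prototype written as `f(void)` is parsed as a single parameter of
// type Void, so such a function is treated as having no parameters here.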
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
}
| store_stack_params | identifier_name |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable { | location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
} | data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(), | random_line_split |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
),
*location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else |
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
}
| {
// void function, return nothing
builder.ins().return_(&[]);
} | conditional_block |
mod.rs | mod expr;
mod static_init;
mod stmt;
use std::collections::{HashMap, VecDeque};
use std::convert::TryFrom;
use crate::data::{prelude::*, types::FunctionType, Initializer, Scope, StorageClass};
use cranelift::codegen::{
self,
ir::{
entities::StackSlot,
function::Function,
stackslot::{StackSlotData, StackSlotKind},
ExternalName, InstBuilder, MemFlags,
},
settings,
};
use cranelift::frontend::Switch;
use cranelift::prelude::{Block, FunctionBuilder, FunctionBuilderContext, Signature};
use cranelift_module::{self, Backend, DataId, FuncId, Linkage, Module};
enum Id {
Function(FuncId),
Global(DataId),
Local(StackSlot),
}
struct Compiler<T: Backend> {
module: Module<T>,
scope: Scope<InternedStr, Id>,
debug: bool,
// if false, we last saw a switch
last_saw_loop: bool,
strings: HashMap<Vec<u8>, DataId>,
loops: Vec<(Block, Block)>,
// switch, default, end
// if default is empty once we get to the end of a switch body,
// we didn't see a default case
switches: Vec<(Switch, Option<Block>, Block)>,
labels: HashMap<InternedStr, Block>,
error_handler: ErrorHandler,
}
/// Compile a program from a high level IR to a Cranelift Module
pub(crate) fn compile<B: Backend>(
module: Module<B>,
program: Vec<Locatable<Declaration>>,
debug: bool,
) -> (Result<Module<B>, CompileError>, VecDeque<CompileWarning>) {
// really we'd like to have all errors but that requires a refactor
let mut err = None;
let mut compiler = Compiler::new(module, debug);
for decl in program {
let current = match (decl.data.symbol.ctype.clone(), decl.data.init) {
(Type::Function(func_type), None) => compiler
.declare_func(
decl.data.symbol.id,
&func_type.signature(compiler.module.isa()),
decl.data.symbol.storage_class,
false,
)
.map(|_| ()),
(Type::Void, _) => unreachable!("parser let an incomplete type through"),
(Type::Function(func_type), Some(Initializer::FunctionBody(stmts))) => compiler
.compile_func(
decl.data.symbol.id,
func_type,
decl.data.symbol.storage_class,
stmts,
decl.location,
),
(_, Some(Initializer::FunctionBody(_))) => {
unreachable!("only functions should have a function body")
}
(_, init) => compiler.store_static(decl.data.symbol, init, decl.location),
};
if let Err(e) = current {
err = Some(e);
break;
}
}
let warns = compiler.error_handler.warnings;
if let Some(err) = err {
(Err(err), warns)
} else {
(Ok(compiler.module), warns)
}
}
impl<B: Backend> Compiler<B> {
fn new(module: Module<B>, debug: bool) -> Compiler<B> {
Compiler {
module,
scope: Scope::new(),
loops: Vec::new(),
switches: Vec::new(),
labels: HashMap::new(),
// the initial value doesn't really matter
last_saw_loop: true,
strings: Default::default(),
error_handler: Default::default(),
debug,
}
}
// we have to consider the following cases:
// 1. declaration before definition
// 2. 2nd declaration before definition
// 3. definition
// 4. declaration after definition
// 1. should declare `id` an import unless specified as `static`.
// 3. should always declare `id` as export or local.
// 2. and 4. should be a no-op.
fn declare_func(
&mut self,
id: InternedStr,
signature: &Signature,
sc: StorageClass,
is_definition: bool,
) -> CompileResult<FuncId> {
use crate::get_str;
if !is_definition {
// case 2 and 4
if let Some(Id::Function(func_id)) = self.scope.get(&id) {
return Ok(*func_id);
}
}
let linkage = match sc {
StorageClass::Auto | StorageClass::Extern if is_definition => Linkage::Export,
StorageClass::Auto | StorageClass::Extern => Linkage::Import,
StorageClass::Static => Linkage::Local,
StorageClass::Register | StorageClass::Typedef => unreachable!(),
};
let func_id = self
.module
.declare_function(get_str!(id), linkage, &signature)
.unwrap_or_else(|err| panic!("{}", err));
self.scope.insert(id, Id::Function(func_id));
Ok(func_id)
}
/// declare an object on the stack
fn declare_stack(
&mut self,
decl: Declaration,
location: Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
if let Type::Function(ftype) = decl.symbol.ctype {
self.declare_func(
decl.symbol.id,
&ftype.signature(self.module.isa()),
decl.symbol.storage_class,
false,
)?;
return Ok(());
}
let u64_size = match decl.symbol.ctype.sizeof() {
Ok(size) => size,
Err(err) => {
return Err(CompileError::semantic(Locatable {
data: err.into(),
location,
}))
}
};
let kind = StackSlotKind::ExplicitSlot;
let size = match u32::try_from(u64_size) {
Ok(size) => size,
Err(_) => return Err(CompileError::semantic(Locatable {
data: "cannot store items on the stack that are more than 4 GB, it will overflow the stack".into(),
location,
}))
};
let data = StackSlotData {
kind,
size,
offset: None,
};
let stack_slot = builder.create_stack_slot(data);
self.scope.insert(decl.symbol.id, Id::Local(stack_slot));
if let Some(init) = decl.init {
self.store_stack(init, stack_slot, builder)?;
}
Ok(())
}
fn store_stack(
&mut self,
init: Initializer,
stack_slot: StackSlot,
builder: &mut FunctionBuilder,
) -> CompileResult<()> {
match init {
Initializer::Scalar(expr) => {
let val = self.compile_expr(*expr, builder)?;
// TODO: replace with `builder.ins().stack_store(val.ir_val, stack_slot, 0);`
// when Cranelift implements stack_store for i8 and i16
let addr = builder.ins().stack_addr(Type::ptr_type(), stack_slot, 0);
builder.ins().store(MemFlags::new(), val.ir_val, addr, 0);
}
Initializer::InitializerList(_) => unimplemented!("aggregate dynamic initialization"),
Initializer::FunctionBody(_) => unreachable!("functions can't be stored on the stack"),
}
Ok(())
}
// TODO: this is grossly inefficient, ask Cranelift devs if
// there's an easier way to make parameters modifiable.
fn store_stack_params(
&mut self,
params: Vec<Symbol>,
func_start: Block,
location: &Location,
builder: &mut FunctionBuilder,
) -> CompileResult<()> | *location
),
Ok(size) => size,
};
let stack_data = StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: u32_size,
offset: None,
};
let slot = builder.create_stack_slot(stack_data);
// TODO: need to take the address before storing until Cranelift implements
// stores for i8 and i16
// then this can be replaced with `builder.ins().stack_store(ir_val, slot, 0);`
// See https://github.com/CraneStation/cranelift/issues/433
let addr = builder.ins().stack_addr(Type::ptr_type(), slot, 0);
builder.ins().store(MemFlags::new(), ir_val, addr, 0);
self.scope.insert(param.id, Id::Local(slot));
}
Ok(())
}
fn compile_func(
&mut self,
id: InternedStr,
func_type: FunctionType,
sc: StorageClass,
stmts: Vec<Stmt>,
location: Location,
) -> CompileResult<()> {
let signature = func_type.signature(self.module.isa());
let func_id = self.declare_func(id.clone(), &signature, sc, true)?;
self.scope.enter();
// external name is meant to be a lookup in a symbol table,
// but we just give it garbage values
let mut func = Function::with_name_signature(ExternalName::user(0, 0), signature);
// this context is just boilerplate
let mut ctx = FunctionBuilderContext::new();
let mut builder = FunctionBuilder::new(&mut func, &mut ctx);
let func_start = builder.create_block();
builder.switch_to_block(func_start);
let should_ret = func_type.should_return();
if func_type.has_params() {
self.store_stack_params(func_type.params, func_start, &location, &mut builder)?;
}
self.compile_all(stmts, &mut builder)?;
if !builder.is_filled() {
if id == InternedStr::get_or_intern("main") {
let ir_int = func_type.return_type.as_ir_type();
let zero = [builder.ins().iconst(ir_int, 0)];
builder.ins().return_(&zero);
} else if should_ret {
semantic_err!(
format!(
"expected a return statement before end of function '{}' returning {}",
id, func_type.return_type
),
location
);
} else {
// void function, return nothing
builder.ins().return_(&[]);
}
}
self.scope.exit();
builder.seal_all_blocks();
builder.finalize();
let flags = settings::Flags::new(settings::builder());
if self.debug {
println!("{}", func);
}
if let Err(err) = codegen::verify_function(&func, &flags) {
panic!(
"verification error: {}\nnote: while compiling {}",
err, func
);
}
let mut ctx = codegen::Context::for_function(func);
if let Err(err) = self.module.define_function(func_id, &mut ctx) {
panic!(
"definition error: {}\nnote: while compiling {}",
err, ctx.func
);
}
Ok(())
}
}
impl FunctionType {
fn has_params(&self) -> bool {
!(self.params.len() == 1 && self.params[0].ctype == Type::Void)
}
}
| {
// Cranelift requires that all block params are declared up front
let ir_vals: Vec<_> = params
.iter()
.map(|param| {
let ir_type = param.ctype.as_ir_type();
Ok(builder.append_block_param(func_start, ir_type))
})
.collect::<CompileResult<_>>()?;
for (param, ir_val) in params.into_iter().zip(ir_vals) {
let u64_size = match param.ctype.sizeof() {
Err(data) => semantic_err!(data.into(), *location),
Ok(size) => size,
};
let u32_size = match u32::try_from(u64_size) {
Err(_) => semantic_err!(
format!(
"size {} is too large for stack (can only handle 32-bit values)",
u64_size
), | identifier_body |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
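/// Decides whether the incoming `block_id` should extend or replace the
/// current chain head. Returns `true` when the locally claimed block in
/// progress was cancelled because the incoming block (or the fork it
/// belongs to) was committed instead; returns `false` when the incoming
/// block was failed, ignored, or could not be fetched.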
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
// Committing or resolving a fork if one exists
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
// Current block points to current head
// Check if block already claimed. Go on to
// compare duration then. Accept one of them
// and update it to be new chain head
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
// Check if the previous block is strictly in the
// cache. If so, look for common ancestor and resolve fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
} else {
info!("Not a valid fork.");
}
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
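// Fork-choice rule applied below: the longer fork (more blocks since the
// common ancestor) wins; on equal length the fork with the smaller
// aggregate chain clock wins; if the clocks are also equal, the block
// whose wait certificate carries the smaller duration_id wins.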
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
if chain_cc == fork_cc {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id); | // Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
fn delete_states_upto(
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
} | // Mark all blocks upto common ancestor
// in the chain as invalid. | random_line_split |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
// Committing or resolving a fork if one exists
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
// Current block points to current head
// Check if block already claimed. Go on to
// compare duration then. Accept one of them
// and update it to be new chain head
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
// Check if the previous block is strictly in the
// cache. If so, look for common ancestor and resolve fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
} else {
info!("Not a valid fork.");
}
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
if chain_cc == fork_cc | else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
                // Mark all blocks up to the common ancestor
// in the chain as invalid.
// Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
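// Walks back from `head` toward `ancestor`, deleting the stored consensus
// state of every block on the abandoned branch (at most `delete_len` hops).
// When a block's state is missing, the block itself is fetched from the
// service so the walk can continue via its previous_id.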
fn delete_states_upto(
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
}
| {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} | conditional_block |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
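/// Chooses between the current chain head and a newly received block, either
/// extending the chain, switching to the block's fork, or rejecting it.
/// Returns `true` when the locally claimed block in progress was cancelled
/// because the new block (or its fork) was accepted and committed.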
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
    // Committing, or resolving a fork if one exists.
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
    // The new block points to the current head. If a block has already been
    // claimed locally, compare the two durations, accept one of the blocks and
    // make it the new chain head.
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
    // Check if the previous block is only present in the cache, i.e. it has no
    // stored consensus state. If so, look for a common ancestor and resolve the fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
                } else {
                    info!("Not a valid fork.");
                    // The previous block is unavailable, so no common ancestor
                    // can be found; stop instead of retrying the same lookup.
                    break;
                }
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
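                        // Tie-break for equal fork lengths: the side with the
                        // smaller cumulative chain clock wins; if the clocks are
                        // also equal, the block whose wait certificate carries
                        // the smaller duration_id is preferred.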
if chain_cc == fork_cc {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
                // Mark all blocks up to the common ancestor
// in the chain as invalid.
// Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
fn | (
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone());
state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
}
| delete_states_upto | identifier_name |
fork_resolver.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use enclave_sgx::WaitCertificate;
use engine::consensus_state::*;
use engine::consensus_state_store::ConsensusStateStore;
use poet2_util;
use sawtooth_sdk::consensus::engine::*;
use serde_json;
use service::Poet2Service;
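/// Chooses between the current chain head and a newly received block, either
/// extending the chain, switching to the block's fork, or rejecting it.
/// Returns `true` when the locally claimed block in progress was cancelled
/// because the new block (or its fork) was accepted and committed.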
pub fn resolve_fork(
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
block_id: BlockId,
mut claim_block_dur: u64,
) -> bool {
let block_ = service.get_block(&block_id);
let mut published = false;
let chain_head = service.get_chain_head();
if block_.is_ok() {
let block = block_.unwrap();
let prev_block_ = service.get_block(&block.previous_id);
info!(
"Choosing between chain heads -- current: {:?} -- new: {:?}",
chain_head, block
);
    // Committing, or resolving a fork if one exists.
// Advance the chain if possible.
let new_block_dur = get_cert_from(&block).wait_time;
if claim_block_dur == 0 {
claim_block_dur = new_block_dur;
}
    // The new block points to the current head. If a block has already been
    // claimed locally, compare the two durations, accept one of the blocks and
    // make it the new chain head.
if block.block_num == (1 + chain_head.block_num) && block.previous_id == chain_head.block_id
{
debug!(
"New block duration {} Claim block duration {}",
new_block_dur, claim_block_dur
);
if new_block_dur <= claim_block_dur {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("New block extends current chain. Committing {:?}", block);
let agg_chain_clock = service.get_chain_clock() + new_block_dur;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
debug!(
"Storing cummulative cc = {} for blockId : {:?}",
agg_chain_clock,
block_id.clone()
);
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
} else {
info!("New block has larger duration. Failing {:?}", block);
service.fail_block(block_id);
}
}
    // Check if the previous block is only present in the cache, i.e. it has no
    // stored consensus state. If so, look for a common ancestor and resolve the fork.
else if prev_block_.is_ok() {
let prev_block = prev_block_.unwrap();
if state_store.get(prev_block.block_id).is_err() {
let mut cache_block = block.clone();
let block_state;
let mut block_state_;
let cc_upto_head = service.get_chain_clock();
let mut fork_cc: u64 = new_block_dur;
let mut fork_len: u64 = 1;
let mut cc_upto_ancestor = 0_u64;
let mut ancestor_found: bool = false;
info!("Looping over chain to find common ancestor.");
loop {
let cache_block_ = service.get_block(&cache_block.previous_id);
// If block's previous not in cache or statestore,
// break from loop and send block to cache
if cache_block_.is_ok() {
cache_block = cache_block_.unwrap();
if cache_block.block_num == 0 {
debug!("Genesis reached while finding common ancestor.");
ancestor_found = true;
break;
}
// get cc from certificate in cache_block
let ancestor_cc = get_cert_from(&cache_block).wait_time;
// Assuming here that we have the consensus state
// for each block that has been committed into the chain.
// Parse blocks from cache & states from the statestore
// to find a common ancestor.
// Keep account of the chainclocks from cache.
// Once common ancestor is found, compare the
// chainclocks of the forks to choose a fork
block_state_ = state_store.get(cache_block.block_id.clone());
if block_state_.is_ok() {
// Found common ancestor
info!("Found a common ancestor at block {:?}", block.clone());
ancestor_found = true;
block_state = block_state_.unwrap();
cc_upto_ancestor = block_state.aggregate_chain_clock;
break;
}
fork_cc += ancestor_cc;
fork_len += 1;
                } else {
                    info!("Not a valid fork.");
                    // The previous block is unavailable, so no common ancestor
                    // can be found; stop instead of retrying the same lookup.
                    break;
                }
}
let mut fork_won = false;
let mut chain_cc: u64 = 0;
if ancestor_found {
info!("Found a common ancestor. Comparing length.");
debug!(
"Chain clocks upto head = {}, upto common ancestor = {}",
cc_upto_head, cc_upto_ancestor
);
chain_cc = cc_upto_head - cc_upto_ancestor;
let chain_len: u64 = chain_head.block_num - cache_block.block_num;
if chain_len > fork_len {
fork_won = false;
} else if chain_len < fork_len {
fork_won = true;
}
// Fork lengths are equal
else {
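                        // Tie-break for equal fork lengths: the side with the
                        // smaller cumulative chain clock wins; if the clocks are
                        // also equal, the block whose wait certificate carries
                        // the smaller duration_id is preferred.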
if chain_cc == fork_cc {
fork_won = if get_cert_from(&block).duration_id
< get_cert_from(&chain_head).duration_id
{
true
} else {
false
};
} else {
fork_won = if fork_cc < chain_cc { true } else { false };
}
}
}
if fork_won {
info!("Discarding the block in progress.");
service.cancel_block();
published = true;
info!("Switching to fork.");
// fork_cc is inclusive of new block
let agg_chain_clock = cc_upto_ancestor + fork_cc;
let mut state = ConsensusState::default();
state.aggregate_chain_clock = agg_chain_clock;
debug!(
"Aggregate chain clock upto common ancestor = {}
Fork chain clock = {}. After switch aggregate = {}",
cc_upto_ancestor, fork_cc, agg_chain_clock
);
debug!("Storing cummulative cc = {}", agg_chain_clock);
state.estimate_info = EstimateInfo {
population_estimate: 0_f64,
previous_block_id: poet2_util::to_hex_string(&Vec::from(block.previous_id)),
validator_id: poet2_util::to_hex_string(&Vec::from(block.signer_id)),
};
state_store.put(block_id.clone(), state);
service.set_chain_clock(agg_chain_clock);
service.commit_block(block_id);
                // Mark all blocks up to the common ancestor
// in the chain as invalid.
// Delete states for all blocks not in chain
let chain_len_to_delete = chain_head.block_num - cache_block.block_num;
delete_states_upto(
cache_block.block_id,
chain_head.clone().block_id,
chain_len_to_delete,
service,
state_store,
);
} else {
info!("Not switching to fork");
service.ignore_block(block.block_id.clone());
}
}
}
}
published
// Fork Resolution done
}
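// Walks back from `head` toward `ancestor`, deleting the stored consensus
// state of every block on the abandoned branch (at most `delete_len` hops).
// When a block's state is missing, the block itself is fetched from the
// service so the walk can continue via its previous_id.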
fn delete_states_upto(
ancestor: BlockId,
head: BlockId,
delete_len: u64,
service: &mut Poet2Service,
state_store: &mut ConsensusStateStore,
) -> () | state_store.delete(next.clone());
next = BlockId::from(
state_
.unwrap()
.estimate_info
.previous_block_id
.as_bytes()
.to_vec(),
);
}
}
}
fn get_cert_from(block: &Block) -> WaitCertificate {
let payload = block.payload.clone();
debug!("Extracted payload from block: {:?}", payload.clone());
let (wait_certificate, _) = poet2_util::payload_to_wc_and_sig(&payload);
debug!("Serialized wait_cert : {:?}", &wait_certificate);
serde_json::from_str(&wait_certificate).unwrap()
}
| {
let mut next = head;
let mut count = 0_u64;
loop {
if ancestor == next || count >= delete_len {
break;
}
count += 1;
let state_ = state_store.get(next.clone());
if state_.is_err() {
debug!("State not found. Getting block via service.");
let block_ = service.get_block(&next);
if block_.is_ok() {
let block = block_.unwrap();
next = block.previous_id;
continue;
}
break;
} else {
debug!("Deleting state for {:?}", next.clone()); | identifier_body |
actions.rs | use crate::{
dkg_contract::{DKG as DKGContract, DKG_ABI},
opts::*,
};
use rand::{CryptoRng, RngCore};
use std::{fs::File, io::Write, sync::Arc};
use dkg_core::{
primitives::{joint_feldman::*, resharing::RDKG, *},
DKGPhase, Phase2Result,
};
use anyhow::Result;
use ethers::prelude::*;
use ethers::providers::Middleware;
use ethers::signers::LocalWallet;
use rustc_hex::{FromHex, ToHex};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use threshold_bls::{group::Curve, sig::Scheme};
use threshold_bls::{
poly::{Idx, PublicPoly},
sig::Share,
};
#[derive(Serialize, Deserialize, Debug)]
struct CeloKeypairJson {
address: Address,
#[serde(rename = "privateKey")]
private_key: String,
}
pub fn keygen<R>(opts: KeygenOpts, rng: &mut R) -> Result<()>
where
R: CryptoRng + RngCore,
{
let wallet = Wallet::new(rng);
let output = CeloKeypairJson {
private_key: hex::encode(&wallet.signer().to_bytes()),
address: wallet.address(),
};
if let Some(path) = opts.path {
let f = File::create(path)?;
serde_json::to_writer(&f, &output)?;
} else {
serde_json::to_writer(std::io::stdout(), &output)?;
}
Ok(())
}
pub async fn deploy(opts: DeployOpts) -> Result<()> {
// hard-code the contract's bytecode when deploying
let bytecode = include_str!["../dkg.bin"];
let bytecode = bytecode.from_hex::<Vec<u8>>()?;
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let abi = DKG_ABI.clone();
let factory = ContractFactory::new(abi, Bytes::from(bytecode), client);
let contract = factory
.deploy((opts.threshold as u64, opts.phase_duration as u64))?
.send()
.await?;
println!("Contract deployed at: {:?}", contract.address());
Ok(())
}
pub async fn allow(opts: AllowlistOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
for addr in opts.address {
let tx = contract.allowlist(addr).block(BlockNumber::Pending);
let tx = tx.send().await?.await?;
println!("Sent `allow` tx for {:?} (hash: {:?})", addr, tx);
}
Ok(())
}
pub async fn start(opts: StartOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
// Submit the tx and wait for the confirmation
let _tx_hash = contract.start().send().await?.await?;
Ok(())
}
pub async fn reshare<S, M, C, R>(opts: ReshareConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
M: Middleware,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
// we need the previous group and public poly for resharing
let previous_group = {
let previous_dkg = DKGContract::new(opts.previous_contract_address, client.clone());
let previous_group = previous_dkg.get_bls_keys().call().await?;
pubkeys_to_group::<C>(previous_group)?
};
let public_poly = opts.public_polynomial.from_hex::<Vec<u8>>()?;
let public_poly: PublicPoly<C> = bincode::deserialize(&public_poly)?;
let dkg = DKGContract::new(opts.contract_address, client.clone());
let (private_key, public_key) = S::keypair(rng);
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
let new_group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = if let Some(share) = opts.share {
let share = share.from_hex::<Vec<u8>>()?;
let share: Share<C::Scalar> = bincode::deserialize(&share)?;
let dkg_output = DKGOutput {
share,
qual: previous_group,
public: public_poly,
};
RDKG::new_from_share(private_key, dkg_output, new_group)
} else {
RDKG::new_member(private_key, previous_group, public_poly, new_group)
}?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
pub async fn run<S, C, R>(opts: DKGConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let dkg = DKGContract::new(opts.contract_address, client);
// 1. Generate the keys
let (private_key, public_key) = S::keypair(rng);
// 2. Register
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
// Get the group info
let group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = DKG::new(private_key, group)?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
async fn register<S: Scheme, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
public_key: &S::Public,
) -> Result<()> {
println!("Registering...");
let public_key_serialized = bincode::serialize(public_key)?;
let public_key_bytes = ethers::prelude::Bytes::from(public_key_serialized);
let _pending_tx = dkg.register(public_key_bytes).send().await?.await?;
// Wait for Phase 1
wait_for_phase(dkg, 1).await?;
Ok(())
}
async fn get_group<C: Curve, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
) -> Result<Group<C>> {
let group = dkg.get_bls_keys().call().await?;
let participants = dkg.get_participants().call().await?;
confirm_group(&group, participants)?;
let group = pubkeys_to_group::<C>(group)?;
Ok(group)
}
fn confirm_group(
pubkeys: &(U256, Vec<ethers::prelude::Bytes>),
participants: Vec<Address>,
) -> Result<()> {
// print some debug info
println!(
"Will run DKG with the group listed below and threshold {}",
pubkeys.0
);
for (bls_pubkey, address) in pubkeys.1.iter().zip(&participants) {
let key = bls_pubkey.to_vec().to_hex::<String>();
println!("{:?} -> {}", address, key)
}
    if !clt::confirm(
"\nDoes the above group look good to you?",
false,
"\n",
true,
) {
return Err(anyhow::anyhow!("User rejected group choice."));
}
Ok(())
}
// Pass the result of `get_bls_keys` to convert the raw data to a group
fn pubkeys_to_group<C: Curve>(pubkeys: (U256, Vec<ethers::prelude::Bytes>)) -> Result<Group<C>> {
let nodes = pubkeys
.1
.into_iter()
        .filter(|pubkey| !pubkey.to_vec().is_empty()) // skip users that did not register
.enumerate()
.map(|(i, pubkey)| {
let pubkey: C::Point = bincode::deserialize(&pubkey.to_vec()[..])?;
Ok(Node::<C>::new(i as Idx, pubkey))
})
.collect::<Result<_>>()?;
Ok(Group {
threshold: pubkeys.0.as_u64() as usize,
nodes,
})
}
// Shared helper for running the DKG in both normal and re-sharing mode
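// Each phase consumes what the previous one published on-chain: the shares
// gathered in phase 2 feed the response round, and the responses yield either
// the final output or a phase 3 fallback in which justifications settle any
// complaints.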
async fn run_dkg<P, C, R, M: Middleware + 'static>(
mut dkg: DKGContract<M>,
phase0: P,
rng: &mut R,
output_path: Option<String>,
) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
// S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
P: Phase0<C>,
R: RngCore,
{
// Run Phase 1 and publish to the chain
println!("Calculating and broadcasting our shares...");
let phase1 = phase0.run(&mut dkg, rng).await?;
// Wait for Phase 2
wait_for_phase(&dkg, 2).await?;
// Get the shares
let shares = dkg.get_shares().call().await?;
println!("Got {} shares...", shares.len());
let shares = parse_bundle(&shares)?;
println!("Parsed {} shares. Running Phase 2", shares.len());
let phase2 = phase1.run(&mut dkg, &shares).await?;
// Get the responses
let responses = dkg.get_responses().call().await?;
println!("Got {} responses...", responses.len());
let responses = parse_bundle(&responses)?;
println!("Parsed the responses. Getting result.");
// Run Phase 2
let result = match phase2.run(&mut dkg, &responses).await? {
Phase2Result::Output(out) => Ok(out),
// Run Phase 3 if Phase 2 errored
Phase2Result::GoToPhase3(phase3) => {
println!("There were complaints. Running Phase 3.");
wait_for_phase(&dkg, 3).await?;
let justifications = dkg.get_justifications().call().await?;
let justifications = parse_bundle(&justifications)?;
phase3.run(&mut dkg, &justifications).await
}
};
match result {
Ok(output) => |
Err(err) => Err(anyhow::anyhow!("DKG error: {}", err)),
}
}
#[derive(serde::Serialize, Debug)]
struct OutputJson {
#[serde(rename = "publicKey")]
public_key: String,
#[serde(rename = "publicPolynomial")]
public_polynomial: String,
#[serde(rename = "share")]
share: String,
}
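// Polls the contract until it reports the requested phase, sleeping roughly
// one Celo block (6 seconds) between checks.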
async fn wait_for_phase<M: Middleware>(
dkg: &DKGContract<M>,
num: u64,
) -> Result<(), ContractError<M>> {
println!("Waiting for Phase {} to start", num);
loop {
let phase = dkg.in_phase().call().await?;
if phase.as_u64() == num {
break;
}
print!(".");
// 6s for 1 Celo block
tokio::time::sleep(std::time::Duration::from_millis(6000)).await;
}
println!("\nIn Phase {}. Moving to the next step.", num);
Ok(())
}
fn parse_bundle<D: serde::de::DeserializeOwned>(
bundle: &[ethers::prelude::Bytes],
) -> Result<Vec<D>> {
bundle
.iter()
        .filter(|item| !item.to_vec().is_empty()) // filter out empty items
.map(|item| Ok(bincode::deserialize::<D>(&item.to_vec()[..])?))
.collect()
}
fn write_output<C: Curve, W: Write>(writer: W, out: &DKGOutput<C>) -> Result<()> {
let output = OutputJson {
public_key: hex::encode(&bincode::serialize(&out.public.public_key())?),
public_polynomial: hex::encode(&bincode::serialize(&out.public)?),
share: hex::encode(&bincode::serialize(&out.share)?),
};
serde_json::to_writer(writer, &output)?;
Ok(())
}
| {
println!("Success. Your share and threshold pubkey are ready.");
if let Some(path) = output_path {
let file = File::create(path)?;
write_output(&file, &output)?;
} else {
write_output(std::io::stdout(), &output)?;
}
Ok(())
} | conditional_block |
actions.rs | use crate::{
dkg_contract::{DKG as DKGContract, DKG_ABI},
opts::*,
};
use rand::{CryptoRng, RngCore};
use std::{fs::File, io::Write, sync::Arc};
use dkg_core::{
primitives::{joint_feldman::*, resharing::RDKG, *},
DKGPhase, Phase2Result,
};
use anyhow::Result;
use ethers::prelude::*;
use ethers::providers::Middleware;
use ethers::signers::LocalWallet;
use rustc_hex::{FromHex, ToHex};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use threshold_bls::{group::Curve, sig::Scheme};
use threshold_bls::{
poly::{Idx, PublicPoly},
sig::Share,
};
#[derive(Serialize, Deserialize, Debug)]
struct CeloKeypairJson {
address: Address,
#[serde(rename = "privateKey")]
private_key: String,
}
pub fn keygen<R>(opts: KeygenOpts, rng: &mut R) -> Result<()>
where
R: CryptoRng + RngCore,
{
let wallet = Wallet::new(rng);
let output = CeloKeypairJson {
private_key: hex::encode(&wallet.signer().to_bytes()),
address: wallet.address(),
};
if let Some(path) = opts.path {
let f = File::create(path)?;
serde_json::to_writer(&f, &output)?;
} else {
serde_json::to_writer(std::io::stdout(), &output)?;
}
Ok(())
}
pub async fn deploy(opts: DeployOpts) -> Result<()> {
// hard-code the contract's bytecode when deploying
let bytecode = include_str!["../dkg.bin"];
let bytecode = bytecode.from_hex::<Vec<u8>>()?;
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let abi = DKG_ABI.clone();
let factory = ContractFactory::new(abi, Bytes::from(bytecode), client);
let contract = factory
.deploy((opts.threshold as u64, opts.phase_duration as u64))?
.send()
.await?;
println!("Contract deployed at: {:?}", contract.address());
Ok(())
}
pub async fn allow(opts: AllowlistOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet); |
let contract = DKGContract::new(opts.contract_address, client);
for addr in opts.address {
let tx = contract.allowlist(addr).block(BlockNumber::Pending);
let tx = tx.send().await?.await?;
println!("Sent `allow` tx for {:?} (hash: {:?})", addr, tx);
}
Ok(())
}
pub async fn start(opts: StartOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
// Submit the tx and wait for the confirmation
let _tx_hash = contract.start().send().await?.await?;
Ok(())
}
pub async fn reshare<S, M, C, R>(opts: ReshareConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
M: Middleware,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
// we need the previous group and public poly for resharing
let previous_group = {
let previous_dkg = DKGContract::new(opts.previous_contract_address, client.clone());
let previous_group = previous_dkg.get_bls_keys().call().await?;
pubkeys_to_group::<C>(previous_group)?
};
let public_poly = opts.public_polynomial.from_hex::<Vec<u8>>()?;
let public_poly: PublicPoly<C> = bincode::deserialize(&public_poly)?;
let dkg = DKGContract::new(opts.contract_address, client.clone());
let (private_key, public_key) = S::keypair(rng);
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
let new_group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = if let Some(share) = opts.share {
let share = share.from_hex::<Vec<u8>>()?;
let share: Share<C::Scalar> = bincode::deserialize(&share)?;
let dkg_output = DKGOutput {
share,
qual: previous_group,
public: public_poly,
};
RDKG::new_from_share(private_key, dkg_output, new_group)
} else {
RDKG::new_member(private_key, previous_group, public_poly, new_group)
}?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
pub async fn run<S, C, R>(opts: DKGConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let dkg = DKGContract::new(opts.contract_address, client);
// 1. Generate the keys
let (private_key, public_key) = S::keypair(rng);
// 2. Register
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
// Get the group info
let group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = DKG::new(private_key, group)?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
async fn register<S: Scheme, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
public_key: &S::Public,
) -> Result<()> {
println!("Registering...");
let public_key_serialized = bincode::serialize(public_key)?;
let public_key_bytes = ethers::prelude::Bytes::from(public_key_serialized);
let _pending_tx = dkg.register(public_key_bytes).send().await?.await?;
// Wait for Phase 1
wait_for_phase(dkg, 1).await?;
Ok(())
}
async fn get_group<C: Curve, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
) -> Result<Group<C>> {
let group = dkg.get_bls_keys().call().await?;
let participants = dkg.get_participants().call().await?;
confirm_group(&group, participants)?;
let group = pubkeys_to_group::<C>(group)?;
Ok(group)
}
fn confirm_group(
pubkeys: &(U256, Vec<ethers::prelude::Bytes>),
participants: Vec<Address>,
) -> Result<()> {
// print some debug info
println!(
"Will run DKG with the group listed below and threshold {}",
pubkeys.0
);
for (bls_pubkey, address) in pubkeys.1.iter().zip(&participants) {
let key = bls_pubkey.to_vec().to_hex::<String>();
println!("{:?} -> {}", address, key)
}
    if !clt::confirm(
"\nDoes the above group look good to you?",
false,
"\n",
true,
) {
return Err(anyhow::anyhow!("User rejected group choice."));
}
Ok(())
}
// Pass the result of `get_bls_keys` to convert the raw data to a group
fn pubkeys_to_group<C: Curve>(pubkeys: (U256, Vec<ethers::prelude::Bytes>)) -> Result<Group<C>> {
let nodes = pubkeys
.1
.into_iter()
        .filter(|pubkey| !pubkey.to_vec().is_empty()) // skip users that did not register
.enumerate()
.map(|(i, pubkey)| {
let pubkey: C::Point = bincode::deserialize(&pubkey.to_vec()[..])?;
Ok(Node::<C>::new(i as Idx, pubkey))
})
.collect::<Result<_>>()?;
Ok(Group {
threshold: pubkeys.0.as_u64() as usize,
nodes,
})
}
// Shared helper for running the DKG in both normal and re-sharing mode
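// Each phase consumes what the previous one published on-chain: the shares
// gathered in phase 2 feed the response round, and the responses yield either
// the final output or a phase 3 fallback in which justifications settle any
// complaints.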
async fn run_dkg<P, C, R, M: Middleware + 'static>(
mut dkg: DKGContract<M>,
phase0: P,
rng: &mut R,
output_path: Option<String>,
) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
// S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
P: Phase0<C>,
R: RngCore,
{
// Run Phase 1 and publish to the chain
println!("Calculating and broadcasting our shares...");
let phase1 = phase0.run(&mut dkg, rng).await?;
// Wait for Phase 2
wait_for_phase(&dkg, 2).await?;
// Get the shares
let shares = dkg.get_shares().call().await?;
println!("Got {} shares...", shares.len());
let shares = parse_bundle(&shares)?;
println!("Parsed {} shares. Running Phase 2", shares.len());
let phase2 = phase1.run(&mut dkg, &shares).await?;
// Get the responses
let responses = dkg.get_responses().call().await?;
println!("Got {} responses...", responses.len());
let responses = parse_bundle(&responses)?;
println!("Parsed the responses. Getting result.");
// Run Phase 2
let result = match phase2.run(&mut dkg, &responses).await? {
Phase2Result::Output(out) => Ok(out),
// Run Phase 3 if Phase 2 errored
Phase2Result::GoToPhase3(phase3) => {
println!("There were complaints. Running Phase 3.");
wait_for_phase(&dkg, 3).await?;
let justifications = dkg.get_justifications().call().await?;
let justifications = parse_bundle(&justifications)?;
phase3.run(&mut dkg, &justifications).await
}
};
match result {
Ok(output) => {
println!("Success. Your share and threshold pubkey are ready.");
if let Some(path) = output_path {
let file = File::create(path)?;
write_output(&file, &output)?;
} else {
write_output(std::io::stdout(), &output)?;
}
Ok(())
}
Err(err) => Err(anyhow::anyhow!("DKG error: {}", err)),
}
}
#[derive(serde::Serialize, Debug)]
struct OutputJson {
#[serde(rename = "publicKey")]
public_key: String,
#[serde(rename = "publicPolynomial")]
public_polynomial: String,
#[serde(rename = "share")]
share: String,
}
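// Polls the contract until it reports the requested phase, sleeping roughly
// one Celo block (6 seconds) between checks.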
async fn wait_for_phase<M: Middleware>(
dkg: &DKGContract<M>,
num: u64,
) -> Result<(), ContractError<M>> {
println!("Waiting for Phase {} to start", num);
loop {
let phase = dkg.in_phase().call().await?;
if phase.as_u64() == num {
break;
}
print!(".");
// 6s for 1 Celo block
tokio::time::sleep(std::time::Duration::from_millis(6000)).await;
}
println!("\nIn Phase {}. Moving to the next step.", num);
Ok(())
}
fn parse_bundle<D: serde::de::DeserializeOwned>(
bundle: &[ethers::prelude::Bytes],
) -> Result<Vec<D>> {
bundle
.iter()
        .filter(|item| !item.to_vec().is_empty()) // filter out empty items
.map(|item| Ok(bincode::deserialize::<D>(&item.to_vec()[..])?))
.collect()
}
fn write_output<C: Curve, W: Write>(writer: W, out: &DKGOutput<C>) -> Result<()> {
let output = OutputJson {
public_key: hex::encode(&bincode::serialize(&out.public.public_key())?),
public_polynomial: hex::encode(&bincode::serialize(&out.public)?),
share: hex::encode(&bincode::serialize(&out.share)?),
};
serde_json::to_writer(writer, &output)?;
Ok(())
} | let client = Arc::new(client); | random_line_split |
actions.rs | use crate::{
dkg_contract::{DKG as DKGContract, DKG_ABI},
opts::*,
};
use rand::{CryptoRng, RngCore};
use std::{fs::File, io::Write, sync::Arc};
use dkg_core::{
primitives::{joint_feldman::*, resharing::RDKG, *},
DKGPhase, Phase2Result,
};
use anyhow::Result;
use ethers::prelude::*;
use ethers::providers::Middleware;
use ethers::signers::LocalWallet;
use rustc_hex::{FromHex, ToHex};
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use threshold_bls::{group::Curve, sig::Scheme};
use threshold_bls::{
poly::{Idx, PublicPoly},
sig::Share,
};
#[derive(Serialize, Deserialize, Debug)]
struct CeloKeypairJson {
address: Address,
#[serde(rename = "privateKey")]
private_key: String,
}
pub fn keygen<R>(opts: KeygenOpts, rng: &mut R) -> Result<()>
where
R: CryptoRng + RngCore,
{
let wallet = Wallet::new(rng);
let output = CeloKeypairJson {
private_key: hex::encode(&wallet.signer().to_bytes()),
address: wallet.address(),
};
if let Some(path) = opts.path {
let f = File::create(path)?;
serde_json::to_writer(&f, &output)?;
} else {
serde_json::to_writer(std::io::stdout(), &output)?;
}
Ok(())
}
pub async fn deploy(opts: DeployOpts) -> Result<()> {
// hard-code the contract's bytecode when deploying
let bytecode = include_str!["../dkg.bin"];
let bytecode = bytecode.from_hex::<Vec<u8>>()?;
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let abi = DKG_ABI.clone();
let factory = ContractFactory::new(abi, Bytes::from(bytecode), client);
let contract = factory
.deploy((opts.threshold as u64, opts.phase_duration as u64))?
.send()
.await?;
println!("Contract deployed at: {:?}", contract.address());
Ok(())
}
pub async fn allow(opts: AllowlistOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
for addr in opts.address {
let tx = contract.allowlist(addr).block(BlockNumber::Pending);
let tx = tx.send().await?.await?;
println!("Sent `allow` tx for {:?} (hash: {:?})", addr, tx);
}
Ok(())
}
pub async fn start(opts: StartOpts) -> Result<()> {
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let contract = DKGContract::new(opts.contract_address, client);
// Submit the tx and wait for the confirmation
let _tx_hash = contract.start().send().await?.await?;
Ok(())
}
pub async fn reshare<S, M, C, R>(opts: ReshareConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
M: Middleware,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
// we need the previous group and public poly for resharing
let previous_group = {
let previous_dkg = DKGContract::new(opts.previous_contract_address, client.clone());
let previous_group = previous_dkg.get_bls_keys().call().await?;
pubkeys_to_group::<C>(previous_group)?
};
let public_poly = opts.public_polynomial.from_hex::<Vec<u8>>()?;
let public_poly: PublicPoly<C> = bincode::deserialize(&public_poly)?;
let dkg = DKGContract::new(opts.contract_address, client.clone());
let (private_key, public_key) = S::keypair(rng);
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
let new_group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = if let Some(share) = opts.share {
let share = share.from_hex::<Vec<u8>>()?;
let share: Share<C::Scalar> = bincode::deserialize(&share)?;
let dkg_output = DKGOutput {
share,
qual: previous_group,
public: public_poly,
};
RDKG::new_from_share(private_key, dkg_output, new_group)
} else {
RDKG::new_member(private_key, previous_group, public_poly, new_group)
}?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
pub async fn run<S, C, R>(opts: DKGConfig, rng: &mut R) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
R: RngCore,
{
let provider = Provider::<Http>::try_from(opts.node_url.as_str())?;
let wallet = opts.private_key.parse::<LocalWallet>()?;
let client = SignerMiddleware::new(provider, wallet);
let client = Arc::new(client);
let dkg = DKGContract::new(opts.contract_address, client);
// 1. Generate the keys
let (private_key, public_key) = S::keypair(rng);
// 2. Register
register::<S, Provider<Http>, LocalWallet>(&dkg, &public_key).await?;
// Get the group info
let group = get_group::<C, Provider<Http>, LocalWallet>(&dkg).await?;
let phase0 = DKG::new(private_key, group)?;
run_dkg(dkg, phase0, rng, opts.output_path).await
}
async fn register<S: Scheme, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
public_key: &S::Public,
) -> Result<()> {
println!("Registering...");
let public_key_serialized = bincode::serialize(public_key)?;
let public_key_bytes = ethers::prelude::Bytes::from(public_key_serialized);
let _pending_tx = dkg.register(public_key_bytes).send().await?.await?;
// Wait for Phase 1
wait_for_phase(dkg, 1).await?;
Ok(())
}
async fn get_group<C: Curve, M: Middleware + 'static, Z: Signer + 'static>(
dkg: &DKGContract<SignerMiddleware<M, Z>>,
) -> Result<Group<C>> {
let group = dkg.get_bls_keys().call().await?;
let participants = dkg.get_participants().call().await?;
confirm_group(&group, participants)?;
let group = pubkeys_to_group::<C>(group)?;
Ok(group)
}
fn confirm_group(
pubkeys: &(U256, Vec<ethers::prelude::Bytes>),
participants: Vec<Address>,
) -> Result<()> {
// print some debug info
println!(
"Will run DKG with the group listed below and threshold {}",
pubkeys.0
);
for (bls_pubkey, address) in pubkeys.1.iter().zip(&participants) {
let key = bls_pubkey.to_vec().to_hex::<String>();
println!("{:?} -> {}", address, key)
}
    if !clt::confirm(
"\nDoes the above group look good to you?",
false,
"\n",
true,
) {
return Err(anyhow::anyhow!("User rejected group choice."));
}
Ok(())
}
// Pass the result of `get_bls_keys` to convert the raw data to a group
fn pubkeys_to_group<C: Curve>(pubkeys: (U256, Vec<ethers::prelude::Bytes>)) -> Result<Group<C>> {
let nodes = pubkeys
.1
.into_iter()
        .filter(|pubkey| !pubkey.to_vec().is_empty()) // skip users that did not register
.enumerate()
.map(|(i, pubkey)| {
let pubkey: C::Point = bincode::deserialize(&pubkey.to_vec()[..])?;
Ok(Node::<C>::new(i as Idx, pubkey))
})
.collect::<Result<_>>()?;
Ok(Group {
threshold: pubkeys.0.as_u64() as usize,
nodes,
})
}
// Shared helper for running the DKG in both normal and re-sharing mode
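// Each phase consumes what the previous one published on-chain: the shares
// gathered in phase 2 feed the response round, and the responses yield either
// the final output or a phase 3 fallback in which justifications settle any
// complaints.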
async fn | <P, C, R, M: Middleware +'static>(
mut dkg: DKGContract<M>,
phase0: P,
rng: &mut R,
output_path: Option<String>,
) -> Result<()>
where
C: Curve,
// We need to bind the Curve's Point and Scalars to the Scheme
// S: Scheme<Public = <C as Curve>::Point, Private = <C as Curve>::Scalar>,
P: Phase0<C>,
R: RngCore,
{
// Run Phase 1 and publish to the chain
println!("Calculating and broadcasting our shares...");
let phase1 = phase0.run(&mut dkg, rng).await?;
// Wait for Phase 2
wait_for_phase(&dkg, 2).await?;
// Get the shares
let shares = dkg.get_shares().call().await?;
println!("Got {} shares...", shares.len());
let shares = parse_bundle(&shares)?;
println!("Parsed {} shares. Running Phase 2", shares.len());
let phase2 = phase1.run(&mut dkg, &shares).await?;
// Get the responses
let responses = dkg.get_responses().call().await?;
println!("Got {} responses...", responses.len());
let responses = parse_bundle(&responses)?;
println!("Parsed the responses. Getting result.");
// Run Phase 2
let result = match phase2.run(&mut dkg, &responses).await? {
Phase2Result::Output(out) => Ok(out),
// Run Phase 3 if Phase 2 errored
Phase2Result::GoToPhase3(phase3) => {
println!("There were complaints. Running Phase 3.");
wait_for_phase(&dkg, 3).await?;
let justifications = dkg.get_justifications().call().await?;
let justifications = parse_bundle(&justifications)?;
phase3.run(&mut dkg, &justifications).await
}
};
match result {
Ok(output) => {
println!("Success. Your share and threshold pubkey are ready.");
if let Some(path) = output_path {
let file = File::create(path)?;
write_output(&file, &output)?;
} else {
write_output(std::io::stdout(), &output)?;
}
Ok(())
}
Err(err) => Err(anyhow::anyhow!("DKG error: {}", err)),
}
}
#[derive(serde::Serialize, Debug)]
struct OutputJson {
#[serde(rename = "publicKey")]
public_key: String,
#[serde(rename = "publicPolynomial")]
public_polynomial: String,
#[serde(rename = "share")]
share: String,
}
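// Polls the contract until it reports the requested phase, sleeping roughly
// one Celo block (6 seconds) between checks.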
async fn wait_for_phase<M: Middleware>(
dkg: &DKGContract<M>,
num: u64,
) -> Result<(), ContractError<M>> {
println!("Waiting for Phase {} to start", num);
loop {
let phase = dkg.in_phase().call().await?;
if phase.as_u64() == num {
break;
}
print!(".");
// 6s for 1 Celo block
tokio::time::sleep(std::time::Duration::from_millis(6000)).await;
}
println!("\nIn Phase {}. Moving to the next step.", num);
Ok(())
}
fn parse_bundle<D: serde::de::DeserializeOwned>(
bundle: &[ethers::prelude::Bytes],
) -> Result<Vec<D>> {
bundle
.iter()
        .filter(|item| !item.to_vec().is_empty()) // filter out empty items
.map(|item| Ok(bincode::deserialize::<D>(&item.to_vec()[..])?))
.collect()
}
fn write_output<C: Curve, W: Write>(writer: W, out: &DKGOutput<C>) -> Result<()> {
let output = OutputJson {
public_key: hex::encode(&bincode::serialize(&out.public.public_key())?),
public_polynomial: hex::encode(&bincode::serialize(&out.public)?),
share: hex::encode(&bincode::serialize(&out.share)?),
};
serde_json::to_writer(writer, &output)?;
Ok(())
}
| run_dkg | identifier_name |
transaction_verify_centre.rs | //! The `tvu` module implements the Transaction Validation Unit, a
//! multi-stage transaction validation pipeline in software.
//!
//! 1. BlobFetchStage
//! - Incoming blobs are picked up from the TVU sockets and repair socket.
//! 2. RetransmitStage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. ReplayStage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
//! 4. StorageStage
//! - Generating the keys used to encrypt the ledger and sample it for storage mining.
// use crate::bank_forks::BankForks;
use crate::treasury_forks::BankForks;
use crate::fetch_spot_stage::BlobFetchStage;
use crate::block_stream_service::BlockstreamService;
use crate::block_buffer_pool::{BlockBufferPool, CompletedSlotsReceiver};
use crate::node_group_info::NodeGroupInfo;
use crate::leader_arrange_cache::LeaderScheduleCache;
use crate::water_clock_recorder::WaterClockRecorder;
use crate::repeat_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::storage_stage::{StorageStage, StorageState};
use morgan_interface::hash::Hash;
use morgan_interface::pubkey::Pubkey;
use morgan_interface::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
pub struct Tvu {
fetch_stage: BlobFetchStage,
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
blockstream_service: Option<BlockstreamService>,
storage_stage: StorageStage,
}
pub struct Sockets {
pub fetch: Vec<UdpSocket>,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
}
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `node_group_info` - The node_group_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `block_buffer_pool` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
vote_account: &Pubkey,
voting_keypair: Option<&Arc<T>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
node_group_info: &Arc<RwLock<NodeGroupInfo>>,
sockets: Sockets,
block_buffer_pool: Arc<BlockBufferPool>,
storage_rotate_count: u64,
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
waterclock_recorder: &Arc<Mutex<WaterClockRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
genesis_blockhash: &Hash,
completed_slots_receiver: CompletedSlotsReceiver,
) -> Self
where
        T: 'static + KeypairUtil + Sync + Send,
{
let keypair: Arc<Keypair> = node_group_info
.read()
.expect("Unable to read from node_group_info during Tvu creation")
.keypair
.clone();
let Sockets {
repair: repair_socket,
fetch: fetch_sockets,
retransmit: retransmit_socket,
} = sockets;
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let repair_socket = Arc::new(repair_socket);
let mut blob_sockets: Vec<Arc<UdpSocket>> =
fetch_sockets.into_iter().map(Arc::new).collect();
blob_sockets.push(repair_socket.clone());
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);
        // TODO
        // The packets coming out of blob_receiver need to be sent to the GPU and verified,
        // then sent to the window, which does the erasure coding reconstruction.
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
block_buffer_pool.clone(),
&node_group_info,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
&exit,
genesis_blockhash,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);
let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
block_buffer_pool.clone(),
&bank_forks,
node_group_info.clone(),
&exit,
ledger_signal_receiver,
subscriptions,
waterclock_recorder,
leader_schedule_cache,
);
let blockstream_service = if blockstream.is_some() {
let blockstream_service = BlockstreamService::new(
slot_full_receiver,
block_buffer_pool.clone(),
blockstream.unwrap().to_string(),
&exit,
);
Some(blockstream_service)
} else {
None
};
let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
Some(block_buffer_pool),
&keypair,
storage_keypair,
&exit,
&bank_forks,
storage_rotate_count,
&node_group_info,
);
Tvu {
fetch_stage,
retransmit_stage,
replay_stage,
blockstream_service,
storage_stage,
}
}
}
impl Service for Tvu {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
self.retransmit_stage.join()?;
self.fetch_stage.join()?;
self.storage_stage.join()?;
if self.blockstream_service.is_some() {
self.blockstream_service.unwrap().join()?;
}
self.replay_stage.join()?;
Ok(())
}
}
use std::{borrow::Cow, convert, ffi::OsStr, path::Path};
static LICENSE_HEADER: &str = "Copyright (c) The Libra Core Contributors\n\
SPDX-License-Identifier: Apache-2.0\n\
";
#[allow(dead_code)]
pub(super) fn has_license_header(file: &Path, contents: &str) -> Result<(), Cow<'static, str>> {
enum FileType {
Rust,
Shell,
Proto,
}
let file_type = match file
.extension()
.map(OsStr::to_str)
.and_then(convert::identity)
{
Some("rs") => FileType::Rust,
Some("sh") => FileType::Shell,
Some("proto") => FileType::Proto,
_ => return Ok(()),
};
// Determine if the file is missing the license header
let missing_header = match file_type {
FileType::Rust | FileType::Proto => {
let maybe_license = contents
.lines()
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("// "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
FileType::Shell => {
let maybe_license = contents
.lines() | .skip_while(|line| line.starts_with("#!"))
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("# "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
};
if missing_header {
return Err("missing a license header".into());
}
Ok(())
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::treasury_stage::create_test_recorder;
use crate::block_buffer_pool::get_tmp_ledger_path;
use crate::node_group_info::{NodeGroupInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use morgan_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
fn test_tvu_exit() {
morgan_logger::setup();
let leader = Node::new_localhost();
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let starting_balance = 10_000;
        let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(starting_balance);
let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
//start cluster_info1
let mut cluster_info1 = NodeGroupInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let block_buffer_pool_path = get_tmp_ledger_path!();
let (block_buffer_pool, l_receiver, completed_slots_receiver) =
BlockBufferPool::open_by_message(&block_buffer_pool_path)
.expect("Expected to successfully open ledger");
let block_buffer_pool = Arc::new(block_buffer_pool);
let bank = bank_forks.working_bank();
let (exit, waterclock_recorder, waterclock_service, _entry_receiver) =
create_test_recorder(&bank, &block_buffer_pool);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
}
},
block_buffer_pool,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
l_receiver,
&Arc::new(RpcSubscriptions::default()),
&waterclock_recorder,
&leader_schedule_cache,
&exit,
&Hash::default(),
completed_slots_receiver,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
waterclock_service.join().unwrap();
}
} | random_line_split |
|
transaction_verify_centre.rs | //! The `tvu` module implements the Transaction Validation Unit, a
//! multi-stage transaction validation pipeline in software.
//!
//! 1. BlobFetchStage
//! - Incoming blobs are picked up from the TVU sockets and repair socket.
//! 2. RetransmitStage
//! - Blobs are windowed until a contiguous chunk is available. This stage also repairs and
//! retransmits blobs that are in the queue.
//! 3. ReplayStage
//! - Transactions in blobs are processed and applied to the bank.
//! - TODO We need to verify the signatures in the blobs.
//! 4. StorageStage
//! - Generating the keys used to encrypt the ledger and sample it for storage mining.
// use crate::bank_forks::BankForks;
use crate::treasury_forks::BankForks;
use crate::fetch_spot_stage::BlobFetchStage;
use crate::block_stream_service::BlockstreamService;
use crate::block_buffer_pool::{BlockBufferPool, CompletedSlotsReceiver};
use crate::node_group_info::NodeGroupInfo;
use crate::leader_arrange_cache::LeaderScheduleCache;
use crate::water_clock_recorder::WaterClockRecorder;
use crate::repeat_stage::ReplayStage;
use crate::retransmit_stage::RetransmitStage;
use crate::rpc_subscriptions::RpcSubscriptions;
use crate::service::Service;
use crate::storage_stage::{StorageStage, StorageState};
use morgan_interface::hash::Hash;
use morgan_interface::pubkey::Pubkey;
use morgan_interface::signature::{Keypair, KeypairUtil};
use std::net::UdpSocket;
use std::sync::atomic::AtomicBool;
use std::sync::mpsc::{channel, Receiver};
use std::sync::{Arc, Mutex, RwLock};
use std::thread;
pub struct Tvu {
fetch_stage: BlobFetchStage,
retransmit_stage: RetransmitStage,
replay_stage: ReplayStage,
blockstream_service: Option<BlockstreamService>,
storage_stage: StorageStage,
}
pub struct Sockets {
pub fetch: Vec<UdpSocket>,
pub repair: UdpSocket,
pub retransmit: UdpSocket,
}
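// A minimal sketch, not part of the original source: one way to assemble a `Sockets`
// bundle for local experiments is to bind throwaway UDP sockets on ephemeral ports
// (the helper name below is arbitrary). In the real validator the sockets come from
// `Node`, as `test_tvu_exit` at the bottom of this file shows.
#[cfg(test)]
#[allow(dead_code)]
fn localhost_sockets_sketch() -> std::io::Result<Sockets> {
    let bind = || UdpSocket::bind("127.0.0.1:0");
    Ok(Sockets {
        fetch: vec![bind()?, bind()?],
        repair: bind()?,
        retransmit: bind()?,
    })
}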
impl Tvu {
/// This service receives messages from a leader in the network and processes the transactions
/// on the bank state.
/// # Arguments
/// * `node_group_info` - The node_group_info state.
/// * `sockets` - fetch, repair, and retransmit sockets
/// * `block_buffer_pool` - the ledger itself
#[allow(clippy::new_ret_no_self, clippy::too_many_arguments)]
pub fn new<T>(
vote_account: &Pubkey,
voting_keypair: Option<&Arc<T>>,
storage_keypair: &Arc<Keypair>,
bank_forks: &Arc<RwLock<BankForks>>,
node_group_info: &Arc<RwLock<NodeGroupInfo>>,
sockets: Sockets,
block_buffer_pool: Arc<BlockBufferPool>,
storage_rotate_count: u64,
storage_state: &StorageState,
blockstream: Option<&String>,
ledger_signal_receiver: Receiver<bool>,
subscriptions: &Arc<RpcSubscriptions>,
waterclock_recorder: &Arc<Mutex<WaterClockRecorder>>,
leader_schedule_cache: &Arc<LeaderScheduleCache>,
exit: &Arc<AtomicBool>,
genesis_blockhash: &Hash,
completed_slots_receiver: CompletedSlotsReceiver,
) -> Self
where
T: 'static + KeypairUtil + Sync + Send,
{
let keypair: Arc<Keypair> = node_group_info
.read()
.expect("Unable to read from node_group_info during Tvu creation")
.keypair
.clone();
let Sockets {
repair: repair_socket,
fetch: fetch_sockets,
retransmit: retransmit_socket,
} = sockets;
let (blob_fetch_sender, blob_fetch_receiver) = channel();
let repair_socket = Arc::new(repair_socket);
let mut blob_sockets: Vec<Arc<UdpSocket>> =
fetch_sockets.into_iter().map(Arc::new).collect();
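        // The repair socket is polled alongside the TVU fetch sockets, so blobs received
        // via repair flow through the same fetch stage (see the module docs above).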
blob_sockets.push(repair_socket.clone());
let fetch_stage = BlobFetchStage::new_multi_socket(blob_sockets, &blob_fetch_sender, &exit);
//TODO
//the packets coming out of blob_receiver need to be sent to the GPU and verified
//then sent to the window, which does the erasure coding reconstruction
let retransmit_stage = RetransmitStage::new(
bank_forks.clone(),
leader_schedule_cache,
block_buffer_pool.clone(),
&node_group_info,
Arc::new(retransmit_socket),
repair_socket,
blob_fetch_receiver,
&exit,
genesis_blockhash,
completed_slots_receiver,
*bank_forks.read().unwrap().working_bank().epoch_schedule(),
);
let (replay_stage, slot_full_receiver, root_slot_receiver) = ReplayStage::new(
&keypair.pubkey(),
vote_account,
voting_keypair,
block_buffer_pool.clone(),
&bank_forks,
node_group_info.clone(),
&exit,
ledger_signal_receiver,
subscriptions,
waterclock_recorder,
leader_schedule_cache,
);
        let blockstream_service = if let Some(blockstream) = blockstream {
            Some(BlockstreamService::new(
                slot_full_receiver,
                block_buffer_pool.clone(),
                blockstream.to_string(),
                &exit,
            ))
        } else {
            None
        };
let storage_stage = StorageStage::new(
storage_state,
root_slot_receiver,
Some(block_buffer_pool),
&keypair,
storage_keypair,
&exit,
&bank_forks,
storage_rotate_count,
&node_group_info,
);
Tvu {
fetch_stage,
retransmit_stage,
replay_stage,
blockstream_service,
storage_stage,
}
}
}
impl Service for Tvu {
type JoinReturnType = ();
fn join(self) -> thread::Result<()> {
self.retransmit_stage.join()?;
self.fetch_stage.join()?;
self.storage_stage.join()?;
        if let Some(blockstream_service) = self.blockstream_service {
            blockstream_service.join()?;
        }
self.replay_stage.join()?;
Ok(())
}
}
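// Shutdown sketch: callers flip the shared `exit` flag first and then block on `join`, e.g.
//
//     exit.store(true, std::sync::atomic::Ordering::Relaxed);
//     tvu.join().expect("TVU threads panicked");
//
// `test_tvu_exit` at the bottom of this file follows exactly this sequence.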
use std::{borrow::Cow, convert, ffi::OsStr, path::Path};
static LICENSE_HEADER: &str = "Copyright (c) The Libra Core Contributors\n\
SPDX-License-Identifier: Apache-2.0\n\
";
#[allow(dead_code)]
pub(super) fn has_license_header(file: &Path, contents: &str) -> Result<(), Cow<'static, str>> {
enum FileType {
Rust,
Shell,
Proto,
}
let file_type = match file
.extension()
.map(OsStr::to_str)
.and_then(convert::identity)
{
Some("rs") => FileType::Rust,
Some("sh") => FileType::Shell,
Some("proto") => FileType::Proto,
_ => return Ok(()),
};
// Determine if the file is missing the license header
let missing_header = match file_type {
FileType::Rust | FileType::Proto => {
let maybe_license = contents
.lines()
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("// "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
FileType::Shell => {
let maybe_license = contents
.lines()
.skip_while(|line| line.starts_with("#!"))
.skip_while(|line| line.is_empty())
.take(2)
.map(|s| s.trim_start_matches("# "));
!LICENSE_HEADER.lines().eq(maybe_license)
}
};
if missing_header {
return Err("missing a license header".into());
}
Ok(())
}
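// A minimal usage sketch of the checker above (module and test names here are arbitrary):
// a Rust source whose first two non-empty lines carry the `// `-prefixed header passes,
// one without it fails, and unrecognized extensions are skipped entirely.
#[cfg(test)]
mod license_header_sketch_tests {
    use super::*;
    use std::path::Path;

    #[test]
    fn detects_missing_and_present_headers() {
        let with_header = "// Copyright (c) The Libra Core Contributors\n\
                           // SPDX-License-Identifier: Apache-2.0\n\
                           fn main() {}\n";
        assert!(has_license_header(Path::new("lib.rs"), with_header).is_ok());

        let without_header = "fn main() {}\n";
        assert!(has_license_header(Path::new("lib.rs"), without_header).is_err());

        // Files with an unrecognized extension are not checked at all.
        assert!(has_license_header(Path::new("README.md"), without_header).is_ok());
    }
}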
#[cfg(test)]
pub mod tests {
use super::*;
use crate::treasury_stage::create_test_recorder;
use crate::block_buffer_pool::get_tmp_ledger_path;
use crate::node_group_info::{NodeGroupInfo, Node};
use crate::genesis_utils::{create_genesis_block, GenesisBlockInfo};
use crate::storage_stage::STORAGE_ROTATE_TEST_COUNT;
use morgan_runtime::bank::Bank;
use std::sync::atomic::Ordering;
#[test]
fn test_tvu_exit() {
morgan_logger::setup();
let leader = Node::new_localhost();
let target1_keypair = Keypair::new();
let target1 = Node::new_localhost_with_pubkey(&target1_keypair.pubkey());
let starting_balance = 10_000;
let GenesisBlockInfo { genesis_block, .. } = create_genesis_block(starting_balance);
let bank_forks = BankForks::new(0, Bank::new(&genesis_block));
//start cluster_info1
let mut cluster_info1 = NodeGroupInfo::new_with_invalid_keypair(target1.info.clone());
cluster_info1.insert_info(leader.info.clone());
let cref1 = Arc::new(RwLock::new(cluster_info1));
let block_buffer_pool_path = get_tmp_ledger_path!();
let (block_buffer_pool, l_receiver, completed_slots_receiver) =
BlockBufferPool::open_by_message(&block_buffer_pool_path)
.expect("Expected to successfully open ledger");
let block_buffer_pool = Arc::new(block_buffer_pool);
let bank = bank_forks.working_bank();
let (exit, waterclock_recorder, waterclock_service, _entry_receiver) =
create_test_recorder(&bank, &block_buffer_pool);
let voting_keypair = Keypair::new();
let storage_keypair = Arc::new(Keypair::new());
let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let tvu = Tvu::new(
&voting_keypair.pubkey(),
Some(&Arc::new(voting_keypair)),
&storage_keypair,
&Arc::new(RwLock::new(bank_forks)),
&cref1,
{
Sockets {
repair: target1.sockets.repair,
retransmit: target1.sockets.retransmit,
fetch: target1.sockets.tvu,
}
},
block_buffer_pool,
STORAGE_ROTATE_TEST_COUNT,
&StorageState::default(),
None,
l_receiver,
&Arc::new(RpcSubscriptions::default()),
&waterclock_recorder,
&leader_schedule_cache,
&exit,
&Hash::default(),
completed_slots_receiver,
);
exit.store(true, Ordering::Relaxed);
tvu.join().unwrap();
waterclock_service.join().unwrap();
}
}