Dataset schema (one row per fill-in-the-middle example; columns appear in each row below in this order):

  column     type          lengths / classes
  file_name  large_string  4 to 69 characters
  prefix     large_string  0 to 26.7k characters
  suffix     large_string  0 to 24.8k characters
  middle     large_string  0 to 2.12k characters
  fim_type   large_string  4 classes
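Each row splits one source file into a prefix, a held-out middle, and a suffix, labeled by how the middle span was chosen (fim_type). A minimal sketch, in Rust, of turning a row into a prompt/target pair for fill-in-the-middle training; the <PRE>/<SUF>/<MID> sentinel strings are an assumption for illustration, since the dataset itself does not define any:

struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

// Prefix-suffix-middle (PSM) layout: the model is shown the prefix and
// suffix, then generates the middle after the <MID> sentinel.
fn to_psm_example(row: &FimRow) -> (String, String) {
    let prompt = format!("<PRE>{}<SUF>{}<MID>", row.prefix, row.suffix);
    (prompt, row.middle.clone())
}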
runner.rs
use graph::Graph; use modules; use num_cpus; use runtime::{Environment, Runtime}; use std::cmp; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::mpsc; use std::thread; use task::Task; use term; #[derive(Clone)] pub struct EnvironmentSpec { /// Script path. path: PathBuf, /// Script directory. directory: PathBuf, /// Module include paths. include_paths: Vec<PathBuf>, /// Global environment variables. variables: Vec<(String, String)>, /// Indicates if actually running tasks should be skipped. dry_run: bool, /// Indicates if up-to-date tasks should be run anyway. always_run: bool, /// Indicates task errors should be ignored. keep_going: bool, } impl EnvironmentSpec { /// Creates an environment from the environment specification. pub fn create(&self) -> Result<Runtime, Box<Error>> { // Prepare a new environment. let environment = try!(Environment::new(self.path.clone())); let runtime = Runtime::new(environment); // Open standard library functions. runtime.state().open_libs(); // Register modules. modules::register_all(&runtime); // Set include paths. for path in &self.include_paths { runtime.include_path(&path); } // Set the OS runtime.state().push_string(if cfg!(windows) { "windows" } else { "unix" }); runtime.state().set_global("OS"); // Set configured variables. for &(ref name, ref value) in &self.variables { runtime.state().push(value.clone()); runtime.state().set_global(&name); } // Load the script. try!(runtime.load()); Ok(runtime) } } /// A task runner object that holds the state for defined tasks, dependencies, and the scripting /// runtime. pub struct Runner { /// The current DAG for tasks. graph: Graph, /// The number of threads to use. jobs: usize, /// Environment specification. spec: EnvironmentSpec, /// Runtime local owned by the master thread. runtime: Option<Runtime>, } impl Runner { /// Creates a new runner instance. pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> { // By default, set the number of jobs to be one less than the number of available CPU cores. let jobs = cmp::max(1, num_cpus::get() - 1); let path = path.into(); let directory: PathBuf = match path.parent() { Some(path) => path.into(), None => { return Err("failed to parse script directory".into()); } }; Ok(Runner { graph: Graph::new(), jobs: jobs as usize, spec: EnvironmentSpec { path: path.into(), directory: directory, include_paths: Vec::new(), variables: Vec::new(), dry_run: false, always_run: false, keep_going: false, }, runtime: None, }) } pub fn path(&self) -> &Path { &self.spec.path } pub fn directory(&self) -> &Path { &self.spec.directory } /// Sets "dry run" mode. /// /// When in "dry run" mode, running tasks will operate as normal, except that no task's actions /// will be actually run. pub fn dry_run(&mut self) { self.spec.dry_run = true; } /// Run all tasks even if they are up-to-date. pub fn always_run(&mut self)
/// Run all tasks even if they throw errors. pub fn keep_going(&mut self) { self.spec.keep_going = true; } /// Sets the number of threads to use to run tasks. pub fn jobs(&mut self, jobs: usize) { self.jobs = jobs; } /// Adds a path to Lua's require path for modules. pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) { self.spec.include_paths.push(path.into()); } /// Sets a variable value. pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) { self.spec.variables.push((name.as_ref().to_string(), value.into())); } /// Load the script. pub fn load(&mut self) -> Result<(), Box<Error>> { if self.runtime.is_none() { self.runtime = Some(try!(self.spec.create())); } Ok(()) } /// Prints the list of named tasks for a script. pub fn print_task_list(&mut self) { let mut tasks = self.runtime().environment().tasks(); tasks.sort_by(|a, b| a.name().cmp(b.name())); let mut out = term::stdout().unwrap(); println!("Available tasks:"); for task in tasks { out.fg(term::color::BRIGHT_GREEN).unwrap(); write!(out, " {:16}", task.name()).unwrap(); out.reset().unwrap(); if let Some(ref description) = task.description() { write!(out, "{}", description).unwrap(); } writeln!(out, "").unwrap(); } if let Some(ref default) = self.runtime().environment().default_task() { println!(""); println!("Default task: {}", default); } } /// Run the default task. pub fn run_default(&mut self) -> Result<(), Box<Error>> { if let Some(ref name) = self.runtime().environment().default_task() { let tasks = vec![name]; self.run(&tasks) } else { Err("no default task defined".into()) } } /// Runs the specified list of tasks. /// /// Tasks are run in parallel when possible during execution. The maximum number of parallel /// jobs can be set with the `jobs()` method. pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> { // Resolve all tasks given. for task in tasks { try!(self.resolve_task(task)); } // Determine the schedule of tasks to execute. let mut queue = try!(self.graph.solve(!self.spec.always_run)); let task_count = queue.len(); let thread_count = cmp::min(self.jobs, task_count); debug!("running {} task(s) across {} thread(s)", task_count, thread_count); // Spawn one thread for each job. let mut threads = Vec::new(); let mut free_threads: HashSet<usize> = HashSet::new(); let mut channels = Vec::new(); let (sender, receiver) = mpsc::channel::<Result<usize, usize>>(); // Spawn `jobs` number of threads (but no more than the task count!). for thread_id in 0..thread_count { let spec = self.spec.clone(); let thread_sender = sender.clone(); let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0); channels.push(parent_sender); threads.push(thread::spawn(move || { // Prepare a new runtime. let runtime = spec.create().unwrap_or_else(|e| { error!("{}", e); panic!(); }); if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); } // Begin executing tasks! while let Ok((name, task_id)) = thread_receiver.recv() { info!("running task '{}' ({} of {})", name, task_id, task_count); // Lookup the task to run. let task = { // Lookup the task to run. if let Some(task) = runtime.environment().get_task(&name) { task as Rc<Task> } // Find a rule that matches the task name. else if let Some(rule) = runtime.environment() .rules() .iter() .find(|rule| rule.matches(&name)) { Rc::new(rule.create_task(name).unwrap()) as Rc<Task> } // No matching task. else { panic!("no matching task or rule for '{}'", name); } }; // Check for dry run. 
if !spec.dry_run { if let Err(e) = task.run() { // If we ought to keep going, just issue a warning. if spec.keep_going { warn!("ignoring error: {}", e); } else { error!("{}", e); thread_sender.send(Err(thread_id)).unwrap(); return; } } } else { info!("would run task '{}'", task.name()); } if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); break; } } })) } drop(sender); // Keep track of tasks completed and tasks in progress. let mut completed_tasks: HashSet<String> = HashSet::new(); let mut current_tasks: HashMap<usize, String> = HashMap::new(); let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect(); while !queue.is_empty() || !current_tasks.is_empty() { // Wait for a thread to request a task. let result = receiver.recv().unwrap(); // If the thread sent an error, we should stop everything if keep_going isn't enabled. if let Err(thread_id) = result { debug!("thread {} errored, waiting for remaining tasks...", thread_id); return Err("not all tasks completed successfully".into()); } let thread_id = result.unwrap(); free_threads.insert(thread_id); trace!("thread {} is idle", thread_id); // If the thread was previously running a task, mark it as completed. if let Some(task) = current_tasks.remove(&thread_id) { trace!("task '{}' completed", task); completed_tasks.insert(task); } // Attempt to schedule more tasks to run. The most we can schedule is the number of free // threads, but it is limited by the number of tasks that have their dependencies already // finished. 'schedule: for _ in 0..free_threads.len() { // If the queue is empty, we are done. if queue.is_empty() { break; } // Check the next task in the queue. If any of its dependencies have not yet been // completed, we cannot schedule it yet. for dependency in queue.front().unwrap().dependencies() { // Check that the dependency needs to be scheduled at all (some are already satisfied), // and that it hasn't already finished. if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) { // We can't run the next task, so we're done scheduling for now until another // thread finishes. break 'schedule; } } // Get the available task from the queue. let task = queue.front().unwrap().clone(); // Pick a free thread to run the task in. if let Some(thread_id) = free_threads.iter().next().map(|t| *t) { trace!("scheduling task '{}' on thread {}", task.name(), thread_id); let data = (task.name().to_string(), task_count - queue.len() + 1); // Send the task name. if channels[thread_id].send(data).is_ok() { current_tasks.insert(thread_id, task.name().to_string()); free_threads.remove(&thread_id); // Scheduling was successful, so remove the task from the queue. queue.pop_front().unwrap(); } else { trace!("failed to send channel to thread {}", thread_id); } } else { // We can schedule now, but there aren't any free threads. 😢 break; } } } // Close the input and wait for any remaining threads to finish. drop(channels); for (thread_id, thread) in threads.into_iter().enumerate() { if let Err(e) = thread.join() { trace!("thread {} closed with panic: {:?}", thread_id, e); } } info!("all tasks up to date"); Ok(()) } fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> { if !self.graph.contains(&name) { // Lookup the task to run. if let Some(task) = self.runtime().environment().get_task(&name) { debug!("task '{}' matches named task", name.as_ref()); self.graph.insert(task.clone()); } // Find a rule that matches the task name.
else if let Some(rule) = self.runtime() .environment() .rules() .iter() .find(|rule| rule.matches(&name)) { debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern); // Create a task for the rule and insert it in the graph. self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap())); } // No matching task. else { return Err(format!("no matching task or rule for '{}'", name.as_ref()).into()); } } for dependency in self.graph.get(name).unwrap().dependencies() { if !self.graph.contains(dependency) { try!(self.resolve_task(dependency)); } } Ok(()) } fn runtime(&self) -> Runtime { self.runtime.as_ref().unwrap().clone() } }
{ self.spec.always_run = true; }
identifier_body
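In this identifier_body row, the prefix ends at the signature `pub fn always_run(&mut self)` and the middle above supplies exactly its body, so concatenating prefix + middle + suffix reproduces the original runner.rs. A toy check of that invariant using shortened stand-in strings (the short strings here are illustrative, not the full row contents):

fn main() {
    let prefix = "pub fn always_run(&mut self) ";
    let middle = "{ self.spec.always_run = true; }";
    let suffix = " /* remainder of runner.rs */";
    // prefix + middle + suffix must equal the original source text.
    let restored = format!("{}{}{}", prefix, middle, suffix);
    assert!(restored.starts_with("pub fn always_run(&mut self) { self.spec.always_run = true; }"));
}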
runner.rs
use graph::Graph; use modules; use num_cpus; use runtime::{Environment, Runtime}; use std::cmp; use std::collections::{HashMap, HashSet}; use std::error::Error; use std::path::{Path, PathBuf}; use std::rc::Rc; use std::sync::mpsc; use std::thread; use task::Task; use term; #[derive(Clone)] pub struct EnvironmentSpec { /// Script path. path: PathBuf, /// Script directory. directory: PathBuf, /// Module include paths. include_paths: Vec<PathBuf>, /// Global environment variables. variables: Vec<(String, String)>, /// Indicates if actually running tasks should be skipped. dry_run: bool, /// Indicates if up-to-date tasks should be run anyway. always_run: bool, /// Indicates task errors should be ignored. keep_going: bool, } impl EnvironmentSpec { /// Creates an environment from the environment specification. pub fn create(&self) -> Result<Runtime, Box<Error>> { // Prepare a new environment. let environment = try!(Environment::new(self.path.clone())); let runtime = Runtime::new(environment); // Open standard library functions. runtime.state().open_libs(); // Register modules. modules::register_all(&runtime); // Set include paths. for path in &self.include_paths { runtime.include_path(&path); } // Set the OS runtime.state().push_string(if cfg!(windows) { "windows" } else { "unix" }); runtime.state().set_global("OS"); // Set configured variables. for &(ref name, ref value) in &self.variables { runtime.state().push(value.clone()); runtime.state().set_global(&name); } // Load the script. try!(runtime.load()); Ok(runtime) } } /// A task runner object that holds the state for defined tasks, dependencies, and the scripting /// runtime. pub struct Runner { /// The current DAG for tasks. graph: Graph, /// The number of threads to use. jobs: usize, /// Environment specification. spec: EnvironmentSpec, /// Runtime local owned by the master thread. runtime: Option<Runtime>, } impl Runner { /// Creates a new runner instance. pub fn new<P: Into<PathBuf>>(path: P) -> Result<Runner, Box<Error>> { // By default, set the number of jobs to be one less than the number of available CPU cores. let jobs = cmp::max(1, num_cpus::get() - 1); let path = path.into(); let directory: PathBuf = match path.parent() { Some(path) => path.into(), None => { return Err("failed to parse script directory".into()); } }; Ok(Runner { graph: Graph::new(), jobs: jobs as usize, spec: EnvironmentSpec { path: path.into(), directory: directory, include_paths: Vec::new(), variables: Vec::new(), dry_run: false, always_run: false, keep_going: false, }, runtime: None, }) } pub fn path(&self) -> &Path { &self.spec.path } pub fn directory(&self) -> &Path { &self.spec.directory } /// Sets "dry run" mode. /// /// When in "dry run" mode, running tasks will operate as normal, except that no task's actions /// will be actually run. pub fn dry_run(&mut self) { self.spec.dry_run = true; } /// Run all tasks even if they are up-to-date. pub fn always_run(&mut self) { self.spec.always_run = true; } /// Run all tasks even if they throw errors. pub fn keep_going(&mut self) { self.spec.keep_going = true; } /// Sets the number of threads to use to run tasks. pub fn jobs(&mut self, jobs: usize) { self.jobs = jobs; } /// Adds a path to Lua's require path for modules. pub fn include_path<P: Into<PathBuf>>(&mut self, path: P) { self.spec.include_paths.push(path.into()); } /// Sets a variable value. 
pub fn set_var<S: AsRef<str>, V: Into<String>>(&mut self, name: S, value: V) { self.spec.variables.push((name.as_ref().to_string(), value.into())); } /// Load the script. pub fn load(&mut self) -> Result<(), Box<Error>> { if self.runtime.is_none() { self.runtime = Some(try!(self.spec.create())); } Ok(()) } /// Prints the list of named tasks for a script. pub fn print_task_list(&mut self) { let mut tasks = self.runtime().environment().tasks(); tasks.sort_by(|a, b| a.name().cmp(b.name())); let mut out = term::stdout().unwrap(); println!("Available tasks:"); for task in tasks { out.fg(term::color::BRIGHT_GREEN).unwrap(); write!(out, " {:16}", task.name()).unwrap(); out.reset().unwrap(); if let Some(ref description) = task.description() { write!(out, "{}", description).unwrap(); } writeln!(out, "").unwrap(); } if let Some(ref default) = self.runtime().environment().default_task() { println!(""); println!("Default task: {}", default); } } /// Run the default task. pub fn run_default(&mut self) -> Result<(), Box<Error>> { if let Some(ref name) = self.runtime().environment().default_task() { let tasks = vec![name]; self.run(&tasks) } else { Err("no default task defined".into()) } } /// Runs the specified list of tasks. /// /// Tasks are run in parallel when possible during execution. The maximum number of parallel /// jobs can be set with the `jobs()` method. pub fn run<S: AsRef<str>>(&mut self, tasks: &[S]) -> Result<(), Box<Error>> { // Resolve all tasks given. for task in tasks { try!(self.resolve_task(task)); } // Determine the schedule of tasks to execute. let mut queue = try!(self.graph.solve(!self.spec.always_run)); let task_count = queue.len(); let thread_count = cmp::min(self.jobs, task_count); debug!("running {} task(s) across {} thread(s)", task_count, thread_count); // Spawn one thread for each job. let mut threads = Vec::new(); let mut free_threads: HashSet<usize> = HashSet::new(); let mut channels = Vec::new(); let (sender, receiver) = mpsc::channel::<Result<usize, usize>>(); // Spawn `jobs` number of threads (but no more than the task count!). for thread_id in 0..thread_count { let spec = self.spec.clone(); let thread_sender = sender.clone(); let (parent_sender, thread_receiver) = mpsc::sync_channel::<(String, usize)>(0); channels.push(parent_sender); threads.push(thread::spawn(move || { // Prepare a new runtime. let runtime = spec.create().unwrap_or_else(|e| { error!("{}", e); panic!(); }); if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); } // Begin executing tasks! while let Ok((name, task_id)) = thread_receiver.recv() { info!("running task '{}' ({} of {})", name, task_id, task_count); // Lookup the task to run. let task = { // Lookup the task to run. if let Some(task) = runtime.environment().get_task(&name) { task as Rc<Task> } // Find a rule that matches the task name. else if let Some(rule) = runtime.environment() .rules() .iter() .find(|rule| rule.matches(&name)) { Rc::new(rule.create_task(name).unwrap()) as Rc<Task> } // No matching task. else { panic!("no matching task or rule for '{}'", name); } }; // Check for dry run. if!spec.dry_run { if let Err(e) = task.run() { // If we ought to keep going, just issue a warning. 
if spec.keep_going { warn!("ignoring error: {}", e); } else { error!("{}", e); thread_sender.send(Err(thread_id)).unwrap(); return; } } } else { info!("would run task '{}'", task.name()); } if thread_sender.send(Ok(thread_id)).is_err() { trace!("thread {} failed to send channel", thread_id); break; } } })) } drop(sender); // Keep track of tasks completed and tasks in progress. let mut completed_tasks: HashSet<String> = HashSet::new(); let mut current_tasks: HashMap<usize, String> = HashMap::new(); let all_tasks: HashSet<String> = queue.iter().map(|s| s.name().to_string()).collect(); while !queue.is_empty() || !current_tasks.is_empty() { // Wait for a thread to request a task. let result = receiver.recv().unwrap(); // If the thread sent an error, we should stop everything if keep_going isn't enabled. if let Err(thread_id) = result { debug!("thread {} errored, waiting for remaining tasks...", thread_id); return Err("not all tasks completed successfully".into()); } let thread_id = result.unwrap(); free_threads.insert(thread_id); trace!("thread {} is idle", thread_id); // If the thread was previously running a task, mark it as completed. if let Some(task) = current_tasks.remove(&thread_id) { trace!("task '{}' completed", task); completed_tasks.insert(task); } // Attempt to schedule more tasks to run. The most we can schedule is the number of free // threads, but it is limited by the number of tasks that have their dependencies already // finished. 'schedule: for _ in 0..free_threads.len() { // If the queue is empty, we are done. if queue.is_empty() { break; } // Check the next task in the queue. If any of its dependencies have not yet been // completed, we cannot schedule it yet. for dependency in queue.front().unwrap().dependencies() { // Check that the dependency needs to be scheduled at all (some are already satisfied), // and that it hasn't already finished. if all_tasks.contains(dependency) && !completed_tasks.contains(dependency) { // We can't run the next task, so we're done scheduling for now until another // thread finishes. break 'schedule; } } // Get the available task from the queue. let task = queue.front().unwrap().clone(); // Pick a free thread to run the task in. if let Some(thread_id) = free_threads.iter().next().map(|t| *t) { trace!("scheduling task '{}' on thread {}", task.name(), thread_id); let data = (task.name().to_string(), task_count - queue.len() + 1); // Send the task name. if channels[thread_id].send(data).is_ok() {
// Scheduling was successful, so remove the task from the queue. queue.pop_front().unwrap(); } else { trace!("failed to send channel to thread {}", thread_id); } } else { // We can schedule now, but there aren't any free threads. 😢 break; } } } // Close the input and wait for any remaining threads to finish. drop(channels); for (thread_id, thread) in threads.into_iter().enumerate() { if let Err(e) = thread.join() { trace!("thread {} closed with panic: {:?}", thread_id, e); } } info!("all tasks up to date"); Ok(()) } fn resolve_task<S: AsRef<str>>(&mut self, name: S) -> Result<(), Box<Error>> { if !self.graph.contains(&name) { // Lookup the task to run. if let Some(task) = self.runtime().environment().get_task(&name) { debug!("task '{}' matches named task", name.as_ref()); self.graph.insert(task.clone()); } // Find a rule that matches the task name. else if let Some(rule) = self.runtime() .environment() .rules() .iter() .find(|rule| rule.matches(&name)) { debug!("task '{}' matches rule '{}'", name.as_ref(), rule.pattern); // Create a task for the rule and insert it in the graph. self.graph.insert(Rc::new(rule.create_task(name.as_ref()).unwrap())); } // No matching task. else { return Err(format!("no matching task or rule for '{}'", name.as_ref()).into()); } } for dependency in self.graph.get(name).unwrap().dependencies() { if !self.graph.contains(dependency) { try!(self.resolve_task(dependency)); } } Ok(()) } fn runtime(&self) -> Runtime { self.runtime.as_ref().unwrap().clone() } }
current_tasks.insert(thread_id, task.name().to_string()); free_threads.remove(&thread_id);
random_line_split
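The random_line_split label on this row means the held-out middle is an arbitrary run of source lines rather than a syntactic unit (here, two statements from inside the scheduler loop). A sketch of producing such a split, assuming the pipeline samples line indices at random (the sampling itself is omitted):

/// Split `source` into (prefix, middle, suffix) at line boundaries.
/// `start` and `end` are line indices with start <= end; a real data
/// pipeline would sample them randomly. Rejoining with '\n' is a
/// simplification that normalizes trailing newlines.
fn split_at_lines(source: &str, start: usize, end: usize) -> (String, String, String) {
    let lines: Vec<&str> = source.lines().collect();
    assert!(start <= end && end <= lines.len());
    (
        lines[..start].join("\n"),
        lines[start..end].join("\n"),
        lines[end..].join("\n"),
    )
}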
backtrace.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! As always, windows has something very different than unix, we mainly want //! to avoid having to depend too much on libunwind for windows. //! //! If you google around, you'll find a fair bit of references to built-in //! functions to get backtraces on windows. It turns out that most of these are //! in an external library called dbghelp. I was unable to find this library //! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent //! of it. //! //! You'll also find that there's a function called CaptureStackBackTrace //! mentioned frequently (which is also easy to use), but sadly I didn't have a //! copy of that function in my mingw install (maybe it was broken?). Instead, //! this takes the route of using StackWalk64 in order to walk the stack. #![allow(dead_code)] use prelude::v1::*; use io::prelude::*; use dynamic_lib::DynamicLibrary; use ffi::CStr; use intrinsics; use io; use libc; use mem; use path::Path; use ptr; use str; use sync::{StaticMutex, MUTEX_INIT}; use sys_common::backtrace::*; #[allow(non_snake_case)] extern "system" { fn GetCurrentProcess() -> libc::HANDLE; fn GetCurrentThread() -> libc::HANDLE; fn RtlCaptureContext(ctx: *mut arch::CONTEXT); } type SymFromAddrFn = extern "system" fn(libc::HANDLE, u64, *mut u64, *mut SYMBOL_INFO) -> libc::BOOL; type SymInitializeFn = extern "system" fn(libc::HANDLE, *mut libc::c_void, libc::BOOL) -> libc::BOOL; type SymCleanupFn = extern "system" fn(libc::HANDLE) -> libc::BOOL; type StackWalk64Fn = extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE, *mut STACKFRAME64, *mut arch::CONTEXT, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void) -> libc::BOOL; const MAX_SYM_NAME: usize = 2000; const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c; const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200; const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664; #[repr(C)] struct SYMBOL_INFO { SizeOfStruct: libc::c_ulong, TypeIndex: libc::c_ulong, Reserved: [u64; 2], Index: libc::c_ulong, Size: libc::c_ulong, ModBase: u64, Flags: libc::c_ulong, Value: u64, Address: u64, Register: libc::c_ulong, Scope: libc::c_ulong, Tag: libc::c_ulong, NameLen: libc::c_ulong, MaxNameLen: libc::c_ulong, // note that windows has this as 1, but it basically just means that // the name is inline at the end of the struct. For us, we just bump // the struct size up to MAX_SYM_NAME. Name: [libc::c_char; MAX_SYM_NAME], } #[repr(C)] enum ADDRESS_MODE { AddrMode1616, AddrMode1632, AddrModeReal, AddrModeFlat, } struct
{ Offset: u64, Segment: u16, Mode: ADDRESS_MODE, } pub struct STACKFRAME64 { AddrPC: ADDRESS64, AddrReturn: ADDRESS64, AddrFrame: ADDRESS64, AddrStack: ADDRESS64, AddrBStore: ADDRESS64, FuncTableEntry: *mut libc::c_void, Params: [u64; 4], Far: libc::BOOL, Virtual: libc::BOOL, Reserved: [u64; 3], KdHelp: KDHELP64, } struct KDHELP64 { Thread: u64, ThCallbackStack: libc::DWORD, ThCallbackBStore: libc::DWORD, NextCallback: libc::DWORD, FramePointer: libc::DWORD, KiCallUserMode: u64, KeUserCallbackDispatcher: u64, SystemRangeStart: u64, KiUserExceptionDispatcher: u64, StackBase: u64, StackLimit: u64, Reserved: [u64; 5], } #[cfg(target_arch = "x86")] mod arch { use libc; const MAXIMUM_SUPPORTED_EXTENSION: usize = 512; #[repr(C)] pub struct CONTEXT { ContextFlags: libc::DWORD, Dr0: libc::DWORD, Dr1: libc::DWORD, Dr2: libc::DWORD, Dr3: libc::DWORD, Dr6: libc::DWORD, Dr7: libc::DWORD, FloatSave: FLOATING_SAVE_AREA, SegGs: libc::DWORD, SegFs: libc::DWORD, SegEs: libc::DWORD, SegDs: libc::DWORD, Edi: libc::DWORD, Esi: libc::DWORD, Ebx: libc::DWORD, Edx: libc::DWORD, Ecx: libc::DWORD, Eax: libc::DWORD, Ebp: libc::DWORD, Eip: libc::DWORD, SegCs: libc::DWORD, EFlags: libc::DWORD, Esp: libc::DWORD, SegSs: libc::DWORD, ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION], } #[repr(C)] pub struct FLOATING_SAVE_AREA { ControlWord: libc::DWORD, StatusWord: libc::DWORD, TagWord: libc::DWORD, ErrorOffset: libc::DWORD, ErrorSelector: libc::DWORD, DataOffset: libc::DWORD, DataSelector: libc::DWORD, RegisterArea: [u8; 80], Cr0NpxState: libc::DWORD, } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> libc::DWORD { frame.AddrPC.Offset = ctx.Eip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Esp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Ebp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_I386 } } #[cfg(target_arch = "x86_64")] mod arch { use libc::{c_longlong, c_ulonglong}; use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG}; use simd; #[repr(C)] pub struct CONTEXT { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte P1Home: DWORDLONG, P2Home: DWORDLONG, P3Home: DWORDLONG, P4Home: DWORDLONG, P5Home: DWORDLONG, P6Home: DWORDLONG, ContextFlags: DWORD, MxCsr: DWORD, SegCs: WORD, SegDs: WORD, SegEs: WORD, SegFs: WORD, SegGs: WORD, SegSs: WORD, EFlags: DWORD, Dr0: DWORDLONG, Dr1: DWORDLONG, Dr2: DWORDLONG, Dr3: DWORDLONG, Dr6: DWORDLONG, Dr7: DWORDLONG, Rax: DWORDLONG, Rcx: DWORDLONG, Rdx: DWORDLONG, Rbx: DWORDLONG, Rsp: DWORDLONG, Rbp: DWORDLONG, Rsi: DWORDLONG, Rdi: DWORDLONG, R8: DWORDLONG, R9: DWORDLONG, R10: DWORDLONG, R11: DWORDLONG, R12: DWORDLONG, R13: DWORDLONG, R14: DWORDLONG, R15: DWORDLONG, Rip: DWORDLONG, FltSave: FLOATING_SAVE_AREA, VectorRegister: [M128A; 26], VectorControl: DWORDLONG, DebugControl: DWORDLONG, LastBranchToRip: DWORDLONG, LastBranchFromRip: DWORDLONG, LastExceptionToRip: DWORDLONG, LastExceptionFromRip: DWORDLONG, } #[repr(C)] pub struct M128A { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte Low: c_ulonglong, High: c_longlong } #[repr(C)] pub struct FLOATING_SAVE_AREA { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte _Dummy: [u8; 512] // FIXME: Fill this out } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> DWORD { frame.AddrPC.Offset = ctx.Rip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Rsp as u64; frame.AddrStack.Mode = 
super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Rbp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_AMD64 } } struct Cleanup { handle: libc::HANDLE, SymCleanup: SymCleanupFn, } impl Drop for Cleanup { fn drop(&mut self) { (self.SymCleanup)(self.handle); } } pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. static LOCK: StaticMutex = MUTEX_INIT; let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. let path = Path::new("dbghelp.dll"); let lib = match DynamicLibrary::open(Some(&path)) { Ok(lib) => lib, Err(..) => return Ok(()), }; macro_rules! sym{ ($e:expr, $t:ident) => (unsafe { match lib.symbol($e) { Ok(f) => mem::transmute::<*mut u8, $t>(f), Err(..) => return Ok(()) } }) } // Fetch the symbols necessary from dbghelp.dll let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn); let SymInitialize = sym!("SymInitialize", SymInitializeFn); let SymCleanup = sym!("SymCleanup", SymCleanupFn); let StackWalk64 = sym!("StackWalk64", StackWalk64Fn); // Allocate necessary structures for doing the stack walk let process = unsafe { GetCurrentProcess() }; let thread = unsafe { GetCurrentThread() }; let mut context: arch::CONTEXT = unsafe { intrinsics::init() }; unsafe { RtlCaptureContext(&mut context); } let mut frame: STACKFRAME64 = unsafe { intrinsics::init() }; let image = arch::init_frame(&mut frame, &context); // Initialize this process's symbols let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE); if ret!= libc::TRUE { return Ok(()) } let _c = Cleanup { handle: process, SymCleanup: SymCleanup }; // And now that we're done with all the setup, do the stack walking! let mut i = 0; try!(write!(w, "stack backtrace:\n")); while StackWalk64(image, process, thread, &mut frame, &mut context, ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut()) == libc::TRUE{ let addr = frame.AddrPC.Offset; if addr == frame.AddrReturn.Offset || addr == 0 || frame.AddrReturn.Offset == 0 { break } i += 1; try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH)); let mut info: SYMBOL_INFO = unsafe { intrinsics::init() }; info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong; // the struct size in C. the value is different to // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81) // due to struct alignment. info.SizeOfStruct = 88; let mut displacement = 0u64; let ret = SymFromAddr(process, addr as u64, &mut displacement, &mut info); if ret == libc::TRUE { try!(write!(w, " - ")); let ptr = info.Name.as_ptr() as *const libc::c_char; let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() }; match str::from_utf8(bytes) { Ok(s) => try!(demangle(w, s)), Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])), } } try!(w.write_all(&['\n' as u8])); } Ok(()) }
ADDRESS64
identifier_name
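For identifier_name rows the target is a single identifier: the prefix above ends at the `struct` keyword and the suffix begins at the field list, so the model must produce only `ADDRESS64`. Evaluating such rows reduces to an exact-match check; a minimal sketch (real harnesses may also strip tokenizer artifacts):

/// identifier_name targets are single tokens, so trimmed exact match
/// is the natural per-row metric.
fn identifier_match(prediction: &str, target: &str) -> bool {
    prediction.trim() == target.trim()
}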
backtrace.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! As always, windows has something very different than unix, we mainly want //! to avoid having to depend too much on libunwind for windows. //! //! If you google around, you'll find a fair bit of references to built-in //! functions to get backtraces on windows. It turns out that most of these are //! in an external library called dbghelp. I was unable to find this library //! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent //! of it. //! //! You'll also find that there's a function called CaptureStackBackTrace //! mentioned frequently (which is also easy to use), but sadly I didn't have a //! copy of that function in my mingw install (maybe it was broken?). Instead, //! this takes the route of using StackWalk64 in order to walk the stack. #![allow(dead_code)] use prelude::v1::*; use io::prelude::*; use dynamic_lib::DynamicLibrary; use ffi::CStr; use intrinsics; use io; use libc; use mem; use path::Path; use ptr; use str; use sync::{StaticMutex, MUTEX_INIT}; use sys_common::backtrace::*; #[allow(non_snake_case)] extern "system" { fn GetCurrentProcess() -> libc::HANDLE; fn GetCurrentThread() -> libc::HANDLE; fn RtlCaptureContext(ctx: *mut arch::CONTEXT); } type SymFromAddrFn = extern "system" fn(libc::HANDLE, u64, *mut u64,
type SymInitializeFn = extern "system" fn(libc::HANDLE, *mut libc::c_void, libc::BOOL) -> libc::BOOL; type SymCleanupFn = extern "system" fn(libc::HANDLE) -> libc::BOOL; type StackWalk64Fn = extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE, *mut STACKFRAME64, *mut arch::CONTEXT, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void) -> libc::BOOL; const MAX_SYM_NAME: usize = 2000; const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c; const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200; const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664; #[repr(C)] struct SYMBOL_INFO { SizeOfStruct: libc::c_ulong, TypeIndex: libc::c_ulong, Reserved: [u64; 2], Index: libc::c_ulong, Size: libc::c_ulong, ModBase: u64, Flags: libc::c_ulong, Value: u64, Address: u64, Register: libc::c_ulong, Scope: libc::c_ulong, Tag: libc::c_ulong, NameLen: libc::c_ulong, MaxNameLen: libc::c_ulong, // note that windows has this as 1, but it basically just means that // the name is inline at the end of the struct. For us, we just bump // the struct size up to MAX_SYM_NAME. Name: [libc::c_char; MAX_SYM_NAME], } #[repr(C)] enum ADDRESS_MODE { AddrMode1616, AddrMode1632, AddrModeReal, AddrModeFlat, } struct ADDRESS64 { Offset: u64, Segment: u16, Mode: ADDRESS_MODE, } pub struct STACKFRAME64 { AddrPC: ADDRESS64, AddrReturn: ADDRESS64, AddrFrame: ADDRESS64, AddrStack: ADDRESS64, AddrBStore: ADDRESS64, FuncTableEntry: *mut libc::c_void, Params: [u64; 4], Far: libc::BOOL, Virtual: libc::BOOL, Reserved: [u64; 3], KdHelp: KDHELP64, } struct KDHELP64 { Thread: u64, ThCallbackStack: libc::DWORD, ThCallbackBStore: libc::DWORD, NextCallback: libc::DWORD, FramePointer: libc::DWORD, KiCallUserMode: u64, KeUserCallbackDispatcher: u64, SystemRangeStart: u64, KiUserExceptionDispatcher: u64, StackBase: u64, StackLimit: u64, Reserved: [u64; 5], } #[cfg(target_arch = "x86")] mod arch { use libc; const MAXIMUM_SUPPORTED_EXTENSION: usize = 512; #[repr(C)] pub struct CONTEXT { ContextFlags: libc::DWORD, Dr0: libc::DWORD, Dr1: libc::DWORD, Dr2: libc::DWORD, Dr3: libc::DWORD, Dr6: libc::DWORD, Dr7: libc::DWORD, FloatSave: FLOATING_SAVE_AREA, SegGs: libc::DWORD, SegFs: libc::DWORD, SegEs: libc::DWORD, SegDs: libc::DWORD, Edi: libc::DWORD, Esi: libc::DWORD, Ebx: libc::DWORD, Edx: libc::DWORD, Ecx: libc::DWORD, Eax: libc::DWORD, Ebp: libc::DWORD, Eip: libc::DWORD, SegCs: libc::DWORD, EFlags: libc::DWORD, Esp: libc::DWORD, SegSs: libc::DWORD, ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION], } #[repr(C)] pub struct FLOATING_SAVE_AREA { ControlWord: libc::DWORD, StatusWord: libc::DWORD, TagWord: libc::DWORD, ErrorOffset: libc::DWORD, ErrorSelector: libc::DWORD, DataOffset: libc::DWORD, DataSelector: libc::DWORD, RegisterArea: [u8; 80], Cr0NpxState: libc::DWORD, } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> libc::DWORD { frame.AddrPC.Offset = ctx.Eip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Esp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Ebp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_I386 } } #[cfg(target_arch = "x86_64")] mod arch { use libc::{c_longlong, c_ulonglong}; use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG}; use simd; #[repr(C)] pub struct CONTEXT { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte P1Home: DWORDLONG, P2Home: DWORDLONG, P3Home: DWORDLONG, P4Home: DWORDLONG, P5Home: DWORDLONG, P6Home: DWORDLONG, ContextFlags: 
DWORD, MxCsr: DWORD, SegCs: WORD, SegDs: WORD, SegEs: WORD, SegFs: WORD, SegGs: WORD, SegSs: WORD, EFlags: DWORD, Dr0: DWORDLONG, Dr1: DWORDLONG, Dr2: DWORDLONG, Dr3: DWORDLONG, Dr6: DWORDLONG, Dr7: DWORDLONG, Rax: DWORDLONG, Rcx: DWORDLONG, Rdx: DWORDLONG, Rbx: DWORDLONG, Rsp: DWORDLONG, Rbp: DWORDLONG, Rsi: DWORDLONG, Rdi: DWORDLONG, R8: DWORDLONG, R9: DWORDLONG, R10: DWORDLONG, R11: DWORDLONG, R12: DWORDLONG, R13: DWORDLONG, R14: DWORDLONG, R15: DWORDLONG, Rip: DWORDLONG, FltSave: FLOATING_SAVE_AREA, VectorRegister: [M128A; 26], VectorControl: DWORDLONG, DebugControl: DWORDLONG, LastBranchToRip: DWORDLONG, LastBranchFromRip: DWORDLONG, LastExceptionToRip: DWORDLONG, LastExceptionFromRip: DWORDLONG, } #[repr(C)] pub struct M128A { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte Low: c_ulonglong, High: c_longlong } #[repr(C)] pub struct FLOATING_SAVE_AREA { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte _Dummy: [u8; 512] // FIXME: Fill this out } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> DWORD { frame.AddrPC.Offset = ctx.Rip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Rsp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Rbp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_AMD64 } } struct Cleanup { handle: libc::HANDLE, SymCleanup: SymCleanupFn, } impl Drop for Cleanup { fn drop(&mut self) { (self.SymCleanup)(self.handle); } } pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. static LOCK: StaticMutex = MUTEX_INIT; let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. let path = Path::new("dbghelp.dll"); let lib = match DynamicLibrary::open(Some(&path)) { Ok(lib) => lib, Err(..) => return Ok(()), }; macro_rules! sym{ ($e:expr, $t:ident) => (unsafe { match lib.symbol($e) { Ok(f) => mem::transmute::<*mut u8, $t>(f), Err(..) => return Ok(()) } }) } // Fetch the symbols necessary from dbghelp.dll let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn); let SymInitialize = sym!("SymInitialize", SymInitializeFn); let SymCleanup = sym!("SymCleanup", SymCleanupFn); let StackWalk64 = sym!("StackWalk64", StackWalk64Fn); // Allocate necessary structures for doing the stack walk let process = unsafe { GetCurrentProcess() }; let thread = unsafe { GetCurrentThread() }; let mut context: arch::CONTEXT = unsafe { intrinsics::init() }; unsafe { RtlCaptureContext(&mut context); } let mut frame: STACKFRAME64 = unsafe { intrinsics::init() }; let image = arch::init_frame(&mut frame, &context); // Initialize this process's symbols let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE); if ret!= libc::TRUE { return Ok(()) } let _c = Cleanup { handle: process, SymCleanup: SymCleanup }; // And now that we're done with all the setup, do the stack walking! 
let mut i = 0; try!(write!(w, "stack backtrace:\n")); while StackWalk64(image, process, thread, &mut frame, &mut context, ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut()) == libc::TRUE { let addr = frame.AddrPC.Offset; if addr == frame.AddrReturn.Offset || addr == 0 || frame.AddrReturn.Offset == 0 { break } i += 1; try!(write!(w, "  {:2}: {:#2$x}", i, addr, HEX_WIDTH)); let mut info: SYMBOL_INFO = unsafe { intrinsics::init() }; info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong; // the struct size in C. the value is different from // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81) // due to struct alignment. info.SizeOfStruct = 88; let mut displacement = 0u64; let ret = SymFromAddr(process, addr as u64, &mut displacement, &mut info); if ret == libc::TRUE { try!(write!(w, " - ")); let ptr = info.Name.as_ptr() as *const libc::c_char; let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() }; match str::from_utf8(bytes) { Ok(s) => try!(demangle(w, s)), Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])), } } try!(w.write_all(&['\n' as u8])); } Ok(()) }
*mut SYMBOL_INFO) -> libc::BOOL;
random_line_split
backtrace.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! As always, windows has something very different than unix, we mainly want //! to avoid having to depend too much on libunwind for windows. //! //! If you google around, you'll find a fair bit of references to built-in //! functions to get backtraces on windows. It turns out that most of these are //! in an external library called dbghelp. I was unable to find this library //! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent //! of it. //! //! You'll also find that there's a function called CaptureStackBackTrace //! mentioned frequently (which is also easy to use), but sadly I didn't have a //! copy of that function in my mingw install (maybe it was broken?). Instead, //! this takes the route of using StackWalk64 in order to walk the stack. #![allow(dead_code)] use prelude::v1::*; use io::prelude::*; use dynamic_lib::DynamicLibrary; use ffi::CStr; use intrinsics; use io; use libc; use mem; use path::Path; use ptr; use str; use sync::{StaticMutex, MUTEX_INIT}; use sys_common::backtrace::*; #[allow(non_snake_case)] extern "system" { fn GetCurrentProcess() -> libc::HANDLE; fn GetCurrentThread() -> libc::HANDLE; fn RtlCaptureContext(ctx: *mut arch::CONTEXT); } type SymFromAddrFn = extern "system" fn(libc::HANDLE, u64, *mut u64, *mut SYMBOL_INFO) -> libc::BOOL; type SymInitializeFn = extern "system" fn(libc::HANDLE, *mut libc::c_void, libc::BOOL) -> libc::BOOL; type SymCleanupFn = extern "system" fn(libc::HANDLE) -> libc::BOOL; type StackWalk64Fn = extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE, *mut STACKFRAME64, *mut arch::CONTEXT, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void, *mut libc::c_void) -> libc::BOOL; const MAX_SYM_NAME: usize = 2000; const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c; const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200; const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664; #[repr(C)] struct SYMBOL_INFO { SizeOfStruct: libc::c_ulong, TypeIndex: libc::c_ulong, Reserved: [u64; 2], Index: libc::c_ulong, Size: libc::c_ulong, ModBase: u64, Flags: libc::c_ulong, Value: u64, Address: u64, Register: libc::c_ulong, Scope: libc::c_ulong, Tag: libc::c_ulong, NameLen: libc::c_ulong, MaxNameLen: libc::c_ulong, // note that windows has this as 1, but it basically just means that // the name is inline at the end of the struct. For us, we just bump // the struct size up to MAX_SYM_NAME. 
Name: [libc::c_char; MAX_SYM_NAME], } #[repr(C)] enum ADDRESS_MODE { AddrMode1616, AddrMode1632, AddrModeReal, AddrModeFlat, } struct ADDRESS64 { Offset: u64, Segment: u16, Mode: ADDRESS_MODE, } pub struct STACKFRAME64 { AddrPC: ADDRESS64, AddrReturn: ADDRESS64, AddrFrame: ADDRESS64, AddrStack: ADDRESS64, AddrBStore: ADDRESS64, FuncTableEntry: *mut libc::c_void, Params: [u64; 4], Far: libc::BOOL, Virtual: libc::BOOL, Reserved: [u64; 3], KdHelp: KDHELP64, } struct KDHELP64 { Thread: u64, ThCallbackStack: libc::DWORD, ThCallbackBStore: libc::DWORD, NextCallback: libc::DWORD, FramePointer: libc::DWORD, KiCallUserMode: u64, KeUserCallbackDispatcher: u64, SystemRangeStart: u64, KiUserExceptionDispatcher: u64, StackBase: u64, StackLimit: u64, Reserved: [u64; 5], } #[cfg(target_arch = "x86")] mod arch { use libc; const MAXIMUM_SUPPORTED_EXTENSION: usize = 512; #[repr(C)] pub struct CONTEXT { ContextFlags: libc::DWORD, Dr0: libc::DWORD, Dr1: libc::DWORD, Dr2: libc::DWORD, Dr3: libc::DWORD, Dr6: libc::DWORD, Dr7: libc::DWORD, FloatSave: FLOATING_SAVE_AREA, SegGs: libc::DWORD, SegFs: libc::DWORD, SegEs: libc::DWORD, SegDs: libc::DWORD, Edi: libc::DWORD, Esi: libc::DWORD, Ebx: libc::DWORD, Edx: libc::DWORD, Ecx: libc::DWORD, Eax: libc::DWORD, Ebp: libc::DWORD, Eip: libc::DWORD, SegCs: libc::DWORD, EFlags: libc::DWORD, Esp: libc::DWORD, SegSs: libc::DWORD, ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION], } #[repr(C)] pub struct FLOATING_SAVE_AREA { ControlWord: libc::DWORD, StatusWord: libc::DWORD, TagWord: libc::DWORD, ErrorOffset: libc::DWORD, ErrorSelector: libc::DWORD, DataOffset: libc::DWORD, DataSelector: libc::DWORD, RegisterArea: [u8; 80], Cr0NpxState: libc::DWORD, } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> libc::DWORD { frame.AddrPC.Offset = ctx.Eip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Esp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Ebp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_I386 } } #[cfg(target_arch = "x86_64")] mod arch { use libc::{c_longlong, c_ulonglong}; use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG}; use simd; #[repr(C)] pub struct CONTEXT { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte P1Home: DWORDLONG, P2Home: DWORDLONG, P3Home: DWORDLONG, P4Home: DWORDLONG, P5Home: DWORDLONG, P6Home: DWORDLONG, ContextFlags: DWORD, MxCsr: DWORD, SegCs: WORD, SegDs: WORD, SegEs: WORD, SegFs: WORD, SegGs: WORD, SegSs: WORD, EFlags: DWORD, Dr0: DWORDLONG, Dr1: DWORDLONG, Dr2: DWORDLONG, Dr3: DWORDLONG, Dr6: DWORDLONG, Dr7: DWORDLONG, Rax: DWORDLONG, Rcx: DWORDLONG, Rdx: DWORDLONG, Rbx: DWORDLONG, Rsp: DWORDLONG, Rbp: DWORDLONG, Rsi: DWORDLONG, Rdi: DWORDLONG, R8: DWORDLONG, R9: DWORDLONG, R10: DWORDLONG, R11: DWORDLONG, R12: DWORDLONG, R13: DWORDLONG, R14: DWORDLONG, R15: DWORDLONG, Rip: DWORDLONG, FltSave: FLOATING_SAVE_AREA, VectorRegister: [M128A; 26], VectorControl: DWORDLONG, DebugControl: DWORDLONG, LastBranchToRip: DWORDLONG, LastBranchFromRip: DWORDLONG, LastExceptionToRip: DWORDLONG, LastExceptionFromRip: DWORDLONG, } #[repr(C)] pub struct M128A { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte Low: c_ulonglong, High: c_longlong } #[repr(C)] pub struct FLOATING_SAVE_AREA { _align_hack: [simd::u64x2; 0], // FIXME align on 16-byte _Dummy: [u8; 512] // FIXME: Fill this out } pub fn init_frame(frame: &mut super::STACKFRAME64, ctx: &CONTEXT) -> DWORD { frame.AddrPC.Offset = 
ctx.Rip as u64; frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrStack.Offset = ctx.Rsp as u64; frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat; frame.AddrFrame.Offset = ctx.Rbp as u64; frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat; super::IMAGE_FILE_MACHINE_AMD64 } } struct Cleanup { handle: libc::HANDLE, SymCleanup: SymCleanupFn, } impl Drop for Cleanup { fn drop(&mut self) { (self.SymCleanup)(self.handle); } } pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. static LOCK: StaticMutex = MUTEX_INIT; let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't // always be found. Additionally, it's nice having fewer dependencies. let path = Path::new("dbghelp.dll"); let lib = match DynamicLibrary::open(Some(&path)) { Ok(lib) => lib, Err(..) => return Ok(()), }; macro_rules! sym{ ($e:expr, $t:ident) => (unsafe { match lib.symbol($e) { Ok(f) => mem::transmute::<*mut u8, $t>(f), Err(..) => return Ok(()) } }) } // Fetch the symbols necessary from dbghelp.dll let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn); let SymInitialize = sym!("SymInitialize", SymInitializeFn); let SymCleanup = sym!("SymCleanup", SymCleanupFn); let StackWalk64 = sym!("StackWalk64", StackWalk64Fn); // Allocate necessary structures for doing the stack walk let process = unsafe { GetCurrentProcess() }; let thread = unsafe { GetCurrentThread() }; let mut context: arch::CONTEXT = unsafe { intrinsics::init() }; unsafe { RtlCaptureContext(&mut context); } let mut frame: STACKFRAME64 = unsafe { intrinsics::init() }; let image = arch::init_frame(&mut frame, &context); // Initialize this process's symbols let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE); if ret!= libc::TRUE { return Ok(()) } let _c = Cleanup { handle: process, SymCleanup: SymCleanup }; // And now that we're done with all the setup, do the stack walking! let mut i = 0; try!(write!(w, "stack backtrace:\n")); while StackWalk64(image, process, thread, &mut frame, &mut context, ptr::null_mut(), ptr::null_mut(), ptr::null_mut(), ptr::null_mut()) == libc::TRUE{ let addr = frame.AddrPC.Offset; if addr == frame.AddrReturn.Offset || addr == 0 || frame.AddrReturn.Offset == 0
i += 1; try!(write!(w, "  {:2}: {:#2$x}", i, addr, HEX_WIDTH)); let mut info: SYMBOL_INFO = unsafe { intrinsics::init() }; info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong; // the struct size in C. the value is different from // `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81) // due to struct alignment. info.SizeOfStruct = 88; let mut displacement = 0u64; let ret = SymFromAddr(process, addr as u64, &mut displacement, &mut info); if ret == libc::TRUE { try!(write!(w, " - ")); let ptr = info.Name.as_ptr() as *const libc::c_char; let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() }; match str::from_utf8(bytes) { Ok(s) => try!(demangle(w, s)), Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])), } } try!(w.write_all(&['\n' as u8])); } Ok(()) }
{ break }
conditional_block
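This row's conditional_block label rounds out the four fim_type classes seen in this dump (identifier_body, identifier_name, random_line_split, conditional_block); here the middle is the `{ break }` body of the if inside the StackWalk64 loop. A small helper for partitioning rows by class; the enum is illustrative, since the dataset stores fim_type as a plain string column:

/// The four span classes observed above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FimType { IdentifierBody, IdentifierName, RandomLineSplit, ConditionalBlock }

fn parse_fim_type(label: &str) -> Option<FimType> {
    match label {
        "identifier_body" => Some(FimType::IdentifierBody),
        "identifier_name" => Some(FimType::IdentifierName),
        "random_line_split" => Some(FimType::RandomLineSplit),
        "conditional_block" => Some(FimType::ConditionalBlock),
        _ => None,
    }
}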
platform_types.rs
Paste(Option<String>), InsertNumbersAtCursors, AddOrSelectBuffer(BufferName, String), AddOrSelectBufferThenGoTo(BufferName, String, Position), NewScratchBuffer(Option<String>), TabIn, TabOut, StripTrailingWhitespace, AdjustBufferSelection(SelectionAdjustment), NextLanguage, PreviousLanguage, ToggleSingleLineComments, ToggleCase, AutoIndentSelection, DuplicateLines, SelectBuffer(BufferId), OpenOrSelectBuffer(PathBuf), CloseBuffer(g_i::Index), SetMenuMode(MenuMode), SubmitForm, ShowError(String), } d!(for Input : Input::None); #[derive(Clone, Copy, Default, Debug, Hash, PartialEq, Eq)] pub struct BufferId { pub kind: BufferIdKind, pub index: g_i::Index, } ord!(for BufferId: id, other in { id.kind.cmp(&other.kind).then_with(|| id.index.cmp(&other.index)) }); #[macro_export] macro_rules! b_id { // // Creation // ($kind: expr) => { BufferId { kind: $kind, index: d!(), } }; ($kind: expr, $index: expr) => { BufferId { kind: $kind, index: $index, } }; } #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BufferIdKind { /// Used to indicate that the keyboard is focused on a non-buffer. None, /// Indicates a buffer repesenting an open file or an in memory scratch file. /// Almost all buffers are `Text` buffers. Text, Find, Replace, FileSwitcher, GoToPosition, } d!(for BufferIdKind: BufferIdKind::Text); impl From<&BufferIdKind> for u8 { fn from(kind: &BufferIdKind) -> Self { use BufferIdKind::*; match kind { None => 0, Text => 1, Find => 2, Replace => 3, FileSwitcher => 4, GoToPosition => 5, } } } ord!(for BufferIdKind: kind, other in { let k: u8 = kind.into(); let o: u8 = other.into(); k.cmp(&o) }); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HighlightKind { User, Result, CurrentResult, } d!(for HighlightKind: HighlightKind::User); #[derive(Clone, Debug, PartialEq, Eq)] pub struct Highlight { pub min: Position, pub max: Position, pub kind: HighlightKind, } impl Highlight { #[must_use] pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self { Highlight { min: std::cmp::min(p1, p2), max: std::cmp::max(p1, p2), kind, } } #[must_use] pub fn get(&self) -> (Position, Position) { (self.min, self.max) } } #[macro_export] macro_rules! highlight { (l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => { Highlight::new( ( Position { line: $min_line, offset: CharOffset($min_offset), }, Position { line: $max_line, offset: CharOffset($max_offset), }, ), d!() ) }; (l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => { highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF) }; } pub fn push_highlights<O: Into<Option<Position>>>( highlights: &mut Vec<Highlight>, position: Position, highlight_position: O, kind: HighlightKind, ) { match highlight_position.into() { Some(h) if h!= position => { let min = std::cmp::min(position, h); let max = std::cmp::max(position, h); if min.line == max.line { highlights.push(Highlight::new((min, max), kind)); return; } // This early return is merely an optimization from three rectangles to two. // TODO Is this optimization actually worth it? The sticky cursor offset does make this // more likely than it would otherwise be. 
if min.offset!= 0 && min.offset == max.offset { // [|_______________________|] // ^min_middle max_middle^ let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; let offset = min.offset; highlights.push(Highlight::new( ( Position { offset, line: min.line, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset, line: max.line, }, ), kind, )); return; } if min.offset!= 0 { highlights.push(Highlight::new( ( min, Position { offset: CharOffset::max_value(), ..min }, ), kind, )); } let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; if min_middle <= max_middle { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); } if max.offset!= 0 { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), ..max }, max, ), kind, )); } } _ => {} } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BufferName { Path(PathBuf), Scratch(u32), } d!(for BufferName: BufferName::Scratch(d!())); fmt_display!(for BufferName: name in "{}", match name { BufferName::Path(p) => p .file_name() .map_or_else( || "?Unknown Path?".to_string(), |s| s.to_string_lossy().into_owned() ), BufferName::Scratch(n) => format!("*scratch {}*", n), } ); ord!(for BufferName: name, other in { use BufferName::*; use std::cmp::Ordering::*; match (name, other) { (Path(p1), Path(p2)) => { match (p1.canonicalize(), p2.canonicalize() ) { (Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => { Equal } _ => { p1.cmp(p2) } } } (Path(_), Scratch(_)) => { Less } (Scratch(_), Path(_)) => { Greater } (Scratch(n1), Scratch(n2)) => { n1.cmp(n2) } } }); impl BufferName { #[must_use] pub fn get_extension_or_empty(&self) -> &str { use BufferName::*; match self { Path(p) => { p.extension() .and_then(std::ffi::OsStr::to_str) .unwrap_or("") }, Scratch(..) => "", } } #[must_use] pub fn size_in_bytes(&self) -> usize { use core::mem; // TODO Do other platforms need adjusting as well? 
#[cfg(target_os = "windows")] const BYTES_PER_UNIT: usize = 2; #[cfg(not(target_os = "windows"))] const BYTES_PER_UNIT: usize = 1; match self { Self::Path(p) => { mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT }, Self::Scratch(n) => mem::size_of_val(n), } } } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum CursorState { None, PressedAgainstWall(Move), } d!(for CursorState: CursorState::None); fmt_debug!(for CursorState: s in "{}", match s { CursorState::None => std::borrow::Cow::Borrowed("_"), CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move)) }); ord!(for CursorState: state, other in { use std::cmp::Ordering::*; match (state, other) { (CursorState::None, CursorState::None) => Equal, (CursorState::None, CursorState::PressedAgainstWall(_)) => Less, (CursorState::PressedAgainstWall(_), CursorState::None) => Greater, (CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => { m1.cmp(m2) } } }); #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CursorView { pub position: Position, pub state: CursorState, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusLineView { pub chars: String, } pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected."; d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()}); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum MenuMode { Hidden, FileSwitcher, FindReplace(FindReplaceMode), GoToPosition, } d!(for MenuMode: MenuMode::Hidden); #[derive(Clone, Debug, PartialEq)] pub enum MenuView { None, FileSwitcher(FileSwitcherView), FindReplace(FindReplaceView), GoToPosition(GoToPositionView) } d!(for MenuView: MenuView::None); impl MenuView { #[must_use] pub fn get_mode(&self) -> MenuMode { match self { Self::None => MenuMode::Hidden, Self::FileSwitcher(_) => MenuMode::FileSwitcher, Self::FindReplace(v) => MenuMode::FindReplace(v.mode), Self::GoToPosition(_) => MenuMode::GoToPosition, } } } #[must_use] pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool { u!{MenuMode} match (kind, menu_mode) { // We want this to be true for `Text` always since it would be completely // reasonable behaviour for a different client to always show the text // buffers. 
(BufferIdKind::Text, _) | (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_)) | (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher) | (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true, _ => { false }, } } pub type FileSwitcherResults = Vec<PathBuf>; #[derive(Clone, Default, Debug, PartialEq)] pub struct FileSwitcherView { pub search: BufferViewData, pub results: FileSwitcherResults, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum FindReplaceMode { CurrentFile, } d!(for FindReplaceMode: FindReplaceMode::CurrentFile); #[derive(Clone, Default, Debug, PartialEq)] pub struct FindReplaceView { pub mode: FindReplaceMode, pub find: BufferViewData, pub replace: BufferViewData, pub result_count: usize, } #[derive(Clone, Default, Debug, PartialEq)] pub struct GoToPositionView { pub go_to_position: BufferViewData, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EditedTransition { ToEdited, ToUnedited, } pub type IndexedEditedTransition = (g_i::Index, EditedTransition); #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct EditedTransitions(Vec<IndexedEditedTransition>); impl EditedTransitions { pub fn push(&mut self, iet: IndexedEditedTransition) { self.0.push(iet); } pub fn clear(&mut self) { self.0.clear(); } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> impl Iterator<Item = &IndexedEditedTransition> { self.0.iter() } } impl IntoIterator for EditedTransitions { type Item = IndexedEditedTransition; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } #[derive(Clone, Default, PartialEq)] pub struct BufferLabel { pub name: BufferName, /// Having an owned version of the result of `name.to_string()` simplifies /// ownership in some cases. // TODO this could be truncated to a fixed length/on the stack pub name_string: String, } fmt_debug!(collapse default for BufferLabel: me { blank_if_default!(name); blank_if_default!(name_string, me.name_string.is_empty()); }); // This could arguably be ToOwned. impl From<&BufferName> for BufferLabel { fn from(name: &BufferName) -> Self { Self { name: name.clone(), name_string: name.to_string(), } } } impl From<BufferName> for BufferLabel { fn from(name: BufferName) -> Self { let name_string = name.to_string(); Self { name, name_string, } } } #[derive(Clone, Default, Debug, PartialEq)] pub struct View { pub buffers: SelectableVec1<BufferLabel>, pub menu: MenuView, pub status_line: StatusLineView, pub current_buffer_kind: BufferIdKind, pub edited_transitions: EditedTransitions, pub stats: ViewStats, } impl View { #[must_use] /// returns the currently visible editor buffer index. pub fn current_text_index(&self) -> g_i::Index { self.buffers.current_index() } #[must_use] /// returns the currently visible editor buffer view's index and label. pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) { ( self.buffers.current_index(), self.buffers.get_current_element() ) } #[must_use] pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> { self.buffers.get(index) } #[must_use] pub fn current_buffer_id(&self) -> BufferId { b_id!( self.current_buffer_kind, self.buffers.current_index() ) } #[must_use] /// returns the selected menu's cursors if there is a menu containing a buffer /// currently visible, or the current text buffer's cursors if not. 
pub fn get_selected_cursors(&self) -> Option<&[CursorView]> {
        use BufferIdKind::*;
        match self.current_buffer_kind {
            // Seems like we never actually need to access the Text buffer
            // cursors here. If we want to later, then some additional restructuring
            // will be needed, at least according to the comment this comment
            // replaced. commit `680d9507`
            None | Text => Option::None,
            Find => match &self.menu {
                MenuView::FindReplace(ref fr) => Some(&fr.find),
                _ => Option::None,
            },
            Replace => match &self.menu {
                MenuView::FindReplace(ref fr) => Some(&fr.replace),
                _ => Option::None,
            },
            FileSwitcher => match &self.menu {
}, GoToPosition => match &self.menu { MenuView::GoToPosition(ref gtp) => Some(&gtp.go_to_position), _ => Option::None, }, }.map(|d| &d.cursors[..]) } #[must_use] /// returns the currently visible editor buffer path if it has one. pub fn current_path(&self) -> Option<PathBuf> { u!{BufferName} match self.buffers.get_current_element().name { Path(ref p) => Some(p.clone()), Scratch(_) => None, } } } #[derive(Clone, Default, PartialEq)] pub struct BufferView { pub label: BufferLabel, pub data: BufferViewData, } fmt_debug!(collapse default for BufferView: me { blank_if_default!(label); blank_if_default!(data); }); /// The reason we keep this as a separate struct from `BufferView` is to enable /// storage of buffers without a `BufferName`. #[derive(Clone, Default, PartialEq)] pub struct BufferViewData { pub chars: String, pub scroll: ScrollXY, pub cursors: Vec<CursorView>, pub highlights: Vec<Highlight>, pub spans: Spans, } fmt_debug!(collapse default for BufferViewData: me { blank_if_default!(chars, me.chars == Rope::default()); blank_if_default!(scroll); blank_if_default!(cursors, me
MenuView::FileSwitcher(ref fs) => Some(&fs.search), _ => Option::None,
random_line_split
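The `push_highlights` code that recurs in these platform_types.rs rows decomposes a multi-line selection into at most three axis-aligned rectangles: a partial first line, a full-width block of middle lines, and a partial last line (with a two-rectangle shortcut when both end offsets are equal and non-zero). Below is a minimal, self-contained sketch of the general three-rectangle branch using simplified stand-ins (`Pos`, `Rect`, plain `usize` offsets, and `usize::MAX` in place of `CharOffset::max_value()`); it is an illustration of the logic, not the crate's API.

// Simplified stand-ins for the document's `Position`/`CharOffset`/`Highlight`;
// `usize::MAX` plays the role of `CharOffset::max_value()`.
#[derive(Clone, Copy, Debug)]
struct Pos { line: usize, offset: usize }

#[derive(Debug)]
struct Rect { min: Pos, max: Pos }

// Mirrors the general branch of `push_highlights` for a selection spanning lines.
fn rects(min: Pos, max: Pos) -> Vec<Rect> {
    assert!(min.line < max.line, "single-line selections are one rectangle");
    let mut out = Vec::new();
    if min.offset != 0 {
        // Partial first line: from the selection start to end-of-line.
        out.push(Rect { min, max: Pos { line: min.line, offset: usize::MAX } });
    }
    let min_middle = min.line + if min.offset == 0 { 0 } else { 1 };
    let max_middle = max.line - 1; // the lines differ, so max.line > 0
    if min_middle <= max_middle {
        // Fully covered middle lines, as one full-width block.
        out.push(Rect {
            min: Pos { line: min_middle, offset: 0 },
            max: Pos { line: max_middle, offset: usize::MAX },
        });
    }
    if max.offset != 0 {
        // Partial last line: from start-of-line to the selection end.
        out.push(Rect { min: Pos { line: max.line, offset: 0 }, max });
    }
    out
}

fn main() {
    // A selection from (line 1, offset 3) to (line 4, offset 2) yields three rectangles.
    for r in rects(Pos { line: 1, offset: 3 }, Pos { line: 4, offset: 2 }) {
        println!("{:?}", r);
    }
}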
platform_types.rs
Paste(Option<String>), InsertNumbersAtCursors, AddOrSelectBuffer(BufferName, String), AddOrSelectBufferThenGoTo(BufferName, String, Position), NewScratchBuffer(Option<String>), TabIn, TabOut, StripTrailingWhitespace, AdjustBufferSelection(SelectionAdjustment), NextLanguage, PreviousLanguage, ToggleSingleLineComments, ToggleCase, AutoIndentSelection, DuplicateLines, SelectBuffer(BufferId), OpenOrSelectBuffer(PathBuf), CloseBuffer(g_i::Index), SetMenuMode(MenuMode), SubmitForm, ShowError(String), } d!(for Input : Input::None); #[derive(Clone, Copy, Default, Debug, Hash, PartialEq, Eq)] pub struct BufferId { pub kind: BufferIdKind, pub index: g_i::Index, } ord!(for BufferId: id, other in { id.kind.cmp(&other.kind).then_with(|| id.index.cmp(&other.index)) }); #[macro_export] macro_rules! b_id { // // Creation // ($kind: expr) => { BufferId { kind: $kind, index: d!(), } }; ($kind: expr, $index: expr) => { BufferId { kind: $kind, index: $index, } }; } #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BufferIdKind { /// Used to indicate that the keyboard is focused on a non-buffer. None, /// Indicates a buffer repesenting an open file or an in memory scratch file. /// Almost all buffers are `Text` buffers. Text, Find, Replace, FileSwitcher, GoToPosition, } d!(for BufferIdKind: BufferIdKind::Text); impl From<&BufferIdKind> for u8 { fn from(kind: &BufferIdKind) -> Self { use BufferIdKind::*; match kind { None => 0, Text => 1, Find => 2, Replace => 3, FileSwitcher => 4, GoToPosition => 5, } } } ord!(for BufferIdKind: kind, other in { let k: u8 = kind.into(); let o: u8 = other.into(); k.cmp(&o) }); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HighlightKind { User, Result, CurrentResult, } d!(for HighlightKind: HighlightKind::User); #[derive(Clone, Debug, PartialEq, Eq)] pub struct Highlight { pub min: Position, pub max: Position, pub kind: HighlightKind, } impl Highlight { #[must_use] pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self { Highlight { min: std::cmp::min(p1, p2), max: std::cmp::max(p1, p2), kind, } } #[must_use] pub fn get(&self) -> (Position, Position) { (self.min, self.max) } } #[macro_export] macro_rules! highlight { (l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => { Highlight::new( ( Position { line: $min_line, offset: CharOffset($min_offset), }, Position { line: $max_line, offset: CharOffset($max_offset), }, ), d!() ) }; (l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => { highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF) }; } pub fn push_highlights<O: Into<Option<Position>>>( highlights: &mut Vec<Highlight>, position: Position, highlight_position: O, kind: HighlightKind, ) { match highlight_position.into() { Some(h) if h!= position => { let min = std::cmp::min(position, h); let max = std::cmp::max(position, h); if min.line == max.line { highlights.push(Highlight::new((min, max), kind)); return; } // This early return is merely an optimization from three rectangles to two. // TODO Is this optimization actually worth it? The sticky cursor offset does make this // more likely than it would otherwise be. 
if min.offset!= 0 && min.offset == max.offset { // [|_______________________|] // ^min_middle max_middle^ let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; let offset = min.offset; highlights.push(Highlight::new( ( Position { offset, line: min.line, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset, line: max.line, }, ), kind, )); return; } if min.offset!= 0 { highlights.push(Highlight::new( ( min, Position { offset: CharOffset::max_value(), ..min }, ), kind, )); } let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; if min_middle <= max_middle { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); } if max.offset!= 0 { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), ..max }, max, ), kind, )); } } _ => {} } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BufferName { Path(PathBuf), Scratch(u32), } d!(for BufferName: BufferName::Scratch(d!())); fmt_display!(for BufferName: name in "{}", match name { BufferName::Path(p) => p .file_name() .map_or_else( || "?Unknown Path?".to_string(), |s| s.to_string_lossy().into_owned() ), BufferName::Scratch(n) => format!("*scratch {}*", n), } ); ord!(for BufferName: name, other in { use BufferName::*; use std::cmp::Ordering::*; match (name, other) { (Path(p1), Path(p2)) => { match (p1.canonicalize(), p2.canonicalize() ) { (Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => { Equal } _ => { p1.cmp(p2) } } } (Path(_), Scratch(_)) => { Less } (Scratch(_), Path(_)) => { Greater } (Scratch(n1), Scratch(n2)) => { n1.cmp(n2) } } }); impl BufferName { #[must_use] pub fn get_extension_or_empty(&self) -> &str { use BufferName::*; match self { Path(p) => { p.extension() .and_then(std::ffi::OsStr::to_str) .unwrap_or("") }, Scratch(..) => "", } } #[must_use] pub fn size_in_bytes(&self) -> usize { use core::mem; // TODO Do other platforms need adjusting as well? 
#[cfg(target_os = "windows")] const BYTES_PER_UNIT: usize = 2; #[cfg(not(target_os = "windows"))] const BYTES_PER_UNIT: usize = 1; match self { Self::Path(p) => { mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT }, Self::Scratch(n) => mem::size_of_val(n), } } } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum CursorState { None, PressedAgainstWall(Move), } d!(for CursorState: CursorState::None); fmt_debug!(for CursorState: s in "{}", match s { CursorState::None => std::borrow::Cow::Borrowed("_"), CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move)) }); ord!(for CursorState: state, other in { use std::cmp::Ordering::*; match (state, other) { (CursorState::None, CursorState::None) => Equal, (CursorState::None, CursorState::PressedAgainstWall(_)) => Less, (CursorState::PressedAgainstWall(_), CursorState::None) => Greater, (CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => { m1.cmp(m2) } } }); #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CursorView { pub position: Position, pub state: CursorState, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusLineView { pub chars: String, } pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected."; d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()}); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum MenuMode { Hidden, FileSwitcher, FindReplace(FindReplaceMode), GoToPosition, } d!(for MenuMode: MenuMode::Hidden); #[derive(Clone, Debug, PartialEq)] pub enum MenuView { None, FileSwitcher(FileSwitcherView), FindReplace(FindReplaceView), GoToPosition(GoToPositionView) } d!(for MenuView: MenuView::None); impl MenuView { #[must_use] pub fn
(&self) -> MenuMode { match self { Self::None => MenuMode::Hidden, Self::FileSwitcher(_) => MenuMode::FileSwitcher, Self::FindReplace(v) => MenuMode::FindReplace(v.mode), Self::GoToPosition(_) => MenuMode::GoToPosition, } } } #[must_use] pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool { u!{MenuMode} match (kind, menu_mode) { // We want this to be true for `Text` always since it would be completely // reasonable behaviour for a different client to always show the text // buffers. (BufferIdKind::Text, _) | (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_)) | (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher) | (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true, _ => { false }, } } pub type FileSwitcherResults = Vec<PathBuf>; #[derive(Clone, Default, Debug, PartialEq)] pub struct FileSwitcherView { pub search: BufferViewData, pub results: FileSwitcherResults, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum FindReplaceMode { CurrentFile, } d!(for FindReplaceMode: FindReplaceMode::CurrentFile); #[derive(Clone, Default, Debug, PartialEq)] pub struct FindReplaceView { pub mode: FindReplaceMode, pub find: BufferViewData, pub replace: BufferViewData, pub result_count: usize, } #[derive(Clone, Default, Debug, PartialEq)] pub struct GoToPositionView { pub go_to_position: BufferViewData, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EditedTransition { ToEdited, ToUnedited, } pub type IndexedEditedTransition = (g_i::Index, EditedTransition); #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct EditedTransitions(Vec<IndexedEditedTransition>); impl EditedTransitions { pub fn push(&mut self, iet: IndexedEditedTransition) { self.0.push(iet); } pub fn clear(&mut self) { self.0.clear(); } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> impl Iterator<Item = &IndexedEditedTransition> { self.0.iter() } } impl IntoIterator for EditedTransitions { type Item = IndexedEditedTransition; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } #[derive(Clone, Default, PartialEq)] pub struct BufferLabel { pub name: BufferName, /// Having an owned version of the result of `name.to_string()` simplifies /// ownership in some cases. // TODO this could be truncated to a fixed length/on the stack pub name_string: String, } fmt_debug!(collapse default for BufferLabel: me { blank_if_default!(name); blank_if_default!(name_string, me.name_string.is_empty()); }); // This could arguably be ToOwned. impl From<&BufferName> for BufferLabel { fn from(name: &BufferName) -> Self { Self { name: name.clone(), name_string: name.to_string(), } } } impl From<BufferName> for BufferLabel { fn from(name: BufferName) -> Self { let name_string = name.to_string(); Self { name, name_string, } } } #[derive(Clone, Default, Debug, PartialEq)] pub struct View { pub buffers: SelectableVec1<BufferLabel>, pub menu: MenuView, pub status_line: StatusLineView, pub current_buffer_kind: BufferIdKind, pub edited_transitions: EditedTransitions, pub stats: ViewStats, } impl View { #[must_use] /// returns the currently visible editor buffer index. pub fn current_text_index(&self) -> g_i::Index { self.buffers.current_index() } #[must_use] /// returns the currently visible editor buffer view's index and label. 
pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) {
        (
            self.buffers.current_index(),
            self.buffers.get_current_element()
        )
    }

    #[must_use]
    pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> {
        self.buffers.get(index)
    }

    #[must_use]
    pub fn current_buffer_id(&self) -> BufferId {
        b_id!(
            self.current_buffer_kind,
            self.buffers.current_index()
        )
    }

    #[must_use]
    /// returns the selected menu's cursors if there is a menu containing a buffer
    /// currently visible, or the current text buffer's cursors if not.
    pub fn get_selected_cursors(&self) -> Option<&[CursorView]> {
        use BufferIdKind::*;
        match self.current_buffer_kind {
            // Seems like we never actually need to access the Text buffer
            // cursors here. If we want to later, then some additional restructuring
            // will be needed, at least according to the comment this comment
            // replaced. commit `680d9507`
            None | Text => Option::None,
            Find => match &self.menu {
                MenuView::FindReplace(ref fr) => Some(&fr.find),
                _ => Option::None,
            },
            Replace => match &self.menu {
                MenuView::FindReplace(ref fr) => Some(&fr.replace),
                _ => Option::None,
            },
            FileSwitcher => match &self.menu {
                MenuView::FileSwitcher(ref fs) => Some(&fs.search),
                _ => Option::None,
            },
            GoToPosition => match &self.menu {
                MenuView::GoToPosition(ref gtp) => Some(&gtp.go_to_position),
                _ => Option::None,
            },
        }.map(|d| &d.cursors[..])
    }

    #[must_use]
    /// returns the currently visible editor buffer path if it has one.
    pub fn current_path(&self) -> Option<PathBuf> {
        u!{BufferName}
        match self.buffers.get_current_element().name {
            Path(ref p) => Some(p.clone()),
            Scratch(_) => None,
        }
    }
}

#[derive(Clone, Default, PartialEq)]
pub struct BufferView {
    pub label: BufferLabel,
    pub data: BufferViewData,
}

fmt_debug!(collapse default for BufferView: me {
    blank_if_default!(label);
    blank_if_default!(data);
});

/// The reason we keep this as a separate struct from `BufferView` is to enable
/// storage of buffers without a `BufferName`.
#[derive(Clone, Default, PartialEq)]
pub struct BufferViewData {
    pub chars: String,
    pub scroll: ScrollXY,
    pub cursors: Vec<CursorView>,
    pub highlights: Vec<Highlight>,
    pub spans: Spans,
}

fmt_debug!(collapse default for BufferViewData: me {
    blank_if_default!(chars, me.chars == Rope::default());
    blank_if_default!(scroll);
    blank_if_default!(cursors
get_mode
identifier_name
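For `identifier_name` rows like the one above, the `middle` column holds only the identifier (`get_mode` here) while `prefix` and `suffix` hold the code on either side of it. The sketch below shows one hypothetical way such a split could be produced; the dataset's actual generator is not shown in this dump, and `split_on` and the sample source are illustrative only.

// Hypothetical construction of an `identifier_name` example: `middle` is the
// identifier itself, `prefix`/`suffix` are everything before and after it.
// Byte-offset slicing; assumes the needle occurs once in the source.
fn split_on(source: &str, needle: &str) -> Option<(String, String, String)> {
    let start = source.find(needle)?;
    let end = start + needle.len();
    Some((
        source[..start].to_string(),    // prefix
        source[start..end].to_string(), // middle
        source[end..].to_string(),      // suffix
    ))
}

fn main() {
    let source = "impl MenuView { pub fn get_mode(&self) -> MenuMode { /* ... */ } }";
    let (prefix, middle, suffix) = split_on(source, "get_mode").unwrap();
    assert_eq!(middle, "get_mode");
    println!("prefix = {:?}\nmiddle = {:?}\nsuffix = {:?}", prefix, middle, suffix);
}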
platform_types.rs
Paste(Option<String>), InsertNumbersAtCursors, AddOrSelectBuffer(BufferName, String), AddOrSelectBufferThenGoTo(BufferName, String, Position), NewScratchBuffer(Option<String>), TabIn, TabOut, StripTrailingWhitespace, AdjustBufferSelection(SelectionAdjustment), NextLanguage, PreviousLanguage, ToggleSingleLineComments, ToggleCase, AutoIndentSelection, DuplicateLines, SelectBuffer(BufferId), OpenOrSelectBuffer(PathBuf), CloseBuffer(g_i::Index), SetMenuMode(MenuMode), SubmitForm, ShowError(String), } d!(for Input : Input::None); #[derive(Clone, Copy, Default, Debug, Hash, PartialEq, Eq)] pub struct BufferId { pub kind: BufferIdKind, pub index: g_i::Index, } ord!(for BufferId: id, other in { id.kind.cmp(&other.kind).then_with(|| id.index.cmp(&other.index)) }); #[macro_export] macro_rules! b_id { // // Creation // ($kind: expr) => { BufferId { kind: $kind, index: d!(), } }; ($kind: expr, $index: expr) => { BufferId { kind: $kind, index: $index, } }; } #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub enum BufferIdKind { /// Used to indicate that the keyboard is focused on a non-buffer. None, /// Indicates a buffer repesenting an open file or an in memory scratch file. /// Almost all buffers are `Text` buffers. Text, Find, Replace, FileSwitcher, GoToPosition, } d!(for BufferIdKind: BufferIdKind::Text); impl From<&BufferIdKind> for u8 { fn from(kind: &BufferIdKind) -> Self { use BufferIdKind::*; match kind { None => 0, Text => 1, Find => 2, Replace => 3, FileSwitcher => 4, GoToPosition => 5, } } } ord!(for BufferIdKind: kind, other in { let k: u8 = kind.into(); let o: u8 = other.into(); k.cmp(&o) }); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum HighlightKind { User, Result, CurrentResult, } d!(for HighlightKind: HighlightKind::User); #[derive(Clone, Debug, PartialEq, Eq)] pub struct Highlight { pub min: Position, pub max: Position, pub kind: HighlightKind, } impl Highlight { #[must_use] pub fn new((p1, p2): (Position, Position), kind: HighlightKind) -> Self { Highlight { min: std::cmp::min(p1, p2), max: std::cmp::max(p1, p2), kind, } } #[must_use] pub fn get(&self) -> (Position, Position) { (self.min, self.max) } } #[macro_export] macro_rules! highlight { (l $min_line:literal o $min_offset:literal l $max_line:literal o $max_offset:literal ) => { Highlight::new( ( Position { line: $min_line, offset: CharOffset($min_offset), }, Position { line: $max_line, offset: CharOffset($max_offset), }, ), d!() ) }; (l $min_line:literal o $min_offset:literal l $max_line:literal o max ) => { highlight!(l $min_line o $min_offset l $max_line o 0xFFFF_FFFF__FFFF_FFFF) }; } pub fn push_highlights<O: Into<Option<Position>>>( highlights: &mut Vec<Highlight>, position: Position, highlight_position: O, kind: HighlightKind, ) { match highlight_position.into() { Some(h) if h!= position => { let min = std::cmp::min(position, h); let max = std::cmp::max(position, h); if min.line == max.line { highlights.push(Highlight::new((min, max), kind)); return; } // This early return is merely an optimization from three rectangles to two. // TODO Is this optimization actually worth it? The sticky cursor offset does make this // more likely than it would otherwise be. 
if min.offset!= 0 && min.offset == max.offset { // [|_______________________|] // ^min_middle max_middle^ let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; let offset = min.offset; highlights.push(Highlight::new( ( Position { offset, line: min.line, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset, line: max.line, }, ), kind, )); return; } if min.offset!= 0 { highlights.push(Highlight::new( ( min, Position { offset: CharOffset::max_value(), ..min }, ), kind, )); } let min_middle = min.line + if min.offset == 0 { 0 } else { 1 }; // Since We know the lines must be different, we know `max.line > 0` let max_middle = max.line - 1; if min_middle <= max_middle { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), line: min_middle, }, Position { offset: CharOffset::max_value(), line: max_middle, }, ), kind, )); } if max.offset!= 0 { highlights.push(Highlight::new( ( Position { offset: CharOffset(0), ..max }, max, ), kind, )); } } _ => {} } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum BufferName { Path(PathBuf), Scratch(u32), } d!(for BufferName: BufferName::Scratch(d!())); fmt_display!(for BufferName: name in "{}", match name { BufferName::Path(p) => p .file_name() .map_or_else( || "?Unknown Path?".to_string(), |s| s.to_string_lossy().into_owned() ), BufferName::Scratch(n) => format!("*scratch {}*", n), } ); ord!(for BufferName: name, other in { use BufferName::*; use std::cmp::Ordering::*; match (name, other) { (Path(p1), Path(p2)) => { match (p1.canonicalize(), p2.canonicalize() ) { (Ok(ref cp1), Ok(ref cp2)) if cp1 == cp2 => { Equal } _ => { p1.cmp(p2) } } } (Path(_), Scratch(_)) => { Less } (Scratch(_), Path(_)) => { Greater } (Scratch(n1), Scratch(n2)) => { n1.cmp(n2) } } }); impl BufferName { #[must_use] pub fn get_extension_or_empty(&self) -> &str { use BufferName::*; match self { Path(p) => { p.extension() .and_then(std::ffi::OsStr::to_str) .unwrap_or("") }, Scratch(..) => "", } } #[must_use] pub fn size_in_bytes(&self) -> usize { use core::mem; // TODO Do other platforms need adjusting as well? 
#[cfg(target_os = "windows")] const BYTES_PER_UNIT: usize = 2; #[cfg(not(target_os = "windows"))] const BYTES_PER_UNIT: usize = 1; match self { Self::Path(p) => { mem::size_of_val(p) + p.capacity() * BYTES_PER_UNIT }, Self::Scratch(n) => mem::size_of_val(n), } } } #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub enum CursorState { None, PressedAgainstWall(Move), } d!(for CursorState: CursorState::None); fmt_debug!(for CursorState: s in "{}", match s { CursorState::None => std::borrow::Cow::Borrowed("_"), CursorState::PressedAgainstWall(r#move) => std::borrow::Cow::Owned(format!("->|({})", r#move)) }); ord!(for CursorState: state, other in { use std::cmp::Ordering::*; match (state, other) { (CursorState::None, CursorState::None) => Equal, (CursorState::None, CursorState::PressedAgainstWall(_)) => Less, (CursorState::PressedAgainstWall(_), CursorState::None) => Greater, (CursorState::PressedAgainstWall(m1), CursorState::PressedAgainstWall(m2)) => { m1.cmp(m2) } } }); #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct CursorView { pub position: Position, pub state: CursorState, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StatusLineView { pub chars: String, } pub const DEFAULT_STATUS_LINE_CHARS: &str = "No buffer selected."; d!(for StatusLineView: StatusLineView {chars: DEFAULT_STATUS_LINE_CHARS.to_owned()}); #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum MenuMode { Hidden, FileSwitcher, FindReplace(FindReplaceMode), GoToPosition, } d!(for MenuMode: MenuMode::Hidden); #[derive(Clone, Debug, PartialEq)] pub enum MenuView { None, FileSwitcher(FileSwitcherView), FindReplace(FindReplaceView), GoToPosition(GoToPositionView) } d!(for MenuView: MenuView::None); impl MenuView { #[must_use] pub fn get_mode(&self) -> MenuMode { match self { Self::None => MenuMode::Hidden, Self::FileSwitcher(_) => MenuMode::FileSwitcher, Self::FindReplace(v) => MenuMode::FindReplace(v.mode), Self::GoToPosition(_) => MenuMode::GoToPosition, } } } #[must_use] pub fn kind_editable_during_mode(kind: BufferIdKind, menu_mode: MenuMode) -> bool { u!{MenuMode} match (kind, menu_mode) { // We want this to be true for `Text` always since it would be completely // reasonable behaviour for a different client to always show the text // buffers. 
(BufferIdKind::Text, _) | (BufferIdKind::Find | BufferIdKind::Replace, FindReplace(_)) | (BufferIdKind::FileSwitcher, MenuMode::FileSwitcher) | (BufferIdKind::GoToPosition, MenuMode::GoToPosition) => true, _ => { false }, } } pub type FileSwitcherResults = Vec<PathBuf>; #[derive(Clone, Default, Debug, PartialEq)] pub struct FileSwitcherView { pub search: BufferViewData, pub results: FileSwitcherResults, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum FindReplaceMode { CurrentFile, } d!(for FindReplaceMode: FindReplaceMode::CurrentFile); #[derive(Clone, Default, Debug, PartialEq)] pub struct FindReplaceView { pub mode: FindReplaceMode, pub find: BufferViewData, pub replace: BufferViewData, pub result_count: usize, } #[derive(Clone, Default, Debug, PartialEq)] pub struct GoToPositionView { pub go_to_position: BufferViewData, } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum EditedTransition { ToEdited, ToUnedited, } pub type IndexedEditedTransition = (g_i::Index, EditedTransition); #[derive(Clone, Default, Debug, PartialEq, Eq)] pub struct EditedTransitions(Vec<IndexedEditedTransition>); impl EditedTransitions { pub fn push(&mut self, iet: IndexedEditedTransition) { self.0.push(iet); } pub fn clear(&mut self) { self.0.clear(); } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } pub fn iter(&self) -> impl Iterator<Item = &IndexedEditedTransition> { self.0.iter() } } impl IntoIterator for EditedTransitions { type Item = IndexedEditedTransition; type IntoIter = std::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } #[derive(Clone, Default, PartialEq)] pub struct BufferLabel { pub name: BufferName, /// Having an owned version of the result of `name.to_string()` simplifies /// ownership in some cases. // TODO this could be truncated to a fixed length/on the stack pub name_string: String, } fmt_debug!(collapse default for BufferLabel: me { blank_if_default!(name); blank_if_default!(name_string, me.name_string.is_empty()); }); // This could arguably be ToOwned. impl From<&BufferName> for BufferLabel { fn from(name: &BufferName) -> Self { Self { name: name.clone(), name_string: name.to_string(), } } } impl From<BufferName> for BufferLabel { fn from(name: BufferName) -> Self { let name_string = name.to_string(); Self { name, name_string, } } } #[derive(Clone, Default, Debug, PartialEq)] pub struct View { pub buffers: SelectableVec1<BufferLabel>, pub menu: MenuView, pub status_line: StatusLineView, pub current_buffer_kind: BufferIdKind, pub edited_transitions: EditedTransitions, pub stats: ViewStats, } impl View { #[must_use] /// returns the currently visible editor buffer index. pub fn current_text_index(&self) -> g_i::Index { self.buffers.current_index() } #[must_use] /// returns the currently visible editor buffer view's index and label. pub fn current_text_index_and_buffer_label(&self) -> (g_i::Index, &BufferLabel) { ( self.buffers.current_index(), self.buffers.get_current_element() ) } #[must_use] pub fn get_buffer_label(&self, index: g_i::Index) -> Option<&BufferLabel> { self.buffers.get(index) } #[must_use] pub fn current_buffer_id(&self) -> BufferId { b_id!( self.current_buffer_kind, self.buffers.current_index() ) } #[must_use] /// returns the selected menu's cursors if there is a menu containing a buffer /// currently visible, or the current text buffer's cursors if not. pub fn get_selected_cursors(&self) -> Option<&[CursorView]>
GoToPosition => match &self.menu { MenuView::GoToPosition(ref gtp) => Some(&gtp.go_to_position), _ => Option::None, }, }.map(|d| &d.cursors[..]) } #[must_use] /// returns the currently visible editor buffer path if it has one. pub fn current_path(&self) -> Option<PathBuf> { u!{BufferName} match self.buffers.get_current_element().name { Path(ref p) => Some(p.clone()), Scratch(_) => None, } } } #[derive(Clone, Default, PartialEq)] pub struct BufferView { pub label: BufferLabel, pub data: BufferViewData, } fmt_debug!(collapse default for BufferView: me { blank_if_default!(label); blank_if_default!(data); }); /// The reason we keep this as a separate struct from `BufferView` is to enable /// storage of buffers without a `BufferName`. #[derive(Clone, Default, PartialEq)] pub struct BufferViewData { pub chars: String, pub scroll: ScrollXY, pub cursors: Vec<CursorView>, pub highlights: Vec<Highlight>, pub spans: Spans, } fmt_debug!(collapse default for BufferViewData: me { blank_if_default!(chars, me.chars == Rope::default()); blank_if_default!(scroll); blank_if_default!(cursors
{
        use BufferIdKind::*;
        match self.current_buffer_kind {
            // Seems like we never actually need to access the Text buffer
            // cursors here. If we want to later, then some additional restructuring
            // will be needed, at least according to the comment this comment
            // replaced. commit `680d9507`
            None | Text => Option::None,
            Find => match &self.menu {
                MenuView::FindReplace(ref fr) => Some(&fr.find),
                _ => Option::None,
            },
            Replace => match &self.menu {
                MenuView::FindReplace(ref fr) => Some(&fr.replace),
                _ => Option::None,
            },
            FileSwitcher => match &self.menu {
                MenuView::FileSwitcher(ref fs) => Some(&fs.search),
                _ => Option::None,
            },
identifier_body
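The `kind_editable_during_mode` function appearing in these rows encodes which buffer kinds accept input under each menu mode. Below is a small self-contained check of that rule, restated over simplified copies of the enums (`FindReplaceMode` collapsed away), assuming the behaviour of the match arms in the source:

// Simplified copies of `BufferIdKind`/`MenuMode` (FindReplace carries no mode here).
#[derive(Clone, Copy)]
enum Kind { None, Text, Find, Replace, FileSwitcher, GoToPosition }
#[derive(Clone, Copy)]
enum Mode { Hidden, FileSwitcher, FindReplace, GoToPosition }

// Same shape as the source's match: Text is always editable; the menu-specific
// kinds are editable only while their own menu is open.
fn editable(kind: Kind, mode: Mode) -> bool {
    matches!(
        (kind, mode),
        (Kind::Text, _)
            | (Kind::Find | Kind::Replace, Mode::FindReplace)
            | (Kind::FileSwitcher, Mode::FileSwitcher)
            | (Kind::GoToPosition, Mode::GoToPosition)
    )
}

fn main() {
    assert!(editable(Kind::Text, Mode::Hidden));
    assert!(editable(Kind::Replace, Mode::FindReplace));
    assert!(editable(Kind::FileSwitcher, Mode::FileSwitcher));
    assert!(!editable(Kind::Find, Mode::GoToPosition));
    assert!(!editable(Kind::GoToPosition, Mode::FileSwitcher));
    assert!(!editable(Kind::None, Mode::Hidden));
    println!("editability table matches the source's match arms");
}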
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0!= other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty!= &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } } } #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn
(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); 
RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => { for decl in decls { eval_decl(decl, ctx).unwrap(); } eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if!eq {err!("Assertion failed: {}!= {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len()!= vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) 
-> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) => { Some((0..i).map(RunVal::Index).collect()) }, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if!gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
add_datatype
identifier_name
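The `middle` of the row above is `add_datatype`, whose body binds each variant as a value (a `RunVal::Data` carrying the shared `DataType` and the variant's index) and the datatype itself as a type. The following is a simplified model of that bookkeeping, not the real `Context`, which also maintains a `TypeContext` and threads `Ret` errors:

use std::collections::HashMap;
use std::rc::Rc;

// Stand-in for the document's `DataType`.
struct DataType { id: String, variants: Vec<String> }

// Simplified context: the real one stores `RunVal`s and a `TypeContext`.
#[derive(Default)]
struct Ctx {
    vars: HashMap<String, (Rc<DataType>, usize)>, // variant name -> Data(dt, index)
    types: HashMap<String, Rc<DataType>>,         // type name    -> Type::Data(dt)
}

impl Ctx {
    fn add_datatype(&mut self, id: &str, variants: Vec<String>) {
        let rc = Rc::new(DataType { id: id.to_string(), variants: variants.clone() });
        for (i, variant) in variants.into_iter().enumerate() {
            self.vars.insert(variant, (rc.clone(), i));
        }
        self.types.insert(id.to_string(), rc);
    }
}

fn main() {
    let mut ctx = Ctx::default();
    ctx.add_datatype("Bit", vec!["Zero".into(), "One".into()]);
    let (dt, index) = &ctx.vars["One"];
    assert_eq!((dt.id.as_str(), *index), ("Bit", 1));
    assert_eq!(ctx.types["Bit"].variants.len(), 2);
    println!("`One` is variant {} of `{}`", index, dt.id);
}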
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0!= other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty!= &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } } } #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = 
self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => {
} eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if!eq {err!("Assertion failed: {}!= {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len()!= vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, 
ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) => { Some((0..i).map(RunVal::Index).collect()) }, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if!gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
for decl in decls { eval_decl(decl, ctx).unwrap();
random_line_split
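`eval_exp_seq` in these eval.rs rows flattens `Exp::Expand` arguments through `iterate_val`: an expanded `Index(n)` contributes the indices `0..n`, and an expanded tuple contributes its elements. A minimal self-contained version over a stand-in enum (the real `RunVal` has further variants, which `iterate_val` maps to `None`):

// Stand-in for `RunVal` restricted to the two expandable variants.
#[derive(Clone, Debug, PartialEq)]
enum Val { Index(usize), Tuple(Vec<Val>) }

fn iterate_val(val: Val) -> Option<Vec<Val>> {
    match val {
        Val::Index(i) => Some((0..i).map(Val::Index).collect()),
        Val::Tuple(vals) => Some(vals),
    }
}

fn main() {
    // An expanded Index(3) flattens into the sequence of indices below it.
    assert_eq!(
        iterate_val(Val::Index(3)),
        Some(vec![Val::Index(0), Val::Index(1), Val::Index(2)])
    );
    // A tuple expands to its elements.
    assert_eq!(
        iterate_val(Val::Tuple(vec![Val::Index(7)])),
        Some(vec![Val::Index(7)])
    );
    println!("expansion semantics match eval_exp_seq's flat_map");
}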
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0!= other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty!= &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } } } #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = 
self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => { for decl in decls { eval_decl(decl, ctx).unwrap(); } eval_exp(exp, 
ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if!eq {err!("Assertion failed: {}!= {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if pats.len()!= vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| 
a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) =>
, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if !gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
{ Some((0..i).map(RunVal::Index).collect()) }
conditional_block
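Note on this row: the fim_type `conditional_block` labels a middle that completes a single match arm — here the `RunVal::Index(i)` arm of `iterate_val`, whose filled-in body is `{ Some((0..i).map(RunVal::Index).collect()) }`. A minimal standalone sketch of that arm's behavior, with the `RunVal` wrapper swapped for plain `usize` so the snippet compiles on its own (that substitution is for illustration only):

// Mirrors the completed arm: Index(i) expands to the indices 0..i.
fn iterate_index(i: usize) -> Option<Vec<usize>> {
    Some((0..i).collect())
}

fn main() {
    assert_eq!(iterate_index(3), Some(vec![0, 1, 2]));
    assert_eq!(iterate_index(0), Some(vec![])); // empty, not None
}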
eval.rs
use error::*; use ast::*; use engine::*; use types::*; use eval_static::*; use std::fmt; use std::rc::Rc; use std::collections::HashMap; #[derive(Clone)] pub struct Macro(pub Ident, pub Rc<dyn Fn(&Exp, &Context) -> Ret<RunVal>>); impl fmt::Debug for Macro { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, ":macro: {}", self.0) } } impl PartialEq for Macro { fn eq(&self, other: &Self) -> bool { self.0 == other.0 } fn ne(&self, other: &Self) -> bool { self.0!= other.0 } } #[derive(Clone,Debug,PartialEq)] pub enum RunVal { Index(usize), String(String), Data(Rc<DataType>, usize), Tuple(Vec<RunVal>), Func(Rc<Context>, Pat, Exp, Type), Macro(Macro), State(State, Type), Gate(Gate), } impl fmt::Display for RunVal { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} #[derive(Clone,Debug,PartialEq)] pub struct Context { path: String, vars: HashMap<Ident, RunVal>, types: TypeContext, } impl Context { pub fn new(path: String) -> Context { Context { path, vars: HashMap::new(), types: TypeContext::new(), } } pub fn path(&self) -> &String { &self.path } pub fn types(&self) -> &TypeContext { &self.types } pub fn create_child(&self) -> Context { self.clone() } pub fn find_var(&self, id: &Ident) -> Ret<RunVal> { unwrap_from_context("Variable", id, self.vars.get(id)) } pub fn add_var(&mut self, id: Ident, val: RunVal, ty: Type) -> Ret { self.vars.insert(id.clone(), val); self.types.add_var_type(id, ty) } pub fn find_type(&self, id: &Ident) -> Ret<Type> { self.types.find_type(id) } pub fn add_type(&mut self, id: String, ty: Type) -> Ret { self.types.add_type(id, ty) } pub fn add_datatype(&mut self, id: String, variants: Vec<Ident>) -> Ret { let rc = Rc::new(DataType {id: id.clone(), variants: variants.clone()}); for (i, variant) in variants.iter().enumerate() { self.add_var(variant.clone(), RunVal::Data(rc.clone(), i), Type::Data(rc.clone()))?; } self.add_type(id, Type::Data(rc)) } pub fn add_macro(&mut self, id: &str, handle: &'static dyn Fn(&Exp, &Context) -> Ret<RunVal>) -> Ret { self.add_var(id.to_string(), RunVal::Macro(Macro(id.to_string(), Rc::new(handle))), Type::Any /* TODO define macro types */) } pub fn import(&self, path: &str) -> Ret<Module> { use regex::Regex; use std::path::Path; use resource; use stdlib; use parser; let (ctx, file) = if Regex::new("^[a-z]+:").unwrap().is_match(path) {(self.create_child(), path.to_string())} else { let import_path = Path::new(&self.path()).join(&resource::with_ext(path, "fqy")); let mut import_dir = import_path.clone(); import_dir.pop(); let file = import_path.to_string_lossy().to_string(); let ctx = stdlib::create_ctx(&import_dir.to_string_lossy())?; (ctx, file) }; let exp = parser::parse_resource(&file)?; Ok(Module {path: file.to_string(), exp: exp, ctx: ctx}) } pub fn import_eval(&self, path: &str) -> Ret<RunVal> { let mut module = self.import(path)?; Ok(eval_exp_inline(&module.exp, &mut module.ctx)) } } #[derive(Clone,Debug,PartialEq)] pub struct Module { pub path: String, pub exp: Exp, pub ctx: Context, } pub fn eval_exp(exp: &Exp, ctx: &Context) -> RunVal { match exp { &Exp::Index(n) => RunVal::Index(n), &Exp::String(ref s) => RunVal::String(s.to_string()), &Exp::Var(ref id) => ctx.find_var(id).unwrap(), &Exp::Scope(ref decls, ref ret) => { let mut child = ctx.create_child(); for decl in decls { eval_decl(decl, &mut child).unwrap(); } eval_exp(ret, &child) }, &Exp::Expand(_) => panic!("No context for expansion"), &Exp::Tuple(ref args) => RunVal::Tuple(eval_exp_seq(args, ctx)), &Exp::Concat(ref args) => { //TODO adjacent gates if args.len() == 1 { if let Some(gate) = build_gate(&eval_exp(&args[0], ctx), ctx) { return RunVal::Gate(gate) } } let div = (args.len() as f32).sqrt(); let states = args.iter() .map(|e| build_state_typed(eval_exp(e, ctx))) .collect::<Ret<Vec<(State, Type)>>>().unwrap(); RunVal::State(states.iter() .flat_map(|(s, _)| s) .map(|n| n / div) .collect(), Type::Concat(states.into_iter() .map(|(_, t)| t) .collect())) }, &Exp::Cond(ref cond_exp, ref then_exp, ref else_exp) => { let val = eval_exp(cond_exp, ctx); if let Some(b) = build_bool(&val) { eval_exp(if b {then_exp} else {else_exp}, ctx) } else { // TODO: consider removing in favor of using extract gates for explicitness // let state = build_state(val); // if state.len() > 2 { // panic!("Conditional state cannot be {}-dimensional", 
state.len()) // } // RunVal::State(state.extract(vec![ // build_state(eval_exp(else_exp, ctx)), // build_state(eval_exp(then_exp, ctx)), // ]), Type::Any /* TODO determine from then/else types */) panic!("Non-boolean value: {}", val) } }, &Exp::Lambda(ref pat, ref body) => { let ty = infer_type(exp, ctx.types()).unwrap(); RunVal::Func(Rc::new(ctx.clone()), pat.clone(), (**body).clone(), ty) }, &Exp::Invoke(ref target, ref arg) => { match eval_exp(target, ctx) { // TODO proper tuple function evaluation RunVal::Func(fn_ctx_rc, pat, body, _ty) => { let mut fn_ctx = (*fn_ctx_rc).clone(); assign_pat(&pat, &eval_exp(arg, ctx), &mut fn_ctx).unwrap(); eval_exp(&body, &fn_ctx) }, RunVal::Macro(Macro(_, handle)) => handle(arg, ctx).unwrap(), RunVal::Gate(gate) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s.extract(gate), t) }, val => { let msg = &format!("Cannot invoke {}", val); let state = build_state(eval_exp(arg, ctx)); let gate = build_gate(&val, ctx).expect(msg); RunVal::State(state.extract(gate), Type::Any /* TODO infer output type from `target` */) }, } }, &Exp::Repeat(n, ref exp) => { let val = eval_exp(&exp, ctx); RunVal::Tuple((0..n).map(|_| val.clone()).collect()) }, &Exp::State(ref arg) => { let (s, t) = build_state_typed(eval_exp(arg, ctx)).unwrap(); RunVal::State(s, t) }, &Exp::Phase(phase, ref arg) => { let val = eval_exp(arg, ctx); build_gate(&val, ctx) .map(|g| RunVal::Gate(g.power(phase))) .unwrap_or_else(|| { let (s, t) = build_state_typed(val).unwrap(); RunVal::State(s.phase(phase), t) }) }, &Exp::Extract(ref arg, ref cases) => { let state = build_state(eval_exp(arg, ctx)); let (gate, gt) = create_extract_gate_typed(cases, state.len(), ctx); RunVal::State(state.extract(gate), gt) }, &Exp::Anno(ref exp, ref anno) => eval_type(anno, ctx.types()).unwrap().assign(eval_exp(exp, ctx)).unwrap(), } } pub fn eval_exp_inline(exp: &Exp, ctx: &mut Context) -> RunVal { match exp { Exp::Scope(ref decls, ref exp) => { for decl in decls { eval_decl(decl, ctx).unwrap(); } eval_exp(exp, ctx) }, _ => eval_exp(exp, ctx), } } pub fn eval_exp_seq(seq: &Vec<Exp>, ctx: &Context) -> Vec<RunVal> { seq.iter().flat_map(|e| { if let Exp::Expand(ref e) = e { let val = eval_exp(e, ctx); let err = Error(format!("Cannot expand value: {}", val)); iterate_val(val).ok_or(err).unwrap() } else {vec![eval_exp(e, ctx)]} }).collect() } pub fn eval_decl(decl: &Decl, ctx: &mut Context) -> Ret { match decl { &Decl::Let(ref pat, ref exp) => assign_pat(pat, &eval_exp(exp, ctx), ctx), &Decl::Type(ref id, ref pat) => { let ty = eval_type(pat, ctx.types())?; ctx.add_type(id.clone(), ty) }, &Decl::Data(ref id, ref variants) => ctx.add_datatype(id.clone(), variants.clone()), &Decl::Assert(ref expect, ref result) => { let a = eval_exp(expect, ctx); let b = eval_exp(result, ctx); let eq = match (&a, &b) { (&RunVal::State(ref a, _), &RunVal::State(ref b, _)) => { a.iter().zip(b).map(|(a, b)| { let abs = (a - b).norm(); abs * abs }).sum::<f32>() < 0.00001_f32 }, (a, b) => a == b, }; if!eq {err!("Assertion failed: {}!= {}", a, b)} else {Ok(())} }, &Decl::Print(ref exp) => Ok(println!(":: {}", eval_exp(exp, ctx))), &Decl::Do(ref exp) => { eval_exp(exp, ctx); Ok(()) }, } } // TODO combine logic with eval_static::assign_pat_type() pub fn assign_pat(pat: &Pat, val: &RunVal, ctx: &mut Context) -> Ret { match (pat, val) { (&Pat::Any, _) => Ok(()), (&Pat::Var(ref id), _) => ctx.add_var(id.clone(), val.clone(), get_val_type(val)), //TODO use val type (&Pat::Tuple(ref pats), &RunVal::Tuple(ref vals)) => { if 
pats.len() != vals.len() {err!("Cannot deconstruct {} values from value: {}", pats.len(), val)} else { pats.iter().zip(vals) .map(|(pat, val)| assign_pat(pat, val, ctx)) .collect::<Ret<_>>() } }, (&Pat::Anno(ref pat, ref anno), _) => assign_pat(pat, &eval_type(&anno, ctx.types())?.assign(val.clone())?, ctx), _ => err!("{:?} cannot deconstruct `{}`", pat, val), } } pub fn get_val_type(val: &RunVal) -> Type { match val { &RunVal::Index(_) => Type::Any, &RunVal::String(_) => Type::Any, &RunVal::Data(ref dt, _) => Type::Data((*dt).clone()), &RunVal::Tuple(ref vals) => Type::Tuple(vals.iter().map(get_val_type).collect()), &RunVal::Func(_, _, _, ref ty) => ty.clone(), &RunVal::Macro(_) => Type::Any, // TODO &RunVal::State(_, ref ty) => ty.clone(), &RunVal::Gate(_) => Type::Any, // TODO } } pub fn build_bool(val: &RunVal) -> Option<bool> { match val { &RunVal::Index(n) => Some(n > 0), &RunVal::Data(ref _ty, n) => Some(n > 0), &RunVal::Tuple(ref vec) => Some(vec.len() > 0), _ => None, } } pub fn build_state(val: RunVal) -> State { build_state_typed(val).unwrap().0 } pub fn build_state_typed(val: RunVal) -> Ret<(State, Type)> { match val { RunVal::Index(n) => Ok((get_state(n), Type::Any)), RunVal::Data(dt, index) => Ok((get_state(index).pad(dt.variants.len()), Type::Data(dt))), RunVal::Tuple(vals) => { let states = vals.into_iter().map(|v| build_state_typed(v)).collect::<Ret<Vec<(State, Type)>>>()?; let ty = Type::Tuple(states.iter().map(|(_, t)| t.clone()).collect()); Ok((states.into_iter().fold(get_state(0), |a, (b, _)| State::combine(a, b)), ty)) }, RunVal::State(state, ty) => Ok((state, ty)), val => err!("Cannot build state from {}", val) } } pub fn eval_gate_body(exp: &Exp, ctx: &Context) -> Option<Gate> { match exp { &Exp::Extract(ref _arg, ref cases) => Some(create_extract_gate_typed(cases, 0, ctx).0), _ => None, } } pub fn build_gate(val: &RunVal, ctx: &Context) -> Option<Gate> { match val { &RunVal::Tuple(ref vals) => vals.iter() .fold(Some(vec![get_state(0)]), |a, b| a.and_then(|a| build_gate(b, ctx).map(|b| a.combine(b)))), &RunVal::Func(ref fn_ctx, ref _pat, ref body, ref _ty) => eval_gate_body(body, fn_ctx), // TODO use type &RunVal::Gate(ref gate) => Some(gate.clone()), _ => None, } } pub fn iterate_val(val: RunVal) -> Option<Vec<RunVal>> { match val { RunVal::Index(i) => { Some((0..i).map(RunVal::Index).collect()) }, RunVal::Tuple(vals) => Some(vals), _ => None, } } pub fn create_extract_gate_typed(cases: &Vec<Case>, min_input_size: usize, ctx: &Context) -> (Gate, Type) { fn reduce_type(output_type: Option<Type>, t: Type) -> Option<Type> { Some(match output_type { None => t, Some(ot) => if ot == t {t} else {Type::Any}, }) } let mut dims: Gate = vec![]; let mut output_type = None; for case in cases.iter() { match case { &Case::Exp(ref selector, ref result) => { let selector_state = build_state(eval_exp(selector, ctx)); let (result_state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); while dims.len() < selector_state.len() || dims.len() < min_input_size { dims.push(vec![]); } for (i, s) in selector_state.iter().enumerate() { let len = ::std::cmp::max(result_state.len(), dims[i].len()); // TODO improve impl dims[i] = result_state.clone().pad(len).into_iter() .zip(dims[i].clone().pad(len).into_iter()) .map(|(r, d)| r * s + d) .collect(); } output_type = reduce_type(output_type, result_type); }, &Case::Default(ref result) => { let (state, result_type) = build_state_typed(eval_exp(result, ctx)).unwrap(); for i in 0..dims.len() { use num::Zero; if 
dims[i].prob_sum().is_zero() { dims[i] = state.clone(); } } output_type = reduce_type(output_type, result_type); }, } } let max_len = dims.iter().map(Vec::len).max().unwrap_or(0); let gate: Gate = dims.into_iter().map(|s| s.pad(max_len)).collect(); // if !gate.is_unitary() { // panic!("Non-unitary extraction: {:?}", cases); // } (gate, output_type.unwrap_or(Type::Any)) }
{ match self { &RunVal::Index(ref n) => write!(f, "{}", n), &RunVal::String(ref s) => write!(f, "{:?}", s), &RunVal::Data(ref dt, ref index) => write!(f, "{}", dt.variants[*index]), &RunVal::Tuple(ref vals) => write!(f, "({})", vals.iter().map(|val| format!("{}", val)).collect::<Vec<_>>().join(", ")), &RunVal::Func(ref _ctx, ref _pat, ref _body, ref ty) => write!(f, "fn{}", ty), &RunVal::Macro(ref mc) => write!(f, "{:?}", mc), &RunVal::State(ref state, ref ty) => if ty != &Type::Any { write!(f, "{}: {}", StateView(state), ty) } else { write!(f, "{}", StateView(state)) }, &RunVal::Gate(ref gate) => write!(f, "[{}]", gate.iter().map(|state| format!("{}", StateView(state))).collect::<Vec<_>>().join(", ")), } }
identifier_body
avx.rs
/* * Copyright (c) 2023. * * This software is free software; * * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license */ //! AVX color conversion routines //! //! Okay these codes are cool //! //! Herein lies super optimized codes to do color conversions. //! //! //! 1. The YCbCr to RGB use integer approximations and not the floating point equivalent. //! That means we may be +- 2 of pixels generated by libjpeg-turbo jpeg decoding //! (also libjpeg uses routines like `Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G`) //! //! Firstly, we use integers (fun fact:there is no part of this code base where were dealing with //! floating points.., fun fact: the first fun fact wasn't even fun.) //! //! Secondly,we have cool clamping code, especially for rgba, where we don't need clamping and we //! spend our time cursing that Intel decided permute instructions to work like 2 128 bit vectors(the compiler opitmizes //! it out to something cool). //! //! There isn't a lot here (not as fun as bitstream ) but I hope you find what you're looking for. //! //! O and ~~subscribe to my youtube channel~~ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![cfg(feature = "x86")] #![allow( clippy::wildcard_imports, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::inline_always, clippy::doc_markdown, dead_code )] #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; pub union YmmRegister { // both are 32 when using std::mem::size_of mm256: __m256i, // for avx color conversion array: [i16; 16] } //-------------------------------------------------------------------------------------------------- // AVX conversion routines //-------------------------------------------------------------------------------------------------- /// /// Convert YCBCR to RGB using AVX instructions /// /// # Note ///**IT IS THE RESPONSIBILITY OF THE CALLER TO CALL THIS IN CPUS SUPPORTING /// AVX2 OTHERWISE THIS IS UB** /// /// *Peace* /// /// This library itself will ensure that it's never called in CPU's not /// supporting AVX2 /// /// # Arguments /// - `y`,`cb`,`cr`: A reference of 8 i32's /// - `out`: The output array where we store our converted items /// - `offset`: The position from 0 where we write these RGB values #[inline(always)] pub fn ycbcr_to_rgb_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // call this in another function to tell RUST to vectorize this // storing unsafe { ycbcr_to_rgb_avx2_1(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_avx2_1( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // Load output buffer let tmp: &mut [u8; 48] = out .get_mut(*offset..*offset + 48) .expect("Slice to small cannot write") .try_into() .unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr); let mut j = 0; let mut i = 0; while i < 48 { tmp[i] = r.array[j] as u8; tmp[i + 1] = g.array[j] as u8; tmp[i + 2] = b.array[j] as u8; i += 3; j += 1; } *offset += 48; } /// Baseline implementation of YCBCR to RGB for avx, /// /// It uses integer operations as opposed to floats, the approximation is /// difficult for the eye to see, but this means that it may produce different /// values with libjpeg_turbo. if accuracy is of utmost importance, use that. 
/// /// this function should be called for most implementations, including /// - ycbcr->rgb /// - ycbcr->rgba /// - ycbcr->brga /// - ycbcr->rgbx #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_baseline( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (YmmRegister, YmmRegister, YmmRegister) { // Load values into a register // // dst[127:0] := MEM[loaddr+127:loaddr] // dst[255:128] := MEM[hiaddr+127:hiaddr] let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(y_c, r2)) }; // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = YmmRegister { mm256: clamp_avx(_mm256_sub_epi16(y_c, g4)) }; // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(b2, y_c)) }; return (r, g, b); } #[inline] #[target_feature(enable = "avx2")] /// A baseline implementation of YCbCr to RGB conversion which does not carry /// out clamping /// /// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion /// routines unsafe fn ycbcr_to_rgb_baseline_no_clamp( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (__m256i, __m256i, __m256i) { // Load values into a register // let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = _mm256_add_epi16(y_c, r2); // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = _mm256_sub_epi16(y_c, g4); // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = _mm256_add_epi16(b2, y_c); return (r, g, b); } #[inline(always)] pub fn ycbcr_to_rgba_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { unsafe { ycbcr_to_rgba_unsafe(y, cb, 
cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[rustfmt::skip] unsafe fn ycbcr_to_rgba_unsafe( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize, ) { // check if we have enough space to write. let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr); // set alpha channel to 255 for opaque // And no these comments were not from me pressing the keyboard // Pack the integers into u8's using signed saturation. let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd // transpose_u16 and interleave channels let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd // final transpose_u16 let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd let h = _mm256_unpackhi_epi8(e, f); // undo packus shuffling... let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h); let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h); let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h); let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h); let m = _mm256_blend_epi32::<0b1111_0000>(i, j); let n = _mm256_blend_epi32::<0b1111_0000>(k, l); // Store // Use streaming instructions to prevent polluting the cache? _mm256_storeu_si256(tmp.as_mut_ptr().cast(), m); _mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n); *offset += 64; } /// Clamp values between 0 and 255 /// /// This function clamps all values in `reg` to be between 0 and 255 ///( the accepted values for RGB) #[inline] #[target_feature(enable = "avx2")] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe fn clamp_avx(reg: __m256i) -> __m256i { // the lowest value let min_s = _mm256_set1_epi16(0); // Highest value let max_s = _mm256_set1_epi16(255);
return min_v; } #[inline] const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 { (z << 6) | (y << 4) | (x << 2) | w }
let max_v = _mm256_max_epi16(reg, min_s); //max(a,0) let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255)
random_line_split
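The avx.rs row above vectorizes an integer YCbCr-to-RGB approximation: r = Y + 45*Cr/32, g = Y - (11*Cb + 23*Cr)/32, b = Y + 113*Cb/64, each clamped to 0..=255 (the formulas appear verbatim in the code's comments). A scalar sketch of the same arithmetic, widened to i32 to sidestep i16 overflow — the widening is an editorial choice, not part of the original SIMD code, which works in epi16 lanes and clamps separately:

// Per-pixel version of the formulas the AVX code computes 16 lanes at a time.
// >> on a signed integer is an arithmetic shift, matching _mm256_srai_epi16.
fn ycbcr_to_rgb_scalar(y: i16, cb: i16, cr: i16) -> (u8, u8, u8) {
    let (y, cb, cr) = (y as i32, cb as i32 - 128, cr as i32 - 128);
    let clamp = |v: i32| v.clamp(0, 255) as u8;
    let r = clamp(y + ((45 * cr) >> 5));           // Y + 45*Cr/32
    let g = clamp(y - ((11 * cb + 23 * cr) >> 5)); // Y - (11*Cb + 23*Cr)/32
    let b = clamp(y + ((113 * cb) >> 6));          // Y + 113*Cb/64
    (r, g, b)
}

fn main() {
    // Neutral chroma leaves luma untouched: gray stays gray.
    assert_eq!(ycbcr_to_rgb_scalar(128, 128, 128), (128, 128, 128));
}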
avx.rs
/* * Copyright (c) 2023. * * This software is free software; * * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license */ //! AVX color conversion routines //! //! Okay these codes are cool //! //! Herein lies super optimized codes to do color conversions. //! //! //! 1. The YCbCr to RGB use integer approximations and not the floating point equivalent. //! That means we may be +- 2 of pixels generated by libjpeg-turbo jpeg decoding //! (also libjpeg uses routines like `Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G`) //! //! Firstly, we use integers (fun fact:there is no part of this code base where were dealing with //! floating points.., fun fact: the first fun fact wasn't even fun.) //! //! Secondly,we have cool clamping code, especially for rgba, where we don't need clamping and we //! spend our time cursing that Intel decided permute instructions to work like 2 128 bit vectors(the compiler opitmizes //! it out to something cool). //! //! There isn't a lot here (not as fun as bitstream ) but I hope you find what you're looking for. //! //! O and ~~subscribe to my youtube channel~~ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![cfg(feature = "x86")] #![allow( clippy::wildcard_imports, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::inline_always, clippy::doc_markdown, dead_code )] #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; pub union YmmRegister { // both are 32 when using std::mem::size_of mm256: __m256i, // for avx color conversion array: [i16; 16] } //-------------------------------------------------------------------------------------------------- // AVX conversion routines //-------------------------------------------------------------------------------------------------- /// /// Convert YCBCR to RGB using AVX instructions /// /// # Note ///**IT IS THE RESPONSIBILITY OF THE CALLER TO CALL THIS IN CPUS SUPPORTING /// AVX2 OTHERWISE THIS IS UB** /// /// *Peace* /// /// This library itself will ensure that it's never called in CPU's not /// supporting AVX2 /// /// # Arguments /// - `y`,`cb`,`cr`: A reference of 8 i32's /// - `out`: The output array where we store our converted items /// - `offset`: The position from 0 where we write these RGB values #[inline(always)] pub fn ycbcr_to_rgb_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // call this in another function to tell RUST to vectorize this // storing unsafe { ycbcr_to_rgb_avx2_1(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_avx2_1( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // Load output buffer let tmp: &mut [u8; 48] = out .get_mut(*offset..*offset + 48) .expect("Slice to small cannot write") .try_into() .unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr); let mut j = 0; let mut i = 0; while i < 48 { tmp[i] = r.array[j] as u8; tmp[i + 1] = g.array[j] as u8; tmp[i + 2] = b.array[j] as u8; i += 3; j += 1; } *offset += 48; } /// Baseline implementation of YCBCR to RGB for avx, /// /// It uses integer operations as opposed to floats, the approximation is /// difficult for the eye to see, but this means that it may produce different /// values with libjpeg_turbo. if accuracy is of utmost importance, use that. 
/// /// this function should be called for most implementations, including /// - ycbcr->rgb /// - ycbcr->rgba /// - ycbcr->brga /// - ycbcr->rgbx #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_baseline( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (YmmRegister, YmmRegister, YmmRegister) { // Load values into a register // // dst[127:0] := MEM[loaddr+127:loaddr] // dst[255:128] := MEM[hiaddr+127:hiaddr] let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(y_c, r2)) }; // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = YmmRegister { mm256: clamp_avx(_mm256_sub_epi16(y_c, g4)) }; // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(b2, y_c)) }; return (r, g, b); } #[inline] #[target_feature(enable = "avx2")] /// A baseline implementation of YCbCr to RGB conversion which does not carry /// out clamping /// /// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion /// routines unsafe fn ycbcr_to_rgb_baseline_no_clamp( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (__m256i, __m256i, __m256i) { // Load values into a register // let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = _mm256_add_epi16(y_c, r2); // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = _mm256_sub_epi16(y_c, g4); // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = _mm256_add_epi16(b2, y_c); return (r, g, b); } #[inline(always)] pub fn ycbcr_to_rgba_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize )
#[inline] #[target_feature(enable = "avx2")] #[rustfmt::skip] unsafe fn ycbcr_to_rgba_unsafe( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize, ) { // check if we have enough space to write. let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr); // set alpha channel to 255 for opaque // And no these comments were not from me pressing the keyboard // Pack the integers into u8's using signed saturation. let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd // transpose_u16 and interleave channels let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd // final transpose_u16 let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd let h = _mm256_unpackhi_epi8(e, f); // undo packus shuffling... let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h); let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h); let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h); let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h); let m = _mm256_blend_epi32::<0b1111_0000>(i, j); let n = _mm256_blend_epi32::<0b1111_0000>(k, l); // Store // Use streaming instructions to prevent polluting the cache? _mm256_storeu_si256(tmp.as_mut_ptr().cast(), m); _mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n); *offset += 64; } /// Clamp values between 0 and 255 /// /// This function clamps all values in `reg` to be between 0 and 255 ///( the accepted values for RGB) #[inline] #[target_feature(enable = "avx2")] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe fn clamp_avx(reg: __m256i) -> __m256i { // the lowest value let min_s = _mm256_set1_epi16(0); // Highest value let max_s = _mm256_set1_epi16(255); let max_v = _mm256_max_epi16(reg, min_s); //max(a,0) let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255) return min_v; } #[inline] const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 { (z << 6) | (y << 4) | (x << 2) | w }
{ unsafe { ycbcr_to_rgba_unsafe(y, cb, cr, out, offset); } }
identifier_body
avx.rs
/* * Copyright (c) 2023. * * This software is free software; * * You can redistribute it or modify it under terms of the MIT, Apache License or Zlib license */ //! AVX color conversion routines //! //! Okay these codes are cool //! //! Herein lies super optimized codes to do color conversions. //! //! //! 1. The YCbCr to RGB use integer approximations and not the floating point equivalent. //! That means we may be +- 2 of pixels generated by libjpeg-turbo jpeg decoding //! (also libjpeg uses routines like `Y = 0.29900 * R + 0.33700 * G + 0.11400 * B + 0.25000 * G`) //! //! Firstly, we use integers (fun fact:there is no part of this code base where were dealing with //! floating points.., fun fact: the first fun fact wasn't even fun.) //! //! Secondly,we have cool clamping code, especially for rgba, where we don't need clamping and we //! spend our time cursing that Intel decided permute instructions to work like 2 128 bit vectors(the compiler opitmizes //! it out to something cool). //! //! There isn't a lot here (not as fun as bitstream ) but I hope you find what you're looking for. //! //! O and ~~subscribe to my youtube channel~~ #![cfg(any(target_arch = "x86", target_arch = "x86_64"))] #![cfg(feature = "x86")] #![allow( clippy::wildcard_imports, clippy::cast_possible_truncation, clippy::too_many_arguments, clippy::inline_always, clippy::doc_markdown, dead_code )] #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; pub union YmmRegister { // both are 32 when using std::mem::size_of mm256: __m256i, // for avx color conversion array: [i16; 16] } //-------------------------------------------------------------------------------------------------- // AVX conversion routines //-------------------------------------------------------------------------------------------------- /// /// Convert YCBCR to RGB using AVX instructions /// /// # Note ///**IT IS THE RESPONSIBILITY OF THE CALLER TO CALL THIS IN CPUS SUPPORTING /// AVX2 OTHERWISE THIS IS UB** /// /// *Peace* /// /// This library itself will ensure that it's never called in CPU's not /// supporting AVX2 /// /// # Arguments /// - `y`,`cb`,`cr`: A reference of 8 i32's /// - `out`: The output array where we store our converted items /// - `offset`: The position from 0 where we write these RGB values #[inline(always)] pub fn ycbcr_to_rgb_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // call this in another function to tell RUST to vectorize this // storing unsafe { ycbcr_to_rgb_avx2_1(y, cb, cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_avx2_1( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { // Load output buffer let tmp: &mut [u8; 48] = out .get_mut(*offset..*offset + 48) .expect("Slice to small cannot write") .try_into() .unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline(y, cb, cr); let mut j = 0; let mut i = 0; while i < 48 { tmp[i] = r.array[j] as u8; tmp[i + 1] = g.array[j] as u8; tmp[i + 2] = b.array[j] as u8; i += 3; j += 1; } *offset += 48; } /// Baseline implementation of YCBCR to RGB for avx, /// /// It uses integer operations as opposed to floats, the approximation is /// difficult for the eye to see, but this means that it may produce different /// values with libjpeg_turbo. if accuracy is of utmost importance, use that. 
/// /// this function should be called for most implementations, including /// - ycbcr->rgb /// - ycbcr->rgba /// - ycbcr->brga /// - ycbcr->rgbx #[inline] #[target_feature(enable = "avx2")] #[target_feature(enable = "avx")] unsafe fn ycbcr_to_rgb_baseline( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (YmmRegister, YmmRegister, YmmRegister) { // Load values into a register // // dst[127:0] := MEM[loaddr+127:loaddr] // dst[255:128] := MEM[hiaddr+127:hiaddr] let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(y_c, r2)) }; // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = YmmRegister { mm256: clamp_avx(_mm256_sub_epi16(y_c, g4)) }; // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = YmmRegister { mm256: clamp_avx(_mm256_add_epi16(b2, y_c)) }; return (r, g, b); } #[inline] #[target_feature(enable = "avx2")] /// A baseline implementation of YCbCr to RGB conversion which does not carry /// out clamping /// /// This is used by the `ycbcr_to_rgba_avx` and `ycbcr_to_rgbx` conversion /// routines unsafe fn ycbcr_to_rgb_baseline_no_clamp( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16] ) -> (__m256i, __m256i, __m256i) { // Load values into a register // let y_c = _mm256_loadu_si256(y.as_ptr().cast()); let cb_c = _mm256_loadu_si256(cb.as_ptr().cast()); let cr_c = _mm256_loadu_si256(cr.as_ptr().cast()); // AVX version of integer version in https://stackoverflow.com/questions/4041840/function-to-convert-ycbcr-to-rgb // Cb = Cb-128; let cb_r = _mm256_sub_epi16(cb_c, _mm256_set1_epi16(128)); // cr = Cb -128; let cr_r = _mm256_sub_epi16(cr_c, _mm256_set1_epi16(128)); // Calculate Y->R // r = Y + 45 * Cr / 32 // 45*cr let r1 = _mm256_mullo_epi16(_mm256_set1_epi16(45), cr_r); // r1>>5 let r2 = _mm256_srai_epi16::<5>(r1); //y+r2 let r = _mm256_add_epi16(y_c, r2); // g = Y - (11 * Cb + 23 * Cr) / 32 ; // 11*cb let g1 = _mm256_mullo_epi16(_mm256_set1_epi16(11), cb_r); // 23*cr let g2 = _mm256_mullo_epi16(_mm256_set1_epi16(23), cr_r); //(11 //(11 * Cb + 23 * Cr) let g3 = _mm256_add_epi16(g1, g2); // (11 * Cb + 23 * Cr) / 32 let g4 = _mm256_srai_epi16::<5>(g3); // Y - (11 * Cb + 23 * Cr) / 32 ; let g = _mm256_sub_epi16(y_c, g4); // b = Y + 113 * Cb / 64 // 113 * cb let b1 = _mm256_mullo_epi16(_mm256_set1_epi16(113), cb_r); //113 * Cb / 64 let b2 = _mm256_srai_epi16::<6>(b1); // b = Y + 113 * Cb / 64 ; let b = _mm256_add_epi16(b2, y_c); return (r, g, b); } #[inline(always)] pub fn ycbcr_to_rgba_avx2( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize ) { unsafe { ycbcr_to_rgba_unsafe(y, cb, 
cr, out, offset); } } #[inline] #[target_feature(enable = "avx2")] #[rustfmt::skip] unsafe fn
( y: &[i16; 16], cb: &[i16; 16], cr: &[i16; 16], out: &mut [u8], offset: &mut usize, ) { // check if we have enough space to write. let tmp:& mut [u8; 64] = out.get_mut(*offset..*offset + 64).expect("Slice to small cannot write").try_into().unwrap(); let (r, g, b) = ycbcr_to_rgb_baseline_no_clamp(y, cb, cr); // set alpha channel to 255 for opaque // And no these comments were not from me pressing the keyboard // Pack the integers into u8's using signed saturation. let c = _mm256_packus_epi16(r, g); //aaaaa_bbbbb_aaaaa_bbbbbb let d = _mm256_packus_epi16(b, _mm256_set1_epi16(255)); // cccccc_dddddd_ccccccc_ddddd // transpose_u16 and interleave channels let e = _mm256_unpacklo_epi8(c, d); //ab_ab_ab_ab_ab_ab_ab_ab let f = _mm256_unpackhi_epi8(c, d); //cd_cd_cd_cd_cd_cd_cd_cd // final transpose_u16 let g = _mm256_unpacklo_epi8(e, f); //abcd_abcd_abcd_abcd_abcd let h = _mm256_unpackhi_epi8(e, f); // undo packus shuffling... let i = _mm256_permute2x128_si256::<{ shuffle(3, 2, 1, 0) }>(g, h); let j = _mm256_permute2x128_si256::<{ shuffle(1, 2, 3, 0) }>(g, h); let k = _mm256_permute2x128_si256::<{ shuffle(3, 2, 0, 1) }>(g, h); let l = _mm256_permute2x128_si256::<{ shuffle(0, 3, 2, 1) }>(g, h); let m = _mm256_blend_epi32::<0b1111_0000>(i, j); let n = _mm256_blend_epi32::<0b1111_0000>(k, l); // Store // Use streaming instructions to prevent polluting the cache? _mm256_storeu_si256(tmp.as_mut_ptr().cast(), m); _mm256_storeu_si256(tmp[32..].as_mut_ptr().cast(), n); *offset += 64; } /// Clamp values between 0 and 255 /// /// This function clamps all values in `reg` to be between 0 and 255 ///( the accepted values for RGB) #[inline] #[target_feature(enable = "avx2")] #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] unsafe fn clamp_avx(reg: __m256i) -> __m256i { // the lowest value let min_s = _mm256_set1_epi16(0); // Highest value let max_s = _mm256_set1_epi16(255); let max_v = _mm256_max_epi16(reg, min_s); //max(a,0) let min_v = _mm256_min_epi16(max_v, max_s); //min(max(a,0),255) return min_v; } #[inline] const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 { (z << 6) | (y << 4) | (x << 2) | w }
ycbcr_to_rgba_unsafe
identifier_name
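The middle of this row is just the identifier `ycbcr_to_rgba_unsafe` (fim_type `identifier_name` masks a single name). Incidentally, the `shuffle` const fn visible in the suffix packs four 2-bit lane selectors in the same way as C's _MM_SHUFFLE macro; a self-contained check (the assertion values below are mine, not from the dataset):

// Two bits per selector, highest lane first, as in _MM_SHUFFLE(z, y, x, w).
const fn shuffle(z: i32, y: i32, x: i32, w: i32) -> i32 {
    (z << 6) | (y << 4) | (x << 2) | w
}

fn main() {
    assert_eq!(shuffle(3, 2, 1, 0), 0b1110_0100); // identity lane order
    assert_eq!(shuffle(0, 3, 2, 1), 0b0011_1001);
}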
day_06.rs
/// --- Day 6: Chronal Coordinates --- /// /// The device on your wrist beeps several times, and once again you feel like /// you're falling. /// /// "Situation critical," the device announces. "Destination indeterminate. /// Chronal interference detected. Please specify new target coordinates." /// /// The device then produces a list of coordinates (your puzzle input). Are they /// places it thinks are safe or dangerous? It recommends you check manual page /// 729. The Elves did not give you a manual. /// /// If they're dangerous, maybe you can minimize the danger by finding the /// coordinate that gives the largest distance from the other points. /// /// Using only the Manhattan distance, determine the area around each coordinate /// by counting the number of integer X,Y locations that are closest to that /// coordinate (and aren't tied in distance to any other coordinate). /// /// Your goal is to find the size of the largest area that isn't infinite. For /// example, consider the following list of coordinates: /// /// 1, 1 /// 1, 6 /// 8, 3 /// 3, 4 /// 5, 5 /// 8, 9 /// /// If we name these coordinates A through F, we can draw them on a grid, /// putting 0,0 at the top left: /// ///.......... ///.A........ ///.......... ///........C. ///...D...... ///.....E.... ///.B........ ///.......... ///.......... ///........F. /// /// This view is partial - the actual grid extends infinitely in all directions. /// Using the Manhattan distance, each location's closest coordinate can be /// determined, shown here in lowercase: /// /// aaaaa.cccc /// aAaaa.cccc /// aaaddecccc /// aadddeccCc ///..dDdeeccc /// bb.deEeecc /// bBb.eeee.. /// bbb.eeefff /// bbb.eeffff /// bbb.ffffFf /// /// Locations shown as. are equally far from two or more coordinates, and so /// they don't count as being closest to any. /// /// In this example, the areas of coordinates A, B, C, and F are infinite - /// while not shown here, their areas extend forever outside the visible grid. /// However, the areas of coordinates D and E are finite: D is closest to 9 /// locations, and E is closest to 17 (both including the coordinate's location /// itself). Therefore, in this example, the size of the largest area is 17. /// /// What is the size of the largest area that isn't infinite? /// /// --- Part Two --- /// /// On the other hand, if the coordinates are safe, maybe the best you can do is /// try to find a region near as many coordinates as possible. /// /// For example, suppose you want the sum of the Manhattan distance to all of /// the coordinates to be less than 32. For each location, add up the distances /// to all of the given coordinates; if the total of those distances is less /// than 32, that location is within the desired region. Using the same /// coordinates as above, the resulting region looks like this: /// ///.......... ///.A........ ///.......... ///...###..C. ///..#D###... ///..###E#... ///.B.###.... ///.......... ///.......... ///........F. /// /// In particular, consider the highlighted location 4,3 located at the top /// middle of the region. 
Its calculation is as follows, where abs() is the /// absolute value function: /// /// Distance to coordinate A: abs(4-1) + abs(3-1) = 5 /// Distance to coordinate B: abs(4-1) + abs(3-6) = 6 /// Distance to coordinate C: abs(4-8) + abs(3-3) = 4 /// Distance to coordinate D: abs(4-3) + abs(3-4) = 2 /// Distance to coordinate E: abs(4-5) + abs(3-5) = 3 /// Distance to coordinate F: abs(4-8) + abs(3-9) = 10 /// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30 /// /// Because the total distance to all coordinates (30) is less than 32, the /// location is within the region. /// /// This region, which also includes coordinates D and E, has a total size of /// 16. /// /// Your actual region will need to be much larger than this example, though, /// instead including all locations with a total distance of less than 10000. /// /// What is the size of the region containing all locations which have a total /// distance to all given coordinates of less than 10000? use regex::Regex; use std::collections::{HashMap, HashSet}; use std::cmp::Ordering; use std::i32; type Point = (i32, i32); type Grid = HashMap<Point, usize>; #[derive(Debug, PartialEq, Eq)] struct Range { min: i32, max: i32, } #[derive(Debug, PartialEq, Eq)] struct Bounds { x: Range, y: Range, } pub fn run()
.unwrap(); println!("The biggest non-infinite area size is: {}", biggest_area_size); let concentrated_area = count_points_below(&points, &bounds, 10_000); println!("The size of the area that have a total distance less than \ 10.000 is: {}", concentrated_area); } fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid { let mut grid = HashMap::new(); for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); match closest_point(&point, points) { Some(area_number) => grid.insert(point, area_number), None => None, }; } } grid } fn count_points_below(points: &Vec<Point>, bounds: &Bounds, treshold: i32) -> i32 { let mut count = 0; for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); if total_distance(&point, points) < treshold { count += 1; }; } } count } fn create_bounds(points: &Vec<Point>) -> Bounds { let x_min = points.iter() .map(|(x, _)| x) .min() .unwrap(); let x_max = points.iter() .map(|(x, _)| x) .max() .unwrap(); let x_range = Range {min:*x_min, max:*x_max}; let y_min = points.iter() .map(|(_, y)| y) .min() .unwrap(); let y_max = points.iter() .map(|(_, y)| y) .max() .unwrap(); let y_range = Range {min:*y_min, max:*y_max}; Bounds {x:x_range, y:y_range} } fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 { (x1 - x2).abs() + (y1 - y2).abs() } fn total_distance(reference_point: &Point, points: &Vec<Point>) -> i32 { points.iter() .map(|point| distance(reference_point, point)) .sum() } fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> { let (index, _) = points.iter() .map(|point| distance(reference_point, point)) .enumerate() .fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| { match minimum.cmp(&new_value) { Ordering::Greater => (Some(new_index), new_value), Ordering::Less => (some_index, minimum), Ordering::Equal => (None, minimum), } }); index } fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool { if (bounds.x.min == x || bounds.x.max == x) && (bounds.y.min <= y && bounds.y.max >= y) { return true } if (bounds.y.min == y || bounds.y.max == y) && (bounds.x.min <= x && bounds.x.max >= x) { return true } false } fn parse_input(input: &str) -> Vec<Point> { input.lines() .filter_map(|line| convert_line(line)) .collect() } fn convert_line(line: &str) -> Option<Point> { lazy_static! 
{ static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap(); } let captures = RE.captures(line).unwrap(); match (captures.get(1), captures.get(2)) { (Some(x), Some(y)) => Some((x.as_str().parse().unwrap(), y.as_str().parse().unwrap())), _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_input() { let input = "1, 1\n\ 1, 6\n\ 8, 3\n\ 3, 4\n\ 5, 5\n\ 8, 9"; let output: Vec<Point> = vec![ (1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]; assert_eq!(parse_input(input), output); } #[test] fn test_create_bounds() { let input: Vec<Point> = vec![ (0, 0), (1, 1), (3, 2)]; let x_range = Range {min:0, max:3}; let y_range = Range {min:0, max:2}; let output = Bounds {x:x_range, y:y_range}; assert_eq!(create_bounds(&input), output); } #[test] fn test_create_grid() { let input: Vec<Point> = vec![ (0, 0), (2, 2)]; let x_range = Range {min:0, max:2}; let y_range = Range {min:0, max:2}; let bounds = Bounds {x:x_range, y:y_range}; let mut output = HashMap::new(); output.insert((0, 0), 0); output.insert((1, 0), 0); output.insert((0, 1), 0); output.insert((2, 1), 1); output.insert((1, 2), 1); output.insert((2, 2), 1); assert_eq!(create_grid(&input, &bounds), output); } #[test] fn test_on_bounds() { let x_range = Range {min:0, max:3}; let y_range = Range {min:2, max:6}; let bounds = Bounds {x:x_range, y:y_range}; assert!(on_bounds(&(0, 4), &bounds)); assert!(on_bounds(&(3, 2), &bounds)); assert!(on_bounds(&(2, 6), &bounds)); assert!(!on_bounds(&(2, 7), &bounds)); assert!(!on_bounds(&(2, 5), &bounds)); assert!(!on_bounds(&(1, 1), &bounds)); assert!(!on_bounds(&(11, 8), &bounds)); } #[test] fn test_distance() { assert_eq!(distance(&(0, 0), &(1, 1)), 2); assert_eq!(distance(&(1, 1), &(0, 0)), 2); assert_eq!(distance(&(10, 0), &(0, 10)), 20); assert_eq!(distance(&(5, 5), &(7, 3)), 4); } #[test] fn test_closest_point() { let points: Vec<Point> = vec![ (1, 1), (1, 6), (8, 9)]; assert_eq!(closest_point(&(2, 2), &points), Some(0)); assert_eq!(closest_point(&(1, 5), &points), Some(1)); assert_eq!(closest_point(&(10, 10), &points), Some(2)); } #[test] fn test_closest_point_equal_distance() { let points: Vec<Point> = vec![ (0, 1), (0, 3)]; assert_eq!(closest_point(&(0, 2), &points), None); } #[test] fn test_total_distance() { let points: Vec<Point> = vec![ (0, 0), (2, 3), (10, 5)]; assert_eq!(total_distance(&(1, 1), &points), 18); assert_eq!(total_distance(&(3, 3), &points), 16); assert_eq!(total_distance(&(0, 5), &points), 19); } }
{ let points = parse_input(include_str!("../input/day_06.txt")); let bounds = create_bounds(&points); let grid = create_grid(&points, &bounds); let mut areas = HashMap::new(); let mut infinite_areas = HashSet::new(); for (point, area_number) in grid.iter() { if on_bounds(point, &bounds) { infinite_areas.insert(*area_number); areas.remove(area_number); } if !infinite_areas.contains(area_number) { *areas.entry(area_number).or_insert(0) += 1; } } let biggest_area_size = areas.values() .max()
identifier_body
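The record above turns on two ideas: Manhattan distance and the rule that a location tied between coordinates belongs to nobody. A minimal standalone sketch of that tie-aware nearest-point rule, with illustrative names (not taken from the record):

```rust
// Manhattan distance between two integer points.
fn manhattan(a: (i32, i32), b: (i32, i32)) -> i32 {
    (a.0 - b.0).abs() + (a.1 - b.1).abs()
}

// Index of the strictly nearest point, or None on a tie for the minimum.
fn nearest(p: (i32, i32), points: &[(i32, i32)]) -> Option<usize> {
    let mut best: Option<(usize, i32)> = None;
    let mut tied = false;
    for (i, &q) in points.iter().enumerate() {
        let d = manhattan(p, q);
        match best {
            None => best = Some((i, d)),
            Some((_, bd)) if d < bd => {
                best = Some((i, d));
                tied = false;
            }
            Some((_, bd)) if d == bd => tied = true,
            _ => {}
        }
    }
    if tied { None } else { best.map(|(i, _)| i) }
}

fn main() {
    // (0, 2) is equidistant from both inputs, so it has no owner.
    assert_eq!(nearest((0, 2), &[(0, 1), (0, 3)]), None);
    assert_eq!(nearest((2, 2), &[(1, 1), (8, 9)]), Some(0));
}
```

The record's `closest_point` expresses the same logic as a fold seeded with `(None, i32::MAX)`; the explicit loop above just makes the tie flag visible.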
day_06.rs
/// --- Day 6: Chronal Coordinates --- /// /// The device on your wrist beeps several times, and once again you feel like /// you're falling. /// /// "Situation critical," the device announces. "Destination indeterminate. /// Chronal interference detected. Please specify new target coordinates." /// /// The device then produces a list of coordinates (your puzzle input). Are they /// places it thinks are safe or dangerous? It recommends you check manual page /// 729. The Elves did not give you a manual. /// /// If they're dangerous, maybe you can minimize the danger by finding the /// coordinate that gives the largest distance from the other points. /// /// Using only the Manhattan distance, determine the area around each coordinate /// by counting the number of integer X,Y locations that are closest to that /// coordinate (and aren't tied in distance to any other coordinate). /// /// Your goal is to find the size of the largest area that isn't infinite. For /// example, consider the following list of coordinates: /// /// 1, 1 /// 1, 6 /// 8, 3 /// 3, 4 /// 5, 5 /// 8, 9 /// /// If we name these coordinates A through F, we can draw them on a grid, /// putting 0,0 at the top left: /// /// .......... /// .A........ /// .......... /// ........C. /// ...D...... /// .....E.... /// .B........ /// .......... /// .......... /// ........F. /// /// This view is partial - the actual grid extends infinitely in all directions. /// Using the Manhattan distance, each location's closest coordinate can be /// determined, shown here in lowercase: /// /// aaaaa.cccc /// aAaaa.cccc /// aaaddecccc /// aadddeccCc /// ..dDdeeccc /// bb.deEeecc /// bBb.eeee.. /// bbb.eeefff /// bbb.eeffff /// bbb.ffffFf /// /// Locations shown as . are equally far from two or more coordinates, and so /// they don't count as being closest to any. /// /// In this example, the areas of coordinates A, B, C, and F are infinite - /// while not shown here, their areas extend forever outside the visible grid. /// However, the areas of coordinates D and E are finite: D is closest to 9 /// locations, and E is closest to 17 (both including the coordinate's location /// itself). Therefore, in this example, the size of the largest area is 17. /// /// What is the size of the largest area that isn't infinite? /// /// --- Part Two --- /// /// On the other hand, if the coordinates are safe, maybe the best you can do is /// try to find a region near as many coordinates as possible. /// /// For example, suppose you want the sum of the Manhattan distance to all of /// the coordinates to be less than 32. For each location, add up the distances /// to all of the given coordinates; if the total of those distances is less /// than 32, that location is within the desired region. Using the same /// coordinates as above, the resulting region looks like this: /// /// .......... /// .A........ /// .......... /// ...###..C. /// ..#D###... /// ..###E#... /// .B.###.... /// .......... /// .......... /// ........F. /// /// In particular, consider the highlighted location 4,3 located at the top /// middle of the region.
Its calculation is as follows, where abs() is the /// absolute value function: /// /// Distance to coordinate A: abs(4-1) + abs(3-1) = 5 /// Distance to coordinate B: abs(4-1) + abs(3-6) = 6 /// Distance to coordinate C: abs(4-8) + abs(3-3) = 4 /// Distance to coordinate D: abs(4-3) + abs(3-4) = 2 /// Distance to coordinate E: abs(4-5) + abs(3-5) = 3 /// Distance to coordinate F: abs(4-8) + abs(3-9) = 10 /// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30 /// /// Because the total distance to all coordinates (30) is less than 32, the /// location is within the region. /// /// This region, which also includes coordinates D and E, has a total size of /// 16. /// /// Your actual region will need to be much larger than this example, though, /// instead including all locations with a total distance of less than 10000. /// /// What is the size of the region containing all locations which have a total /// distance to all given coordinates of less than 10000? use regex::Regex; use std::collections::{HashMap, HashSet}; use std::cmp::Ordering; use std::i32; type Point = (i32, i32); type Grid = HashMap<Point, usize>; #[derive(Debug, PartialEq, Eq)] struct Range { min: i32, max: i32, } #[derive(Debug, PartialEq, Eq)] struct Bounds { x: Range, y: Range, } pub fn run() { let points = parse_input(include_str!("../input/day_06.txt")); let bounds = create_bounds(&points); let grid = create_grid(&points, &bounds); let mut areas = HashMap::new(); let mut infinite_areas = HashSet::new(); for (point, area_number) in grid.iter() { if on_bounds(point, &bounds) { infinite_areas.insert(*area_number); areas.remove(area_number); } if !infinite_areas.contains(area_number) { *areas.entry(area_number).or_insert(0) += 1; } } let biggest_area_size = areas.values() .max() .unwrap(); println!("The biggest non-infinite area size is: {}", biggest_area_size); let concentrated_area = count_points_below(&points, &bounds, 10_000); println!("The size of the area that has a total distance less than \ 10,000 is: {}", concentrated_area); } fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid { let mut grid = HashMap::new(); for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); match closest_point(&point, points) { Some(area_number) => grid.insert(point, area_number), None => None, }; } } grid } fn count_points_below(points: &Vec<Point>, bounds: &Bounds, threshold: i32) -> i32 { let mut count = 0; for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); if total_distance(&point, points) < threshold { count += 1; }; } } count } fn create_bounds(points: &Vec<Point>) -> Bounds { let x_min = points.iter() .map(|(x, _)| x) .min() .unwrap(); let x_max = points.iter() .map(|(x, _)| x) .max() .unwrap(); let x_range = Range {min:*x_min, max:*x_max}; let y_min = points.iter() .map(|(_, y)| y) .min() .unwrap(); let y_max = points.iter() .map(|(_, y)| y) .max() .unwrap(); let y_range = Range {min:*y_min, max:*y_max}; Bounds {x:x_range, y:y_range} } fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 { (x1 - x2).abs() + (y1 - y2).abs() } fn total_distance(reference_point: &Point, points: &Vec<Point>) -> i32 { points.iter() .map(|point| distance(reference_point, point)) .sum() } fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> { let (index, _) = points.iter() .map(|point| distance(reference_point, point)) .enumerate() .fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| { match minimum.cmp(&new_value) {
Ordering::Greater => (Some(new_index), new_value), Ordering::Less => (some_index, minimum), Ordering::Equal => (None, minimum), } }); index } fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool { if (bounds.x.min == x || bounds.x.max == x) && (bounds.y.min <= y && bounds.y.max >= y) { return true } if (bounds.y.min == y || bounds.y.max == y) && (bounds.x.min <= x && bounds.x.max >= x) { return true } false } fn parse_input(input: &str) -> Vec<Point> { input.lines() .filter_map(|line| convert_line(line)) .collect() } fn convert_line(line: &str) -> Option<Point> { lazy_static! { static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap(); } let captures = RE.captures(line).unwrap(); match (captures.get(1), captures.get(2)) { (Some(x), Some(y)) => Some((x.as_str().parse().unwrap(), y.as_str().parse().unwrap())), _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_input() { let input = "1, 1\n\ 1, 6\n\ 8, 3\n\ 3, 4\n\ 5, 5\n\ 8, 9"; let output: Vec<Point> = vec![ (1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]; assert_eq!(parse_input(input), output); } #[test] fn test_create_bounds() { let input: Vec<Point> = vec![ (0, 0), (1, 1), (3, 2)]; let x_range = Range {min:0, max:3}; let y_range = Range {min:0, max:2}; let output = Bounds {x:x_range, y:y_range}; assert_eq!(create_bounds(&input), output); } #[test] fn test_create_grid() { let input: Vec<Point> = vec![ (0, 0), (2, 2)]; let x_range = Range {min:0, max:2}; let y_range = Range {min:0, max:2}; let bounds = Bounds {x:x_range, y:y_range}; let mut output = HashMap::new(); output.insert((0, 0), 0); output.insert((1, 0), 0); output.insert((0, 1), 0); output.insert((2, 1), 1); output.insert((1, 2), 1); output.insert((2, 2), 1); assert_eq!(create_grid(&input, &bounds), output); } #[test]
let bounds = Bounds {x:x_range, y:y_range}; assert!(on_bounds(&(0, 4), &bounds)); assert!(on_bounds(&(3, 2), &bounds)); assert!(on_bounds(&(2, 6), &bounds)); assert!(!on_bounds(&(2, 7), &bounds)); assert!(!on_bounds(&(2, 5), &bounds)); assert!(!on_bounds(&(1, 1), &bounds)); assert!(!on_bounds(&(11, 8), &bounds)); } #[test] fn test_distance() { assert_eq!(distance(&(0, 0), &(1, 1)), 2); assert_eq!(distance(&(1, 1), &(0, 0)), 2); assert_eq!(distance(&(10, 0), &(0, 10)), 20); assert_eq!(distance(&(5, 5), &(7, 3)), 4); } #[test] fn test_closest_point() { let points: Vec<Point> = vec![ (1, 1), (1, 6), (8, 9)]; assert_eq!(closest_point(&(2, 2), &points), Some(0)); assert_eq!(closest_point(&(1, 5), &points), Some(1)); assert_eq!(closest_point(&(10, 10), &points), Some(2)); } #[test] fn test_closest_point_equal_distance() { let points: Vec<Point> = vec![ (0, 1), (0, 3)]; assert_eq!(closest_point(&(0, 2), &points), None); } #[test] fn test_total_distance() { let points: Vec<Point> = vec![ (0, 0), (2, 3), (10, 5)]; assert_eq!(total_distance(&(1, 1), &points), 18); assert_eq!(total_distance(&(3, 3), &points), 16); assert_eq!(total_distance(&(0, 5), &points), 19); } }
fn test_on_bounds() { let x_range = Range {min:0, max:3}; let y_range = Range {min:2, max:6};
random_line_split
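The part-two region test in this record is a plain sum of Manhattan distances. A self-contained check of that sum against the puzzle's own worked example for location (4, 3), whose total must come out to 30 (under the example threshold of 32):

```rust
// Sum of Manhattan distances from one location to every coordinate.
fn total_distance(p: (i32, i32), points: &[(i32, i32)]) -> i32 {
    points
        .iter()
        .map(|q| (p.0 - q.0).abs() + (p.1 - q.1).abs())
        .sum()
}

fn main() {
    let coords = [(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)];
    let total = total_distance((4, 3), &coords);
    assert_eq!(total, 30); // 5 + 6 + 4 + 2 + 3 + 10
    assert!(total < 32); // so (4, 3) lies inside the example region
}
```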
day_06.rs
/// --- Day 6: Chronal Coordinates --- /// /// The device on your wrist beeps several times, and once again you feel like /// you're falling. /// /// "Situation critical," the device announces. "Destination indeterminate. /// Chronal interference detected. Please specify new target coordinates." /// /// The device then produces a list of coordinates (your puzzle input). Are they /// places it thinks are safe or dangerous? It recommends you check manual page /// 729. The Elves did not give you a manual. /// /// If they're dangerous, maybe you can minimize the danger by finding the /// coordinate that gives the largest distance from the other points. /// /// Using only the Manhattan distance, determine the area around each coordinate /// by counting the number of integer X,Y locations that are closest to that /// coordinate (and aren't tied in distance to any other coordinate). /// /// Your goal is to find the size of the largest area that isn't infinite. For /// example, consider the following list of coordinates: /// /// 1, 1 /// 1, 6 /// 8, 3 /// 3, 4 /// 5, 5 /// 8, 9 /// /// If we name these coordinates A through F, we can draw them on a grid, /// putting 0,0 at the top left: /// /// .......... /// .A........ /// .......... /// ........C. /// ...D...... /// .....E.... /// .B........ /// .......... /// .......... /// ........F. /// /// This view is partial - the actual grid extends infinitely in all directions. /// Using the Manhattan distance, each location's closest coordinate can be /// determined, shown here in lowercase: /// /// aaaaa.cccc /// aAaaa.cccc /// aaaddecccc /// aadddeccCc /// ..dDdeeccc /// bb.deEeecc /// bBb.eeee.. /// bbb.eeefff /// bbb.eeffff /// bbb.ffffFf /// /// Locations shown as . are equally far from two or more coordinates, and so /// they don't count as being closest to any. /// /// In this example, the areas of coordinates A, B, C, and F are infinite - /// while not shown here, their areas extend forever outside the visible grid. /// However, the areas of coordinates D and E are finite: D is closest to 9 /// locations, and E is closest to 17 (both including the coordinate's location /// itself). Therefore, in this example, the size of the largest area is 17. /// /// What is the size of the largest area that isn't infinite? /// /// --- Part Two --- /// /// On the other hand, if the coordinates are safe, maybe the best you can do is /// try to find a region near as many coordinates as possible. /// /// For example, suppose you want the sum of the Manhattan distance to all of /// the coordinates to be less than 32. For each location, add up the distances /// to all of the given coordinates; if the total of those distances is less /// than 32, that location is within the desired region. Using the same /// coordinates as above, the resulting region looks like this: /// /// .......... /// .A........ /// .......... /// ...###..C. /// ..#D###... /// ..###E#... /// .B.###.... /// .......... /// .......... /// ........F. /// /// In particular, consider the highlighted location 4,3 located at the top /// middle of the region.
Its calculation is as follows, where abs() is the /// absolute value function: /// /// Distance to coordinate A: abs(4-1) + abs(3-1) = 5 /// Distance to coordinate B: abs(4-1) + abs(3-6) = 6 /// Distance to coordinate C: abs(4-8) + abs(3-3) = 4 /// Distance to coordinate D: abs(4-3) + abs(3-4) = 2 /// Distance to coordinate E: abs(4-5) + abs(3-5) = 3 /// Distance to coordinate F: abs(4-8) + abs(3-9) = 10 /// Total distance: 5 + 6 + 4 + 2 + 3 + 10 = 30 /// /// Because the total distance to all coordinates (30) is less than 32, the /// location is within the region. /// /// This region, which also includes coordinates D and E, has a total size of /// 16. /// /// Your actual region will need to be much larger than this example, though, /// instead including all locations with a total distance of less than 10000. /// /// What is the size of the region containing all locations which have a total /// distance to all given coordinates of less than 10000? use regex::Regex; use std::collections::{HashMap, HashSet}; use std::cmp::Ordering; use std::i32; type Point = (i32, i32); type Grid = HashMap<Point, usize>; #[derive(Debug, PartialEq, Eq)] struct Range { min: i32, max: i32, } #[derive(Debug, PartialEq, Eq)] struct Bounds { x: Range, y: Range, } pub fn run() { let points = parse_input(include_str!("../input/day_06.txt")); let bounds = create_bounds(&points); let grid = create_grid(&points, &bounds); let mut areas = HashMap::new(); let mut infinite_areas = HashSet::new(); for (point, area_number) in grid.iter() { if on_bounds(point, &bounds) { infinite_areas.insert(*area_number); areas.remove(area_number); } if !infinite_areas.contains(area_number) { *areas.entry(area_number).or_insert(0) += 1; } } let biggest_area_size = areas.values() .max() .unwrap(); println!("The biggest non-infinite area size is: {}", biggest_area_size); let concentrated_area = count_points_below(&points, &bounds, 10_000); println!("The size of the area that has a total distance less than \ 10,000 is: {}", concentrated_area); } fn create_grid(points: &Vec<Point>, bounds: &Bounds) -> Grid { let mut grid = HashMap::new(); for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); match closest_point(&point, points) { Some(area_number) => grid.insert(point, area_number), None => None, }; } } grid } fn count_points_below(points: &Vec<Point>, bounds: &Bounds, threshold: i32) -> i32 { let mut count = 0; for x in bounds.x.min..=bounds.x.max { for y in bounds.y.min..=bounds.y.max { let point = (x, y); if total_distance(&point, points) < threshold { count += 1; }; } } count } fn create_bounds(points: &Vec<Point>) -> Bounds { let x_min = points.iter() .map(|(x, _)| x) .min() .unwrap(); let x_max = points.iter() .map(|(x, _)| x) .max() .unwrap(); let x_range = Range {min:*x_min, max:*x_max}; let y_min = points.iter() .map(|(_, y)| y) .min() .unwrap(); let y_max = points.iter() .map(|(_, y)| y) .max() .unwrap(); let y_range = Range {min:*y_min, max:*y_max}; Bounds {x:x_range, y:y_range} } fn distance((x1, y1): &Point, (x2, y2): &Point) -> i32 { (x1 - x2).abs() + (y1 - y2).abs() } fn
(reference_point: &Point, points: &Vec<Point>) -> i32 { points.iter() .map(|point| distance(reference_point, point)) .sum() } fn closest_point(reference_point: &Point, points: &Vec<Point>) -> Option<usize> { let (index, _) = points.iter() .map(|point| distance(reference_point, point)) .enumerate() .fold((None, i32::MAX), |(some_index, minimum), (new_index, new_value)| { match minimum.cmp(&new_value) { Ordering::Greater => (Some(new_index), new_value), Ordering::Less => (some_index, minimum), Ordering::Equal => (None, minimum), } }); index } fn on_bounds(&(x, y): &Point, bounds: &Bounds) -> bool { if (bounds.x.min == x || bounds.x.max == x) && (bounds.y.min <= y && bounds.y.max >= y) { return true } if (bounds.y.min == y || bounds.y.max == y) && (bounds.x.min <= x && bounds.x.max >= x) { return true } false } fn parse_input(input: &str) -> Vec<Point> { input.lines() .filter_map(|line| convert_line(line)) .collect() } fn convert_line(line: &str) -> Option<Point> { lazy_static! { static ref RE: Regex = Regex::new(r"(\d*), (\d*)").unwrap(); } let captures = RE.captures(line).unwrap(); match (captures.get(1), captures.get(2)) { (Some(x), Some(y)) => Some((x.as_str().parse().unwrap(), y.as_str().parse().unwrap())), _ => None, } } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_input() { let input = "1, 1\n\ 1, 6\n\ 8, 3\n\ 3, 4\n\ 5, 5\n\ 8, 9"; let output: Vec<Point> = vec![ (1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]; assert_eq!(parse_input(input), output); } #[test] fn test_create_bounds() { let input: Vec<Point> = vec![ (0, 0), (1, 1), (3, 2)]; let x_range = Range {min:0, max:3}; let y_range = Range {min:0, max:2}; let output = Bounds {x:x_range, y:y_range}; assert_eq!(create_bounds(&input), output); } #[test] fn test_create_grid() { let input: Vec<Point> = vec![ (0, 0), (2, 2)]; let x_range = Range {min:0, max:2}; let y_range = Range {min:0, max:2}; let bounds = Bounds {x:x_range, y:y_range}; let mut output = HashMap::new(); output.insert((0, 0), 0); output.insert((1, 0), 0); output.insert((0, 1), 0); output.insert((2, 1), 1); output.insert((1, 2), 1); output.insert((2, 2), 1); assert_eq!(create_grid(&input, &bounds), output); } #[test] fn test_on_bounds() { let x_range = Range {min:0, max:3}; let y_range = Range {min:2, max:6}; let bounds = Bounds {x:x_range, y:y_range}; assert!(on_bounds(&(0, 4), &bounds)); assert!(on_bounds(&(3, 2), &bounds)); assert!(on_bounds(&(2, 6), &bounds)); assert!(!on_bounds(&(2, 7), &bounds)); assert!(!on_bounds(&(2, 5), &bounds)); assert!(!on_bounds(&(1, 1), &bounds)); assert!(!on_bounds(&(11, 8), &bounds)); } #[test] fn test_distance() { assert_eq!(distance(&(0, 0), &(1, 1)), 2); assert_eq!(distance(&(1, 1), &(0, 0)), 2); assert_eq!(distance(&(10, 0), &(0, 10)), 20); assert_eq!(distance(&(5, 5), &(7, 3)), 4); } #[test] fn test_closest_point() { let points: Vec<Point> = vec![ (1, 1), (1, 6), (8, 9)]; assert_eq!(closest_point(&(2, 2), &points), Some(0)); assert_eq!(closest_point(&(1, 5), &points), Some(1)); assert_eq!(closest_point(&(10, 10), &points), Some(2)); } #[test] fn test_closest_point_equal_distance() { let points: Vec<Point> = vec![ (0, 1), (0, 3)]; assert_eq!(closest_point(&(0, 2), &points), None); } #[test] fn test_total_distance() { let points: Vec<Point> = vec![ (0, 0), (2, 3), (10, 5)]; assert_eq!(total_distance(&(1, 1), &points), 18); assert_eq!(total_distance(&(3, 3), &points), 16); assert_eq!(total_distance(&(0, 5), &points), 19); } }
total_distance
identifier_name
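Part one's "infinite area" heuristic in these records marks any area owning a cell on the bounding box and excludes it while counting, all in one pass that leans on removal order. A hypothetical two-pass variant of the same idea, which makes the invariant explicit:

```rust
use std::collections::{HashMap, HashSet};

// Hypothetical two-pass rewrite of the record's counting loop: first mark
// every area that owns a perimeter cell as infinite, then size the rest.
fn largest_finite_area(
    grid: &HashMap<(i32, i32), usize>,
    min: (i32, i32),
    max: (i32, i32),
) -> Option<i32> {
    let mut infinite = HashSet::new();
    for (&(x, y), &owner) in grid {
        if x == min.0 || x == max.0 || y == min.1 || y == max.1 {
            infinite.insert(owner);
        }
    }
    let mut sizes: HashMap<usize, i32> = HashMap::new();
    for &owner in grid.values() {
        if !infinite.contains(&owner) {
            *sizes.entry(owner).or_insert(0) += 1;
        }
    }
    sizes.values().copied().max()
}
```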
lib.rs
// Copyright 2018 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // We take the "low road" here when returning the structs - we expose the // items (and arrays of items) as strings, which are JSON. The rust side of // the world gets serialization and deserialization for free and it makes // memory management that little bit simpler. extern crate failure; extern crate serde_json; extern crate url; extern crate reqwest; #[macro_use] extern crate ffi_toolkit; extern crate mentat; extern crate sync15_passwords; extern crate sync15_adapter as sync; #[macro_use] extern crate log; mod error; use error::{ ExternError, with_translated_result, with_translated_value_result, with_translated_void_result, with_translated_string_result, with_translated_opt_string_result, }; use std::os::raw::{ c_char, }; use std::sync::{Once, ONCE_INIT}; use ffi_toolkit::string::{ c_char_to_string, }; pub use ffi_toolkit::memory::{ destroy_c_char, }; use sync::{ Sync15StorageClient, Sync15StorageClientInit, GlobalState, }; use sync15_passwords::{ passwords, PasswordEngine, ServerPassword, }; pub struct SyncInfo { state: GlobalState, client: Sync15StorageClient, // Used so that we know whether or not we need to re-initialize `client` last_client_init: Sync15StorageClientInit, } pub struct PasswordState { engine: PasswordEngine, sync: Option<SyncInfo>, } #[cfg(target_os = "android")] extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; } struct DevLogger; impl log::Log for DevLogger { fn enabled(&self, _: &log::Metadata) -> bool { true } fn
(&self, record: &log::Record) { let message = format!("{}:{} -- {}", record.level(), record.target(), record.args()); println!("{}", message); #[cfg(target_os = "android")] { unsafe { let message = ::std::ffi::CString::new(message).unwrap(); let level_int = match record.level() { log::Level::Trace => 2, log::Level::Debug => 3, log::Level::Info => 4, log::Level::Warn => 5, log::Level::Error => 6, }; let message = message.as_ptr(); let tag = b"RustInternal\0"; __android_log_write(level_int, tag.as_ptr() as *const c_char, message); } } // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"),...), maybe windows? (OutputDebugStringA) } fn flush(&self) {} } static INIT_LOGGER: Once = ONCE_INIT; static DEV_LOGGER: &'static log::Log = &DevLogger; fn init_logger() { log::set_logger(DEV_LOGGER).unwrap(); log::set_max_level(log::LevelFilter::Trace); std::env::set_var("RUST_BACKTRACE", "1"); info!("Hooked up rust logger!"); } define_destructor!(sync15_passwords_state_destroy, PasswordState); // This is probably too many string arguments... #[no_mangle] pub unsafe extern "C" fn sync15_passwords_state_new( mentat_db_path: *const c_char, encryption_key: *const c_char, error: *mut ExternError ) -> *mut PasswordState { INIT_LOGGER.call_once(init_logger); with_translated_result(error, || { let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path), c_char_to_string(encryption_key))?; let engine = PasswordEngine::new(store)?; Ok(PasswordState { engine, sync: None, }) }) } // indirection to help `?` figure out the target error type fn parse_url(url: &str) -> sync::Result<url::Url> { Ok(url::Url::parse(url)?) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_sync( state: *mut PasswordState, key_id: *const c_char, access_token: *const c_char, sync_key: *const c_char, tokenserver_url: *const c_char, error: *mut ExternError ) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let root_sync_key = sync::KeyBundle::from_ksync_base64( c_char_to_string(sync_key).into())?; let requested_init = Sync15StorageClientInit { key_id: c_char_to_string(key_id).into(), access_token: c_char_to_string(access_token).into(), tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?, }; // TODO: If `to_ready` (or anything else with a?) fails below, this // `take()` means we end up with `state.sync.is_none()`, which means the // next sync will redownload meta/global, crypto/keys, etc. without // needing to. (AFAICT fixing this requires a change in sync15-adapter, // since to_ready takes GlobalState as a move, and it's not clear if // that change even is a good idea). let mut sync_info = state.sync.take().map(Ok) .unwrap_or_else(|| -> sync::Result<SyncInfo> { let state = GlobalState::default(); let client = Sync15StorageClient::new(requested_init.clone())?; Ok(SyncInfo { state, client, last_client_init: requested_init.clone(), }) })?; // If the options passed for initialization of the storage client aren't // the same as the ones we used last time, reinitialize it. (Note that // we could avoid the comparison in the case where we had `None` in // `state.sync` before, but this probably doesn't matter). 
if requested_init != sync_info.last_client_init { sync_info.client = Sync15StorageClient::new(requested_init.clone())?; sync_info.last_client_init = requested_init; } { // Scope borrow of `sync_info.client` let mut state_machine = sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key); let next_sync_state = state_machine.to_ready(sync_info.state)?; sync_info.state = next_sync_state; } // We don't use a ? on the next line so that even if `state.engine.sync` // fails, we don't forget the sync_state. let result = state.engine.sync(&sync_info.client, &sync_info.state); state.sync = Some(sync_info); result }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.touch_credential(c_char_to_string(id).into())?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool { with_translated_value_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let deleted = state.engine.delete_credential(c_char_to_string(id).into())?; Ok(deleted) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.wipe()?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.reset()?; // XXX We probably need to clear out some things from `state.service`! Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char { with_translated_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let passwords: Vec<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_all_sync_passwords(&mut in_progress_read)? }; let result = serde_json::to_string(&passwords)?; Ok(result) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> *mut c_char { with_translated_opt_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let maybe_pass: Option<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_sync_password(&mut in_progress_read, c_char_to_string(id).into())? }; let pass = if let Some(p) = maybe_pass { p } else { return Ok(None) }; Ok(Some(serde_json::to_string(&pass)?)) }) } #[no_mangle] pub extern "C" fn wtf_destroy_c_char(s: *mut c_char) { // the "pub use" above should be enough to expose this? // It appears that is enough to expose it in a windows DLL, but for // some reason it's not exported for Android. // *sob* - and now that I've defined this, suddenly this *and* // destroy_c_char are exposed (and removing this again removes the // destroy_c_char) // Oh well, a yak for another day. destroy_c_char(s); }
log
identifier_name
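The lib.rs records hand structs across the FFI boundary as JSON inside C strings, paired with a destructor so ownership round-trips cleanly. A minimal self-contained sketch of that convention; the `demo_*` names are invented for illustration and are not the crate's API:

```rust
use std::ffi::{CStr, CString};
use std::os::raw::c_char;

// Invented demo function: build a JSON reply and hand ownership of the
// C string to the caller, mirroring the record's "strings are JSON"
// convention. Error handling is omitted for brevity.
#[no_mangle]
pub unsafe extern "C" fn demo_echo_json(name: *const c_char) -> *mut c_char {
    let name = CStr::from_ptr(name).to_string_lossy();
    // {:?} produces a quoted, escaped string, close enough to JSON here.
    let json = format!("{{\"name\":{:?}}}", name);
    CString::new(json).unwrap().into_raw()
}

// Every string handed out above must come back through here to be freed,
// just like the record's destroy_c_char.
#[no_mangle]
pub unsafe extern "C" fn demo_string_destroy(s: *mut c_char) {
    if !s.is_null() {
        drop(CString::from_raw(s));
    }
}
```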
lib.rs
// Copyright 2018 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed
// We take the "low road" here when returning the structs - we expose the // items (and arrays of items) as strings, which are JSON. The rust side of // the world gets serialization and deserialization for free and it makes // memory management that little bit simpler. extern crate failure; extern crate serde_json; extern crate url; extern crate reqwest; #[macro_use] extern crate ffi_toolkit; extern crate mentat; extern crate sync15_passwords; extern crate sync15_adapter as sync; #[macro_use] extern crate log; mod error; use error::{ ExternError, with_translated_result, with_translated_value_result, with_translated_void_result, with_translated_string_result, with_translated_opt_string_result, }; use std::os::raw::{ c_char, }; use std::sync::{Once, ONCE_INIT}; use ffi_toolkit::string::{ c_char_to_string, }; pub use ffi_toolkit::memory::{ destroy_c_char, }; use sync::{ Sync15StorageClient, Sync15StorageClientInit, GlobalState, }; use sync15_passwords::{ passwords, PasswordEngine, ServerPassword, }; pub struct SyncInfo { state: GlobalState, client: Sync15StorageClient, // Used so that we know whether or not we need to re-initialize `client` last_client_init: Sync15StorageClientInit, } pub struct PasswordState { engine: PasswordEngine, sync: Option<SyncInfo>, } #[cfg(target_os = "android")] extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; } struct DevLogger; impl log::Log for DevLogger { fn enabled(&self, _: &log::Metadata) -> bool { true } fn log(&self, record: &log::Record) { let message = format!("{}:{} -- {}", record.level(), record.target(), record.args()); println!("{}", message); #[cfg(target_os = "android")] { unsafe { let message = ::std::ffi::CString::new(message).unwrap(); let level_int = match record.level() { log::Level::Trace => 2, log::Level::Debug => 3, log::Level::Info => 4, log::Level::Warn => 5, log::Level::Error => 6, }; let message = message.as_ptr(); let tag = b"RustInternal\0"; __android_log_write(level_int, tag.as_ptr() as *const c_char, message); } } // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"),...), maybe windows? (OutputDebugStringA) } fn flush(&self) {} } static INIT_LOGGER: Once = ONCE_INIT; static DEV_LOGGER: &'static log::Log = &DevLogger; fn init_logger() { log::set_logger(DEV_LOGGER).unwrap(); log::set_max_level(log::LevelFilter::Trace); std::env::set_var("RUST_BACKTRACE", "1"); info!("Hooked up rust logger!"); } define_destructor!(sync15_passwords_state_destroy, PasswordState); // This is probably too many string arguments... #[no_mangle] pub unsafe extern "C" fn sync15_passwords_state_new( mentat_db_path: *const c_char, encryption_key: *const c_char, error: *mut ExternError ) -> *mut PasswordState { INIT_LOGGER.call_once(init_logger); with_translated_result(error, || { let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path), c_char_to_string(encryption_key))?; let engine = PasswordEngine::new(store)?; Ok(PasswordState { engine, sync: None, }) }) } // indirection to help `?` figure out the target error type fn parse_url(url: &str) -> sync::Result<url::Url> { Ok(url::Url::parse(url)?) 
} #[no_mangle] pub unsafe extern "C" fn sync15_passwords_sync( state: *mut PasswordState, key_id: *const c_char, access_token: *const c_char, sync_key: *const c_char, tokenserver_url: *const c_char, error: *mut ExternError ) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let root_sync_key = sync::KeyBundle::from_ksync_base64( c_char_to_string(sync_key).into())?; let requested_init = Sync15StorageClientInit { key_id: c_char_to_string(key_id).into(), access_token: c_char_to_string(access_token).into(), tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?, }; // TODO: If `to_ready` (or anything else with a ?) fails below, this // `take()` means we end up with `state.sync.is_none()`, which means the // next sync will redownload meta/global, crypto/keys, etc. without // needing to. (AFAICT fixing this requires a change in sync15-adapter, // since to_ready takes GlobalState as a move, and it's not clear if // that change even is a good idea). let mut sync_info = state.sync.take().map(Ok) .unwrap_or_else(|| -> sync::Result<SyncInfo> { let state = GlobalState::default(); let client = Sync15StorageClient::new(requested_init.clone())?; Ok(SyncInfo { state, client, last_client_init: requested_init.clone(), }) })?; // If the options passed for initialization of the storage client aren't // the same as the ones we used last time, reinitialize it. (Note that // we could avoid the comparison in the case where we had `None` in // `state.sync` before, but this probably doesn't matter). if requested_init != sync_info.last_client_init { sync_info.client = Sync15StorageClient::new(requested_init.clone())?; sync_info.last_client_init = requested_init; } { // Scope borrow of `sync_info.client` let mut state_machine = sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key); let next_sync_state = state_machine.to_ready(sync_info.state)?; sync_info.state = next_sync_state; } // We don't use a ? on the next line so that even if `state.engine.sync` // fails, we don't forget the sync_state. let result = state.engine.sync(&sync_info.client, &sync_info.state); state.sync = Some(sync_info); result }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.touch_credential(c_char_to_string(id).into())?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool { with_translated_value_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let deleted = state.engine.delete_credential(c_char_to_string(id).into())?; Ok(deleted) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.wipe()?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.reset()?; // XXX We probably need to clear out some things from `state.service`!
Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char { with_translated_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let passwords: Vec<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_all_sync_passwords(&mut in_progress_read)? }; let result = serde_json::to_string(&passwords)?; Ok(result) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> *mut c_char { with_translated_opt_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let maybe_pass: Option<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_sync_password(&mut in_progress_read, c_char_to_string(id).into())? }; let pass = if let Some(p) = maybe_pass { p } else { return Ok(None) }; Ok(Some(serde_json::to_string(&pass)?)) }) } #[no_mangle] pub extern "C" fn wtf_destroy_c_char(s: *mut c_char) { // the "pub use" above should be enough to expose this? // It appears that is enough to expose it in a windows DLL, but for // some reason it's not exported for Android. // *sob* - and now that I've defined this, suddenly this *and* // destroy_c_char are exposed (and removing this again removes the // destroy_c_char) // Oh well, a yak for another day. destroy_c_char(s); }
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License.
random_line_split
lib.rs
// Copyright 2018 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // We take the "low road" here when returning the structs - we expose the // items (and arrays of items) as strings, which are JSON. The rust side of // the world gets serialization and deserialization for free and it makes // memory management that little bit simpler. extern crate failure; extern crate serde_json; extern crate url; extern crate reqwest; #[macro_use] extern crate ffi_toolkit; extern crate mentat; extern crate sync15_passwords; extern crate sync15_adapter as sync; #[macro_use] extern crate log; mod error; use error::{ ExternError, with_translated_result, with_translated_value_result, with_translated_void_result, with_translated_string_result, with_translated_opt_string_result, }; use std::os::raw::{ c_char, }; use std::sync::{Once, ONCE_INIT}; use ffi_toolkit::string::{ c_char_to_string, }; pub use ffi_toolkit::memory::{ destroy_c_char, }; use sync::{ Sync15StorageClient, Sync15StorageClientInit, GlobalState, }; use sync15_passwords::{ passwords, PasswordEngine, ServerPassword, }; pub struct SyncInfo { state: GlobalState, client: Sync15StorageClient, // Used so that we know whether or not we need to re-initialize `client` last_client_init: Sync15StorageClientInit, } pub struct PasswordState { engine: PasswordEngine, sync: Option<SyncInfo>, } #[cfg(target_os = "android")] extern { pub fn __android_log_write(level: ::std::os::raw::c_int, tag: *const c_char, text: *const c_char) -> ::std::os::raw::c_int; } struct DevLogger; impl log::Log for DevLogger { fn enabled(&self, _: &log::Metadata) -> bool { true } fn log(&self, record: &log::Record) { let message = format!("{}:{} -- {}", record.level(), record.target(), record.args()); println!("{}", message); #[cfg(target_os = "android")] { unsafe { let message = ::std::ffi::CString::new(message).unwrap(); let level_int = match record.level() { log::Level::Trace => 2, log::Level::Debug => 3, log::Level::Info => 4, log::Level::Warn => 5, log::Level::Error => 6, }; let message = message.as_ptr(); let tag = b"RustInternal\0"; __android_log_write(level_int, tag.as_ptr() as *const c_char, message); } } // TODO ios (use NSLog(__CFStringMakeConstantString(b"%s\0"),...), maybe windows? (OutputDebugStringA) } fn flush(&self) {} } static INIT_LOGGER: Once = ONCE_INIT; static DEV_LOGGER: &'static log::Log = &DevLogger; fn init_logger() { log::set_logger(DEV_LOGGER).unwrap(); log::set_max_level(log::LevelFilter::Trace); std::env::set_var("RUST_BACKTRACE", "1"); info!("Hooked up rust logger!"); } define_destructor!(sync15_passwords_state_destroy, PasswordState); // This is probably too many string arguments... 
#[no_mangle] pub unsafe extern "C" fn sync15_passwords_state_new( mentat_db_path: *const c_char, encryption_key: *const c_char, error: *mut ExternError ) -> *mut PasswordState { INIT_LOGGER.call_once(init_logger); with_translated_result(error, || { let store = mentat::Store::open_with_key(c_char_to_string(mentat_db_path), c_char_to_string(encryption_key))?; let engine = PasswordEngine::new(store)?; Ok(PasswordState { engine, sync: None, }) }) } // indirection to help `?` figure out the target error type fn parse_url(url: &str) -> sync::Result<url::Url> {
#[no_mangle] pub unsafe extern "C" fn sync15_passwords_sync( state: *mut PasswordState, key_id: *const c_char, access_token: *const c_char, sync_key: *const c_char, tokenserver_url: *const c_char, error: *mut ExternError ) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let root_sync_key = sync::KeyBundle::from_ksync_base64( c_char_to_string(sync_key).into())?; let requested_init = Sync15StorageClientInit { key_id: c_char_to_string(key_id).into(), access_token: c_char_to_string(access_token).into(), tokenserver_url: parse_url(c_char_to_string(tokenserver_url))?, }; // TODO: If `to_ready` (or anything else with a ?) fails below, this // `take()` means we end up with `state.sync.is_none()`, which means the // next sync will redownload meta/global, crypto/keys, etc. without // needing to. (AFAICT fixing this requires a change in sync15-adapter, // since to_ready takes GlobalState as a move, and it's not clear if // that change even is a good idea). let mut sync_info = state.sync.take().map(Ok) .unwrap_or_else(|| -> sync::Result<SyncInfo> { let state = GlobalState::default(); let client = Sync15StorageClient::new(requested_init.clone())?; Ok(SyncInfo { state, client, last_client_init: requested_init.clone(), }) })?; // If the options passed for initialization of the storage client aren't // the same as the ones we used last time, reinitialize it. (Note that // we could avoid the comparison in the case where we had `None` in // `state.sync` before, but this probably doesn't matter). if requested_init != sync_info.last_client_init { sync_info.client = Sync15StorageClient::new(requested_init.clone())?; sync_info.last_client_init = requested_init; } { // Scope borrow of `sync_info.client` let mut state_machine = sync::SetupStateMachine::for_readonly_sync(&sync_info.client, &root_sync_key); let next_sync_state = state_machine.to_ready(sync_info.state)?; sync_info.state = next_sync_state; } // We don't use a ? on the next line so that even if `state.engine.sync` // fails, we don't forget the sync_state. let result = state.engine.sync(&sync_info.client, &sync_info.state); state.sync = Some(sync_info); result }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_touch(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.touch_credential(c_char_to_string(id).into())?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_delete(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> bool { with_translated_value_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; let deleted = state.engine.delete_credential(c_char_to_string(id).into())?; Ok(deleted) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_wipe(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.wipe()?; Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_reset(state: *mut PasswordState, error: *mut ExternError) { with_translated_void_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; state.engine.reset()?; // XXX We probably need to clear out some things from `state.service`!
Ok(()) }); } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_all(state: *mut PasswordState, error: *mut ExternError) -> *mut c_char { with_translated_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let passwords: Vec<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_all_sync_passwords(&mut in_progress_read)? }; let result = serde_json::to_string(&passwords)?; Ok(result) }) } #[no_mangle] pub unsafe extern "C" fn sync15_passwords_get_by_id(state: *mut PasswordState, id: *const c_char, error: *mut ExternError) -> *mut c_char { with_translated_opt_string_result(error, || { assert_pointer_not_null!(state); let state = &mut *state; // Type declaration is just to make sure we have the right type (and for documentation) let maybe_pass: Option<ServerPassword> = { let mut in_progress_read = state.engine.store.begin_read()?; passwords::get_sync_password(&mut in_progress_read, c_char_to_string(id).into())? }; let pass = if let Some(p) = maybe_pass { p } else { return Ok(None) }; Ok(Some(serde_json::to_string(&pass)?)) }) } #[no_mangle] pub extern "C" fn wtf_destroy_c_char(s: *mut c_char) { // the "pub use" above should be enough to expose this? // It appears that is enough to expose it in a windows DLL, but for // some reason it's not exported for Android. // *sob* - and now that I've defined this, suddenly this *and* // destroy_c_char are exposed (and removing this again removes the // destroy_c_char) // Oh well, a yak for another day. destroy_c_char(s); }
Ok(url::Url::parse(url)?) }
identifier_body
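The sync entry point in these records rebuilds its storage client only when the requested init parameters differ from the ones cached alongside it. A stripped-down sketch of that reuse test with stand-in types (`Init` and `Client` are placeholders, not the crate's real `Sync15StorageClientInit` and `Sync15StorageClient`):

```rust
// Stand-in types for the cached-client-plus-init pair.
#[derive(Clone, PartialEq)]
struct Init {
    token: String,
}

struct Client {
    init: Init,
}

impl Client {
    fn new(init: Init) -> Client {
        Client { init }
    }
}

struct Cache {
    client: Option<Client>,
}

impl Cache {
    // Reuse the cached client unless the requested init differs from the
    // one it was built with; only then pay for constructing a new client.
    fn client_for(&mut self, requested: Init) -> &Client {
        let stale = match &self.client {
            Some(client) => client.init != requested,
            None => true,
        };
        if stale {
            self.client = Some(Client::new(requested));
        }
        self.client.as_ref().unwrap()
    }
}
```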
console.rs
// Copyright (c) 2019 - 2020 ESRLabs // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{Event, EventTx, Name, State, TerminationReason, SETTINGS}; use anyhow::{anyhow, Context, Result}; use async_std::{io, net::TcpListener, path::PathBuf, prelude::*, sync, task}; use itertools::Itertools; use log::{debug, warn, *}; use prettytable::{format, Table}; use std::{iter, time}; /// Helptext displayed on the `help` command. The `dcon` tool parses this text /// and creates suggestions and completions. Make sure the help text stays correct when /// adding/removing/changing commands. const HELP: &str = "\ help: Display help text\n\ list: List all loaded images\n\ ps: List running instances\n\ shutdown: Stop the north runtime\n\ settings: Dump north configuration\n\ start: PATTERN Start containers matching PATTERN e.g. 'start hello*'. Omit PATTERN to start all containers\n\ stop: PATTERN Stop all containers matching PATTERN. Omit PATTERN to stop all running containers\n\ uninstall: PATTERN: Unmount and remove all containers matching PATTERN\n\ update: Run update with provided resources\n\ versions: Version list of installed applications"; pub async fn init(tx: &EventTx) -> Result<()> { let rx = serve().await?; let tx = tx.clone(); // Spawn a task that handles lines received on the debug port. task::spawn(async move { while let Ok((line, tx_reply)) = rx.recv().await { tx.send(Event::Console(line, tx_reply)).await; } }); Ok(()) } pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> { info!("Running \'{}\'", command); let mut commands = command.split_whitespace(); if let Some(cmd) = commands.next() { let args = commands.collect::<Vec<&str>>(); let start_timestamp = time::Instant::now(); match match cmd { "help" => help(), "list" => list(state).await, "ps" => ps(state).await, "settings" => settings(), "shutdown" => shutdown(state).await, "start" => start(state, &args).await, "stop" => stop(state, &args).await, "uninstall" => uninstall(state, &args).await, "update" => update(state, &args).await, "versions" => versions(state), _ => Err(anyhow!("Unknown command: {}", command)), } { Ok(mut r) => { r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed())); reply.send(r).await } Err(e) => { let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e); reply.send(msg).await } } } else { reply.send("Invalid command".into()).await } Ok(()) } /// Return the help text fn help() -> Result<String> { Ok(HELP.into()) } /// List all known container instances and their state. async fn list(state: &State) -> Result<String>
.unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) } /// List all running applications. #[cfg(all(not(target_os = "android"), not(target_os = "linux")))] async fn ps(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Uptime".to_string(), ]] .iter() .cloned() .chain( state .applications() .filter_map(|app| app.process_context().map(|p| (app, p))) .sorted_by_key(|(app, _)| app.name()) .map(|(app, context)| { vec![ app.name().to_string(), app.version().to_string(), format!("{:?}", context.uptime()), ] }), ), ) } /// List all running applications. #[cfg(any(target_os = "android", target_os = "linux"))] async fn ps(state: &State) -> Result<String> { use pretty_bytes::converter::convert; const PAGE_SIZE: usize = 4096; let mut result = vec![[ "Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime", ] .iter() .map(ToString::to_string) .collect()]; for app in state.applications().sorted_by_key(|app| app.name()) { if let Some(ref context) = app.process_context() { let pid = context.process().pid(); let statm = procinfo::pid::statm(pid as i32)?; result.push(vec![ app.name().to_string(), app.version().to_string(), pid.to_string(), convert((statm.size * PAGE_SIZE) as f64), convert((statm.resident * PAGE_SIZE) as f64), convert((statm.share * PAGE_SIZE) as f64), convert((statm.text * PAGE_SIZE) as f64), convert((statm.data * PAGE_SIZE) as f64), format!("{:?}", context.uptime()), ]); } } to_table(result) } /// Start applications. If `args` is empty *all* known applications that /// are not in a running state are started. If a argument is supplied it /// is used to construct a Regex and all container (names) matching that /// Regex are attempted to be started. async fn start(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() // Filter for not already running containers .filter(|app| app.process_context().is_none()) // Filter ressource container that are not startable .filter(|app|!app.container().is_resource_container()) // Filter matching container .filter(|app| re.is_match(app.name())) // Sort container by name .sorted_by_key(|app| app.name().clone()) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let start = time::Instant::now(); match state.start(&app, 0).await { Ok(_) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), format!("Failed: {:?}", e), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Dump settings fn settings() -> Result<String> { Ok(format!("{}", *SETTINGS)) } /// Stop one, some or all containers. See start for the argument handling. 
async fn stop(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() .filter(|app| app.process_context().is_some()) .filter(|app| re.is_match(app.name())) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let timeout = time::Duration::from_secs(10); let reason = TerminationReason::Stopped; let start = time::Instant::now(); match state.stop(&app, timeout, reason).await { Ok(()) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), e.to_string(), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Unmount and remove containers. See `start` for the argument handling. /// The data directory is not removed. This needs discussion. async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec!["Name".to_string(), "Result".to_string()]]; let to_uninstall = state .applications .values() .filter(|app| app.process_context().is_none()) .filter(|app| re.is_match(app.name())) .map(|app| app.name()) .cloned() .collect::<Vec<Name>>(); for app in &to_uninstall { match state.uninstall(&app).await { Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]), Err(e) => result.push(vec![app.to_string(), e.to_string()]), } } to_table(result) } /// Trigger the update module. async fn update(state: &mut State, args: &[&str]) -> Result<String> { if args.len() != 1 { return Err(anyhow!("Invalid arguments for update command")); } let dir = PathBuf::from(args[0]); if !dir.exists().await { let err = anyhow!("Update directory {} does not exist", dir.display()); Err(err) } else { let updates = crate::update::update(state, &dir).await?; let mut result = vec![vec![ "Name".to_string(), "From".to_string(), "To".to_string(), ]]; for update in &updates { result.push(vec![ update.0.to_string(), (update.1).0.to_string(), (update.1).1.to_string(), ]) } to_table(result) } } /// Send a shutdown command to the main loop. async fn shutdown(state: &mut State) -> Result<String> { let stop = stop(state, &[]).await?; state.tx().send(Event::Shutdown).await; Ok(stop) } /// Open a TCP socket and read lines terminated with `\n`. async fn serve() -> Result<sync::Receiver<(String, sync::Sender<String>)>> { let address = &SETTINGS.console_address; debug!("Starting console on {}", address); let listener = TcpListener::bind(address) .await .with_context(|| format!("Failed to open listener on {}", address))?; let (tx, rx) = sync::channel(1000); task::spawn(async move { let mut incoming = listener.incoming(); // Spawn a task for each incoming connection.
while let Some(stream) = incoming.next().await { let (tx_reply, rx_reply) = sync::channel::<String>(10); if let Ok(stream) = stream { let peer = match stream.peer_addr() { Ok(peer) => peer, Err(e) => { warn!("Failed to get peer from console connection: {}", e); return; } }; debug!("Client {:?} connected", peer); let tx = tx.clone(); task::spawn(async move { let (reader, writer) = &mut (&stream, &stream); let reader = io::BufReader::new(reader); let mut lines = reader.lines(); while let Some(Ok(line)) = lines.next().await { let line = line.trim(); tx.send((line.into(), tx_reply.clone())).await; if let Ok(reply) = rx_reply.recv().await { if let Err(e) = writer.write_all(reply.as_bytes()).await { warn!("Error on console connection {:?}: {}", peer, e); break; } } } }); } } }); Ok(rx) } /// List versions of currently known containers and applications. fn versions(state: &mut State) -> Result<String> { let versions = state .applications() .map(|app| app.manifest()) .map(|manifest| { ( manifest.name.clone(), manifest.version.clone(), manifest.arch.clone(), ) }) .collect::<Vec<_>>(); serde_json::to_string(&versions).context("Failed to encode manifest to json") } /// Format something iterable into an ASCII table. The first row of the table input /// contains the column titles. The table cannot be empty. fn to_table<T: iter::IntoIterator<Item = I>, I: iter::IntoIterator<Item = S>, S: ToString>( table: T, ) -> Result<String> { let mut t = Table::new(); let format = prettytable::format::FormatBuilder::new() .column_separator('|') .separators(&[], format::LineSeparator::new('-', '+', '+', '+')) .padding(1, 1) .build(); t.set_format(format); let mut rows = table.into_iter(); let titles = rows.next().ok_or_else(|| anyhow!("Missing titles"))?.into(); t.set_titles(titles); for r in rows { t.add_row(r.into()); } let mut result = vec![]; t.print(&mut result).context("Failed to format table")?; String::from_utf8(result).context("Invalid table content") } fn arg_regex(args: &[&str]) -> Result<regex::Regex> { match args.len() { 1 => regex::Regex::new(args[0]) .with_context(|| format!("Invalid container name regex {}", args[0])), // `args` is empty in this branch, so the context must not index into it. 0 => regex::Regex::new(".*") .context("Invalid default container name regex"), _ => Err(anyhow!("Arguments invalid. Use `start PATTERN`",)), } }
{ to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Running".to_string(), "Type".to_string(), ]] .iter() .cloned() .chain( state .applications() .sorted_by_key(|app| app.name()) .map(|app| { vec![ app.name().to_string(), app.version().to_string(), app.process_context() .map(|c| format!("Yes (pid: {})", c.process().pid())) .unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) }
identifier_body
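The record above centers on the `to_table` helper, which renders rows through the prettytable crate. A minimal standalone sketch of that pattern follows; the sample rows are invented, and only prettytable calls already visible in the record are used.

use prettytable::{format, Table};

fn main() {
    let rows = vec![
        vec!["Name", "Version", "Running"],
        vec!["hello", "0.0.1", "Yes (pid: 42)"],
        vec!["crashing", "0.0.1", "No"],
    ];
    let mut t = Table::new();
    // Same compact format as the record: '|' column separators, '-'/'+' title rule.
    let fmt = format::FormatBuilder::new()
        .column_separator('|')
        .separators(&[], format::LineSeparator::new('-', '+', '+', '+'))
        .padding(1, 1)
        .build();
    t.set_format(fmt);
    let mut it = rows.into_iter();
    // The first row becomes the title row, as in `to_table`.
    t.set_titles(it.next().unwrap().into());
    for r in it {
        t.add_row(r.into());
    }
    t.printstd(); // print the ASCII table to stdout
}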
console.rs
// Copyright (c) 2019 - 2020 ESRLabs // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{Event, EventTx, Name, State, TerminationReason, SETTINGS}; use anyhow::{anyhow, Context, Result}; use async_std::{io, net::TcpListener, path::PathBuf, prelude::*, sync, task}; use itertools::Itertools; use log::{debug, warn, *}; use prettytable::{format, Table}; use std::{iter, time}; /// Help text displayed on the `help` command. The `dcon` tool parses this text /// and creates suggestions and completions. Ensure the help text is kept correct when /// adding/removing/changing commands. const HELP: &str = "\ help: Display help text\n\ list: List all loaded images\n\ ps: List running instances\n\ shutdown: Stop the north runtime\n\ settings: Dump north configuration\n\ start: PATTERN Start containers matching PATTERN e.g. 'start hello*'. Omit PATTERN to start all containers\n\ stop: PATTERN Stop all containers matching PATTERN. Omit PATTERN to stop all running containers\n\ uninstall: PATTERN: Unmount and remove all containers matching PATTERN\n\ update: Run update with provided resources\n\ versions: Version list of installed applications"; pub async fn
(tx: &EventTx) -> Result<()> { let rx = serve().await?; let tx = tx.clone(); // Spawn a task that handles lines received on the debug port. task::spawn(async move { while let Ok((line, tx_reply)) = rx.recv().await { tx.send(Event::Console(line, tx_reply)).await; } }); Ok(()) } pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> { info!("Running \'{}\'", command); let mut commands = command.split_whitespace(); if let Some(cmd) = commands.next() { let args = commands.collect::<Vec<&str>>(); let start_timestamp = time::Instant::now(); match match cmd { "help" => help(), "list" => list(state).await, "ps" => ps(state).await, "settings" => settings(), "shutdown" => shutdown(state).await, "start" => start(state, &args).await, "stop" => stop(state, &args).await, "uninstall" => uninstall(state, &args).await, "update" => update(state, &args).await, "versions" => versions(state), _ => Err(anyhow!("Unknown command: {}", command)), } { Ok(mut r) => { r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed())); reply.send(r).await } Err(e) => { let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e); reply.send(msg).await } } } else { reply.send("Invalid command".into()).await } Ok(()) } /// Return the help text fn help() -> Result<String> { Ok(HELP.into()) } /// List all known containers instances and their state. async fn list(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Running".to_string(), "Type".to_string(), ]] .iter() .cloned() .chain( state .applications() .sorted_by_key(|app| app.name()) .map(|app| { vec![ app.name().to_string(), app.version().to_string(), app.process_context() .map(|c| format!("Yes (pid: {})", c.process().pid())) .unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) } /// List all running applications. #[cfg(all(not(target_os = "android"), not(target_os = "linux")))] async fn ps(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Uptime".to_string(), ]] .iter() .cloned() .chain( state .applications() .filter_map(|app| app.process_context().map(|p| (app, p))) .sorted_by_key(|(app, _)| app.name()) .map(|(app, context)| { vec![ app.name().to_string(), app.version().to_string(), format!("{:?}", context.uptime()), ] }), ), ) } /// List all running applications. #[cfg(any(target_os = "android", target_os = "linux"))] async fn ps(state: &State) -> Result<String> { use pretty_bytes::converter::convert; const PAGE_SIZE: usize = 4096; let mut result = vec![[ "Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime", ] .iter() .map(ToString::to_string) .collect()]; for app in state.applications().sorted_by_key(|app| app.name()) { if let Some(ref context) = app.process_context() { let pid = context.process().pid(); let statm = procinfo::pid::statm(pid as i32)?; result.push(vec![ app.name().to_string(), app.version().to_string(), pid.to_string(), convert((statm.size * PAGE_SIZE) as f64), convert((statm.resident * PAGE_SIZE) as f64), convert((statm.share * PAGE_SIZE) as f64), convert((statm.text * PAGE_SIZE) as f64), convert((statm.data * PAGE_SIZE) as f64), format!("{:?}", context.uptime()), ]); } } to_table(result) } /// Start applications. If `args` is empty *all* known applications that /// are not in a running state are started. 
If a argument is supplied it /// is used to construct a Regex and all container (names) matching that /// Regex are attempted to be started. async fn start(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() // Filter for not already running containers .filter(|app| app.process_context().is_none()) // Filter ressource container that are not startable .filter(|app|!app.container().is_resource_container()) // Filter matching container .filter(|app| re.is_match(app.name())) // Sort container by name .sorted_by_key(|app| app.name().clone()) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let start = time::Instant::now(); match state.start(&app, 0).await { Ok(_) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), format!("Failed: {:?}", e), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Dump settings fn settings() -> Result<String> { Ok(format!("{}", *SETTINGS)) } /// Stop one, some or all containers. See start for the argument handling. async fn stop(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() .filter(|app| app.process_context().is_some()) .filter(|app| re.is_match(app.name())) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let timeout = time::Duration::from_secs(10); let reason = TerminationReason::Stopped; let start = time::Instant::now(); match state.stop(&app, timeout, reason).await { Ok(()) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), e.to_string(), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Umount and remove a containers. See `start` for the argument handling. /// The data directory is not removed. This needs discussion. async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec!["Name".to_string(), "Result".to_string()]]; let to_uninstall = state .applications .values() .filter(|app| app.process_context().is_none()) .filter(|app| re.is_match(app.name())) .map(|app| app.name()) .cloned() .collect::<Vec<Name>>(); for app in &to_uninstall { match state.uninstall(&app).await { Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]), Err(e) => result.push(vec![app.to_string(), e.to_string()]), } } to_table(result) } /// Trigger the update module. async fn update(state: &mut State, args: &[&str]) -> Result<String> { if args.len()!= 1 { return Err(anyhow!("Invalid arguments for update command")); } let dir = PathBuf::from(args[0]); if!dir.exists().await { let err = anyhow!("Update directory {} does not exists", dir.display()); Err(err) } else { let updates = crate::update::update(state, &dir).await?; let mut result = vec![vec![ "Name".to_string(), "From".to_string(), "To".to_string(), ]]; for update in &updates { result.push(vec![ update.0.to_string(), (update.1).0.to_string(), (update.1).1.to_string(), ]) } to_table(result) } } /// Send a shutdown command to the main loop. 
async fn shutdown(state: &mut State) -> Result<String> { let stop = stop(state, &[]).await?; state.tx().send(Event::Shutdown).await; Ok(stop) } /// Open a TCP socket and read lines terminated with `\n`. async fn serve() -> Result<sync::Receiver<(String, sync::Sender<String>)>> { let address = &SETTINGS.console_address; debug!("Starting console on {}", address); let listener = TcpListener::bind(address) .await .with_context(|| format!("Failed to open listener on {}", address))?; let (tx, rx) = sync::channel(1000); task::spawn(async move { let mut incoming = listener.incoming(); // Spawn a task for each incoming connection. while let Some(stream) = incoming.next().await { let (tx_reply, rx_reply) = sync::channel::<String>(10); if let Ok(stream) = stream { let peer = match stream.peer_addr() { Ok(peer) => peer, Err(e) => { warn!("Failed to get peer from console connection: {}", e); return; } }; debug!("Client {:?} connected", peer); let tx = tx.clone(); task::spawn(async move { let (reader, writer) = &mut (&stream, &stream); let reader = io::BufReader::new(reader); let mut lines = reader.lines(); while let Some(Ok(line)) = lines.next().await { let line = line.trim(); tx.send((line.into(), tx_reply.clone())).await; if let Ok(reply) = rx_reply.recv().await { if let Err(e) = writer.write_all(reply.as_bytes()).await { warn!("Error on console connection {:?}: {}", peer, e); break; } } } }); } } }); Ok(rx) } /// List versions of currently known containers and applications. fn versions(state: &mut State) -> Result<String> { let versions = state .applications() .map(|app| app.manifest()) .map(|manifest| { ( manifest.name.clone(), manifest.version.clone(), manifest.arch.clone(), ) }) .collect::<Vec<_>>(); serde_json::to_string(&versions).context("Failed to encode manifest to json") } /// Format something iterateable into a ascii table. The first row of the table input /// contains the column titles. The table cannot be empty. fn to_table<T: iter::IntoIterator<Item = I>, I: iter::IntoIterator<Item = S>, S: ToString>( table: T, ) -> Result<String> { let mut t = Table::new(); let format = prettytable::format::FormatBuilder::new() .column_separator('|') .separators(&[], format::LineSeparator::new('-', '+', '+', '+')) .padding(1, 1) .build(); t.set_format(format); let mut rows = table.into_iter(); let titles = rows.next().ok_or_else(|| anyhow!("Missing titles"))?.into(); t.set_titles(titles); for r in rows { t.add_row(r.into()); } let mut result = vec![]; t.print(&mut result).context("Failed to format table")?; String::from_utf8(result).context("Invalid table content") } fn arg_regex(args: &[&str]) -> Result<regex::Regex> { match args.len() { 1 => regex::Regex::new(args[0]) .with_context(|| format!("Invalid container name regex {}", args[0])), 0 => regex::Regex::new(".*") .with_context(|| format!("Invalid container name regex {}", args[0])), _ => Err(anyhow!("Arguments invalid. Use `start PATTERN`",)), } }
init
identifier_name
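The console in this record reads '\n'-terminated command lines over TCP and writes a reply back on the same connection. A hypothetical client for that protocol, using only the standard library; the address is an assumption, since the real one comes from SETTINGS.console_address.

use std::io::{BufRead, BufReader, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    // Assumed address; the runtime reads the real one from its settings.
    let mut stream = TcpStream::connect("127.0.0.1:4200")?;
    // One command, terminated by '\n', as the server-side line reader expects.
    stream.write_all(b"versions\n")?;
    // Read the first line of the reply (replies may span multiple lines).
    let mut reply = String::new();
    BufReader::new(&stream).read_line(&mut reply)?;
    print!("{}", reply);
    Ok(())
}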
console.rs
// Copyright (c) 2019 - 2020 ESRLabs // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{Event, EventTx, Name, State, TerminationReason, SETTINGS}; use anyhow::{anyhow, Context, Result}; use async_std::{io, net::TcpListener, path::PathBuf, prelude::*, sync, task}; use itertools::Itertools; use log::{debug, warn, *}; use prettytable::{format, Table}; use std::{iter, time}; /// Helptext displayed on the `help` command. The `dcon` tool parses this text /// and creates suggestions and completions. Ensure to a correct helptext when /// adding/removing/changing commands. const HELP: &str = "\ help: Display help text\n\ list: List all loaded images\n\ ps: List running instances\n\ shutdown: Stop the north runtime\n\ settings: Dump north configuration\n\ start: PATTERN Start containers matching PATTERN e.g'start hello*'. Omit PATTERN to start all containers\n\ stop: PATTERN Stop all containers matching PATTERN. Omit PATTERN to stop all running containers\n\ uninstall: PATTERN: Unmount and remove all containers matching PATTERN\n\ update: Run update with provided ressources\n\ versions: Version list of installed applications"; pub async fn init(tx: &EventTx) -> Result<()> { let rx = serve().await?; let tx = tx.clone(); // Spawn a task that handles lines received on the debug port. task::spawn(async move { while let Ok((line, tx_reply)) = rx.recv().await { tx.send(Event::Console(line, tx_reply)).await; } }); Ok(()) } pub async fn process(state: &mut State, command: &str, reply: sync::Sender<String>) -> Result<()> { info!("Running \'{}\'", command); let mut commands = command.split_whitespace(); if let Some(cmd) = commands.next() { let args = commands.collect::<Vec<&str>>(); let start_timestamp = time::Instant::now(); match match cmd { "help" => help(), "list" => list(state).await, "ps" => ps(state).await, "settings" => settings(), "shutdown" => shutdown(state).await, "start" => start(state, &args).await, "stop" => stop(state, &args).await, "uninstall" => uninstall(state, &args).await, "update" => update(state, &args).await, "versions" => versions(state), _ => Err(anyhow!("Unknown command: {}", command)), } { Ok(mut r) => { r.push_str(&format!("Duration: {:?}\n", start_timestamp.elapsed())); reply.send(r).await } Err(e) => { let msg = format!("Failed to run: {} {:?}: {}\n", cmd, args, e); reply.send(msg).await } } } else { reply.send("Invalid command".into()).await } Ok(()) } /// Return the help text fn help() -> Result<String> { Ok(HELP.into()) } /// List all known containers instances and their state. 
async fn list(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Running".to_string(), "Type".to_string(), ]] .iter() .cloned() .chain( state .applications() .sorted_by_key(|app| app.name()) .map(|app| { vec![ app.name().to_string(), app.version().to_string(), app.process_context() .map(|c| format!("Yes (pid: {})", c.process().pid())) .unwrap_or_else(|| "No".to_string()), if app.container().is_resource_container() { "resource" } else { "app" } .to_owned(), ] }), ), ) } /// List all running applications. #[cfg(all(not(target_os = "android"), not(target_os = "linux")))] async fn ps(state: &State) -> Result<String> { to_table( vec![vec![ "Name".to_string(), "Version".to_string(), "Uptime".to_string(), ]] .iter() .cloned() .chain( state .applications() .filter_map(|app| app.process_context().map(|p| (app, p))) .sorted_by_key(|(app, _)| app.name()) .map(|(app, context)| { vec![ app.name().to_string(), app.version().to_string(), format!("{:?}", context.uptime()), ] }), ), ) } /// List all running applications. #[cfg(any(target_os = "android", target_os = "linux"))] async fn ps(state: &State) -> Result<String> { use pretty_bytes::converter::convert; const PAGE_SIZE: usize = 4096; let mut result = vec![[ "Name", "Version", "PID", "Size", "Resident", "Shared", "Text", "Data", "Uptime", ] .iter() .map(ToString::to_string) .collect()]; for app in state.applications().sorted_by_key(|app| app.name()) { if let Some(ref context) = app.process_context() { let pid = context.process().pid(); let statm = procinfo::pid::statm(pid as i32)?; result.push(vec![ app.name().to_string(), app.version().to_string(), pid.to_string(), convert((statm.size * PAGE_SIZE) as f64), convert((statm.resident * PAGE_SIZE) as f64), convert((statm.share * PAGE_SIZE) as f64), convert((statm.text * PAGE_SIZE) as f64), convert((statm.data * PAGE_SIZE) as f64), format!("{:?}", context.uptime()), ]); } } to_table(result)
/// Start applications. If `args` is empty *all* known applications that /// are not in a running state are started. If a argument is supplied it /// is used to construct a Regex and all container (names) matching that /// Regex are attempted to be started. async fn start(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() // Filter for not already running containers .filter(|app| app.process_context().is_none()) // Filter ressource container that are not startable .filter(|app|!app.container().is_resource_container()) // Filter matching container .filter(|app| re.is_match(app.name())) // Sort container by name .sorted_by_key(|app| app.name().clone()) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let start = time::Instant::now(); match state.start(&app, 0).await { Ok(_) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), format!("Failed: {:?}", e), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Dump settings fn settings() -> Result<String> { Ok(format!("{}", *SETTINGS)) } /// Stop one, some or all containers. See start for the argument handling. async fn stop(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec![ "Name".to_string(), "Result".to_string(), "Duration".to_string(), ]]; let apps = state .applications() .filter(|app| app.process_context().is_some()) .filter(|app| re.is_match(app.name())) .map(|app| app.name().clone()) .collect::<Vec<Name>>(); for app in &apps { let timeout = time::Duration::from_secs(10); let reason = TerminationReason::Stopped; let start = time::Instant::now(); match state.stop(&app, timeout, reason).await { Ok(()) => result.push(vec![ app.to_string(), "Ok".to_string(), format!("{:?}", start.elapsed()), ]), Err(e) => result.push(vec![ app.to_string(), e.to_string(), format!("{:?}", start.elapsed()), ]), } } to_table(result) } /// Umount and remove a containers. See `start` for the argument handling. /// The data directory is not removed. This needs discussion. async fn uninstall(state: &mut State, args: &[&str]) -> Result<String> { let re = arg_regex(args)?; let mut result = vec![vec!["Name".to_string(), "Result".to_string()]]; let to_uninstall = state .applications .values() .filter(|app| app.process_context().is_none()) .filter(|app| re.is_match(app.name())) .map(|app| app.name()) .cloned() .collect::<Vec<Name>>(); for app in &to_uninstall { match state.uninstall(&app).await { Ok(()) => result.push(vec![app.to_string(), "Ok".to_string()]), Err(e) => result.push(vec![app.to_string(), e.to_string()]), } } to_table(result) } /// Trigger the update module. async fn update(state: &mut State, args: &[&str]) -> Result<String> { if args.len()!= 1 { return Err(anyhow!("Invalid arguments for update command")); } let dir = PathBuf::from(args[0]); if!dir.exists().await { let err = anyhow!("Update directory {} does not exists", dir.display()); Err(err) } else { let updates = crate::update::update(state, &dir).await?; let mut result = vec![vec![ "Name".to_string(), "From".to_string(), "To".to_string(), ]]; for update in &updates { result.push(vec![ update.0.to_string(), (update.1).0.to_string(), (update.1).1.to_string(), ]) } to_table(result) } } /// Send a shutdown command to the main loop. 
async fn shutdown(state: &mut State) -> Result<String> { let stop = stop(state, &[]).await?; state.tx().send(Event::Shutdown).await; Ok(stop) } /// Open a TCP socket and read lines terminated with `\n`. async fn serve() -> Result<sync::Receiver<(String, sync::Sender<String>)>> { let address = &SETTINGS.console_address; debug!("Starting console on {}", address); let listener = TcpListener::bind(address) .await .with_context(|| format!("Failed to open listener on {}", address))?; let (tx, rx) = sync::channel(1000); task::spawn(async move { let mut incoming = listener.incoming(); // Spawn a task for each incoming connection. while let Some(stream) = incoming.next().await { let (tx_reply, rx_reply) = sync::channel::<String>(10); if let Ok(stream) = stream { let peer = match stream.peer_addr() { Ok(peer) => peer, Err(e) => { warn!("Failed to get peer from console connection: {}", e); return; } }; debug!("Client {:?} connected", peer); let tx = tx.clone(); task::spawn(async move { let (reader, writer) = &mut (&stream, &stream); let reader = io::BufReader::new(reader); let mut lines = reader.lines(); while let Some(Ok(line)) = lines.next().await { let line = line.trim(); tx.send((line.into(), tx_reply.clone())).await; if let Ok(reply) = rx_reply.recv().await { if let Err(e) = writer.write_all(reply.as_bytes()).await { warn!("Error on console connection {:?}: {}", peer, e); break; } } } }); } } }); Ok(rx) } /// List versions of currently known containers and applications. fn versions(state: &mut State) -> Result<String> { let versions = state .applications() .map(|app| app.manifest()) .map(|manifest| { ( manifest.name.clone(), manifest.version.clone(), manifest.arch.clone(), ) }) .collect::<Vec<_>>(); serde_json::to_string(&versions).context("Failed to encode manifest to json") } /// Format something iterateable into a ascii table. The first row of the table input /// contains the column titles. The table cannot be empty. fn to_table<T: iter::IntoIterator<Item = I>, I: iter::IntoIterator<Item = S>, S: ToString>( table: T, ) -> Result<String> { let mut t = Table::new(); let format = prettytable::format::FormatBuilder::new() .column_separator('|') .separators(&[], format::LineSeparator::new('-', '+', '+', '+')) .padding(1, 1) .build(); t.set_format(format); let mut rows = table.into_iter(); let titles = rows.next().ok_or_else(|| anyhow!("Missing titles"))?.into(); t.set_titles(titles); for r in rows { t.add_row(r.into()); } let mut result = vec![]; t.print(&mut result).context("Failed to format table")?; String::from_utf8(result).context("Invalid table content") } fn arg_regex(args: &[&str]) -> Result<regex::Regex> { match args.len() { 1 => regex::Regex::new(args[0]) .with_context(|| format!("Invalid container name regex {}", args[0])), 0 => regex::Regex::new(".*") .with_context(|| format!("Invalid container name regex {}", args[0])), _ => Err(anyhow!("Arguments invalid. Use `start PATTERN`",)), } }
}
random_line_split
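Several commands in the console records share the `arg_regex` convention: no argument falls back to ".*" (match everything), one argument is compiled as a user-supplied pattern. A small sketch of that behavior, assuming the regex crate; the function name is illustrative.

use regex::Regex;

// No argument means "match every container"; one argument is a user pattern.
fn pattern_for(arg: Option<&str>) -> Result<Regex, regex::Error> {
    Regex::new(arg.unwrap_or(".*"))
}

fn main() {
    let all = pattern_for(None).unwrap();
    assert!(all.is_match("hello") && all.is_match("crashing"));

    let some = pattern_for(Some("hel.*")).unwrap();
    assert!(some.is_match("hello"));
    assert!(!some.is_match("crashing")); // unanchored, but "hel" never occurs
}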
basic.rs
let array_slice = &array[1..3]; assert_eq!([2, 3, 4], array_slice); // passing function arguments let mut string = String::new(); take_reference(&string); take_mut_ref(&mut string); string = take_and_give_back_ownership(string); take_ownership(string); } // function arguments fn take_ownership(arg: String) { println!("Taken ownership of `{}`", arg); } fn take_and_give_back_ownership(mut arg: String) -> String { arg.push_str("shesh"); arg } fn take_reference(arg: &String) { println!("Length of string is {}", arg.len()); } fn take_mut_ref(arg: &mut String) { arg.push_str("soos"); } fn first_word_pos(s: &String) -> usize { let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return i; } } s.len() } fn first_word(s: &str) -> &str { // str is the string-slice type let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return &s[0..i]; } } &s[..] } // structs #[derive(Debug)] // for debug printing struct User { username: String, // field email: String, sign_in_count: u64, active: bool, } fn build_user(email: String, username: String) -> User { User { email: email, username: username, active: true, sign_in_count: 1, } } fn code_holder_3() { let mut user1 = User { email: String::from("[email protected]"), username: String::from("someusername123"), active: true, sign_in_count: 69, }; user1.email = String::from("[email protected]"); let user2 = User { email: String::from("[email protected]"), username: String::from("diffrentName"), ..user1 // copies the other values of user1 }; // println!("user is {}", user2); error because User doesn't implement 'std::fmt::Display' println!("user is {:?}", user2); // using output format 'Debug' } // tuple struct struct Color(i32, i32, i32); // is its own type struct Point(i32, i32, i32); struct Rect { width: u32, height: u32, } impl Rect { // method (because it takes self) fn area(&self) -> u32 { self.width * self.height } fn can_hold(&self, other: &Rect) -> bool { self.width > other.width && self.height > other.height } // associated function (because it doesn't take self) -> Rect::square() fn square(size: u32) -> Rect { Rect { width: size, height: size, } } } // Enums enum IpAddrKind { // is a custom data type V4, // variant of enum V6, } struct IpAddrBad { kind: IpAddrKind, address: String, } enum IpAddr { // better way, also different data types possible V4(u8, u8, u8, u8), V6(String), } fn route(ip_kind: IpAddrKind) {} fn code_holder_4() { let four = IpAddrKind::V4; // are of same type let six = IpAddrKind::V6; route(IpAddrKind::V4); route(IpAddrKind::V6); let home = IpAddr::V4(127, 0, 0, 1); let loopback = IpAddr::V6(String::from("::1")); } enum Message { Quit, Move { x: i32, y: i32 }, // struct Write(String), // tuple struct ChangeColor(i32, i32, i32), // tuple struct } impl Message { fn call(&self) { // code } } // option enum CustomOption<T> { // replaces 'null'-value Some(T), None, } fn code_block_5() { let some_number = Some(5); // option let some_string = Some("a string"); let absent_number: Option<i32> = None; } // match: control flow operator #[derive(Debug)] enum UsState { Alabama, Alaska, } enum Coin { Penny, Nickel, Dime, Quarter(UsState), } fn value_in_cents(coin: Coin) -> u8 { match coin { Coin::Penny => 1, Coin::Nickel => 5, Coin::Dime => 10, Coin::Quarter(state) => { println!("State quarter from {:?}!", state); 25 } } } fn plus_one(x: Option<i32>) -> Option<i32> { match x { None => None, Some(i) => Some(i + 1), } } fn matches_are_exhaustive(val: u8) { match val { 1 => println!("one"), 2 =>
println!("two"), 5 => println!("five"), 7 => println!("seven"), _ => (), } } // if let fn if_let() { let some_u8_value = Some(0u8); match some_u8_value { Some(3) => println!("three"), _ => (), } // equivalent to if let Some(3) = some_u8_value { println!("three"); } } // collections fn code_holder_6() { let v: Vec<i32> = Vec::new(); let v = vec![1, 2, 3]; let mut v = Vec::new(); v.push(5); v.push(6); let v = vec![1, 2, 3, 4, 5]; // two ways to access vector let third: &i32 = &v[2]; // panics if fails match v.get(2) { // doesn't panic Some(third) => (), None => (), } // iterating let mut v = vec![100, 32, 57]; for i in &v { println!("{}", i); } for i in &mut v { *i += 50; } // multiple type vector enum SpreadsheetCell { Int(i32), Float(f64), Text(String), } let row = vec![ SpreadsheetCell::Int(3), SpreadsheetCell::Text(String::from("blue")), SpreadsheetCell::Float(10.12), ]; } // strings // str is implemented in the core language and String is in the standard library fn code_holder_7() { let mut s = String::new(); let data = "inital contents"; // implements 'Display' trait let mut s = data.to_string(); s.push_str("bar"); s.push('a'); let s1 = String::from("Hello "); let s2 = String::from("World"); let s3 = s1 + &s2; // s1 was moved! (fn add(self, s: &str) -> String) let s1 = String::from("tic"); let s2 = String::from("tac"); let s3 = String::from("toe"); let s = format!("{}-{}-{}", s1, s2, s3); // you can't index into string, because of ambigueties and other reasons -> be more percise // slices... not so appropriate let hello = "Здравствуйте"; let s = &hello[0..4]; // 4 bytes -> "Зд" // best way: chars for c in "नमस्ते".chars() { println!("{}", c); } } // Hash Maps fn code_holder_8() { use std::collections::HashMap; let mut scores = HashMap::new(); scores.insert(String::from("Blue"), 10); scores.insert(String::from("Yellow"), 50); // morphing collections let teams = vec![String::from("Blue"), String::from("Yellow")]; let inital_scores = vec![10, 50]; let scores: HashMap<_, _> = teams.iter().zip(inital_scores.iter()).collect(); } // errors fn code_holder_9() { // panicing! // If rust panics before it quite it's starts unwinding (stack is cleared up), which takes a lot of time -> alternative abort (in Cargo.toml: panic = 'abort') panic!("crash and burn"); // Result use std::fs::File; use std::io::ErrorKind; use std::io::Read; let f = File::open("hello.txt"); let f = match f { Ok(file) => file, Err(error) => match error.kind() { ErrorKind::NotFound => match File::create("hello.txt") { Ok(fc) => fc, Err(e) => panic!("Problem creating the file: {:?}", e), }, other_error => panic!("Problem opening the file: {:?}", other_error), }, }; let f = File::open("hello.txt").unwrap(); // returns value if okay, panics otherwise let f = File::open("hello.txt").expect_err("Own error message"); // same as unwrap() just with custom error message // propagating error fn read_username_from_file_verbose() -> Result<String, std::io::Error> { // verbose way let f = File::open("hello.txt"); let mut f = match f { Ok(file) => file, Err(e) => return Err(e), }; let mut s = String::new(); match f.read_to_string(&mut s) { Ok(_) => Ok(s), Err(e) => Err(e), } } fn read_username_from_file() -> Result<String, std::io::Error> { // better way with? 
operator let mut f = File::open("hello.txt")?; let mut s = String::new(); f.read_to_string(&mut s)?; // if Ok, the expression has the value; if Err, the function returns with the error Ok(s) } } // generics (similar to C++ typenames/templates) enum own_Result<T, E> { Ok(T), Err(E), } struct Point1<T> { x: T, y: T, } impl<T> Point1<T> { fn x(&self) -> &T { &self.x } } impl Point1<f32> { fn distance_from_origin(&self) -> f32 { (self.x.powi(2) + self.y.powi(2)).sqrt() } } struct Point2<T, U> { x: T, y: U, } impl<T, U> Point2<T, U> { fn mixup<V, W>(self, other: Point2<V, W>) -> Point2<T, W> { Point2 { x: self.x, y: other.y, } } } // traits trait Summarizable { fn summarize_author(&self) -> String; fn summarize(&self) -> String { format!("(Read more from {}...)", self.summarize_author()) } } struct NewsArticle { headline: String, location: String, author: String, content: String, } impl Summarizable for NewsArticle { fn summarize_author(&self) -> String { format!("{}", self.author) } fn summarize(&self) -> String { format!( "{}, by {} ({})", self.headline, self.summarize_author(), self.location ) } } struct Tweet { username: String, content: String, reply: bool, retweet: bool, } impl Summarizable for Tweet { fn summarize_author(&self) -> String { format!("@{}", self.username) } fn summarize(&self) -> String { format!("{}: {}", self.summarize_author(), self.content) } } // traits as parameters / trait bounds fn notify(item: impl Summarizable) { println!("Breaking news! {}", item.summarize()); } // ^ syntactic sugar for: // fn notify<T: Summarizable>(item: T) { // println!("Breaking news! {}", item.summarize()); // } fn notfiy<T: Summarizable + std::fmt::Display>(item1: T) {} // when many traits are used -> prefer 'where'-clauses to not clutter the function definition fn some_function<T, U>(t: T, u: U) -> i32 where T: std::fmt::Display + Clone, U: Clone + std::fmt::Debug, { 4 } fn returns_summarizable() -> impl Summarizable { Tweet { username: String::from("horse_ebooks"), content: String::from("of course, as you probably already know, people"), reply: false, retweet: false, } } fn largest<T: std::cmp::PartialOrd + Copy>(list: &[T]) -> T { let mut largest = list[0]; for &item in list.iter() { if item > largest { largest = item; } } largest } // Trait bounds to conditionally implement methods struct Pair<T> { x: T, y: T, } impl<T> Pair<T> { fn new(x: T, y: T) -> Self { Self { x, y } } } // conditional implementation (only if T implements Display + PartialOrd) impl<T: std::fmt::Display + std::cmp::PartialOrd> Pair<T> { fn cmp_disply(&self) { if self.x >= self.y { println!("The largest member is x = {}", self.x); } else { println!("The largest member is y = {}", self.y); } } } // implement a trait if the type implements another trait --- aka blanket implementations // impl<T: std::fmt::Display> ToString for T { // if T already implements Display, then it also implements ToString // } // lifetimes // lifetimes guarantee that references are still valid when used. // Most of the time they are implicitly inferred. If they can't be, they have to be explicitly specified // &i32; a reference // &'a i32; a reference with the explicit lifetime "'a" // &'a mut i32; a mutable reference with the explicit lifetime "'a" fn longest<'a>(x: &'a str, y: &'a str) -> &'a str { // now the compiler knows how long the return value can live.
(as long as the smaller lifetime of x or y) if x.len() > y.len() { x } else { y } } struct ImportantExcerpt<'a> { part: &'a str, // if a struct holds a reference, an explicit lifetime is required } impl<'a> ImportantExcerpt<'a> { fn level(&self) -> i32 { 3 } } // static lifetimes (references live for the entire duration of the program)... applies to all string literals fn code_holder_10() { let s: &'static str = "I have a static lifetime."; } // all generics together fn longest_with_an_announcement<'a, T>(x: &'a str, y: &'a str, ann: T) -> &'a str where T: std::fmt::Display, { println!("Announcement! {}", ann); if x.len() > y.len() { x } else { y } } // closures fn code_holder_11() { // types are automatically inferred (but can be explicitly specified) let some_closure = |arg| { println!("this is the argument: {}", arg); }; let minimalist_closure = |x| x; // returns its argument some_closure(5); minimalist_closure("lel"); // pattern: memoization / lazy evaluation struct NoArgsCacher<T> where T: Fn() -> u32, { calculation: T, value: Option<u32>, } impl<T> NoArgsCacher<T> where T: Fn() -> u32, { fn new(calculation: T) -> NoArgsCacher<T> { NoArgsCacher { calculation, value: None, } } fn value(&mut self) -> u32 { match self.value { Some(v) => v, None => { let v = (self.calculation)(); self.value = Some(v); v } } } } use std::thread; use std::time::Duration; let mut expensive_result = NoArgsCacher::new(|| { println!("performing expensive calculation..."); thread::sleep(Duration::from_secs(2)); 420 }); // TODO: create a better Cacher with generics and a hash table (args-result pairs) } // iterators // zero-cost abstraction -> very fast, USE THEM! fn code_holder_12() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); for val in v1_iter { println!("Got: {}", val); } pub trait CustomIteratorTrait { type Item; // associated type fn next(&mut self) -> Option<Self::Item>; } #[test] fn iterator_demonstration() { let v1 = vec![1, 2, 3]; let mut v1_iter = v1.iter(); assert_eq!(v1_iter.next(), Some(&1)); assert_eq!(v1_iter.next(), Some(&2)); assert_eq!(v1_iter.next(), Some(&3)); assert_eq!(v1_iter.next(), None); } #[test] fn iterator_sum() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); let total: i32 = v1_iter.sum(); // the iter has been consumed (moved) -> cannot be used any more } #[test] fn iterator_map() { let v1: Vec<i32> = vec![1, 2, 3]; let v2: Vec<_> = v1.iter().map(|x| x + 1).collect(); // collect() must be called because iterators are lazy assert_eq!(v2, vec![2, 3, 4]); } struct Shoe { size: u32, style: String, } fn shoes_in_my_size(shoes: Vec<Shoe>, shoe_size: u32) -> Vec<Shoe> { shoes.into_iter().filter(|s| s.size == shoe_size).collect() } #[test] fn filter_by_size() { let shoes = vec![ Shoe { size: 10, style: String::from("sneaker"), }, Shoe { size: 13, style: String::from("sandal"), }, Shoe { size: 10, style: String::from("boot"), }, ]; let in_my_size = shoes_in_my_size(shoes, 10); assert_eq!( in_my_size, vec![ Shoe { size: 10, style: String::from("sneaker") }, Shoe { size: 10, style: String::from("boot") }, ] ); } // own iterator struct Counter { count: u32, } impl Counter { fn new() -> Counter { Counter { count: 0 } } } impl Iterator for Counter { type Item = u32; fn next(&mut self) -> Option<Self::Item> { self.count += 1; if self.count < 6 { Some(self.count) } else {
} } #[test] fn using_other_iterator_trait_methods() { let sum: u32 = Counter::new() .zip(Counter::new().skip(1)) .map(|(a, b)| a * b) .filter(|x| x % 3 == 0) .sum(); assert_eq!(18, sum); } } // cargo and crates // //! Another documentation style, which is at the top of the page, generally in the crate root // //! re-exports are listed in documentation -> expose them /// Documentation comment (3 slashes) /// will be used to generate HTML documentation (cargo doc --open) -> runs rustdoc /// supports Markdown! /// Some commonly used headings /// # Examples /// # Panics /// # Errors (when it returns Result) /// # Safety (if unsafe to call) /// ``` /// assert_eq!(true, true); /// ``` /// this code example will be run as a test with (cargo test)!!! AWESOME fn documented_function() {} // smart pointers // Vec and String are smart pointers, because they point at data and have some additional metadata // allocate data on the heap // Box<T> for storing data on the heap (no performance overhead) // usages: dynamic memory (unknown size), transferring ownership without copying, a value that implements a trait where the concrete type doesn't matter fn code_holder_13() { let b = Box::new(5); // recursive types and Cons List // enum List { idea // Cons(i32, List), // Nil, // } // let list = Cons(1, Cons(2, Cons(3, Nil))); enum List { Cons(i32, Box<List>), Nil, } use List::Cons; use List::Nil; let list = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil)))))); // dereferencing let x = 5; let y = &x; // assert_eq!(5, y); doesn't compile because different types assert_eq!(5, *y); // dereferenced // deref trait
None }
random_line_split
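The closure section in the record above leaves a TODO for a Cacher that memoizes per-argument results in a hash table. One possible take on that idea, as a sketch; the names here are illustrative, not from the original.

use std::collections::HashMap;
use std::hash::Hash;

// Memoizing wrapper: computes `calculation` once per distinct argument.
struct Cacher<F, A, R>
where
    F: Fn(&A) -> R,
    A: Eq + Hash,
    R: Clone,
{
    calculation: F,
    values: HashMap<A, R>,
}

impl<F, A, R> Cacher<F, A, R>
where
    F: Fn(&A) -> R,
    A: Eq + Hash,
    R: Clone,
{
    fn new(calculation: F) -> Self {
        Cacher { calculation, values: HashMap::new() }
    }

    fn value(&mut self, arg: A) -> R {
        if let Some(v) = self.values.get(&arg) {
            return v.clone(); // cache hit
        }
        let v = (self.calculation)(&arg);
        self.values.insert(arg, v.clone());
        v
    }
}

fn main() {
    let mut square = Cacher::new(|x: &u32| x * x);
    assert_eq!(square.value(3), 9); // computed
    assert_eq!(square.value(3), 9); // served from the cache
}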
basic.rs
let array_slice = &array[1..3]; assert_eq!([2, 3, 4], array_slice); // passing function arguments let mut string = String::new(); take_reference(&string); take_mut_ref(&mut string); string = take_and_give_back_ownership(string); take_ownership(string); } // function arguments fn take_ownership(arg: String) { println!("Taken ownership of `{}`", arg); } fn take_and_give_back_ownership(mut arg: String) -> String { arg.push_str("shesh"); arg } fn take_reference(arg: &String) { println!("Length of string is {}", arg.len()); } fn take_mut_ref(arg: &mut String) { arg.push_str("soos"); } fn first_word_pos(s: &String) -> usize { let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return i; } } s.len() } fn first_word(s: &str) -> &str { // str is the string-slice type let bytes = s.as_bytes(); for (i, &item) in bytes.iter().enumerate() { if item == b' ' { return &s[0..i]; } } &s[..] } // structs #[derive(Debug)] // for debug printing struct User { username: String, // field email: String, sign_in_count: u64, active: bool, } fn build_user(email: String, username: String) -> User { User { email: email, username: username, active: true, sign_in_count: 1, } } fn code_holder_3() { let mut user1 = User { email: String::from("[email protected]"), username: String::from("someusername123"), active: true, sign_in_count: 69, }; user1.email = String::from("[email protected]"); let user2 = User { email: String::from("[email protected]"), username: String::from("diffrentName"), ..user1 // copies the other values of user1 }; // println!("user is {}", user2); error because User doesn't implement 'std::fmt::Display' println!("user is {:?}", user2); // using output format 'Debug' } // tuple struct struct Color(i32, i32, i32); // is its own type struct Point(i32, i32, i32); struct Rect { width: u32, height: u32, } impl Rect { // method (because it takes self) fn area(&self) -> u32 { self.width * self.height } fn can_hold(&self, other: &Rect) -> bool { self.width > other.width && self.height > other.height } // associated function (because it doesn't take self) -> Rect::square() fn square(size: u32) -> Rect { Rect { width: size, height: size, } } } // Enums enum IpAddrKind { // is a custom data type V4, // variant of enum V6, } struct IpAddrBad { kind: IpAddrKind, address: String, } enum IpAddr { // better way, also different data types possible V4(u8, u8, u8, u8), V6(String), } fn route(ip_kind: IpAddrKind) {} fn code_holder_4() { let four = IpAddrKind::V4; // are of same type let six = IpAddrKind::V6; route(IpAddrKind::V4); route(IpAddrKind::V6); let home = IpAddr::V4(127, 0, 0, 1); let loopback = IpAddr::V6(String::from("::1")); } enum Message { Quit, Move { x: i32, y: i32 }, // struct Write(String), // tuple struct ChangeColor(i32, i32, i32), // tuple struct } impl Message { fn call(&self) { // code } } // option enum CustomOption<T> { // replaces 'null'-value Some(T), None, } fn code_block_5() { let some_number = Some(5); // option let some_string = Some("a string"); let absent_number: Option<i32> = None; } // match: control flow operator #[derive(Debug)] enum UsState { Alabama, Alaska, } enum Coin { Penny, Nickel, Dime, Quarter(UsState), } fn value_in_cents(coin: Coin) -> u8 { match coin { Coin::Penny => 1, Coin::Nickel => 5, Coin::Dime => 10, Coin::Quarter(state) => { println!("State quarter from {:?}!", state); 25 } } } fn plus_one(x: Option<i32>) -> Option<i32> { match x { None => None, Some(i) => Some(i + 1), } } fn matches_are_exhaustive(val: u8) { match val { 1 => println!("one"), 2 =>
println!("two"), 5 => println!("five"), 7 => println!("seven"), _ => (), } } // if let fn if_let() { let some_u8_value = Some(0u8); match some_u8_value { Some(3) => println!("three"), _ => (), } // equivalent to if let Some(3) = some_u8_value { println!("three"); } } // collections fn code_holder_6() { let v: Vec<i32> = Vec::new(); let v = vec![1, 2, 3]; let mut v = Vec::new(); v.push(5); v.push(6); let v = vec![1, 2, 3, 4, 5]; // two ways to access vector let third: &i32 = &v[2]; // panics if fails match v.get(2) { // doesn't panic Some(third) => (), None => (), } // iterating let mut v = vec![100, 32, 57]; for i in &v { println!("{}", i); } for i in &mut v { *i += 50; } // multiple type vector enum
{ Int(i32), Float(f64), Text(String), } let row = vec![ SpreadsheetCell::Int(3), SpreadsheetCell::Text(String::from("blue")), SpreadsheetCell::Float(10.12), ]; } // strings // str is implemented in the core language and String is in the standard library fn code_holder_7() { let mut s = String::new(); let data = "initial contents"; // implements 'Display' trait let mut s = data.to_string(); s.push_str("bar"); s.push('a'); let s1 = String::from("Hello "); let s2 = String::from("World"); let s3 = s1 + &s2; // s1 was moved! (fn add(self, s: &str) -> String) let s1 = String::from("tic"); let s2 = String::from("tac"); let s3 = String::from("toe"); let s = format!("{}-{}-{}", s1, s2, s3); // you can't index into a string, because of ambiguities and other reasons -> be more precise // slices... not so appropriate let hello = "Здравствуйте"; let s = &hello[0..4]; // 4 bytes -> "Зд" // best way: chars for c in "नमस्ते".chars() { println!("{}", c); } } // Hash Maps fn code_holder_8() { use std::collections::HashMap; let mut scores = HashMap::new(); scores.insert(String::from("Blue"), 10); scores.insert(String::from("Yellow"), 50); // morphing collections let teams = vec![String::from("Blue"), String::from("Yellow")]; let initial_scores = vec![10, 50]; let scores: HashMap<_, _> = teams.iter().zip(initial_scores.iter()).collect(); } // errors fn code_holder_9() { // panicking! // If Rust panics, before it quits it starts unwinding (the stack is cleaned up), which takes a lot of time -> alternative: abort (in Cargo.toml: panic = 'abort') panic!("crash and burn"); // Result use std::fs::File; use std::io::ErrorKind; use std::io::Read; let f = File::open("hello.txt"); let f = match f { Ok(file) => file, Err(error) => match error.kind() { ErrorKind::NotFound => match File::create("hello.txt") { Ok(fc) => fc, Err(e) => panic!("Problem creating the file: {:?}", e), }, other_error => panic!("Problem opening the file: {:?}", other_error), }, }; let f = File::open("hello.txt").unwrap(); // returns value if okay, panics otherwise let f = File::open("hello.txt").expect("Own error message"); // same as unwrap() just with a custom error message // propagating errors fn read_username_from_file_verbose() -> Result<String, std::io::Error> { // verbose way let f = File::open("hello.txt"); let mut f = match f { Ok(file) => file, Err(e) => return Err(e), }; let mut s = String::new(); match f.read_to_string(&mut s) { Ok(_) => Ok(s), Err(e) => Err(e), } } fn read_username_from_file() -> Result<String, std::io::Error> { // better way with the `?`
operator let mut f = File::open("hello.txt")?; let mut s = String::new(); f.read_to_string(&mut s)?; // if ok expression has value, if Err then function returns with error Ok(s) } } // generics (similar to C++ typenames/templates) enum own_Result<T, E> { Ok(T), Err(E), } struct Point1<T> { x: T, y: T, } impl<T> Point1<T> { fn x(&self) -> &T { &self.x } } impl Point1<f32> { fn distance_from_origin(&self) -> f32 { (self.x.powi(2) + self.y.powi(2)).sqrt() } } struct Point2<T, U> { x: T, y: U, } impl<T, U> Point2<T, U> { fn mixup<V, W>(self, other: Point2<V, W>) -> Point2<T, W> { Point2 { x: self.x, y: other.y, } } } // traits trait Summarizable { fn summarize_author(&self) -> String; fn summarize(&self) -> String { format!("(Read more from {}...)", self.summarize_author()) } } struct NewsArticle { headline: String, location: String, author: String, content: String, } impl Summarizable for NewsArticle { fn summarize_author(&self) -> String { format!("{}", self.author) } fn summarize(&self) -> String { format!( "{}, by {} ({})", self.headline, self.summarize_author(), self.location ) } } struct Tweet { username: String, content: String, reply: bool, retweet: bool, } impl Summarizable for Tweet { fn summarize_author(&self) -> String { format!("@{}", self.username) } fn summarize(&self) -> String { format!("{}: {}", self.summarize_author(), self.content) } } // traits as parameters/ Trait bounds fn notify(item: impl Summarizable) { println!("Breaking news! {}", item.summarize()); } // ^ syntactic sugar for: // fn notify<T: Summarizable>(item: T) { // println!("Breaking news! {}", item.summarize()); // } fn notfiy<T: Summarizable + std::fmt::Display>(item1: T) {} // when many traits are used -> prefer 'where'-clauses to not clutter the funciton definition fn some_function<T, U>(t: T, u: U) -> i32 where T: std::fmt::Display + Clone, U: Clone + std::fmt::Debug, { 4 } fn returns_summarizable() -> impl Summarizable { Tweet { username: String::from("horse_ebooks"), content: String::from("of cource, as you probablay already know people"), reply: false, retweet: false, } } fn largest<T: std::cmp::PartialOrd + Copy>(list: &[T]) -> T { let mut largest = list[0]; for &item in list.iter() { if item > largest { largest = item; } } largest } // Trait bounds to conditionally implement Methods struct Pair<T> { x: T, y: T, } impl<T> Pair<T> { fn new(x: T, y: T) -> Self { Self { x, y } } } // conditional implementation (only if traits are Display + PartialOrd) impl<T: std::fmt::Display + std::cmp::PartialOrd> Pair<T> { fn cmp_disply(&self) { if self.x >= self.y { println!("The largest member is x = {}", self.x); } else { println!("The largest member is y = {}", self.y); } } } // implement a trait if the type implements another train --- alias blanket implementations // impl<T: std::fmt::Display> ToString for T { // if T already implements Display, than it also implements ToString // } // lifetimes // lifetimes gurantee, that references are still valid, when used. // Most of the time they are implicitly inferred. If they can't, they have to be explicitly specified // &i32; a reference // &'a i32; a reference with the explicit lifetime "'a" // &'a mut i32; a mutable reference with the explicit lifetime "'a" fn longest<'a>(x: &'a str, y: &'a str) -> &'a str { // now the compiler knows, how long the return value can live. 
(as long as the smaller lifetime of x or y) if x.len() > y.len() { x } else { y } } struct ImportantExcerpt<'a> { part: &'a str, // if struct holds reference, a explicit lifetime is required } impl<'a> ImportantExcerpt<'a> { fn level(&self) -> i32 { 3 } } // static lifetimes (references live for entire duration of program)... applies to all string ltierals fn code_holder_10() { let s: &'static str = "I have a static lifetime."; } // all generics together fn longest_with_an_announcement<'a, T>(x: &'a str, y: &'a str, ann: T) -> &'a str where T: std::fmt::Display, { println!("Announcement! {}", ann); if x.len() > y.len() { x } else { y } } // closures fn code_holder_11() { // types are automatically inferred (but can be explicitly specified) let some_closure = |arg| { println!("this is the argument: {}", arg); }; let minimalist_closure = |x| x; // returns itself some_closure(5); minimalist_closure("lel"); // pattern: memorization / lazy evaluation struct NoArgsCacher<T> where T: Fn() -> u32, { calculation: T, value: Option<u32>, } impl<T> NoArgsCacher<T> where T: Fn() -> u32, { fn new(calculation: T) -> NoArgsCacher<T> { NoArgsCacher { calculation, value: None, } } fn value(&mut self) -> u32 { match self.value { Some(v) => v, None => { let v = (self.calculation)(); self.value = Some(v); v } } } } use std::thread; use std::time::Duration; let mut expensive_result = NoArgsCacher::new(|| { println!("performing expensive calculation..."); thread::sleep(Duration::from_secs(2)); 420 }); // TODO: create better Cacher with generics and a hash-table (args-result pairs) } // iterators // zero-cost abstraction -> are very fast USE THEM! fn code_holder_12() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); for val in v1_iter { println!("Got: {}", val); } pub trait CustomIteratorTrait { type Item; // associated type fn next(&mut self) -> Option<Self::Item>; } #[test] fn iterator_demonstration() { let v1 = vec![1, 2, 3]; let mut v1_iter = v1.iter(); assert_eq!(v1_iter.next(), Some(&1)); assert_eq!(v1_iter.next(), Some(&2)); assert_eq!(v1_iter.next(), Some(&3)); assert_eq!(v1_iter.next(), None); } #[test] fn iterator_sum() { let v1 = vec![1, 2, 3]; let v1_iter = v1.iter(); let total: i32 = v1_iter.sum(); // iter has been consumed (moved) -> cannot be moved any more } #[test] fn iterator_map() { let v1: Vec<i32> = vec![1, 2, 3]; let v2: Vec<_> = v1.iter().map(|x| x + 1).collect(); // collect() must be called because iterators are lazy assert_eq!(v2, vec![2, 3, 4]); } struct Shoe { size: u32, style: String, } fn shoes_in_my_size(shoes: Vec<Shoe>, shoe_size: u32) -> Vec<Shoe> { shoes.into_iter().filter(|s| s.size == shoe_size).collect() } #[test] fn filter_by_size() { let shoes = vec![ Shoe { size: 10, style: String::from("sneaker"), }, Shoe { size: 13, style: String::from("sandal"), }, Shoe { size: 10, style: String::from("boot"), }, ]; let in_my_size = shoes_in_my_size(shoes, 10); assert_eq!( in_my_size, vec![ Shoe { size: 10, style: String::from("sneaker") }, Shoe { size: 10, style: String::from("boot") }, ] ); } // own iterator struct Counter { count: u32, } impl Counter { fn new() -> Counter { Counter { count: 0 } } } impl Iterator for Counter { type Item = u32; fn next(&mut self) -> Option<Self::Item> { self.count += 1; if self.count < 6 { Some(self.count) } else { None } } } #[test] fn using_other_iterator_trait_methods() { let sum: u32 = Counter::new() .zip(Counter::new().skip(1)) .map(|(a, b)| a * b) .filter(|x| x % 3 == 0) .sum(); assert_eq!(18, sum); } } // cargo and creates // //! 
Another documentation style, which is at the top of the page, generally in the crate root // //! re-exports are listed in documentation -> expose them /// Documentation comment (3 slashes) /// will be used to generate HTML documentation (cargo doc --open) -> runs rustdoc /// supports Markdown! /// Some commonly used headings /// # Examples /// # Panics /// # Errors (when it returns Result) /// # Safety (if unsafe to call) /// ``` /// assert_eq!(true, true); /// ``` /// this code example will be run as a test with (cargo test)!!! AWESOME fn documented_function() {} // smart pointers // Vec and String are smart pointers, because they point at data and have some additional metadata // allocate data on the heap // Box<T> for storing data on the heap (no performance overhead) // usages: dynamic memory (unknown size), transferring ownership without copying, a value that implements a trait where the concrete type doesn't matter fn code_holder_13() { let b = Box::new(5); // recursive types and Cons List // enum List { idea // Cons(i32, List), // Nil, // } // let list = Cons(1, Cons(2, Cons(3, Nil))); enum List { Cons(i32, Box<List>), Nil, } use List::Cons; use List::Nil; let list = Cons(1, Box::new(Cons(2, Box::new(Cons(3, Box::new(Nil)))))); // dereferencing let x = 5; let y = &x; // assert_eq!(5, y); doesn't compile because different types assert_eq!(5, *y); // dereferenced // deref
SpreadsheetCell
identifier_name
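Both copies of the smart-pointer section break off around the deref discussion. The conventional continuation of that material (the Rust book's MyBox pattern) implements the Deref trait so `*` works on a custom wrapper; a sketch, with MyBox as an illustrative name:

use std::ops::Deref;

struct MyBox<T>(T);

impl<T> MyBox<T> {
    fn new(x: T) -> MyBox<T> {
        MyBox(x)
    }
}

impl<T> Deref for MyBox<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &self.0 // expose the wrapped value as a reference
    }
}

fn main() {
    let y = MyBox::new(5);
    assert_eq!(5, *y); // *y desugars to *(y.deref())
}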
completion.rs
use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::Arc}; use floem::{ peniko::kurbo::Rect, reactive::{ReadSignal, RwSignal, Scope}, }; use lapce_core::{buffer::rope_text::RopeText, movement::Movement}; use lapce_rpc::{plugin::PluginId, proxy::ProxyRpcHandler}; use lsp_types::{ CompletionItem, CompletionResponse, CompletionTextEdit, InsertTextFormat, Position, }; use nucleo::Utf32Str; use crate::{ config::LapceConfig, doc::Document, editor::view_data::EditorViewData, id::EditorId, snippet::Snippet, }; #[derive(Clone, Copy, PartialEq, Eq)] pub enum CompletionStatus { Inactive, Started, Done, } #[derive(Clone, PartialEq)] pub struct ScoredCompletionItem { pub item: CompletionItem, pub plugin_id: PluginId, pub score: u32, pub label_score: u32, pub indices: Vec<usize>, } #[derive(Clone)] pub struct CompletionData { pub status: CompletionStatus, /// The current request id. This is used to discard old requests. pub request_id: usize, /// An input id that is used for keeping track of whether the input has changed. pub input_id: usize, // TODO: A `PathBuf` has the issue that the proxy may not have the same format. // TODO(minor): It might be nice to not require a path. LSPs cannot operate on scratch buffers // as of now, but they might be allowed in the future. pub path: PathBuf, /// The offset that the completion is/was started at. Used for positioning the completion elem pub offset: usize, /// The active completion index in the list of filtered items pub active: RwSignal<usize>, /// The current input that the user has typed which is being sent for consideration by the LSP pub input: String, /// `(Input, CompletionItems)` pub input_items: im::HashMap<String, im::Vector<ScoredCompletionItem>>, /// The filtered items that are being displayed to the user pub filtered_items: im::Vector<ScoredCompletionItem>, /// The size of the completion element. /// This is used for positioning the element. /// As well, it is needed for some movement commands like page up/down that need to know the /// height to compute how far to move. pub layout_rect: Rect, /// The editor id that was most recently used to trigger a completion. pub latest_editor_id: Option<EditorId>, /// Matcher for filtering the completion items matcher: RwSignal<nucleo::Matcher>, config: ReadSignal<Arc<LapceConfig>>, } impl CompletionData { pub fn new(cx: Scope, config: ReadSignal<Arc<LapceConfig>>) -> Self { let active = cx.create_rw_signal(0); Self { status: CompletionStatus::Inactive, request_id: 0, input_id: 0, path: PathBuf::new(), offset: 0, active, input: "".to_string(), input_items: im::HashMap::new(), filtered_items: im::Vector::new(), layout_rect: Rect::ZERO, matcher: cx .create_rw_signal(nucleo::Matcher::new(nucleo::Config::DEFAULT)), latest_editor_id: None, config, } } /// Handle the response to a completion request. pub fn receive( &mut self, request_id: usize, input: &str, resp: &CompletionResponse, plugin_id: PluginId, ) { // If we've been canceled or the request id is old, ignore the response. if self.status == CompletionStatus::Inactive || self.request_id!= request_id { return; } let items = match resp { CompletionResponse::Array(items) => items, // TODO: Possibly handle the 'is_incomplete' field on List. 
CompletionResponse::List(list) => &list.items, }; let items: im::Vector<ScoredCompletionItem> = items .iter() .map(|i| ScoredCompletionItem { item: i.to_owned(), plugin_id, score: 0, label_score: 0, indices: Vec::new(), }) .collect(); self.input_items.insert(input.to_string(), items); self.filter_items(); } /// Request for completion items wit the current request id. pub fn request( &mut self, editor_id: EditorId, proxy_rpc: &ProxyRpcHandler, path: PathBuf, input: String, position: Position, ) { self.latest_editor_id = Some(editor_id); self.input_items.insert(input.clone(), im::Vector::new()); proxy_rpc.completion(self.request_id, path, input, position); } /// Close the completion, clearing all the data. pub fn cancel(&mut self) { if self.status == CompletionStatus::Inactive { return; } self.status = CompletionStatus::Inactive; self.input_id = 0; self.latest_editor_id = None; self.active.set(0); self.input.clear(); self.input_items.clear(); self.filtered_items.clear(); } pub fn update_input(&mut self, input: String) { if self.status == CompletionStatus::Inactive { return; } self.input = input; // TODO: If the user types a letter that continues the current active item, we should // try keeping that item active. Possibly give this a setting. // ex: `p` has `print!` and `println!` has options. If you select the second, then type // `r` then it should stay on `println!` even as the overall filtering of the list changes. self.active.set(0); self.filter_items(); } fn all_items(&self) -> im::Vector<ScoredCompletionItem> { self.input_items .get(&self.input) .cloned() .filter(|items|!items.is_empty()) .unwrap_or_else(move || { self.input_items.get("").cloned().unwrap_or_default() }) } pub fn filter_items(&mut self) { self.input_id += 1; if self.input.is_empty() { self.filtered_items = self.all_items(); return; } // Filter the items by the fuzzy matching with the input text. let mut items: im::Vector<ScoredCompletionItem> = self .matcher .try_update(|matcher| { let pattern = nucleo::pattern::Pattern::parse( &self.input, nucleo::pattern::CaseMatching::Ignore, ); self.all_items() .iter() .filter_map(|i| { let filter_text = i.item.filter_text.as_ref().unwrap_or(&i.item.label); let shift = i .item .label .match_indices(filter_text) .next() .map(|(shift, _)| shift) .unwrap_or(0); let mut indices = Vec::new(); let mut filter_text_buf = Vec::new(); let filter_text = Utf32Str::new(filter_text, &mut filter_text_buf); if let Some(score) = pattern.indices(filter_text, matcher, &mut indices) { if shift > 0 { for idx in indices.iter_mut() { *idx += shift as u32; } } let mut item = i.clone(); item.score = score; item.label_score = score; item.indices = indices.into_iter().map(|i| i as usize).collect(); let mut label_buf = Vec::new(); let label_text = Utf32Str::new(&i.item.label, &mut label_buf); if let Some(score) = pattern.score(label_text, matcher) { item.label_score = score; } Some(item) } else { None } }) .collect() }) .unwrap(); // Sort all the items by their score, then their label score, then their length. items.sort_by(|a, b| { b.score .cmp(&a.score) .then_with(|| b.label_score.cmp(&a.label_score)) .then_with(|| a.item.label.len().cmp(&b.item.label.len())) }); self.filtered_items = items; } /// Move down in the list of items. pub fn next(&mut self) { let active = self.active.get_untracked(); let new = Movement::Down.update_index(active, self.filtered_items.len(), 1, true); self.active.set(new); } /// Move up in the list of items. 
pub fn previous(&mut self) { let active = self.active.get_untracked(); let new = Movement::Up.update_index(active, self.filtered_items.len(), 1, true); self.active.set(new); } /// The number of items that can be displayed in the current layout. fn display_count(&self) -> usize { let config = self.config.get_untracked(); ((self.layout_rect.size().height / config.editor.line_height() as f64) .floor() as usize) .saturating_sub(1) } /// Move to the next page of items. pub fn next_page(&mut self) { let count = self.display_count(); let active = self.active.get_untracked(); let new = Movement::Down.update_index( active, self.filtered_items.len(), count, false, ); self.active.set(new); } /// Move to the previous page of items. pub fn
(&mut self) { let count = self.display_count(); let active = self.active.get_untracked(); let new = Movement::Up.update_index( active, self.filtered_items.len(), count, false, ); self.active.set(new); } /// The currently selected/active item. pub fn current_item(&self) -> Option<&ScoredCompletionItem> { self.filtered_items.get(self.active.get_untracked()) } /// Update the completion lens of the document with the active completion item. pub fn update_document_completion( &self, view: &EditorViewData, cursor_offset: usize, ) { let doc = view.doc; if!doc.with_untracked(|doc| doc.content.is_file()) { return; } let config = self.config.get_untracked(); if!config.editor.enable_completion_lens { clear_completion_lens(doc); return; } let completion_lens = doc.with_untracked(|doc| { completion_lens_text( view.rope_text(), cursor_offset, self, doc.completion_lens(), ) }); match completion_lens { Some(Some(lens)) => { let offset = self.offset + self.input.len(); // TODO: will need to be adjusted to use visual line. // Could just store the offset in doc. let (line, col) = view.offset_to_line_col(offset); doc.update(|doc| { doc.set_completion_lens(lens, line, col); }); } // Unchanged Some(None) => {} None => { clear_completion_lens(doc); } } } } /// Clear the current completion lens. Only `update`s if there is a completion lens. pub fn clear_completion_lens(doc: RwSignal<Document>) { let has_completion = doc.with_untracked(|doc| doc.completion_lens().is_some()); if has_completion { doc.update(|doc| { doc.clear_completion_lens(); }); } } /// Get the text of the completion lens for the given completion item. /// Returns `None` if the completion lens should be hidden. /// Returns `Some(None)` if the completion lens should be shown, but not changed. /// Returns `Some(Some(text))` if the completion lens should be shown and changed to the given text. fn completion_lens_text( rope_text: impl RopeText, cursor_offset: usize, completion: &CompletionData, current_completion: Option<&str>, ) -> Option<Option<String>> { let item = &completion.current_item()?.item; let item: Cow<str> = if let Some(edit) = &item.text_edit { // A text edit is used, because that is what will actually be inserted. let text_format = item .insert_text_format .unwrap_or(InsertTextFormat::PLAIN_TEXT); // We don't display insert and replace let CompletionTextEdit::Edit(edit) = edit else { return None; }; // The completion offset can be different from the current cursor offset. let completion_offset = completion.offset; let start_offset = rope_text.prev_code_boundary(cursor_offset); let edit_start = rope_text.offset_of_position(&edit.range.start); // If the start of the edit isn't where the cursor currently is, // and it is not at the start of the completion, then we ignore it. // This captures most cases that we want, even if it skips over some // displayable edits. if start_offset!= edit_start && completion_offset!= edit_start { return None; } match text_format { InsertTextFormat::PLAIN_TEXT => { // This is not entirely correct because it assumes that the position is // `{start,end}_offset` when it may not necessarily be. Cow::Borrowed(&edit.new_text) } InsertTextFormat::SNIPPET => { // Parse the snippet. Bail if it's invalid. let snippet = Snippet::from_str(&edit.new_text).ok()?; let text = snippet.text(); Cow::Owned(text) } _ => { // We don't know how to support this text format. return None; } } } else { // There's no specific text edit, so we just use the label. 
Cow::Borrowed(&item.label) }; // We strip the prefix of the current input from the label. // So that, for example, `p` with a completion of `println` only sets the lens text to `rintln`. // If the text does not include a prefix in the expected position, then we do not display it. let item = item.as_ref().strip_prefix(&completion.input)?; // Get only the first line of text, because Lapce does not currently support // multi-line phantom text. let item = item.lines().next().unwrap_or(item); if Some(item) == current_completion { // If the item is the same as the current completion, then we don't display it. Some(None) } else { Some(Some(item.to_string())) } }
previous_page
identifier_name
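// Sketch (not part of the record above): a minimal restatement of the sort
// order used by `filter_items` in the completion code — higher fuzzy score
// first, then higher label score, then shorter label. `Scored` is a
// hypothetical stand-in for `ScoredCompletionItem`, declared only so the
// sketch compiles on its own.
struct Scored {
    score: u32,
    label_score: u32,
    label: String,
}

fn sort_scored(items: &mut [Scored]) {
    items.sort_by(|a, b| {
        b.score
            .cmp(&a.score)
            .then_with(|| b.label_score.cmp(&a.label_score))
            .then_with(|| a.label.len().cmp(&b.label.len()))
    });
}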
completion.rs
use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::Arc}; use floem::{ peniko::kurbo::Rect, reactive::{ReadSignal, RwSignal, Scope}, }; use lapce_core::{buffer::rope_text::RopeText, movement::Movement}; use lapce_rpc::{plugin::PluginId, proxy::ProxyRpcHandler}; use lsp_types::{ CompletionItem, CompletionResponse, CompletionTextEdit, InsertTextFormat, Position, }; use nucleo::Utf32Str; use crate::{ config::LapceConfig, doc::Document, editor::view_data::EditorViewData, id::EditorId, snippet::Snippet, }; #[derive(Clone, Copy, PartialEq, Eq)] pub enum CompletionStatus { Inactive, Started, Done, } #[derive(Clone, PartialEq)] pub struct ScoredCompletionItem { pub item: CompletionItem, pub plugin_id: PluginId, pub score: u32, pub label_score: u32, pub indices: Vec<usize>, } #[derive(Clone)] pub struct CompletionData { pub status: CompletionStatus, /// The current request id. This is used to discard old requests. pub request_id: usize, /// An input id that is used for keeping track of whether the input has changed. pub input_id: usize, // TODO: A `PathBuf` has the issue that the proxy may not have the same format. // TODO(minor): It might be nice to not require a path. LSPs cannot operate on scratch buffers // as of now, but they might be allowed in the future. pub path: PathBuf, /// The offset that the completion is/was started at. Used for positioning the completion elem pub offset: usize, /// The active completion index in the list of filtered items pub active: RwSignal<usize>, /// The current input that the user has typed which is being sent for consideration by the LSP pub input: String, /// `(Input, CompletionItems)` pub input_items: im::HashMap<String, im::Vector<ScoredCompletionItem>>, /// The filtered items that are being displayed to the user pub filtered_items: im::Vector<ScoredCompletionItem>, /// The size of the completion element. /// This is used for positioning the element. /// As well, it is needed for some movement commands like page up/down that need to know the /// height to compute how far to move. pub layout_rect: Rect, /// The editor id that was most recently used to trigger a completion. pub latest_editor_id: Option<EditorId>, /// Matcher for filtering the completion items matcher: RwSignal<nucleo::Matcher>, config: ReadSignal<Arc<LapceConfig>>, } impl CompletionData { pub fn new(cx: Scope, config: ReadSignal<Arc<LapceConfig>>) -> Self { let active = cx.create_rw_signal(0); Self { status: CompletionStatus::Inactive, request_id: 0, input_id: 0, path: PathBuf::new(), offset: 0, active, input: "".to_string(), input_items: im::HashMap::new(), filtered_items: im::Vector::new(), layout_rect: Rect::ZERO, matcher: cx .create_rw_signal(nucleo::Matcher::new(nucleo::Config::DEFAULT)), latest_editor_id: None, config, } } /// Handle the response to a completion request. pub fn receive( &mut self, request_id: usize, input: &str, resp: &CompletionResponse, plugin_id: PluginId, ) { // If we've been canceled or the request id is old, ignore the response. if self.status == CompletionStatus::Inactive || self.request_id!= request_id { return; } let items = match resp { CompletionResponse::Array(items) => items,
// TODO: Possibly handle the 'is_incomplete' field on List. CompletionResponse::List(list) => &list.items, }; let items: im::Vector<ScoredCompletionItem> = items .iter() .map(|i| ScoredCompletionItem { item: i.to_owned(), plugin_id, score: 0, label_score: 0, indices: Vec::new(), }) .collect(); self.input_items.insert(input.to_string(), items); self.filter_items(); } /// Request for completion items wit the current request id. pub fn request( &mut self, editor_id: EditorId, proxy_rpc: &ProxyRpcHandler, path: PathBuf, input: String, position: Position, ) { self.latest_editor_id = Some(editor_id); self.input_items.insert(input.clone(), im::Vector::new()); proxy_rpc.completion(self.request_id, path, input, position); } /// Close the completion, clearing all the data. pub fn cancel(&mut self) { if self.status == CompletionStatus::Inactive { return; } self.status = CompletionStatus::Inactive; self.input_id = 0; self.latest_editor_id = None; self.active.set(0); self.input.clear(); self.input_items.clear(); self.filtered_items.clear(); } pub fn update_input(&mut self, input: String) { if self.status == CompletionStatus::Inactive { return; } self.input = input; // TODO: If the user types a letter that continues the current active item, we should // try keeping that item active. Possibly give this a setting. // ex: `p` has `print!` and `println!` has options. If you select the second, then type // `r` then it should stay on `println!` even as the overall filtering of the list changes. self.active.set(0); self.filter_items(); } fn all_items(&self) -> im::Vector<ScoredCompletionItem> { self.input_items .get(&self.input) .cloned() .filter(|items|!items.is_empty()) .unwrap_or_else(move || { self.input_items.get("").cloned().unwrap_or_default() }) } pub fn filter_items(&mut self) { self.input_id += 1; if self.input.is_empty() { self.filtered_items = self.all_items(); return; } // Filter the items by the fuzzy matching with the input text. let mut items: im::Vector<ScoredCompletionItem> = self .matcher .try_update(|matcher| { let pattern = nucleo::pattern::Pattern::parse( &self.input, nucleo::pattern::CaseMatching::Ignore, ); self.all_items() .iter() .filter_map(|i| { let filter_text = i.item.filter_text.as_ref().unwrap_or(&i.item.label); let shift = i .item .label .match_indices(filter_text) .next() .map(|(shift, _)| shift) .unwrap_or(0); let mut indices = Vec::new(); let mut filter_text_buf = Vec::new(); let filter_text = Utf32Str::new(filter_text, &mut filter_text_buf); if let Some(score) = pattern.indices(filter_text, matcher, &mut indices) { if shift > 0 { for idx in indices.iter_mut() { *idx += shift as u32; } } let mut item = i.clone(); item.score = score; item.label_score = score; item.indices = indices.into_iter().map(|i| i as usize).collect(); let mut label_buf = Vec::new(); let label_text = Utf32Str::new(&i.item.label, &mut label_buf); if let Some(score) = pattern.score(label_text, matcher) { item.label_score = score; } Some(item) } else { None } }) .collect() }) .unwrap(); // Sort all the items by their score, then their label score, then their length. items.sort_by(|a, b| { b.score .cmp(&a.score) .then_with(|| b.label_score.cmp(&a.label_score)) .then_with(|| a.item.label.len().cmp(&b.item.label.len())) }); self.filtered_items = items; } /// Move down in the list of items. pub fn next(&mut self) { let active = self.active.get_untracked(); let new = Movement::Down.update_index(active, self.filtered_items.len(), 1, true); self.active.set(new); } /// Move up in the list of items. 
pub fn previous(&mut self) { let active = self.active.get_untracked(); let new = Movement::Up.update_index(active, self.filtered_items.len(), 1, true); self.active.set(new); } /// The amount of items that can be displayed in the current layout. fn display_count(&self) -> usize { let config = self.config.get_untracked(); ((self.layout_rect.size().height / config.editor.line_height() as f64) .floor() as usize) .saturating_sub(1) } /// Move to the next page of items. pub fn next_page(&mut self) { let count = self.display_count(); let active = self.active.get_untracked(); let new = Movement::Down.update_index( active, self.filtered_items.len(), count, false, ); self.active.set(new); } /// Move to the previous page of items. pub fn previous_page(&mut self) { let count = self.display_count(); let active = self.active.get_untracked(); let new = Movement::Up.update_index( active, self.filtered_items.len(), count, false, ); self.active.set(new); } /// The currently selected/active item. pub fn current_item(&self) -> Option<&ScoredCompletionItem> { self.filtered_items.get(self.active.get_untracked()) } /// Update the completion lens of the document with the active completion item. pub fn update_document_completion( &self, view: &EditorViewData, cursor_offset: usize, ) { let doc = view.doc; if!doc.with_untracked(|doc| doc.content.is_file()) { return; } let config = self.config.get_untracked(); if!config.editor.enable_completion_lens { clear_completion_lens(doc); return; } let completion_lens = doc.with_untracked(|doc| { completion_lens_text( view.rope_text(), cursor_offset, self, doc.completion_lens(), ) }); match completion_lens { Some(Some(lens)) => { let offset = self.offset + self.input.len(); // TODO: will need to be adjusted to use visual line. // Could just store the offset in doc. let (line, col) = view.offset_to_line_col(offset); doc.update(|doc| { doc.set_completion_lens(lens, line, col); }); } // Unchanged Some(None) => {} None => { clear_completion_lens(doc); } } } } /// Clear the current completion lens. Only `update`s if there is a completion lens. pub fn clear_completion_lens(doc: RwSignal<Document>) { let has_completion = doc.with_untracked(|doc| doc.completion_lens().is_some()); if has_completion { doc.update(|doc| { doc.clear_completion_lens(); }); } } /// Get the text of the completion lens for the given completion item. /// Returns `None` if the completion lens should be hidden. /// Returns `Some(None)` if the completion lens should be shown, but not changed. /// Returns `Some(Some(text))` if the completion lens should be shown and changed to the given text. fn completion_lens_text( rope_text: impl RopeText, cursor_offset: usize, completion: &CompletionData, current_completion: Option<&str>, ) -> Option<Option<String>> { let item = &completion.current_item()?.item; let item: Cow<str> = if let Some(edit) = &item.text_edit { // A text edit is used, because that is what will actually be inserted. let text_format = item .insert_text_format .unwrap_or(InsertTextFormat::PLAIN_TEXT); // We don't display insert and replace let CompletionTextEdit::Edit(edit) = edit else { return None; }; // The completion offset can be different from the current cursor offset. let completion_offset = completion.offset; let start_offset = rope_text.prev_code_boundary(cursor_offset); let edit_start = rope_text.offset_of_position(&edit.range.start); // If the start of the edit isn't where the cursor currently is, // and it is not at the start of the completion, then we ignore it. 
// This captures most cases that we want, even if it skips over some // displayable edits. if start_offset!= edit_start && completion_offset!= edit_start { return None; } match text_format { InsertTextFormat::PLAIN_TEXT => { // This is not entirely correct because it assumes that the position is // `{start,end}_offset` when it may not necessarily be. Cow::Borrowed(&edit.new_text) } InsertTextFormat::SNIPPET => { // Parse the snippet. Bail if it's invalid. let snippet = Snippet::from_str(&edit.new_text).ok()?; let text = snippet.text(); Cow::Owned(text) } _ => { // We don't know how to support this text format. return None; } } } else { // There's no specific text edit, so we just use the label. Cow::Borrowed(&item.label) }; // We strip the prefix of the current input from the label. // So that, for example, `p` with a completion of `println` only sets the lens text to `rintln`. // If the text does not include a prefix in the expected position, then we do not display it. let item = item.as_ref().strip_prefix(&completion.input)?; // Get only the first line of text, because Lapce does not currently support // multi-line phantom text. let item = item.lines().next().unwrap_or(item); if Some(item) == current_completion { // If the item is the same as the current completion, then we don't display it. Some(None) } else { Some(Some(item.to_string())) } }
random_line_split
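// Sketch of the tri-state return convention documented on
// `completion_lens_text` above: `None` hides the lens, `Some(None)` keeps
// the current lens unchanged, `Some(Some(text))` replaces it with `text`.
// `lens_update` is a hypothetical reduction of that logic to just the
// prefix-stripping and first-line steps.
fn lens_update(
    candidate: &str,
    input: &str,
    current: Option<&str>,
) -> Option<Option<String>> {
    // Hide the lens if the candidate does not start with the typed input.
    let rest = candidate.strip_prefix(input)?;
    // Phantom text is single-line, so keep only the first line.
    let rest = rest.lines().next().unwrap_or(rest);
    if Some(rest) == current {
        Some(None) // already shown; leave it unchanged
    } else {
        Some(Some(rest.to_string()))
    }
}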
completion.rs
use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::Arc}; use floem::{ peniko::kurbo::Rect, reactive::{ReadSignal, RwSignal, Scope}, }; use lapce_core::{buffer::rope_text::RopeText, movement::Movement}; use lapce_rpc::{plugin::PluginId, proxy::ProxyRpcHandler}; use lsp_types::{ CompletionItem, CompletionResponse, CompletionTextEdit, InsertTextFormat, Position, }; use nucleo::Utf32Str; use crate::{ config::LapceConfig, doc::Document, editor::view_data::EditorViewData, id::EditorId, snippet::Snippet, }; #[derive(Clone, Copy, PartialEq, Eq)] pub enum CompletionStatus { Inactive, Started, Done, } #[derive(Clone, PartialEq)] pub struct ScoredCompletionItem { pub item: CompletionItem, pub plugin_id: PluginId, pub score: u32, pub label_score: u32, pub indices: Vec<usize>, } #[derive(Clone)] pub struct CompletionData { pub status: CompletionStatus, /// The current request id. This is used to discard old requests. pub request_id: usize, /// An input id that is used for keeping track of whether the input has changed. pub input_id: usize, // TODO: A `PathBuf` has the issue that the proxy may not have the same format. // TODO(minor): It might be nice to not require a path. LSPs cannot operate on scratch buffers // as of now, but they might be allowed in the future. pub path: PathBuf, /// The offset that the completion is/was started at. Used for positioning the completion elem pub offset: usize, /// The active completion index in the list of filtered items pub active: RwSignal<usize>, /// The current input that the user has typed which is being sent for consideration by the LSP pub input: String, /// `(Input, CompletionItems)` pub input_items: im::HashMap<String, im::Vector<ScoredCompletionItem>>, /// The filtered items that are being displayed to the user pub filtered_items: im::Vector<ScoredCompletionItem>, /// The size of the completion element. /// This is used for positioning the element. /// As well, it is needed for some movement commands like page up/down that need to know the /// height to compute how far to move. pub layout_rect: Rect, /// The editor id that was most recently used to trigger a completion. pub latest_editor_id: Option<EditorId>, /// Matcher for filtering the completion items matcher: RwSignal<nucleo::Matcher>, config: ReadSignal<Arc<LapceConfig>>, } impl CompletionData { pub fn new(cx: Scope, config: ReadSignal<Arc<LapceConfig>>) -> Self { let active = cx.create_rw_signal(0); Self { status: CompletionStatus::Inactive, request_id: 0, input_id: 0, path: PathBuf::new(), offset: 0, active, input: "".to_string(), input_items: im::HashMap::new(), filtered_items: im::Vector::new(), layout_rect: Rect::ZERO, matcher: cx .create_rw_signal(nucleo::Matcher::new(nucleo::Config::DEFAULT)), latest_editor_id: None, config, } } /// Handle the response to a completion request. pub fn receive( &mut self, request_id: usize, input: &str, resp: &CompletionResponse, plugin_id: PluginId, ) { // If we've been canceled or the request id is old, ignore the response. if self.status == CompletionStatus::Inactive || self.request_id!= request_id { return; } let items = match resp { CompletionResponse::Array(items) => items, // TODO: Possibly handle the 'is_incomplete' field on List. 
CompletionResponse::List(list) => &list.items, }; let items: im::Vector<ScoredCompletionItem> = items .iter() .map(|i| ScoredCompletionItem { item: i.to_owned(), plugin_id, score: 0, label_score: 0, indices: Vec::new(), }) .collect(); self.input_items.insert(input.to_string(), items); self.filter_items(); } /// Request for completion items with the current request id. pub fn request( &mut self, editor_id: EditorId, proxy_rpc: &ProxyRpcHandler, path: PathBuf, input: String, position: Position, ) { self.latest_editor_id = Some(editor_id); self.input_items.insert(input.clone(), im::Vector::new()); proxy_rpc.completion(self.request_id, path, input, position); } /// Close the completion, clearing all the data. pub fn cancel(&mut self) { if self.status == CompletionStatus::Inactive { return; } self.status = CompletionStatus::Inactive; self.input_id = 0; self.latest_editor_id = None; self.active.set(0); self.input.clear(); self.input_items.clear(); self.filtered_items.clear(); } pub fn update_input(&mut self, input: String)
fn all_items(&self) -> im::Vector<ScoredCompletionItem> { self.input_items .get(&self.input) .cloned() .filter(|items|!items.is_empty()) .unwrap_or_else(move || { self.input_items.get("").cloned().unwrap_or_default() }) } pub fn filter_items(&mut self) { self.input_id += 1; if self.input.is_empty() { self.filtered_items = self.all_items(); return; } // Filter the items by the fuzzy matching with the input text. let mut items: im::Vector<ScoredCompletionItem> = self .matcher .try_update(|matcher| { let pattern = nucleo::pattern::Pattern::parse( &self.input, nucleo::pattern::CaseMatching::Ignore, ); self.all_items() .iter() .filter_map(|i| { let filter_text = i.item.filter_text.as_ref().unwrap_or(&i.item.label); let shift = i .item .label .match_indices(filter_text) .next() .map(|(shift, _)| shift) .unwrap_or(0); let mut indices = Vec::new(); let mut filter_text_buf = Vec::new(); let filter_text = Utf32Str::new(filter_text, &mut filter_text_buf); if let Some(score) = pattern.indices(filter_text, matcher, &mut indices) { if shift > 0 { for idx in indices.iter_mut() { *idx += shift as u32; } } let mut item = i.clone(); item.score = score; item.label_score = score; item.indices = indices.into_iter().map(|i| i as usize).collect(); let mut label_buf = Vec::new(); let label_text = Utf32Str::new(&i.item.label, &mut label_buf); if let Some(score) = pattern.score(label_text, matcher) { item.label_score = score; } Some(item) } else { None } }) .collect() }) .unwrap(); // Sort all the items by their score, then their label score, then their length. items.sort_by(|a, b| { b.score .cmp(&a.score) .then_with(|| b.label_score.cmp(&a.label_score)) .then_with(|| a.item.label.len().cmp(&b.item.label.len())) }); self.filtered_items = items; } /// Move down in the list of items. pub fn next(&mut self) { let active = self.active.get_untracked(); let new = Movement::Down.update_index(active, self.filtered_items.len(), 1, true); self.active.set(new); } /// Move up in the list of items. pub fn previous(&mut self) { let active = self.active.get_untracked(); let new = Movement::Up.update_index(active, self.filtered_items.len(), 1, true); self.active.set(new); } /// The amount of items that can be displayed in the current layout. fn display_count(&self) -> usize { let config = self.config.get_untracked(); ((self.layout_rect.size().height / config.editor.line_height() as f64) .floor() as usize) .saturating_sub(1) } /// Move to the next page of items. pub fn next_page(&mut self) { let count = self.display_count(); let active = self.active.get_untracked(); let new = Movement::Down.update_index( active, self.filtered_items.len(), count, false, ); self.active.set(new); } /// Move to the previous page of items. pub fn previous_page(&mut self) { let count = self.display_count(); let active = self.active.get_untracked(); let new = Movement::Up.update_index( active, self.filtered_items.len(), count, false, ); self.active.set(new); } /// The currently selected/active item. pub fn current_item(&self) -> Option<&ScoredCompletionItem> { self.filtered_items.get(self.active.get_untracked()) } /// Update the completion lens of the document with the active completion item. 
pub fn update_document_completion( &self, view: &EditorViewData, cursor_offset: usize, ) { let doc = view.doc; if!doc.with_untracked(|doc| doc.content.is_file()) { return; } let config = self.config.get_untracked(); if!config.editor.enable_completion_lens { clear_completion_lens(doc); return; } let completion_lens = doc.with_untracked(|doc| { completion_lens_text( view.rope_text(), cursor_offset, self, doc.completion_lens(), ) }); match completion_lens { Some(Some(lens)) => { let offset = self.offset + self.input.len(); // TODO: will need to be adjusted to use visual line. // Could just store the offset in doc. let (line, col) = view.offset_to_line_col(offset); doc.update(|doc| { doc.set_completion_lens(lens, line, col); }); } // Unchanged Some(None) => {} None => { clear_completion_lens(doc); } } } } /// Clear the current completion lens. Only `update`s if there is a completion lens. pub fn clear_completion_lens(doc: RwSignal<Document>) { let has_completion = doc.with_untracked(|doc| doc.completion_lens().is_some()); if has_completion { doc.update(|doc| { doc.clear_completion_lens(); }); } } /// Get the text of the completion lens for the given completion item. /// Returns `None` if the completion lens should be hidden. /// Returns `Some(None)` if the completion lens should be shown, but not changed. /// Returns `Some(Some(text))` if the completion lens should be shown and changed to the given text. fn completion_lens_text( rope_text: impl RopeText, cursor_offset: usize, completion: &CompletionData, current_completion: Option<&str>, ) -> Option<Option<String>> { let item = &completion.current_item()?.item; let item: Cow<str> = if let Some(edit) = &item.text_edit { // A text edit is used, because that is what will actually be inserted. let text_format = item .insert_text_format .unwrap_or(InsertTextFormat::PLAIN_TEXT); // We don't display insert and replace let CompletionTextEdit::Edit(edit) = edit else { return None; }; // The completion offset can be different from the current cursor offset. let completion_offset = completion.offset; let start_offset = rope_text.prev_code_boundary(cursor_offset); let edit_start = rope_text.offset_of_position(&edit.range.start); // If the start of the edit isn't where the cursor currently is, // and it is not at the start of the completion, then we ignore it. // This captures most cases that we want, even if it skips over some // displayable edits. if start_offset!= edit_start && completion_offset!= edit_start { return None; } match text_format { InsertTextFormat::PLAIN_TEXT => { // This is not entirely correct because it assumes that the position is // `{start,end}_offset` when it may not necessarily be. Cow::Borrowed(&edit.new_text) } InsertTextFormat::SNIPPET => { // Parse the snippet. Bail if it's invalid. let snippet = Snippet::from_str(&edit.new_text).ok()?; let text = snippet.text(); Cow::Owned(text) } _ => { // We don't know how to support this text format. return None; } } } else { // There's no specific text edit, so we just use the label. Cow::Borrowed(&item.label) }; // We strip the prefix of the current input from the label. // So that, for example, `p` with a completion of `println` only sets the lens text to `rintln`. // If the text does not include a prefix in the expected position, then we do not display it. let item = item.as_ref().strip_prefix(&completion.input)?; // Get only the first line of text, because Lapce does not currently support // multi-line phantom text. 
let item = item.lines().next().unwrap_or(item); if Some(item) == current_completion { // If the item is the same as the current completion, then we don't display it. Some(None) } else { Some(Some(item.to_string())) } }
{ if self.status == CompletionStatus::Inactive { return; } self.input = input; // TODO: If the user types a letter that continues the current active item, we should // try keeping that item active. Possibly give this a setting. // ex: `p` has `print!` and `println!` as options. If you select the second and then type // `r`, it should stay on `println!` even as the overall filtering of the list changes. self.active.set(0); self.filter_items(); }
identifier_body
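// Sketch of the fallback strategy in `all_items` above: prefer the items
// cached under the current input; if that entry is missing or empty, fall
// back to the entry cached under the empty input. Plain std collections
// stand in for the `im` types used by the real code.
use std::collections::HashMap;

fn items_for(cache: &HashMap<String, Vec<String>>, input: &str) -> Vec<String> {
    cache
        .get(input)
        .filter(|items| !items.is_empty())
        .cloned()
        .unwrap_or_else(|| cache.get("").cloned().unwrap_or_default())
}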
lib.rs
// lib.rs -- RUST wasm interface for Conways game of life mod utils; use quad_rand; use js_sys; use wasm_bindgen::prelude::*; // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. #[cfg(feature = "wee_alloc")] #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; #[wasm_bindgen] pub fn init_panic_hook() { console_error_panic_hook::set_once(); } use web_sys::console; // A macro to provide `println!(..)`-style syntax for `console.log` logging. // allows use of log! macro ==> e.g. // log!("cell[{}, {}] is initially {:?} and has {} neighbors", // row, col, cell, neighbors); // log!(" it becomes {:?}", next_cell); macro_rules! log { ( $( $t:tt )* ) => { console::log_1(&format!( $( $t )* ).into()); } } // Timer generic for using web_sys::console::time and timeEnd. // Use new() constructor to call time and // use drop(&mut self) to call timeEnd. // So function wrapped with Timer will automatically be timed. // Then let _timer = Timer::new("Universe::tick"); // will cause every call to tick() to be timed and logged on console pub struct Timer<'a> { name: &'a str, } impl<'a> Timer<'a> { pub fn new(name: &'a str) -> Timer<'a> { console::time_with_label(name); Timer { name } } } impl<'a> Drop for Timer<'a> { fn drop(&mut self) { console::time_end_with_label(self.name); } } // Define a cell for the 'Universe', each 1 byte // use repr(u8) to ensure 1 byte unsigned values // // NOTE: Define Dead value as zero and alive as one allow simple summing // to determine how many live cells. #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Cell { Dead = 0, Alive = 1 } impl Cell { fn toggle(&mut self) { *self = match *self { Cell::Dead => Cell::Alive, Cell::Alive => Cell::Dead, }; } fn set_cell(&mut self, cell_state: Cell) { //log!("set_cell ({:?})", cell_state); *self = cell_state; } } #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum InitialPattern { Complex1 = 0, Random5050 = 1 } // Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def) // Give the width of the universe, each row of the universe is the next set // of 'width' cells, starting with the first row from indexes 0:<width> #[wasm_bindgen] pub struct
{ width: u32, // width of each row height: u32, // number of rows cells: Vec<Cell>, // width*height cells, each one byte prevcells: Vec<Cell>, // cells from previous tick mousedown: bool // set when shift-click event, so that associated click ignored } // methods for Universe, but not exposed to JS impl Universe { // get_index - Return 1D array index of Cell at position (row,column) in Universe fn get_index(&self, row: u32, column: u32) -> usize { (row * self.width + column) as usize } // Count live neighbors of cell at (row, column) fn live_neighbor_count(&self, row: u32, col: u32) -> u8 { // avoid modulus, division slows us down as seen in profiling let up = if row == 0 { self.height - 1 } else { row - 1 }; let down = if row == self.height - 1 { 0 } else { row + 1 }; let left = if col == 0 { self.width - 1 } else { col - 1 }; let right = if col == self.width - 1 { 0 } else { col + 1 }; let neighbors = if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 }; neighbors } } // standalone method, not part of Universe directly fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> { // expression generating Vec<Cell> let cells = (0..width * height).map(|_i| { //if pattern == InitialPattern::Complex1 { // // hardcode-pattern, depends on 8x8 definition // if i % 2 == 0 || i % 7 == 0 { // Cell::Alive // } else { // Cell::Dead // } // } else { // InitialPattern::Random5050 if quad_rand::gen_range(0, 20) == 0 { Cell::Alive } else { Cell::Dead } // } }).collect(); cells } fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> { let count = cells.len(); let inverted_cells = (0..count).map(|i| { if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive } }).collect(); inverted_cells } // Public methods, exposed to JS #[wasm_bindgen] impl Universe { pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } // set_width -- set width of Universe, set all cells to Dead state pub fn set_width(&mut self, width: u32) { self.width = width; self.cells = (0..width * self.height) .map(|_i| Cell::Dead).collect(); } // Set the height of the Universe, set all cells to Dead state pub fn set_height(&mut self, height: u32) { self.height = height; self.cells = (0..self.width * height) .map(|_i| Cell::Dead).collect(); } pub fn get_cell_index(&self, row: u32, column: u32) -> u32 { row * self.width + column } // return pointer to 1D array of byte Cell values to JS // NOTE: *const Cell syntax // => pointer to non-mutable array??? 
pub fn cells(&self) -> *const Cell { self.cells.as_ptr() } pub fn prevcells(&self) -> *const Cell { self.prevcells.as_ptr() } pub fn tick(&mut self) { let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console // NOTE: timing ended when _timer falls out of scope at end of method let mut next = self.cells.clone(); // copy of current cells, modify ==> next state self.prevcells = next.clone(); // previous cell values // Determine next state of Universe by applying conways' 4 rules for row in 0..self.height { for col in 0..self.width { let idx = self.get_index(row, col); let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0) let neighbors = self.live_neighbor_count(row, col); let next_cell = match (cell, neighbors) { // Rule 1: any live cell with < 2 live neighbors dies, (loneliness) (Cell::Alive, x) if x < 2 => Cell::Dead, // Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable) (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 3: any live cell with > 3 live neighbors dies (overpopulation) (Cell::Alive, x) if x > 3 => Cell::Dead, // Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction) (Cell::Dead, 3) => Cell::Alive, // Otherwise -- no change (otherwise, _) => otherwise }; next[idx] = next_cell; } } self.cells = next; // next state for Universe determined } // toggle cell (row, column) pub fn toggle_cell(&mut self, row: u32, column: u32) { let idx = self.get_index(row, column); self.cells[idx].toggle(); } pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) { let idx = self.get_index(row, column); self.cells[idx].set_cell(valu); } // allow JS to determine if mousedown event occurring (shift-click) pub fn is_mousedown(&self) -> bool { return self.mousedown; } // allow JS to reset the mousedown value pub fn set_mousedown_value(&mut self, valu: bool) { self.mousedown = valu; } // Constructor, initialize the universe to hard-coded pattern pub fn new() -> Universe { utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message let now = js_sys::Date::now(); let now_date = js_sys::Date::new(&JsValue::from_f64(now)); let ms_u64: u64 = now_date.get_milliseconds() as u64; quad_rand::srand(ms_u64); // u64 let width = 128; // was 64 let height = 128; // Randomly decide whether to use Complex1 or Random5050 let _pattern: InitialPattern = if quad_rand::gen_range(0, 2) == 0 { InitialPattern::Complex1 } else { InitialPattern::Random5050 }; let pattern = InitialPattern::Random5050; let cells = generate_cells(width, height, pattern); let prevcells = invert_cells(&cells); let mousedown = false; Universe { width, height, cells, prevcells, mousedown } } pub fn reset_board(&mut self, pattern: InitialPattern) { log!("reset_board() : {:?}", pattern); let width = self.width(); let height = self.height(); self.prevcells = self.cells.clone(); // current grid, needed for correct redraw self.cells = generate_cells(width, height, pattern); } } // impl Universe block w/o wasm_bindgen attribute // Needed for testing -- don't expose to our JS. // Rust-generated WebAsm functions cannot return borrowed references. // NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with // the wasm_bindgen attribute and examine errors. // NOTE: get_cells returns borrowed reference &self.cells impl Universe { /// Get the dead and alive values of the entire universe. pub fn get_cells(&self) -> &[Cell] { &self.cells } /// Set specific cells in a universe to Alive, give slice of (row,col) Tuples. 
pub fn set_cells(&mut self, cells: &[(u32, u32)]) { for (row, col) in cells.iter().cloned() { let idx = self.get_index(row, col); self.cells[idx] = Cell::Alive; // NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive: // the indexing takes a mutable borrow of self.cells while the get_index // call still holds an immutable borrow of self in the same expression, // so the borrow checker rejects it (E0502). Computing idx in its own // statement ends the immutable borrow first. } } }
Universe
identifier_name
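// Sketch of the wrap-around (toroidal) neighbor arithmetic used by
// `live_neighbor_count` above, which per the comments avoids `%` because
// modulus showed up as a hotspot in profiling. Assumes width and height
// are at least 1.
fn wrapped_neighbors(row: u32, col: u32, height: u32, width: u32) -> [(u32, u32); 8] {
    let up = if row == 0 { height - 1 } else { row - 1 };
    let down = if row == height - 1 { 0 } else { row + 1 };
    let left = if col == 0 { width - 1 } else { col - 1 };
    let right = if col == width - 1 { 0 } else { col + 1 };
    [
        (up, left), (up, col), (up, right),
        (row, left), (row, right),
        (down, left), (down, col), (down, right),
    ]
}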
lib.rs
// lib.rs -- RUST wasm interface for Conways game of life mod utils; use quad_rand; use js_sys; use wasm_bindgen::prelude::*; // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. #[cfg(feature = "wee_alloc")] #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; #[wasm_bindgen] pub fn init_panic_hook() { console_error_panic_hook::set_once(); } use web_sys::console; // A macro to provide `println!(..)`-style syntax for `console.log` logging. // allows use of log! macro ==> e.g. // log!("cell[{}, {}] is initially {:?} and has {} neighbors", // row, col, cell, neighbors); // log!(" it becomes {:?}", next_cell); macro_rules! log { ( $( $t:tt )* ) => { console::log_1(&format!( $( $t )* ).into()); } } // Timer generic for using web_sys::console::time and timeEnd. // Use new() constructor to call time and // use drop(&mut self) to call timeEnd. // So function wrapped with Timer will automatically be timed. // Then let _timer = Timer::new("Universe::tick"); // will cause every call to tick() to be timed and logged on console pub struct Timer<'a> { name: &'a str, } impl<'a> Timer<'a> { pub fn new(name: &'a str) -> Timer<'a> { console::time_with_label(name); Timer { name } } } impl<'a> Drop for Timer<'a> { fn drop(&mut self) { console::time_end_with_label(self.name); } } // Define a cell for the 'Universe', each 1 byte // use repr(u8) to ensure 1 byte unsigned values // // NOTE: Define Dead value as zero and alive as one allow simple summing // to determine how many live cells. #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Cell { Dead = 0, Alive = 1 } impl Cell { fn toggle(&mut self) { *self = match *self { Cell::Dead => Cell::Alive, Cell::Alive => Cell::Dead, }; } fn set_cell(&mut self, cell_state: Cell) { //log!("set_cell ({:?})", cell_state); *self = cell_state; } } #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum InitialPattern { Complex1 = 0, Random5050 = 1 } // Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def) // Give the width of the universe, each row of the universe is the next set // of 'width' cells, starting with the first row from indexes 0:<width> #[wasm_bindgen] pub struct Universe { width: u32, // width of each row height: u32, // number of rows cells: Vec<Cell>, // width*height cells, each one byte prevcells: Vec<Cell>, // cells from previous tick mousedown: bool // set when shift-click event, so that associated click ignored } // methods for Universe, but not exposed to JS impl Universe { // get_index - Return 1D array index of Cell at position (row,column) in Universe fn get_index(&self, row: u32, column: u32) -> usize { (row * self.width + column) as usize } // Count live neighbors of cell at (row, column) fn live_neighbor_count(&self, row: u32, col: u32) -> u8 { // avoid modulus, division slows us down as seen in profiling let up = if row == 0 { self.height - 1 } else { row - 1 }; let down = if row == self.height - 1 { 0 } else { row + 1 }; let left = if col == 0 { self.width - 1 } else { col - 1 }; let right = if col == self.width - 1 { 0 } else { col + 1 }; let neighbors = if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } 
else { 0 } + if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 }; neighbors } } // standalone method, not part of Universe directly fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> { // expression generating Vec<Cell> let cells = (0..width * height).map(|_i| { //if pattern == InitialPattern::Complex1 { // // hardcode-pattern, depends on 8x8 definition // if i % 2 == 0 || i % 7 == 0 { // Cell::Alive // } else { // Cell::Dead // } // } else { // InitialPattern::Random5050 if quad_rand::gen_range(0, 20) == 0 { Cell::Alive } else { Cell::Dead } // } }).collect(); cells } fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> { let count = cells.len(); let inverted_cells = (0..count).map(|i| { if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive } }).collect(); inverted_cells } // Public methods, exposed to JS #[wasm_bindgen] impl Universe { pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } // set_width -- set width of Universe, set all cells to Dead state pub fn set_width(&mut self, width: u32) { self.width = width; self.cells = (0..width * self.height) .map(|_i| Cell::Dead).collect(); } // Set the height of the Universe, set all cells to Dead state pub fn set_height(&mut self, height: u32) { self.height = height; self.cells = (0..self.width * height) .map(|_i| Cell::Dead).collect(); } pub fn get_cell_index(&self, row: u32, column: u32) -> u32 { row * self.width + column } // return pointer to 1D array of byte Cell values to JS // NOTE: *const Cell syntax // => pointer to non-mutable array??? pub fn cells(&self) -> *const Cell { self.cells.as_ptr() } pub fn prevcells(&self) -> *const Cell { self.prevcells.as_ptr() } pub fn tick(&mut self) { let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console // NOTE: timing ended when _timer falls out of scope at end of method let mut next = self.cells.clone(); // copy of current cells, modify ==> next state self.prevcells = next.clone(); // previous cell values // Determine next state of Universe by applying conways' 4 rules for row in 0..self.height { for col in 0..self.width { let idx = self.get_index(row, col); let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0) let neighbors = self.live_neighbor_count(row, col); let next_cell = match (cell, neighbors) { // Rule 1: any live cell with < 2 live neighbors dies, (loneliness) (Cell::Alive, x) if x < 2 => Cell::Dead, // Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable) (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 3: any live cell with > 3 live neighbors dies (overpopulation) (Cell::Alive, x) if x > 3 => Cell::Dead, // Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction) (Cell::Dead, 3) => Cell::Alive, // Otherwise -- no change (otherwise, _) => otherwise }; next[idx] = next_cell; } } self.cells = next; // next state for Universe determined } // toggle cell (row, column) pub fn toggle_cell(&mut self, row: u32, column: u32) { let idx = self.get_index(row, column); self.cells[idx].toggle(); } pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) { let idx = self.get_index(row, column); self.cells[idx].set_cell(valu); } // allow JS to determine if mousedown event occurring (shift-click) pub fn is_mousedown(&self) -> bool { return self.mousedown; } // allow JS to 
reset the mousedown value pub fn set_mousedown_value(&mut self, valu: bool) { self.mousedown = valu; } // Constructor, initialize the universe to hard-coded pattern pub fn new() -> Universe { utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message let now = js_sys::Date::now(); let now_date = js_sys::Date::new(&JsValue::from_f64(now)); let ms_u64: u64 = now_date.get_milliseconds() as u64; quad_rand::srand(ms_u64); // u64 let width = 128; // was 64 let height = 128; // Randomly decide whether to use Complex1 or Random5050 let _pattern: InitialPattern = if quad_rand::gen_range(0, 2) == 0 { InitialPattern::Complex1 } else { InitialPattern::Random5050 }; let pattern = InitialPattern::Random5050; let cells = generate_cells(width, height, pattern); let prevcells = invert_cells(&cells); let mousedown = false; Universe { width, height, cells, prevcells, mousedown } } pub fn reset_board(&mut self, pattern: InitialPattern)
} // impl Universe block w/o wasm_bindgen attribute // Needed for testing -- don't expose to our JS. // Rust-generated WebAsm functions cannot return borrowed references. // NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with // the wasm_bindgen attribute and examine errors. // NOTE: get_cells returns borrowed reference &self.cells impl Universe { /// Get the dead and alive values of the entire universe. pub fn get_cells(&self) -> &[Cell] { &self.cells } /// Set specific cells in a universe to Alive, given a slice of (row, col) tuples. pub fn set_cells(&mut self, cells: &[(u32, u32)]) { for (row, col) in cells.iter().cloned() { let idx = self.get_index(row, col); self.cells[idx] = Cell::Alive; // NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive: // the indexing takes a mutable borrow of self.cells while the get_index // call still holds an immutable borrow of self in the same expression, // so the borrow checker rejects it (E0502). Computing idx in its own // statement ends the immutable borrow first. } } }
{ log!("reset_board() : {:?}", pattern); let width = self.width(); let height = self.height(); self.prevcells = self.cells.clone(); // current grid, needed for correct redraw self.cells = generate_cells(width, height, pattern); }
identifier_body
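// The four rules applied in `tick` above, restated as a standalone
// function. `Cell` here is a local copy of the exported enum, declared
// only so the sketch compiles on its own.
#[derive(Clone, Copy, PartialEq)]
enum Cell {
    Dead,
    Alive,
}

fn next_state(cell: Cell, live_neighbors: u8) -> Cell {
    match (cell, live_neighbors) {
        (Cell::Alive, n) if n < 2 => Cell::Dead,            // Rule 1: loneliness
        (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 2: stable
        (Cell::Alive, n) if n > 3 => Cell::Dead,            // Rule 3: overpopulation
        (Cell::Dead, 3) => Cell::Alive,                     // Rule 4: reproduction
        (other, _) => other,                                // otherwise unchanged
    }
}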
lib.rs
// lib.rs -- RUST wasm interface for Conways game of life mod utils; use quad_rand; use js_sys; use wasm_bindgen::prelude::*; // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. #[cfg(feature = "wee_alloc")] #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; #[wasm_bindgen] pub fn init_panic_hook() { console_error_panic_hook::set_once(); } use web_sys::console; // A macro to provide `println!(..)`-style syntax for `console.log` logging. // allows use of log! macro ==> e.g. // log!("cell[{}, {}] is initially {:?} and has {} neighbors", // row, col, cell, neighbors); // log!(" it becomes {:?}", next_cell); macro_rules! log { ( $( $t:tt )* ) => { console::log_1(&format!( $( $t )* ).into()); } } // Timer generic for using web_sys::console::time and timeEnd. // Use new() constructor to call time and // use drop(&mut self) to call timeEnd. // So function wrapped with Timer will automatically be timed. // Then let _timer = Timer::new("Universe::tick"); // will cause every call to tick() to be timed and logged on console pub struct Timer<'a> { name: &'a str, } impl<'a> Timer<'a> { pub fn new(name: &'a str) -> Timer<'a> { console::time_with_label(name); Timer { name } } } impl<'a> Drop for Timer<'a> { fn drop(&mut self) { console::time_end_with_label(self.name); } } // Define a cell for the 'Universe', each 1 byte // use repr(u8) to ensure 1 byte unsigned values // // NOTE: Define Dead value as zero and alive as one allow simple summing // to determine how many live cells. #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Cell { Dead = 0, Alive = 1 } impl Cell { fn toggle(&mut self) { *self = match *self { Cell::Dead => Cell::Alive, Cell::Alive => Cell::Dead, }; } fn set_cell(&mut self, cell_state: Cell) { //log!("set_cell ({:?})", cell_state); *self = cell_state; } } #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum InitialPattern { Complex1 = 0, Random5050 = 1 } // Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def) // Give the width of the universe, each row of the universe is the next set // of 'width' cells, starting with the first row from indexes 0:<width> #[wasm_bindgen] pub struct Universe { width: u32, // width of each row height: u32, // number of rows cells: Vec<Cell>, // width*height cells, each one byte prevcells: Vec<Cell>, // cells from previous tick mousedown: bool // set when shift-click event, so that associated click ignored } // methods for Universe, but not exposed to JS impl Universe { // get_index - Return 1D array index of Cell at position (row,column) in Universe fn get_index(&self, row: u32, column: u32) -> usize { (row * self.width + column) as usize } // Count live neighbors of cell at (row, column) fn live_neighbor_count(&self, row: u32, col: u32) -> u8 { // avoid modulus, division slows us down as seen in profiling let up = if row == 0 { self.height - 1 } else { row - 1 }; let down = if row == self.height - 1 { 0 } else { row + 1 }; let left = if col == 0 { self.width - 1 } else { col - 1 }; let right = if col == self.width - 1 { 0 } else { col + 1 }; let neighbors = if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } 
else { 0 } + if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else
+ if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 }; neighbors } } // standalone method, not part of Universe directly fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> { // expression generating Vec<Cell> let cells = (0..width * height).map(|_i| { //if pattern == InitialPattern::Complex1 { // // hardcode-pattern, depends on 8x8 definition // if i % 2 == 0 || i % 7 == 0 { // Cell::Alive // } else { // Cell::Dead // } // } else { // InitialPattern::Random5050 if quad_rand::gen_range(0, 20) == 0 { Cell::Alive } else { Cell::Dead } // } }).collect(); cells } fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> { let count = cells.len(); let inverted_cells = (0..count).map(|i| { if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive } }).collect(); inverted_cells } // Public methods, exposed to JS #[wasm_bindgen] impl Universe { pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } // set_width -- set width of Universe, set all cells to Dead state pub fn set_width(&mut self, width: u32) { self.width = width; self.cells = (0..width * self.height) .map(|_i| Cell::Dead).collect(); } // Set the height of the Universe, set all cells to Dead state pub fn set_height(&mut self, height: u32) { self.height = height; self.cells = (0..self.width * height) .map(|_i| Cell::Dead).collect(); } pub fn get_cell_index(&self, row: u32, column: u32) -> u32 { row * self.width + column } // return pointer to 1D array of byte Cell values to JS // NOTE: *const Cell syntax // => pointer to non-mutable array??? pub fn cells(&self) -> *const Cell { self.cells.as_ptr() } pub fn prevcells(&self) -> *const Cell { self.prevcells.as_ptr() } pub fn tick(&mut self) { let _timer = Timer::new("Universe::tick"); // times the method, timing in browser console // NOTE: timing ended when _timer falls out of scope at end of method let mut next = self.cells.clone(); // copy of current cells, modify ==> next state self.prevcells = next.clone(); // previous cell values // Determine next state of Universe by applying conways' 4 rules for row in 0..self.height { for col in 0..self.width { let idx = self.get_index(row, col); let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0) let neighbors = self.live_neighbor_count(row, col); let next_cell = match (cell, neighbors) { // Rule 1: any live cell with < 2 live neighbors dies, (loneliness) (Cell::Alive, x) if x < 2 => Cell::Dead, // Rule 2: any live cell with 2 to 3 live neighbors continues to live (stable) (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 3: any live cell with > 3 live neighbors dies (overpopulation) (Cell::Alive, x) if x > 3 => Cell::Dead, // Rule 4: any dead cel with = 3 live neighbors comes alive (reproduction) (Cell::Dead, 3) => Cell::Alive, // Otherwise -- no change (otherwise, _) => otherwise }; next[idx] = next_cell; } } self.cells = next; // next state for Universe determined } // toggle cell (row, column) pub fn toggle_cell(&mut self, row: u32, column: u32) { let idx = self.get_index(row, column); self.cells[idx].toggle(); } pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) { let idx = self.get_index(row, column); self.cells[idx].set_cell(valu); } // allow JS to determine if mousedown event occurring (shift-click) pub fn is_mousedown(&self) -> bool { return self.mousedown; } // allow JS to reset the mousedown value pub fn set_mousedown_value(&mut self, valu: bool) { self.mousedown = valu; } // Constructor, initialize the universe to hard-coded 
pattern pub fn new() -> Universe { utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message let now = js_sys::Date::now(); let now_date = js_sys::Date::new(&JsValue::from_f64(now)); let ms_u64: u64 = now_date.get_milliseconds() as u64; quad_rand::srand(ms_u64); // u64 let width = 128; // was 64 let height = 128; // Randomly decide whether to use Complex1 or Random5050 let _pattern: InitialPattern = if quad_rand::gen_range(0, 2) == 0 { InitialPattern::Complex1 } else { InitialPattern::Random5050 }; let pattern = InitialPattern::Random5050; let cells = generate_cells(width, height, pattern); let prevcells = invert_cells(&cells); let mousedown = false; Universe { width, height, cells, prevcells, mousedown } } pub fn reset_board(&mut self, pattern: InitialPattern) { log!("reset_board() : {:?}", pattern); let width = self.width(); let height = self.height(); self.prevcells = self.cells.clone(); // current grid, needed for correct redraw self.cells = generate_cells(width, height, pattern); } } // impl Universe block w/o wasm_bindgen attribute // Needed for testing -- don't expose to our JS. // Rust-generated WebAsm functions cannot return borrowed references. // NOTE/SUGGEST: Try compiling the Rust-generated WebAsm with // the wasm_bindgen attribute and examine errors. // NOTE: get_cells returns borrowed reference &self.cells impl Universe { /// Get the dead and alive values of the entire universe. pub fn get_cells(&self) -> &[Cell] { &self.cells } /// Set specific cells in a universe to Alive, give slice of (row,col) Tuples. pub fn set_cells(&mut self, cells: &[(u32, u32)]) { for (row, col) in cells.iter().cloned() { let idx = self.get_index(row, col); self.cells[idx] = Cell::Alive; // NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive // claims immutable borrow on self.get_index call and // mutable borrow later used here. (I don't follow personally.) } } }
{ 0 }
conditional_block
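// Sketch of the borrow-checker point noted in `set_cells` above: indexing
// `self.cells` mutably while `self.get_index(..)` still holds an immutable
// borrow of `self` in the same expression is rejected (E0502); computing
// the index in its own statement ends the immutable borrow first. `Grid`
// is a hypothetical miniature of `Universe`.
struct Grid {
    width: u32,
    cells: Vec<u8>,
}

impl Grid {
    fn get_index(&self, row: u32, col: u32) -> usize {
        (row * self.width + col) as usize
    }

    fn set_alive(&mut self, row: u32, col: u32) {
        let idx = self.get_index(row, col); // immutable borrow of `self` ends here
        self.cells[idx] = 1;                // mutable borrow of `self.cells` is now fine
        // self.cells[self.get_index(row, col)] = 1; // would not compile (E0502)
    }
}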
lib.rs
// lib.rs -- RUST wasm interface for Conways game of life mod utils; use quad_rand; use js_sys; use wasm_bindgen::prelude::*; // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. #[cfg(feature = "wee_alloc")] #[global_allocator] static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; #[wasm_bindgen] pub fn init_panic_hook() { console_error_panic_hook::set_once(); } use web_sys::console; // A macro to provide `println!(..)`-style syntax for `console.log` logging. // allows use of log! macro ==> e.g. // log!("cell[{}, {}] is initially {:?} and has {} neighbors", // row, col, cell, neighbors); // log!(" it becomes {:?}", next_cell); macro_rules! log { ( $( $t:tt )* ) => { console::log_1(&format!( $( $t )* ).into()); } } // Timer generic for using web_sys::console::time and timeEnd. // Use new() constructor to call time and // use drop(&mut self) to call timeEnd. // So function wrapped with Timer will automatically be timed. // Then let _timer = Timer::new("Universe::tick"); // will cause every call to tick() to be timed and logged on console pub struct Timer<'a> { name: &'a str, } impl<'a> Timer<'a> { pub fn new(name: &'a str) -> Timer<'a> { console::time_with_label(name); Timer { name } } } impl<'a> Drop for Timer<'a> { fn drop(&mut self) { console::time_end_with_label(self.name); } } // Define a cell for the 'Universe', each 1 byte // use repr(u8) to ensure 1 byte unsigned values // // NOTE: Define Dead value as zero and alive as one allow simple summing // to determine how many live cells. #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum Cell { Dead = 0, Alive = 1 } impl Cell { fn toggle(&mut self) { *self = match *self { Cell::Dead => Cell::Alive, Cell::Alive => Cell::Dead, }; } fn set_cell(&mut self, cell_state: Cell) { //log!("set_cell ({:?})", cell_state); *self = cell_state; } } #[wasm_bindgen] #[repr(u8)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum InitialPattern { Complex1 = 0, Random5050 = 1 } // Define the 'Universe', a 1D array of Cell values (byte values, 0 or 1 per Cell def) // Give the width of the universe, each row of the universe is the next set // of 'width' cells, starting with the first row from indexes 0:<width> #[wasm_bindgen] pub struct Universe { width: u32, // width of each row height: u32, // number of rows cells: Vec<Cell>, // width*height cells, each one byte prevcells: Vec<Cell>, // cells from previous tick mousedown: bool // set when shift-click event, so that associated click ignored } // methods for Universe, but not exposed to JS impl Universe { // get_index - Return 1D array index of Cell at position (row,column) in Universe fn get_index(&self, row: u32, column: u32) -> usize { (row * self.width + column) as usize } // Count live neighbors of cell at (row, column) fn live_neighbor_count(&self, row: u32, col: u32) -> u8 { // avoid modulus, division slows us down as seen in profiling let up = if row == 0 { self.height - 1 } else { row - 1 }; let down = if row == self.height - 1 { 0 } else { row + 1 }; let left = if col == 0 { self.width - 1 } else { col - 1 }; let right = if col == self.width - 1 { 0 } else { col + 1 }; let neighbors = if self.cells[self.get_index(up,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(up,right)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(row,right)] == Cell::Alive { 1 } 
else { 0 } + if self.cells[self.get_index(down,left)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,col)] == Cell::Alive { 1 } else { 0 } + if self.cells[self.get_index(down,right)] == Cell::Alive { 1 } else { 0 }; neighbors } } // standalone function, not part of Universe directly // NOTE: the _pattern argument is currently ignored; the Complex1 branch is commented out below fn generate_cells(width: u32, height: u32, _pattern: InitialPattern) -> Vec<Cell> { // expression generating Vec<Cell> let cells = (0..width * height).map(|_i| { //if pattern == InitialPattern::Complex1 { // // hardcode-pattern, depends on 8x8 definition // if i % 2 == 0 || i % 7 == 0 { // Cell::Alive // } else { // Cell::Dead // } // } else { // InitialPattern::Random5050 // NOTE: despite the Random5050 name, gen_range(0, 20) == 0 makes a cell // Alive with probability 1/20 (~5%), not 50% if quad_rand::gen_range(0, 20) == 0 { Cell::Alive } else { Cell::Dead } // } }).collect(); cells } fn invert_cells(cells: &Vec<Cell>) -> Vec<Cell> { let count = cells.len();
}).collect(); inverted_cells } // Public methods, exposed to JS #[wasm_bindgen] impl Universe { pub fn width(&self) -> u32 { self.width } pub fn height(&self) -> u32 { self.height } // set_width -- set width of Universe, set all cells to Dead state pub fn set_width(&mut self, width: u32) { self.width = width; self.cells = (0..width * self.height) .map(|_i| Cell::Dead).collect(); } // Set the height of the Universe, set all cells to Dead state pub fn set_height(&mut self, height: u32) { self.height = height; self.cells = (0..self.width * height) .map(|_i| Cell::Dead).collect(); } pub fn get_cell_index(&self, row: u32, column: u32) -> u32 { row * self.width + column } // Return a pointer to the 1D array of byte Cell values to JS. // NOTE: *const Cell is a raw pointer to immutable data; // JS reads the cells directly out of WASM linear memory at this address. pub fn cells(&self) -> *const Cell { self.cells.as_ptr() } pub fn prevcells(&self) -> *const Cell { self.prevcells.as_ptr() } pub fn tick(&mut self) { let _timer = Timer::new("Universe::tick"); // times the method; timing shown in browser console // NOTE: timing ends when _timer falls out of scope at end of method let mut next = self.cells.clone(); // copy of current cells, modify ==> next state self.prevcells = next.clone(); // previous cell values // Determine next state of Universe by applying Conway's 4 rules for row in 0..self.height { for col in 0..self.width { let idx = self.get_index(row, col); let cell = self.cells[idx]; // Cell::Alive (1), or Dead (0) let neighbors = self.live_neighbor_count(row, col); let next_cell = match (cell, neighbors) { // Rule 1: any live cell with < 2 live neighbors dies (loneliness) (Cell::Alive, x) if x < 2 => Cell::Dead, // Rule 2: any live cell with 2 or 3 live neighbors continues to live (stable) (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 3: any live cell with > 3 live neighbors dies (overpopulation) (Cell::Alive, x) if x > 3 => Cell::Dead, // Rule 4: any dead cell with exactly 3 live neighbors comes alive (reproduction) (Cell::Dead, 3) => Cell::Alive, // Otherwise -- no change (otherwise, _) => otherwise }; next[idx] = next_cell; } } self.cells = next; // next state for Universe determined } // toggle cell (row, column) pub fn toggle_cell(&mut self, row: u32, column: u32) { let idx = self.get_index(row, column); self.cells[idx].toggle(); } pub fn set_cell_value(&mut self, row: u32, column: u32, valu: Cell) { let idx = self.get_index(row, column); self.cells[idx].set_cell(valu); } // allow JS to determine if mousedown event occurring (shift-click) pub fn is_mousedown(&self) -> bool { return self.mousedown; } // allow JS to reset the mousedown value pub fn set_mousedown_value(&mut self, valu: bool) { self.mousedown = valu; } // Constructor: initialize the universe to a generated starting pattern pub fn new() -> Universe { utils::set_panic_hook(); // panic will show up in JS console, vs 'unreachable' message let now = js_sys::Date::now(); let now_date = js_sys::Date::new(&JsValue::from_f64(now)); let ms_u64: u64 = now_date.get_milliseconds() as u64; quad_rand::srand(ms_u64); // u64 let width = 128; // was 64 let height = 128; // Randomly decide whether to use Complex1 or Random5050 // NOTE: the result is currently unused; Random5050 is forced on the next line let _pattern: InitialPattern = if quad_rand::gen_range(0, 2) == 0 { InitialPattern::Complex1 } else { InitialPattern::Random5050 }; let pattern = InitialPattern::Random5050; let cells = generate_cells(width, height, pattern); let prevcells = invert_cells(&cells); let mousedown = false; Universe { width, height, cells, prevcells, mousedown } } pub fn reset_board(&mut self, pattern: InitialPattern)
{ log!("reset_board() : {:?}", pattern); let width = self.width(); let height = self.height(); self.prevcells = self.cells.clone(); // current grid, needed for correct redraw self.cells = generate_cells(width, height, pattern); } } // impl Universe block w/o wasm_bindgen attribute // Needed for testing -- not exposed to our JS. // wasm-bindgen-exported functions cannot return borrowed references. // NOTE/SUGGEST: try compiling with the wasm_bindgen attribute on this block // and examine the errors. // NOTE: get_cells returns the borrowed reference &self.cells impl Universe { /// Get the dead and alive values of the entire universe. pub fn get_cells(&self) -> &[Cell] { &self.cells } /// Set specific cells in a universe to Alive, given a slice of (row,col) tuples. pub fn set_cells(&mut self, cells: &[(u32, u32)]) { for (row, col) in cells.iter().cloned() { let idx = self.get_index(row, col); self.cells[idx] = Cell::Alive; // NOTE: can't use self.cells[ self.get_index(row,col) ] = Cell::Alive -- // the assignment takes a mutable borrow of self.cells while the index // expression self.get_index(row,col) still needs an immutable borrow of // all of self, and the borrow checker sees the two borrows as overlapping. } } }
let inverted_cells = (0..count).map(|i| { if cells[i] == Cell::Alive { Cell::Dead } else { Cell::Alive }
random_line_split
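The tick() method above compresses Conway's four rules into a single match on (cell, live-neighbor count). Below is a minimal standalone sketch of the same rule table in plain Rust, with no wasm-bindgen; the names here are illustrative and not part of the crate above:

#[derive(Clone, Copy, PartialEq, Debug)]
enum Cell { Dead, Alive }

// Same rule table as Universe::tick, isolated so it can be unit-tested.
fn next_state(cell: Cell, neighbors: u8) -> Cell {
    match (cell, neighbors) {
        (Cell::Alive, n) if n < 2 => Cell::Dead,            // Rule 1: loneliness
        (Cell::Alive, 2) | (Cell::Alive, 3) => Cell::Alive, // Rule 2: stable
        (Cell::Alive, n) if n > 3 => Cell::Dead,            // Rule 3: overpopulation
        (Cell::Dead, 3) => Cell::Alive,                     // Rule 4: reproduction
        (state, _) => state,                                // otherwise: no change
    }
}

fn main() {
    assert_eq!(next_state(Cell::Alive, 1), Cell::Dead);  // dies of loneliness
    assert_eq!(next_state(Cell::Dead, 3), Cell::Alive);  // reproduction
    assert_eq!(next_state(Cell::Dead, 2), Cell::Dead);   // unchanged
}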
mod.rs
//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped //! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the //! generator type. See `InteriorVisitor::record` for where the results of this analysis are used. //! //! There are three phases to this analysis: //! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed. //! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized, //! and also build a control flow graph. //! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through //! the CFG and find the exact points where we know a value is definitely dropped. //! //! The end result is a data structure that maps the post-order index of each node in the HIR tree //! to a set of values that are known to be dropped at that location.
use crate::FnCtxt; use hir::def_id::DefId; use hir::{Body, HirId, HirIdMap, Node}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_hir as hir; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_middle::hir::map::Map; use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId}; use rustc_middle::ty; use std::collections::BTreeMap; use std::fmt::Debug; mod cfg_build; mod cfg_propagate; mod cfg_visualize; mod record_consumed_borrow; pub fn compute_drop_ranges<'a, 'tcx>( fcx: &'a FnCtxt<'a, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>, ) -> DropRanges { if fcx.sess().opts.unstable_opts.drop_tracking { let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body); let typeck_results = &fcx.typeck_results.borrow(); let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0); let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph( &fcx, typeck_results, fcx.param_env, consumed_borrowed_places, body, num_exprs, ); drop_ranges.propagate_to_fixpoint(); debug!("borrowed_temporaries = {borrowed_temporaries:?}"); DropRanges { tracked_value_map: drop_ranges.tracked_value_map, nodes: drop_ranges.nodes, borrowed_temporaries: Some(borrowed_temporaries), } } else { // If drop range tracking is not enabled, skip all the analysis and produce an // empty set of DropRanges. DropRanges { tracked_value_map: UnordMap::default(), nodes: IndexVec::new(), borrowed_temporaries: None, } } } /// Applies `f` to consumable node in the HIR subtree pointed to by `place`. /// /// This includes the place itself, and if the place is a reference to a local /// variable then `f` is also called on the HIR node for that variable as well. /// /// For example, if `place` points to `foo()`, then `f` is called once for the /// result of `foo`. On the other hand, if `place` points to `x` then `f` will /// be called both on the `ExprKind::Path` node that represents the expression /// as well as the HirId of the local `x` itself. fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) { f(place); let node = hir.find(place.hir_id()); if let Some(Node::Expr(expr)) = node { match expr.kind { hir::ExprKind::Path(hir::QPath::Resolved( _, hir::Path { res: hir::def::Res::Local(hir_id),.. }, )) => { f(TrackedValue::Variable(*hir_id)); } _ => (), } } } rustc_index::newtype_index! { #[debug_format = "id({})"] pub struct PostOrderId {} } rustc_index::newtype_index! { #[debug_format = "hidx({})"] pub struct TrackedValueIndex {} } /// Identifies a value whose drop state we need to track. #[derive(PartialEq, Eq, Hash, Clone, Copy)] enum TrackedValue { /// Represents a named variable, such as a let binding, parameter, or upvar. /// /// The HirId points to the variable's definition site. Variable(HirId), /// A value produced as a result of an expression. /// /// The HirId points to the expression that returns this value. 
Temporary(HirId), } impl Debug for TrackedValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { write!(f, "{}", tcx.hir().node_to_string(self.hir_id())) } else { match self { Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"), Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"), } } }) } } impl TrackedValue { fn hir_id(&self) -> HirId { match self { TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id, } } fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self { match place_with_id.place.base { PlaceBase::Rvalue | PlaceBase::StaticItem => { TrackedValue::Temporary(place_with_id.hir_id) } PlaceBase::Local(hir_id) | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => { TrackedValue::Variable(hir_id) } } } } /// Represents a reason why we might not be able to convert a HirId or Place /// into a tracked value. #[derive(Debug)] enum TrackedValueConversionError { /// Place projections are not currently supported. /// /// The reasoning around these is kind of subtle, so we choose to be more /// conservative around these for now. There is no reason in theory we /// cannot support these, we just have not implemented it yet. PlaceProjectionsNotSupported, } impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue { type Error = TrackedValueConversionError; fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> { if !place_with_id.place.projections.is_empty() { debug!( "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.", place_with_id ); return Err(TrackedValueConversionError::PlaceProjectionsNotSupported); } Ok(TrackedValue::from_place_with_projections_allowed(place_with_id)) } } pub struct DropRanges { tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, nodes: IndexVec<PostOrderId, NodeInfo>, borrowed_temporaries: Option<UnordSet<HirId>>, } impl DropRanges { pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool { self.tracked_value_map .get(&TrackedValue::Temporary(hir_id)) .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id))) .cloned() .is_some_and(|tracked_value_id| { self.expect_node(location.into()).drop_state.contains(tracked_value_id) }) } pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool { if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true } } /// Returns a reference to the NodeInfo for a node, panicking if it does not exist fn expect_node(&self, id: PostOrderId) -> &NodeInfo { &self.nodes[id] } } /// Tracks information needed to compute drop ranges. struct DropRangesBuilder { /// The core of DropRangesBuilder is a set of nodes, which each represent /// one expression. We primarily refer to them by their index in a /// post-order traversal of the HIR tree, since this is what /// generator_interior uses to talk about yield positions. /// /// This IndexVec keeps the relevant details for each node. See the /// NodeInfo struct for more details, but this information includes things /// such as the set of control-flow successors, which variables are dropped /// or reinitialized, and whether each variable has been inferred to be /// known-dropped or potentially reinitialized at each point. nodes: IndexVec<PostOrderId, NodeInfo>, /// We refer to values whose drop state we are tracking by the HirId of /// where they are defined.
Within a NodeInfo, however, we store the /// drop-state in a bit vector indexed by a HirIdIndex /// (see NodeInfo::drop_state). The hir_id_map field stores the mapping /// from HirIds to the HirIdIndex that is used to represent that value in /// bitvector. tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, /// When building the control flow graph, we don't always know the /// post-order index of the target node at the point we encounter it. /// For example, this happens with break and continue. In those cases, /// we store a pair of the PostOrderId of the source and the HirId /// of the target. Once we have gathered all of these edges, we make a /// pass over the set of deferred edges (see process_deferred_edges in /// cfg_build.rs), look up the PostOrderId for the target (since now the /// post-order index for all nodes is known), and add missing control flow /// edges. deferred_edges: Vec<(PostOrderId, HirId)>, /// This maps HirIds of expressions to their post-order index. It is /// used in process_deferred_edges to correctly add back-edges. post_order_map: HirIdMap<PostOrderId>, } impl Debug for DropRangesBuilder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DropRanges") .field("hir_id_map", &self.tracked_value_map) .field("post_order_maps", &self.post_order_map) .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>()) .finish() } } /// DropRanges keeps track of what values are definitely dropped at each point in the code. /// /// Values of interest are defined by the hir_id of their place. Locations in code are identified /// by their index in the post-order traversal. At its core, DropRanges maps /// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely /// dropped at the point of the node identified by post_order_id. impl DropRangesBuilder { /// Returns the number of values (hir_ids) that are tracked fn num_values(&self) -> usize { self.tracked_value_map.len() } fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo { let size = self.num_values(); self.nodes.ensure_contains_elem(id, || NodeInfo::new(size)) } fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) { trace!("adding control edge from {:?} to {:?}", from, to); self.node_mut(from).successors.push(to); } } #[derive(Debug)] struct NodeInfo { /// IDs of nodes that can follow this one in the control flow /// /// If the vec is empty, then control proceeds to the next node. successors: Vec<PostOrderId>, /// List of hir_ids that are dropped by this node. drops: Vec<TrackedValueIndex>, /// List of hir_ids that are reinitialized by this node. reinits: Vec<TrackedValueIndex>, /// Set of values that are definitely dropped at this point. drop_state: BitSet<TrackedValueIndex>, } impl NodeInfo { fn new(num_values: usize) -> Self { Self { successors: vec![], drops: vec![], reinits: vec![], drop_state: BitSet::new_filled(num_values), } } }
use self::cfg_build::build_control_flow_graph; use self::record_consumed_borrow::find_consumed_and_borrowed;
random_line_split
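The deferred_edges field documented above records control-flow edges whose target index is not yet known: a break or continue yields a (source PostOrderId, target HirId) pair that is resolved once post_order_map is complete. The following is a sketch of that two-phase construction using plain std types; NodeId, Label, and DeferredCfg are illustrative stand-ins, not rustc types:

use std::collections::HashMap;

type NodeId = usize;       // stands in for PostOrderId
type Label = &'static str; // stands in for the target's HirId

#[derive(Default)]
struct DeferredCfg {
    successors: HashMap<NodeId, Vec<NodeId>>,
    deferred_edges: Vec<(NodeId, Label)>,
    post_order_map: HashMap<Label, NodeId>,
}

impl DeferredCfg {
    // During construction the target may have no index yet, so defer the edge.
    fn add_deferred_edge(&mut self, from: NodeId, target: Label) {
        self.deferred_edges.push((from, target));
    }

    // After traversal, every target has an index; patch in the real edges.
    fn process_deferred_edges(&mut self) {
        for (from, target) in self.deferred_edges.drain(..) {
            let to = self.post_order_map[target];
            self.successors.entry(from).or_default().push(to);
        }
    }
}

fn main() {
    let mut cfg = DeferredCfg::default();
    cfg.add_deferred_edge(2, "loop_exit"); // 'break' seen before the loop-exit node
    cfg.post_order_map.insert("loop_exit", 5);
    cfg.process_deferred_edges();
    assert_eq!(cfg.successors[&2], vec![5]);
}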
mod.rs
//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped //! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the //! generator type. See `InteriorVisitor::record` for where the results of this analysis are used. //! //! There are three phases to this analysis: //! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed. //! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized, //! and also build a control flow graph. //! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through //! the CFG and find the exact points where we know a value is definitely dropped. //! //! The end result is a data structure that maps the post-order index of each node in the HIR tree //! to a set of values that are known to be dropped at that location. use self::cfg_build::build_control_flow_graph; use self::record_consumed_borrow::find_consumed_and_borrowed; use crate::FnCtxt; use hir::def_id::DefId; use hir::{Body, HirId, HirIdMap, Node}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_hir as hir; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_middle::hir::map::Map; use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId}; use rustc_middle::ty; use std::collections::BTreeMap; use std::fmt::Debug; mod cfg_build; mod cfg_propagate; mod cfg_visualize; mod record_consumed_borrow; pub fn compute_drop_ranges<'a, 'tcx>( fcx: &'a FnCtxt<'a, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>, ) -> DropRanges { if fcx.sess().opts.unstable_opts.drop_tracking { let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body); let typeck_results = &fcx.typeck_results.borrow(); let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0); let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph( &fcx, typeck_results, fcx.param_env, consumed_borrowed_places, body, num_exprs, ); drop_ranges.propagate_to_fixpoint(); debug!("borrowed_temporaries = {borrowed_temporaries:?}"); DropRanges { tracked_value_map: drop_ranges.tracked_value_map, nodes: drop_ranges.nodes, borrowed_temporaries: Some(borrowed_temporaries), } } else { // If drop range tracking is not enabled, skip all the analysis and produce an // empty set of DropRanges. DropRanges { tracked_value_map: UnordMap::default(), nodes: IndexVec::new(), borrowed_temporaries: None, } } } /// Applies `f` to consumable node in the HIR subtree pointed to by `place`. /// /// This includes the place itself, and if the place is a reference to a local /// variable then `f` is also called on the HIR node for that variable as well. /// /// For example, if `place` points to `foo()`, then `f` is called once for the /// result of `foo`. On the other hand, if `place` points to `x` then `f` will /// be called both on the `ExprKind::Path` node that represents the expression /// as well as the HirId of the local `x` itself. fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) { f(place); let node = hir.find(place.hir_id()); if let Some(Node::Expr(expr)) = node { match expr.kind { hir::ExprKind::Path(hir::QPath::Resolved( _, hir::Path { res: hir::def::Res::Local(hir_id),.. }, )) => { f(TrackedValue::Variable(*hir_id)); } _ => (), } } } rustc_index::newtype_index! { #[debug_format = "id({})"] pub struct PostOrderId {} } rustc_index::newtype_index! 
{ #[debug_format = "hidx({})"] pub struct TrackedValueIndex {} } /// Identifies a value whose drop state we need to track. #[derive(PartialEq, Eq, Hash, Clone, Copy)] enum TrackedValue { /// Represents a named variable, such as a let binding, parameter, or upvar. /// /// The HirId points to the variable's definition site. Variable(HirId), /// A value produced as a result of an expression. /// /// The HirId points to the expression that returns this value. Temporary(HirId), } impl Debug for TrackedValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { write!(f, "{}", tcx.hir().node_to_string(self.hir_id())) } else { match self { Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"), Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"), } } }) } } impl TrackedValue { fn hir_id(&self) -> HirId { match self { TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id, } } fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self { match place_with_id.place.base { PlaceBase::Rvalue | PlaceBase::StaticItem => { TrackedValue::Temporary(place_with_id.hir_id) } PlaceBase::Local(hir_id) | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => { TrackedValue::Variable(hir_id) } } } } /// Represents a reason why we might not be able to convert a HirId or Place /// into a tracked value. #[derive(Debug)] enum TrackedValueConversionError { /// Place projections are not currently supported. /// /// The reasoning around these is kind of subtle, so we choose to be more /// conservative around these for now. There is no reason in theory we /// cannot support these, we just have not implemented it yet. PlaceProjectionsNotSupported, } impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue { type Error = TrackedValueConversionError; fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> { if !place_with_id.place.projections.is_empty() { debug!( "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.", place_with_id ); return Err(TrackedValueConversionError::PlaceProjectionsNotSupported); } Ok(TrackedValue::from_place_with_projections_allowed(place_with_id)) } } pub struct DropRanges { tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, nodes: IndexVec<PostOrderId, NodeInfo>, borrowed_temporaries: Option<UnordSet<HirId>>, } impl DropRanges { pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool { self.tracked_value_map .get(&TrackedValue::Temporary(hir_id)) .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id))) .cloned() .is_some_and(|tracked_value_id| { self.expect_node(location.into()).drop_state.contains(tracked_value_id) }) } pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool { if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true } } /// Returns a reference to the NodeInfo for a node, panicking if it does not exist fn expect_node(&self, id: PostOrderId) -> &NodeInfo { &self.nodes[id] } } /// Tracks information needed to compute drop ranges. struct DropRangesBuilder { /// The core of DropRangesBuilder is a set of nodes, which each represent /// one expression. We primarily refer to them by their index in a /// post-order traversal of the HIR tree, since this is what /// generator_interior uses to talk about yield positions. /// /// This IndexVec keeps the relevant details for each node.
See the /// NodeInfo struct for more details, but this information includes things /// such as the set of control-flow successors, which variables are dropped /// or reinitialized, and whether each variable has been inferred to be /// known-dropped or potentially reinitialized at each point. nodes: IndexVec<PostOrderId, NodeInfo>, /// We refer to values whose drop state we are tracking by the HirId of /// where they are defined. Within a NodeInfo, however, we store the /// drop-state in a bit vector indexed by a HirIdIndex /// (see NodeInfo::drop_state). The hir_id_map field stores the mapping /// from HirIds to the HirIdIndex that is used to represent that value in /// bitvector. tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, /// When building the control flow graph, we don't always know the /// post-order index of the target node at the point we encounter it. /// For example, this happens with break and continue. In those cases, /// we store a pair of the PostOrderId of the source and the HirId /// of the target. Once we have gathered all of these edges, we make a /// pass over the set of deferred edges (see process_deferred_edges in /// cfg_build.rs), look up the PostOrderId for the target (since now the /// post-order index for all nodes is known), and add missing control flow /// edges. deferred_edges: Vec<(PostOrderId, HirId)>, /// This maps HirIds of expressions to their post-order index. It is /// used in process_deferred_edges to correctly add back-edges. post_order_map: HirIdMap<PostOrderId>, } impl Debug for DropRangesBuilder { fn
(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DropRanges") .field("hir_id_map", &self.tracked_value_map) .field("post_order_maps", &self.post_order_map) .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>()) .finish() } } /// DropRanges keeps track of what values are definitely dropped at each point in the code. /// /// Values of interest are defined by the hir_id of their place. Locations in code are identified /// by their index in the post-order traversal. At its core, DropRanges maps /// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely /// dropped at the point of the node identified by post_order_id. impl DropRangesBuilder { /// Returns the number of values (hir_ids) that are tracked fn num_values(&self) -> usize { self.tracked_value_map.len() } fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo { let size = self.num_values(); self.nodes.ensure_contains_elem(id, || NodeInfo::new(size)) } fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) { trace!("adding control edge from {:?} to {:?}", from, to); self.node_mut(from).successors.push(to); } } #[derive(Debug)] struct NodeInfo { /// IDs of nodes that can follow this one in the control flow /// /// If the vec is empty, then control proceeds to the next node. successors: Vec<PostOrderId>, /// List of hir_ids that are dropped by this node. drops: Vec<TrackedValueIndex>, /// List of hir_ids that are reinitialized by this node. reinits: Vec<TrackedValueIndex>, /// Set of values that are definitely dropped at this point. drop_state: BitSet<TrackedValueIndex>, } impl NodeInfo { fn new(num_values: usize) -> Self { Self { successors: vec![], drops: vec![], reinits: vec![], drop_state: BitSet::new_filled(num_values), } } }
fmt
identifier_name
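propagate_to_fixpoint itself lives in cfg_propagate and is not shown in this record, but the all-dropped initial state (BitSet::new_filled in NodeInfo::new) hints at its shape: a forward "must" dataflow that intersects drop states at merge points until nothing changes, so intersection can only remove facts and termination is guaranteed. The following is a hypothetical sketch of that shape, using Vec<bool> in place of BitSet and omitting reinits for brevity; it is not rustc's actual implementation:

// Hypothetical must-drop fixpoint over a small CFG; node 0 is assumed to be the entry.
fn propagate(succs: &[Vec<usize>], drops: &[Vec<usize>], num_values: usize) -> Vec<Vec<bool>> {
    let n = succs.len();
    // Start every node "all dropped" (cf. BitSet::new_filled) so that
    // intersection at merge points can only turn facts off.
    let mut state = vec![vec![true; num_values]; n];
    state[0] = vec![false; num_values]; // entry: nothing dropped yet
    let mut changed = true;
    while changed {
        changed = false;
        for node in 0..n {
            let mut out = state[node].clone();
            for &v in &drops[node] {
                out[v] = true; // this node drops value v
            }
            for &succ in &succs[node] {
                for v in 0..num_values {
                    let merged = state[succ][v] && out[v]; // must-drop: intersect
                    if state[succ][v] != merged {
                        state[succ][v] = merged;
                        changed = true;
                    }
                }
            }
        }
    }
    state
}

fn main() {
    // Diamond CFG: 0 -> {1, 2} -> 3; only branch 1 drops value 0.
    let succs = vec![vec![1, 2], vec![3], vec![3], vec![]];
    let drops = vec![vec![], vec![0], vec![], vec![]];
    let state = propagate(&succs, &drops, 1);
    assert!(!state[3][0]); // not dropped on *all* paths, so not definitely dropped
}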
mod.rs
//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped //! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the //! generator type. See `InteriorVisitor::record` for where the results of this analysis are used. //! //! There are three phases to this analysis: //! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed. //! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized, //! and also build a control flow graph. //! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through //! the CFG and find the exact points where we know a value is definitely dropped. //! //! The end result is a data structure that maps the post-order index of each node in the HIR tree //! to a set of values that are known to be dropped at that location. use self::cfg_build::build_control_flow_graph; use self::record_consumed_borrow::find_consumed_and_borrowed; use crate::FnCtxt; use hir::def_id::DefId; use hir::{Body, HirId, HirIdMap, Node}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_hir as hir; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_middle::hir::map::Map; use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId}; use rustc_middle::ty; use std::collections::BTreeMap; use std::fmt::Debug; mod cfg_build; mod cfg_propagate; mod cfg_visualize; mod record_consumed_borrow; pub fn compute_drop_ranges<'a, 'tcx>( fcx: &'a FnCtxt<'a, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>, ) -> DropRanges { if fcx.sess().opts.unstable_opts.drop_tracking { let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body); let typeck_results = &fcx.typeck_results.borrow(); let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0); let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph( &fcx, typeck_results, fcx.param_env, consumed_borrowed_places, body, num_exprs, ); drop_ranges.propagate_to_fixpoint(); debug!("borrowed_temporaries = {borrowed_temporaries:?}"); DropRanges { tracked_value_map: drop_ranges.tracked_value_map, nodes: drop_ranges.nodes, borrowed_temporaries: Some(borrowed_temporaries), } } else { // If drop range tracking is not enabled, skip all the analysis and produce an // empty set of DropRanges. DropRanges { tracked_value_map: UnordMap::default(), nodes: IndexVec::new(), borrowed_temporaries: None, } } } /// Applies `f` to consumable node in the HIR subtree pointed to by `place`. /// /// This includes the place itself, and if the place is a reference to a local /// variable then `f` is also called on the HIR node for that variable as well. /// /// For example, if `place` points to `foo()`, then `f` is called once for the /// result of `foo`. On the other hand, if `place` points to `x` then `f` will /// be called both on the `ExprKind::Path` node that represents the expression /// as well as the HirId of the local `x` itself. fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) { f(place); let node = hir.find(place.hir_id()); if let Some(Node::Expr(expr)) = node { match expr.kind { hir::ExprKind::Path(hir::QPath::Resolved( _, hir::Path { res: hir::def::Res::Local(hir_id),.. }, )) => { f(TrackedValue::Variable(*hir_id)); } _ => (), } } } rustc_index::newtype_index! { #[debug_format = "id({})"] pub struct PostOrderId {} } rustc_index::newtype_index! 
{ #[debug_format = "hidx({})"] pub struct TrackedValueIndex {} } /// Identifies a value whose drop state we need to track. #[derive(PartialEq, Eq, Hash, Clone, Copy)] enum TrackedValue { /// Represents a named variable, such as a let binding, parameter, or upvar. /// /// The HirId points to the variable's definition site. Variable(HirId), /// A value produced as a result of an expression. /// /// The HirId points to the expression that returns this value. Temporary(HirId), } impl Debug for TrackedValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx
else { match self { Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"), Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"), } } }) } } impl TrackedValue { fn hir_id(&self) -> HirId { match self { TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id, } } fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self { match place_with_id.place.base { PlaceBase::Rvalue | PlaceBase::StaticItem => { TrackedValue::Temporary(place_with_id.hir_id) } PlaceBase::Local(hir_id) | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => { TrackedValue::Variable(hir_id) } } } } /// Represents a reason why we might not be able to convert a HirId or Place /// into a tracked value. #[derive(Debug)] enum TrackedValueConversionError { /// Place projections are not currently supported. /// /// The reasoning around these is kind of subtle, so we choose to be more /// conservative around these for now. There is no reason in theory we /// cannot support these, we just have not implemented it yet. PlaceProjectionsNotSupported, } impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue { type Error = TrackedValueConversionError; fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> { if !place_with_id.place.projections.is_empty() { debug!( "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.", place_with_id ); return Err(TrackedValueConversionError::PlaceProjectionsNotSupported); } Ok(TrackedValue::from_place_with_projections_allowed(place_with_id)) } } pub struct DropRanges { tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, nodes: IndexVec<PostOrderId, NodeInfo>, borrowed_temporaries: Option<UnordSet<HirId>>, } impl DropRanges { pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool { self.tracked_value_map .get(&TrackedValue::Temporary(hir_id)) .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id))) .cloned() .is_some_and(|tracked_value_id| { self.expect_node(location.into()).drop_state.contains(tracked_value_id) }) } pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool { if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true } } /// Returns a reference to the NodeInfo for a node, panicking if it does not exist fn expect_node(&self, id: PostOrderId) -> &NodeInfo { &self.nodes[id] } } /// Tracks information needed to compute drop ranges. struct DropRangesBuilder { /// The core of DropRangesBuilder is a set of nodes, which each represent /// one expression. We primarily refer to them by their index in a /// post-order traversal of the HIR tree, since this is what /// generator_interior uses to talk about yield positions. /// /// This IndexVec keeps the relevant details for each node. See the /// NodeInfo struct for more details, but this information includes things /// such as the set of control-flow successors, which variables are dropped /// or reinitialized, and whether each variable has been inferred to be /// known-dropped or potentially reinitialized at each point. nodes: IndexVec<PostOrderId, NodeInfo>, /// We refer to values whose drop state we are tracking by the HirId of /// where they are defined. Within a NodeInfo, however, we store the /// drop-state in a bit vector indexed by a HirIdIndex /// (see NodeInfo::drop_state). The hir_id_map field stores the mapping /// from HirIds to the HirIdIndex that is used to represent that value in /// bitvector.
tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, /// When building the control flow graph, we don't always know the /// post-order index of the target node at the point we encounter it. /// For example, this happens with break and continue. In those cases, /// we store a pair of the PostOrderId of the source and the HirId /// of the target. Once we have gathered all of these edges, we make a /// pass over the set of deferred edges (see process_deferred_edges in /// cfg_build.rs), look up the PostOrderId for the target (since now the /// post-order index for all nodes is known), and add missing control flow /// edges. deferred_edges: Vec<(PostOrderId, HirId)>, /// This maps HirIds of expressions to their post-order index. It is /// used in process_deferred_edges to correctly add back-edges. post_order_map: HirIdMap<PostOrderId>, } impl Debug for DropRangesBuilder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DropRanges") .field("hir_id_map", &self.tracked_value_map) .field("post_order_maps", &self.post_order_map) .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>()) .finish() } } /// DropRanges keeps track of what values are definitely dropped at each point in the code. /// /// Values of interest are defined by the hir_id of their place. Locations in code are identified /// by their index in the post-order traversal. At its core, DropRanges maps /// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely /// dropped at the point of the node identified by post_order_id. impl DropRangesBuilder { /// Returns the number of values (hir_ids) that are tracked fn num_values(&self) -> usize { self.tracked_value_map.len() } fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo { let size = self.num_values(); self.nodes.ensure_contains_elem(id, || NodeInfo::new(size)) } fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) { trace!("adding control edge from {:?} to {:?}", from, to); self.node_mut(from).successors.push(to); } } #[derive(Debug)] struct NodeInfo { /// IDs of nodes that can follow this one in the control flow /// /// If the vec is empty, then control proceeds to the next node. successors: Vec<PostOrderId>, /// List of hir_ids that are dropped by this node. drops: Vec<TrackedValueIndex>, /// List of hir_ids that are reinitialized by this node. reinits: Vec<TrackedValueIndex>, /// Set of values that are definitely dropped at this point. drop_state: BitSet<TrackedValueIndex>, } impl NodeInfo { fn new(num_values: usize) -> Self { Self { successors: vec![], drops: vec![], reinits: vec![], drop_state: BitSet::new_filled(num_values), } } }
{ write!(f, "{}", tcx.hir().node_to_string(self.hir_id())) }
conditional_block
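The rustc_index::newtype_index! invocations above generate distinct index types (PostOrderId, TrackedValueIndex) so the two index spaces cannot be mixed up at compile time. Outside rustc the same idea takes a few lines of plain Rust; this sketch is illustrative, not the macro's actual expansion:

// Minimal typed-index sketch: each wrapper is a distinct type over usize.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct PostOrderId(usize);

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct TrackedValueIndex(usize);

struct Nodes<T> {
    data: Vec<T>,
}

impl<T> Nodes<T> {
    // Only a PostOrderId can index this container; passing a
    // TrackedValueIndex here would be a compile-time type error.
    fn get(&self, id: PostOrderId) -> &T {
        &self.data[id.0]
    }
}

fn main() {
    let nodes = Nodes { data: vec!["a", "b"] };
    println!("{}", nodes.get(PostOrderId(1))); // prints "b"
}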
mod.rs
//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped //! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the //! generator type. See `InteriorVisitor::record` for where the results of this analysis are used. //! //! There are three phases to this analysis: //! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed. //! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized, //! and also build a control flow graph. //! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through //! the CFG and find the exact points where we know a value is definitely dropped. //! //! The end result is a data structure that maps the post-order index of each node in the HIR tree //! to a set of values that are known to be dropped at that location. use self::cfg_build::build_control_flow_graph; use self::record_consumed_borrow::find_consumed_and_borrowed; use crate::FnCtxt; use hir::def_id::DefId; use hir::{Body, HirId, HirIdMap, Node}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_hir as hir; use rustc_index::bit_set::BitSet; use rustc_index::IndexVec; use rustc_middle::hir::map::Map; use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId}; use rustc_middle::ty; use std::collections::BTreeMap; use std::fmt::Debug; mod cfg_build; mod cfg_propagate; mod cfg_visualize; mod record_consumed_borrow; pub fn compute_drop_ranges<'a, 'tcx>( fcx: &'a FnCtxt<'a, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>, ) -> DropRanges { if fcx.sess().opts.unstable_opts.drop_tracking { let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body); let typeck_results = &fcx.typeck_results.borrow(); let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0); let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph( &fcx, typeck_results, fcx.param_env, consumed_borrowed_places, body, num_exprs, ); drop_ranges.propagate_to_fixpoint(); debug!("borrowed_temporaries = {borrowed_temporaries:?}"); DropRanges { tracked_value_map: drop_ranges.tracked_value_map, nodes: drop_ranges.nodes, borrowed_temporaries: Some(borrowed_temporaries), } } else { // If drop range tracking is not enabled, skip all the analysis and produce an // empty set of DropRanges. DropRanges { tracked_value_map: UnordMap::default(), nodes: IndexVec::new(), borrowed_temporaries: None, } } } /// Applies `f` to consumable node in the HIR subtree pointed to by `place`. /// /// This includes the place itself, and if the place is a reference to a local /// variable then `f` is also called on the HIR node for that variable as well. /// /// For example, if `place` points to `foo()`, then `f` is called once for the /// result of `foo`. On the other hand, if `place` points to `x` then `f` will /// be called both on the `ExprKind::Path` node that represents the expression /// as well as the HirId of the local `x` itself. fn for_each_consumable(hir: Map<'_>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) { f(place); let node = hir.find(place.hir_id()); if let Some(Node::Expr(expr)) = node { match expr.kind { hir::ExprKind::Path(hir::QPath::Resolved( _, hir::Path { res: hir::def::Res::Local(hir_id),.. }, )) => { f(TrackedValue::Variable(*hir_id)); } _ => (), } } } rustc_index::newtype_index! { #[debug_format = "id({})"] pub struct PostOrderId {} } rustc_index::newtype_index! 
{ #[debug_format = "hidx({})"] pub struct TrackedValueIndex {} } /// Identifies a value whose drop state we need to track. #[derive(PartialEq, Eq, Hash, Clone, Copy)] enum TrackedValue { /// Represents a named variable, such as a let binding, parameter, or upvar. /// /// The HirId points to the variable's definition site. Variable(HirId), /// A value produced as a result of an expression. /// /// The HirId points to the expression that returns this value. Temporary(HirId), } impl Debug for TrackedValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { write!(f, "{}", tcx.hir().node_to_string(self.hir_id())) } else { match self { Self::Variable(hir_id) => write!(f, "Variable({hir_id:?})"), Self::Temporary(hir_id) => write!(f, "Temporary({hir_id:?})"), } } }) } } impl TrackedValue { fn hir_id(&self) -> HirId { match self { TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id, } } fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self { match place_with_id.place.base { PlaceBase::Rvalue | PlaceBase::StaticItem => { TrackedValue::Temporary(place_with_id.hir_id) } PlaceBase::Local(hir_id) | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => { TrackedValue::Variable(hir_id) } } } } /// Represents a reason why we might not be able to convert a HirId or Place /// into a tracked value. #[derive(Debug)] enum TrackedValueConversionError { /// Place projections are not currently supported. /// /// The reasoning around these is kind of subtle, so we choose to be more /// conservative around these for now. There is no reason in theory we /// cannot support these, we just have not implemented it yet. PlaceProjectionsNotSupported, } impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue { type Error = TrackedValueConversionError; fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> { if !place_with_id.place.projections.is_empty() { debug!( "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.", place_with_id ); return Err(TrackedValueConversionError::PlaceProjectionsNotSupported); } Ok(TrackedValue::from_place_with_projections_allowed(place_with_id)) } } pub struct DropRanges { tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, nodes: IndexVec<PostOrderId, NodeInfo>, borrowed_temporaries: Option<UnordSet<HirId>>, } impl DropRanges { pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool { self.tracked_value_map .get(&TrackedValue::Temporary(hir_id)) .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id))) .cloned() .is_some_and(|tracked_value_id| { self.expect_node(location.into()).drop_state.contains(tracked_value_id) }) } pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool { if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true } } /// Returns a reference to the NodeInfo for a node, panicking if it does not exist fn expect_node(&self, id: PostOrderId) -> &NodeInfo { &self.nodes[id] } } /// Tracks information needed to compute drop ranges. struct DropRangesBuilder { /// The core of DropRangesBuilder is a set of nodes, which each represent /// one expression. We primarily refer to them by their index in a /// post-order traversal of the HIR tree, since this is what /// generator_interior uses to talk about yield positions. /// /// This IndexVec keeps the relevant details for each node.
See the /// NodeInfo struct for more details, but this information includes things /// such as the set of control-flow successors, which variables are dropped /// or reinitialized, and whether each variable has been inferred to be /// known-dropped or potentially reinitialized at each point. nodes: IndexVec<PostOrderId, NodeInfo>, /// We refer to values whose drop state we are tracking by the HirId of /// where they are defined. Within a NodeInfo, however, we store the /// drop-state in a bit vector indexed by a HirIdIndex /// (see NodeInfo::drop_state). The hir_id_map field stores the mapping /// from HirIds to the HirIdIndex that is used to represent that value in /// bitvector. tracked_value_map: UnordMap<TrackedValue, TrackedValueIndex>, /// When building the control flow graph, we don't always know the /// post-order index of the target node at the point we encounter it. /// For example, this happens with break and continue. In those cases, /// we store a pair of the PostOrderId of the source and the HirId /// of the target. Once we have gathered all of these edges, we make a /// pass over the set of deferred edges (see process_deferred_edges in /// cfg_build.rs), look up the PostOrderId for the target (since now the /// post-order index for all nodes is known), and add missing control flow /// edges. deferred_edges: Vec<(PostOrderId, HirId)>, /// This maps HirIds of expressions to their post-order index. It is /// used in process_deferred_edges to correctly add back-edges. post_order_map: HirIdMap<PostOrderId>, } impl Debug for DropRangesBuilder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DropRanges") .field("hir_id_map", &self.tracked_value_map) .field("post_order_maps", &self.post_order_map) .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>()) .finish() } } /// DropRanges keeps track of what values are definitely dropped at each point in the code. /// /// Values of interest are defined by the hir_id of their place. Locations in code are identified /// by their index in the post-order traversal. At its core, DropRanges maps /// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely /// dropped at the point of the node identified by post_order_id. impl DropRangesBuilder { /// Returns the number of values (hir_ids) that are tracked fn num_values(&self) -> usize
fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo { let size = self.num_values(); self.nodes.ensure_contains_elem(id, || NodeInfo::new(size)) } fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) { trace!("adding control edge from {:?} to {:?}", from, to); self.node_mut(from).successors.push(to); } } #[derive(Debug)] struct NodeInfo { /// IDs of nodes that can follow this one in the control flow /// /// If the vec is empty, then control proceeds to the next node. successors: Vec<PostOrderId>, /// List of hir_ids that are dropped by this node. drops: Vec<TrackedValueIndex>, /// List of hir_ids that are reinitialized by this node. reinits: Vec<TrackedValueIndex>, /// Set of values that are definitely dropped at this point. drop_state: BitSet<TrackedValueIndex>, } impl NodeInfo { fn new(num_values: usize) -> Self { Self { successors: vec![], drops: vec![], reinits: vec![], drop_state: BitSet::new_filled(num_values), } } }
{ self.tracked_value_map.len() }
identifier_body
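node_mut above grows the nodes IndexVec on demand via ensure_contains_elem, so a control edge can target a node that has not been visited yet. A plain-Vec sketch of that grow-on-access pattern follows; the free function is a hypothetical stand-in mirroring the call shape used above, not rustc's IndexVec API:

// Grow-on-demand mutable access: extend with fill() until `id` is in range.
fn ensure_contains_elem<T>(v: &mut Vec<T>, id: usize, mut fill: impl FnMut() -> T) -> &mut T {
    while v.len() <= id {
        v.push(fill());
    }
    &mut v[id]
}

fn main() {
    let mut successors: Vec<Vec<usize>> = Vec::new();
    // Touching node 3 first transparently creates nodes 0..=3.
    ensure_contains_elem(&mut successors, 3, Vec::new).push(7);
    assert_eq!(successors.len(), 4);
    assert_eq!(successors[3], vec![7]);
}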
mod.rs
//! Code to compute example inputs given a backtrace. use crate::grammar::repr::*; use crate::message::builder::InlineBuilder; use crate::message::Content; use crate::style::Style; use crate::tls::Tls; use ascii_canvas::AsciiView; use std::{ cmp::Ordering, fmt::{Debug, Error, Formatter}, }; #[cfg(test)] mod test; /// An "example" input and the way it was derived. This can be /// serialized into useful text. For example, it might represent /// something like this: /// /// ``` /// Looking at /// | /// v /// Ty "->" Ty "->" Ty /// | | | /// +-Ty-----+ | /// | | /// +-Ty-------------+ /// ``` /// /// The top-line is the `symbols` vector. The groupings below are /// stored in the `reductions` vector, in order from smallest to /// largest (they are always properly nested). The `cursor` field /// indicates the current lookahead token. /// /// The `symbols` vector is actually `Option<Symbol>` to account /// for empty reductions: /// /// ``` /// A B /// | | | | /// | +-Y-+ | /// +-Z-----+ /// ``` /// /// The "empty space" between A and B would be represented as `None`. #[derive(Clone, Debug)] pub struct Example { pub symbols: Vec<ExampleSymbol>, pub cursor: usize, pub reductions: Vec<Reduction>, } #[derive(Clone, Debug, PartialEq, Eq)] pub enum ExampleSymbol { Symbol(Symbol), Epsilon, } #[derive(Copy, Clone, Default)] pub struct ExampleStyles { pub before_cursor: Style, pub on_cursor: Style, pub after_cursor: Style, } #[derive(Clone, Debug)] pub struct Reduction { pub start: usize, pub end: usize, pub nonterminal: NonterminalString, } impl Example { /// Length of each symbol. Each will need *at least* that amount /// of space. :) Measure in characters, under the assumption of a /// mono-spaced font. Also add a final `0` marker which will serve /// as the end position. fn lengths(&self) -> Vec<usize> { self.symbols .iter() .map(|s| match *s { ExampleSymbol::Symbol(ref s) => format!("{}", s).chars().count(), ExampleSymbol::Epsilon => 1, // display as " " }) .chain(Some(0)) .collect() } /// Extract a prefix of the list of symbols from this `Example` /// and make a styled list of them, like: /// /// Ty "->" Ty -> "Ty" pub fn to_symbol_list(&self, length: usize, styles: ExampleStyles) -> Box<dyn Content> { let mut builder = InlineBuilder::new().begin_spaced(); for (index, symbol) in self.symbols[..length].iter().enumerate() { let style = match index.cmp(&self.cursor) { Ordering::Less => styles.before_cursor, Ordering::Equal => match *symbol { ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor, ExampleSymbol::Symbol(Symbol::Nonterminal(_)) => styles.after_cursor, ExampleSymbol::Epsilon => styles.after_cursor, }, Ordering::Greater => styles.after_cursor, }; if let ExampleSymbol::Symbol(ref s) = symbol { builder = builder.push(s.clone()).styled(style); } } builder.end().indented().end() } /// Render the example into a styled diagram suitable for /// embedding in an error message. pub fn into_picture(self, styles: ExampleStyles) -> Box<dyn Content>
fn starting_positions(&self, lengths: &[usize]) -> Vec<usize> { lengths .iter() .scan(0, |counter, &len| { let start = *counter; // Leave space for "NT " (if "NT" is the name // of the nonterminal). *counter = start + len + 1; Some(start) }) .collect() } /// Start index where each symbol in the example should appear, /// measured in characters. These are spaced to leave enough room /// for the reductions below. fn positions(&self, lengths: &[usize]) -> Vec<usize> { // Initially, position each symbol with one space in between, // like: // // X Y Z let mut positions = self.starting_positions(lengths); // Adjust spacing to account for the nonterminal labels // we will have to add. It will display // like this: // // A1 B2 C3 D4 E5 F6 // | | // +-Label---+ // // But if the label is long we may have to adjust the spacing // of the covered items (here, we changed them to two spaces, // except the first gap, which got 3 spaces): // // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ for &Reduction { start, end, ref nonterminal, } in &self.reductions { let nt_len = format!("{}", nonterminal).chars().count(); // Number of symbols we are reducing. This should always // be non-zero because even in the case of a \epsilon // rule, we ought to have a `None` entry in the symbol array. let num_syms = end - start; assert!(num_syms > 0); // Let's use the expansion from above as our running example. // We start out with positions like this: // // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ // // But we want LongLabel to end at D4. No good. // Start of first symbol to be reduced. Here, 0. // // A1 B2 C3 D4 // ^ here let start_position = positions[start]; // End of last symbol to be reduced. Here, 11. // // A1 B2 C3 D4 E5 // ^ positions[end] // ^ here -- positions[end] - 1 let end_position = positions[end] - 1; // We need space to draw `+-Label-+` between // start_position and end_position. let required_len = nt_len + 4; // here, 15 let actual_len = end_position - start_position; // here, 10 if required_len < actual_len { continue; // Got enough space, all set. } // Have to add `difference` characters altogether. let difference = required_len - actual_len; // here, 4 // Increment over everything that is not part of this nonterminal. // In the example above, that is E5 and F6. shift(&mut positions[end..], difference); if num_syms > 1 { // If there is just one symbol being reduced here, // then we have shifted over the things that follow // it, and we are done. This would be a case like: // // X Y Z // | | // +-Label-+ // // (which maybe ought to be rendered slightly // differently). // // But if there are multiple symbols, we're not quite // done, because there would be an unsightly gap: // // (gaps) // | | | // v v v // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ // // we'd like to make things line up, so we have to // distribute that extra space internally by // increasing the "gaps" (marked above) as evenly as // possible (basically, full justification). // // We do this by dividing up the spaces evenly and // then taking the remainder `N` and distributing 1 // extra to the first N. let num_gaps = num_syms - 1; // number of gaps we can adjust. Here, 3. let amount = difference / num_gaps; // what to add to each gap. Here, 1. let extra = difference % num_gaps; // the remainder. Here, 1. // For the first `extra` symbols, give them amount + 1 // extra space. After that, just amount. (O(n^2). Sue me.)
for i in 0..extra { shift(&mut positions[start + 1 + i..end], amount + 1); } for i in extra..num_gaps { shift(&mut positions[start + 1 + i..end], amount); } } } positions } #[cfg(test)] pub fn paint_unstyled(&self) -> Vec<::ascii_canvas::Row> { let this = self.clone(); let content = this.into_picture(ExampleStyles::default()); let min_width = content.min_width(); let canvas = content.emit_to_canvas(min_width); canvas.to_strings() } fn paint_on(&self, styles: &ExampleStyles, positions: &[usize], view: &mut dyn AsciiView) { // Draw the brackets for each reduction: for (index, reduction) in self.reductions.iter().enumerate() { let start_column = positions[reduction.start]; let end_column = positions[reduction.end] - 1; let row = 1 + index; view.draw_vertical_line(0..row + 1, start_column); view.draw_vertical_line(0..row + 1, end_column - 1); view.draw_horizontal_line(row, start_column..end_column); } // Write the labels for each reduction. Do this after the // brackets so that ascii canvas can convert `|` to `+` // without interfering with the text (in case of weird overlap). let session = Tls::session(); for (index, reduction) in self.reductions.iter().enumerate() { let column = positions[reduction.start] + 2; let row = 1 + index; view.write_chars( row, column, reduction.nonterminal.to_string().chars(), session.nonterminal_symbol, ); } // Write the labels on top: // A1 B2 C3 D4 E5 F6 self.paint_symbols_on(&self.symbols, positions, styles, view); } fn paint_symbols_on( &self, symbols: &[ExampleSymbol], positions: &[usize], styles: &ExampleStyles, view: &mut dyn AsciiView, ) { let session = Tls::session(); for (index, ex_symbol) in symbols.iter().enumerate() { let style = match index.cmp(&self.cursor) { Ordering::Less => styles.before_cursor, Ordering::Equal => { // Only display actual terminals in the "on-cursor" // font, because it might be misleading to show a // nonterminal that way. Really it'd be nice to expand // so that the cursor is always a terminal. 
match *ex_symbol { ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor, _ => styles.after_cursor, } } Ordering::Greater => styles.after_cursor, }; let column = positions[index]; match *ex_symbol { ExampleSymbol::Symbol(Symbol::Terminal(ref term)) => { view.write_chars( 0, column, term.to_string().chars(), style.with(session.terminal_symbol), ); } ExampleSymbol::Symbol(Symbol::Nonterminal(ref nt)) => { view.write_chars( 0, column, nt.to_string().chars(), style.with(session.nonterminal_symbol), ); } ExampleSymbol::Epsilon => {} } } } } struct ExamplePicture { example: Example, positions: Vec<usize>, styles: ExampleStyles, } impl Content for ExamplePicture { fn min_width(&self) -> usize { *self.positions.last().unwrap() } fn emit(&self, view: &mut dyn AsciiView) { self.example.paint_on(&self.styles, &self.positions, view); } fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<dyn Content>>) { wrap_items.push(self); } } impl Debug for ExamplePicture { fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { Debug::fmt(&self.example, fmt) } } fn shift(positions: &mut [usize], amount: usize) { for position in positions { *position += amount; } } impl ExampleStyles { pub fn ambig() -> Self { let session = Tls::session(); ExampleStyles { before_cursor: session.ambig_symbols, on_cursor: session.ambig_symbols, after_cursor: session.ambig_symbols, } } pub fn new() -> Self { let session = Tls::session(); ExampleStyles { before_cursor: session.observed_symbols, on_cursor: session.cursor_symbol, after_cursor: session.unobserved_symbols, } } }
{ let lengths = self.lengths(); let positions = self.positions(&lengths); InlineBuilder::new() .push(Box::new(ExamplePicture { example: self, positions, styles, })) .indented() .end() }
identifier_body
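Note: the `positions` routine in the record above justifies symbol columns by splitting the leftover width `difference` across the `num_gaps` gaps between reduced symbols, giving the first `difference % num_gaps` gaps one extra column. A minimal standalone sketch of that distribution step, with illustrative names and a simplified layout (symbol start columns only, no trailing sentinel), not the source's exact types:

fn shift(positions: &mut [usize], amount: usize) {
    for p in positions {
        *p += amount;
    }
}

// Distribute `difference` extra columns over the gaps between the symbols
// at indices `start..end`, widening earlier gaps first (full justification).
// Assumes at least two symbols, so num_gaps is non-zero.
fn distribute(positions: &mut [usize], start: usize, end: usize, difference: usize) {
    let num_gaps = end - start - 1;
    let amount = difference / num_gaps; // base widening per gap
    let extra = difference % num_gaps;  // first `extra` gaps get one more
    for i in 0..extra {
        shift(&mut positions[start + 1 + i..end], amount + 1);
    }
    for i in extra..num_gaps {
        shift(&mut positions[start + 1 + i..end], amount);
    }
}

fn main() {
    // Four symbols, three gaps, four columns to absorb: 4 / 3 = 1 with
    // remainder 1, so the first gap grows by 2 and the other two by 1.
    let mut positions = vec![0, 3, 6, 9];
    distribute(&mut positions, 0, 4, 4);
    assert_eq!(positions, vec![0, 5, 9, 13]);
}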
mod.rs
//! Code to compute example inputs given a backtrace. use crate::grammar::repr::*; use crate::message::builder::InlineBuilder; use crate::message::Content; use crate::style::Style; use crate::tls::Tls; use ascii_canvas::AsciiView; use std::{ cmp::Ordering, fmt::{Debug, Error, Formatter}, }; #[cfg(test)] mod test; /// An "example" input and the way it was derived. This can be /// serialized into useful text. For example, it might represent /// something like this: /// /// ``` /// Looking at /// | /// v /// Ty "->" Ty "->" Ty /// | | | /// +-Ty-----+ | /// | | /// +-Ty-------------+ /// ``` /// /// The top-line is the `symbols` vector. The groupings below are /// stored in the `reductions` vector, in order from smallest to /// largest (they are always properly nested). The `cursor` field /// indicates the current lookahead token. /// /// The `symbols` vector is actually `Option<Symbol>` to account /// for empty reductions: /// /// ``` /// A B /// | | | | /// | +-Y-+ | /// +-Z-----+ /// ``` /// /// The "empty space" between A and B would be represented as `None`. #[derive(Clone, Debug)] pub struct Example { pub symbols: Vec<ExampleSymbol>, pub cursor: usize, pub reductions: Vec<Reduction>, } #[derive(Clone, Debug, PartialEq, Eq)] pub enum ExampleSymbol { Symbol(Symbol), Epsilon, } #[derive(Copy, Clone, Default)] pub struct ExampleStyles { pub before_cursor: Style, pub on_cursor: Style, pub after_cursor: Style, } #[derive(Clone, Debug)] pub struct Reduction { pub start: usize, pub end: usize, pub nonterminal: NonterminalString, } impl Example { /// Length of each symbol. Each will need *at least* that amount /// of space. :) Measure in characters, under the assumption of a /// mono-spaced font. Also add a final `0` marker which will serve /// as the end position. fn lengths(&self) -> Vec<usize> { self.symbols .iter() .map(|s| match *s { ExampleSymbol::Symbol(ref s) => format!("{}", s).chars().count(), ExampleSymbol::Epsilon => 1, // display as " " }) .chain(Some(0)) .collect() } /// Extract a prefix of the list of symbols from this `Example` /// and make a styled list of them, like: /// /// Ty "->" Ty -> "Ty" pub fn to_symbol_list(&self, length: usize, styles: ExampleStyles) -> Box<dyn Content> { let mut builder = InlineBuilder::new().begin_spaced(); for (index, symbol) in self.symbols[..length].iter().enumerate() { let style = match index.cmp(&self.cursor) { Ordering::Less => styles.before_cursor, Ordering::Equal => match *symbol { ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor, ExampleSymbol::Symbol(Symbol::Nonterminal(_)) => styles.after_cursor, ExampleSymbol::Epsilon => styles.after_cursor, }, Ordering::Greater => styles.after_cursor, }; if let ExampleSymbol::Symbol(ref s) = symbol { builder = builder.push(s.clone()).styled(style); } } builder.end().indented().end() } /// Render the example into a styled diagram suitable for /// embedding in an error message. pub fn into_picture(self, styles: ExampleStyles) -> Box<dyn Content> { let lengths = self.lengths(); let positions = self.positions(&lengths); InlineBuilder::new() .push(Box::new(ExamplePicture { example: self, positions, styles, })) .indented() .end() } fn starting_positions(&self, lengths: &[usize]) -> Vec<usize> { lengths .iter() .scan(0, |counter, &len| { let start = *counter; // Leave space for "NT " (if "NT" is the name // of the nonterminal). *counter = start + len + 1; Some(start) }) .collect() } /// Start index where each symbol in the example should appear, /// measured in characters. 
These are spaced to leave enough room /// for the reductions below. fn positions(&self, lengths: &[usize]) -> Vec<usize> { // Initially, position each symbol with one space in between, // like: // // X Y Z let mut positions = self.starting_positions(lengths); // Adjust spacing to account for the nonterminal labels // we will have to add. It will display // like this: // // A1 B2 C3 D4 E5 F6 // | | // +-Label---+ // // But if the label is long we may have to adjust the spacing // of the covered items (here, we changed them to two spaces, // except the first gap, which got 3 spaces): // // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ for &Reduction { start, end, ref nonterminal, } in &self.reductions { let nt_len = format!("{}", nonterminal).chars().count(); // Number of symbols we are reducing. This should always // be non-zero because even in the case of a \epsilon // rule, we ought to be have a `None` entry in the symbol array. let num_syms = end - start; assert!(num_syms > 0); // Let's use the expansion from above as our running example. // We start out with positions like this: // // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ // // But we want LongLabel to end at D4. No good. // Start of first symbol to be reduced. Here, 0. // // A1 B2 C3 D4 // ^ here let start_position = positions[start]; // End of last symbol to be reduced. Here, 11. // // A1 B2 C3 D4 E5 // ^ positions[end] // ^ here -- positions[end] - 1 let end_position = positions[end] - 1; // We need space to draw `+-Label-+` between // start_position and end_position. let required_len = nt_len + 4; // here, 15 let actual_len = end_position - start_position; // here, 10 if required_len < actual_len { continue; // Got enough space, all set. } // Have to add `difference` characters altogether. let difference = required_len - actual_len; // here, 4 // Increment over everything that is not part of this nonterminal. // In the example above, that is E5 and F6. shift(&mut positions[end..], difference); if num_syms > 1 { // If there is just one symbol being reduced here, // then we have shifted over the things that follow // it, and we are done. This would be a case like: // // X Y Z // | | // +-Label-+ // // (which maybe ought to be rendered slightly // differently). // // But if there are multiple symbols, we're not quite // done, because there would be an unsightly gap: // // (gaps) // | | | // v v v // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ // // we'd like to make things line up, so we have to // distribute that extra space internally by // increasing the "gaps" (marked above) as evenly as // possible (basically, full justification). // // We do this by dividing up the spaces evenly and // then taking the remainder `N` and distributing 1 // extra to the first N. let num_gaps = num_syms - 1; // number of gaps we can adjust. Here, 3. let amount = difference / num_gaps; // what to add to each gap. Here, 1. let extra = difference % num_gaps; // the remainder. Here, 1. // For the first `extra` symbols, give them amount + 1 // extra space. After that, just amount. (O(n^2). Sue me.) for i in 0..extra { shift(&mut positions[start + 1 + i..end], amount + 1); } for i in extra..num_gaps { shift(&mut positions[start + 1 + i..end], amount); } } } positions } #[cfg(test)] pub fn
(&self) -> Vec<::ascii_canvas::Row> { let this = self.clone(); let content = this.into_picture(ExampleStyles::default()); let min_width = content.min_width(); let canvas = content.emit_to_canvas(min_width); canvas.to_strings() } fn paint_on(&self, styles: &ExampleStyles, positions: &[usize], view: &mut dyn AsciiView) { // Draw the brackets for each reduction: for (index, reduction) in self.reductions.iter().enumerate() { let start_column = positions[reduction.start]; let end_column = positions[reduction.end] - 1; let row = 1 + index; view.draw_vertical_line(0..row + 1, start_column); view.draw_vertical_line(0..row + 1, end_column - 1); view.draw_horizontal_line(row, start_column..end_column); } // Write the labels for each reduction. Do this after the // brackets so that ascii canvas can convert `|` to `+` // without interfering with the text (in case of weird overlap). let session = Tls::session(); for (index, reduction) in self.reductions.iter().enumerate() { let column = positions[reduction.start] + 2; let row = 1 + index; view.write_chars( row, column, reduction.nonterminal.to_string().chars(), session.nonterminal_symbol, ); } // Write the labels on top: // A1 B2 C3 D4 E5 F6 self.paint_symbols_on(&self.symbols, positions, styles, view); } fn paint_symbols_on( &self, symbols: &[ExampleSymbol], positions: &[usize], styles: &ExampleStyles, view: &mut dyn AsciiView, ) { let session = Tls::session(); for (index, ex_symbol) in symbols.iter().enumerate() { let style = match index.cmp(&self.cursor) { Ordering::Less => styles.before_cursor, Ordering::Equal => { // Only display actual terminals in the "on-cursor" // font, because it might be misleading to show a // nonterminal that way. Really it'd be nice to expand // so that the cursor is always a terminal. match *ex_symbol { ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor, _ => styles.after_cursor, } } Ordering::Greater => styles.after_cursor, }; let column = positions[index]; match *ex_symbol { ExampleSymbol::Symbol(Symbol::Terminal(ref term)) => { view.write_chars( 0, column, term.to_string().chars(), style.with(session.terminal_symbol), ); } ExampleSymbol::Symbol(Symbol::Nonterminal(ref nt)) => { view.write_chars( 0, column, nt.to_string().chars(), style.with(session.nonterminal_symbol), ); } ExampleSymbol::Epsilon => {} } } } } struct ExamplePicture { example: Example, positions: Vec<usize>, styles: ExampleStyles, } impl Content for ExamplePicture { fn min_width(&self) -> usize { *self.positions.last().unwrap() } fn emit(&self, view: &mut dyn AsciiView) { self.example.paint_on(&self.styles, &self.positions, view); } fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<dyn Content>>) { wrap_items.push(self); } } impl Debug for ExamplePicture { fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { Debug::fmt(&self.example, fmt) } } fn shift(positions: &mut [usize], amount: usize) { for position in positions { *position += amount; } } impl ExampleStyles { pub fn ambig() -> Self { let session = Tls::session(); ExampleStyles { before_cursor: session.ambig_symbols, on_cursor: session.ambig_symbols, after_cursor: session.ambig_symbols, } } pub fn new() -> Self { let session = Tls::session(); ExampleStyles { before_cursor: session.observed_symbols, on_cursor: session.cursor_symbol, after_cursor: session.unobserved_symbols, } } }
paint_unstyled
identifier_name
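Note: with fim_type `identifier_name` the middle field is a single identifier (`paint_unstyled` here) cut out between prefix and suffix. A sketch of how a consumer might rebuild the file and serialize the record for FIM-style training; the sentinel marker strings are placeholder assumptions, not tokens defined by this dataset:

/// Rebuild the original file from a record: the middle slots exactly
/// between prefix and suffix.
fn reconstruct(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

/// Lay the record out in prefix-suffix-middle order with sentinel markers,
/// as FIM training pipelines commonly do. The literal markers below are
/// illustrative placeholders.
fn to_psm(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>{middle}")
}

fn main() {
    let rebuilt = reconstruct("#[cfg(test)] pub fn ", "paint_unstyled", "(&self) ...");
    assert!(rebuilt.contains("pub fn paint_unstyled(&self)"));
    println!("{}", to_psm("pub fn ", "paint_unstyled", "(&self) ..."));
}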
mod.rs
//! Code to compute example inputs given a backtrace. use crate::grammar::repr::*; use crate::message::builder::InlineBuilder; use crate::message::Content; use crate::style::Style; use crate::tls::Tls; use ascii_canvas::AsciiView; use std::{ cmp::Ordering, fmt::{Debug, Error, Formatter}, }; #[cfg(test)] mod test; /// An "example" input and the way it was derived. This can be /// serialized into useful text. For example, it might represent /// something like this: /// /// ``` /// Looking at /// | /// v /// Ty "->" Ty "->" Ty /// | | | /// +-Ty-----+ | /// | | /// +-Ty-------------+
/// /// The top-line is the `symbols` vector. The groupings below are /// stored in the `reductions` vector, in order from smallest to /// largest (they are always properly nested). The `cursor` field /// indicates the current lookahead token. /// /// The `symbols` vector is actually `Option<Symbol>` to account /// for empty reductions: /// /// ``` /// A B /// | | | | /// | +-Y-+ | /// +-Z-----+ /// ``` /// /// The "empty space" between A and B would be represented as `None`. #[derive(Clone, Debug)] pub struct Example { pub symbols: Vec<ExampleSymbol>, pub cursor: usize, pub reductions: Vec<Reduction>, } #[derive(Clone, Debug, PartialEq, Eq)] pub enum ExampleSymbol { Symbol(Symbol), Epsilon, } #[derive(Copy, Clone, Default)] pub struct ExampleStyles { pub before_cursor: Style, pub on_cursor: Style, pub after_cursor: Style, } #[derive(Clone, Debug)] pub struct Reduction { pub start: usize, pub end: usize, pub nonterminal: NonterminalString, } impl Example { /// Length of each symbol. Each will need *at least* that amount /// of space. :) Measure in characters, under the assumption of a /// mono-spaced font. Also add a final `0` marker which will serve /// as the end position. fn lengths(&self) -> Vec<usize> { self.symbols .iter() .map(|s| match *s { ExampleSymbol::Symbol(ref s) => format!("{}", s).chars().count(), ExampleSymbol::Epsilon => 1, // display as " " }) .chain(Some(0)) .collect() } /// Extract a prefix of the list of symbols from this `Example` /// and make a styled list of them, like: /// /// Ty "->" Ty -> "Ty" pub fn to_symbol_list(&self, length: usize, styles: ExampleStyles) -> Box<dyn Content> { let mut builder = InlineBuilder::new().begin_spaced(); for (index, symbol) in self.symbols[..length].iter().enumerate() { let style = match index.cmp(&self.cursor) { Ordering::Less => styles.before_cursor, Ordering::Equal => match *symbol { ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor, ExampleSymbol::Symbol(Symbol::Nonterminal(_)) => styles.after_cursor, ExampleSymbol::Epsilon => styles.after_cursor, }, Ordering::Greater => styles.after_cursor, }; if let ExampleSymbol::Symbol(ref s) = symbol { builder = builder.push(s.clone()).styled(style); } } builder.end().indented().end() } /// Render the example into a styled diagram suitable for /// embedding in an error message. pub fn into_picture(self, styles: ExampleStyles) -> Box<dyn Content> { let lengths = self.lengths(); let positions = self.positions(&lengths); InlineBuilder::new() .push(Box::new(ExamplePicture { example: self, positions, styles, })) .indented() .end() } fn starting_positions(&self, lengths: &[usize]) -> Vec<usize> { lengths .iter() .scan(0, |counter, &len| { let start = *counter; // Leave space for "NT " (if "NT" is the name // of the nonterminal). *counter = start + len + 1; Some(start) }) .collect() } /// Start index where each symbol in the example should appear, /// measured in characters. These are spaced to leave enough room /// for the reductions below. fn positions(&self, lengths: &[usize]) -> Vec<usize> { // Initially, position each symbol with one space in between, // like: // // X Y Z let mut positions = self.starting_positions(lengths); // Adjust spacing to account for the nonterminal labels // we will have to add. 
It will display // like this: // // A1 B2 C3 D4 E5 F6 // | | // +-Label---+ // // But if the label is long we may have to adjust the spacing // of the covered items (here, we changed them to two spaces, // except the first gap, which got 3 spaces): // // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ for &Reduction { start, end, ref nonterminal, } in &self.reductions { let nt_len = format!("{}", nonterminal).chars().count(); // Number of symbols we are reducing. This should always // be non-zero because even in the case of a \epsilon // rule, we ought to be have a `None` entry in the symbol array. let num_syms = end - start; assert!(num_syms > 0); // Let's use the expansion from above as our running example. // We start out with positions like this: // // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ // // But we want LongLabel to end at D4. No good. // Start of first symbol to be reduced. Here, 0. // // A1 B2 C3 D4 // ^ here let start_position = positions[start]; // End of last symbol to be reduced. Here, 11. // // A1 B2 C3 D4 E5 // ^ positions[end] // ^ here -- positions[end] - 1 let end_position = positions[end] - 1; // We need space to draw `+-Label-+` between // start_position and end_position. let required_len = nt_len + 4; // here, 15 let actual_len = end_position - start_position; // here, 10 if required_len < actual_len { continue; // Got enough space, all set. } // Have to add `difference` characters altogether. let difference = required_len - actual_len; // here, 4 // Increment over everything that is not part of this nonterminal. // In the example above, that is E5 and F6. shift(&mut positions[end..], difference); if num_syms > 1 { // If there is just one symbol being reduced here, // then we have shifted over the things that follow // it, and we are done. This would be a case like: // // X Y Z // | | // +-Label-+ // // (which maybe ought to be rendered slightly // differently). // // But if there are multiple symbols, we're not quite // done, because there would be an unsightly gap: // // (gaps) // | | | // v v v // A1 B2 C3 D4 E5 F6 // | | // +-LongLabel22-+ // // we'd like to make things line up, so we have to // distribute that extra space internally by // increasing the "gaps" (marked above) as evenly as // possible (basically, full justification). // // We do this by dividing up the spaces evenly and // then taking the remainder `N` and distributing 1 // extra to the first N. let num_gaps = num_syms - 1; // number of gaps we can adjust. Here, 3. let amount = difference / num_gaps; // what to add to each gap. Here, 1. let extra = difference % num_gaps; // the remainder. Here, 1. // For the first `extra` symbols, give them amount + 1 // extra space. After that, just amount. (O(n^2). Sue me.) 
for i in 0..extra { shift(&mut positions[start + 1 + i..end], amount + 1); } for i in extra..num_gaps { shift(&mut positions[start + 1 + i..end], amount); } } } positions } #[cfg(test)] pub fn paint_unstyled(&self) -> Vec<::ascii_canvas::Row> { let this = self.clone(); let content = this.into_picture(ExampleStyles::default()); let min_width = content.min_width(); let canvas = content.emit_to_canvas(min_width); canvas.to_strings() } fn paint_on(&self, styles: &ExampleStyles, positions: &[usize], view: &mut dyn AsciiView) { // Draw the brackets for each reduction: for (index, reduction) in self.reductions.iter().enumerate() { let start_column = positions[reduction.start]; let end_column = positions[reduction.end] - 1; let row = 1 + index; view.draw_vertical_line(0..row + 1, start_column); view.draw_vertical_line(0..row + 1, end_column - 1); view.draw_horizontal_line(row, start_column..end_column); } // Write the labels for each reduction. Do this after the // brackets so that ascii canvas can convert `|` to `+` // without interfering with the text (in case of weird overlap). let session = Tls::session(); for (index, reduction) in self.reductions.iter().enumerate() { let column = positions[reduction.start] + 2; let row = 1 + index; view.write_chars( row, column, reduction.nonterminal.to_string().chars(), session.nonterminal_symbol, ); } // Write the labels on top: // A1 B2 C3 D4 E5 F6 self.paint_symbols_on(&self.symbols, positions, styles, view); } fn paint_symbols_on( &self, symbols: &[ExampleSymbol], positions: &[usize], styles: &ExampleStyles, view: &mut dyn AsciiView, ) { let session = Tls::session(); for (index, ex_symbol) in symbols.iter().enumerate() { let style = match index.cmp(&self.cursor) { Ordering::Less => styles.before_cursor, Ordering::Equal => { // Only display actual terminals in the "on-cursor" // font, because it might be misleading to show a // nonterminal that way. Really it'd be nice to expand // so that the cursor is always a terminal. 
match *ex_symbol { ExampleSymbol::Symbol(Symbol::Terminal(_)) => styles.on_cursor, _ => styles.after_cursor, } } Ordering::Greater => styles.after_cursor, }; let column = positions[index]; match *ex_symbol { ExampleSymbol::Symbol(Symbol::Terminal(ref term)) => { view.write_chars( 0, column, term.to_string().chars(), style.with(session.terminal_symbol), ); } ExampleSymbol::Symbol(Symbol::Nonterminal(ref nt)) => { view.write_chars( 0, column, nt.to_string().chars(), style.with(session.nonterminal_symbol), ); } ExampleSymbol::Epsilon => {} } } } } struct ExamplePicture { example: Example, positions: Vec<usize>, styles: ExampleStyles, } impl Content for ExamplePicture { fn min_width(&self) -> usize { *self.positions.last().unwrap() } fn emit(&self, view: &mut dyn AsciiView) { self.example.paint_on(&self.styles, &self.positions, view); } fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<dyn Content>>) { wrap_items.push(self); } } impl Debug for ExamplePicture { fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> { Debug::fmt(&self.example, fmt) } } fn shift(positions: &mut [usize], amount: usize) { for position in positions { *position += amount; } } impl ExampleStyles { pub fn ambig() -> Self { let session = Tls::session(); ExampleStyles { before_cursor: session.ambig_symbols, on_cursor: session.ambig_symbols, after_cursor: session.ambig_symbols, } } pub fn new() -> Self { let session = Tls::session(); ExampleStyles { before_cursor: session.observed_symbols, on_cursor: session.cursor_symbol, after_cursor: session.unobserved_symbols, } } }
/// ```
random_line_split
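Note: the `starting_positions` helper in the record above turns per-symbol display widths into start columns with a running `scan`, leaving one blank column between symbols. A self-contained sketch of the same idea:

// Mirror of `starting_positions`: fold display widths into start columns,
// reserving one gap column after each symbol.
fn starting_positions(lengths: &[usize]) -> Vec<usize> {
    lengths
        .iter()
        .scan(0, |counter, &len| {
            let start = *counter;
            *counter = start + len + 1; // symbol width plus one gap
            Some(start)
        })
        .collect()
}

fn main() {
    // Widths for `Ty "->" Ty` plus the trailing 0 sentinel that serves as
    // the end position: columns come out as 0, 3, 8, 11.
    assert_eq!(starting_positions(&[2, 4, 2, 0]), vec![0, 3, 8, 11]);
}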
mod.rs
/* File_config_functionalities_pmz */ // conditions: /* exec trait: [alias:(file/any)] [operations] [path/name] [parameters/--options] Read:Content _ none Write:Content _ String : if use multi line content -> check len()::Enum -> i32|&str Update:Content _ String Delete:Content _ none Some file operations need parameters and some don't; */ mod parameters; pub use super::interface::{self, components, printer, template_engine, text_processing}; pub use super::utility::{self, ErrorHandler::FileError}; use parameters::filter_param; use printer::TermCfg; use std::{ fs::File, io::prelude::*, io::{self}, time::Duration, }; use template_engine::TemplateBuilder; use template_engine::TemplateEngine; type Params = [Vec<String>; 2]; use std::collections::hash_map::HashMap; #[derive(Debug, PartialOrd, PartialEq)] pub struct Fileconfig { name: String, access_at: Duration, query: String, parameters: Params, // content:Option<String>, content: String, path: String, } impl Fileconfig { pub fn new(param: &str, timestamp: fn() -> Duration) -> Result<Fileconfig, &'static str> { let mut command_chunk = Vec::new(); for res in param.trim().split_whitespace() { command_chunk.push(res.to_owned()); } if command_chunk.len() < 3 { return Err("Insufficient parameters to run file operations!"); } let capture = |index: usize| command_chunk.get(index).unwrap().to_owned(); let mut vc: [Vec<String>; 2] = [Vec::new(), Vec::new()]; if command_chunk.len() > 3 { let v_param = command_chunk[3..command_chunk.len()].to_owned(); let p_vec = v_param.into_iter().map(|p_str| String::from(p_str)); // let tup = (p_reg,quote_word); //^"[a-zA-Z-\s]+" let throw_reg_panic = |regex_err: regex::Error| panic!("Verification Errors! : {}", regex_err); //^<\w++>$ let p_reg = regex::Regex::new(r"^\--+[a-zA-Z]+").unwrap_or_else(|x| throw_reg_panic(x)); let quote_word = regex::Regex::new(r#"(["'])((\\{2})*|(.*?[^\\](\\{2})*))"#) .unwrap_or_else(|x| throw_reg_panic(x)); let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap(); p_vec.for_each(|x| { if match_inside_brac.is_match(&x) || quote_word.is_match(&x) { vc[0].push(x); } else if p_reg.is_match(&x) { vc[1].push(x); } }) } let result = Fileconfig { name: capture(2), query: capture(1), path: capture(2), access_at: timestamp(), parameters: vc, content: String::from("None"), }; Ok(result) } fn parse_quotation(&self, param: &Vec<String>) -> Vec<String> { let quoted = |st: &str| st.starts_with("\"") && st.ends_with("\""); param .into_iter() .filter(|st| quoted(st)) .map(|quote_par| { text_processing::CrumpCluster::break_chunk(&quote_par) .delete(0, Some(1)) .delete(quote_par.len() - 1, Some(quote_par.len())) .merge_crump() }) .collect::<Vec<String>>() } fn parse_bracket(&self, param: &Vec<String>) -> Vec<String> { let match_brack: &[_] = &['[', ']', '\"']; param .iter() //.filter(|general_param| match_inside_brac.is_match(general_param)) .flat_map(|bk_par| { let split_brack = bk_par .trim_matches(match_brack) .split_whitespace() .map(|f| f.to_string()) .collect::<Vec<String>>(); return split_brack; }) .collect::<Vec<String>>() // .filter(|bracketed|); } pub fn run(&self) -> Result<(), FileError> { let init_ptr = TermCfg::new() .set_attr(console::Attribute::Bold) .set_attr(console::Attribute::Italic); let print = init_ptr.gen_print(Some(console::Color::Blue)); let mut print_ln = init_ptr.gen_println(Some(console::Color::Blue)); let mut err_collector: Vec<FileError> = Vec::new(); let display_txt = |txt: &str| -> template_engine::Template { let mut tmp_engine = 
template_engine::TemplateFactory::init() .parse_in_template(txt) .create_movable() .collect(); let template = tmp_engine.padding(vec![1, 6, 6, 3]); template.to_owned() }; match self.query.as_str() { "update" => { // self.write(params[0], params[1].parse::<i32>().unwrap()); println!("what is your ct?"); let elim_quote = self.parse_bracket(&self.parameters[0]); self.update(&elim_quote[1], elim_quote[0].clone().as_str()); } "search" => { let unquote = self.parse_bracket(&self.parameters[0]); print_ln(&format!("<->statistics of word {:?}<->", unquote))?; let mut p = init_ptr.gen_println(Some(console::Color::Blue)); for quoted in unquote { let quoted = filter_param(&self.parameters[1], &quoted); let filtered = filter_param(&self.parameters[1], &quoted); match self.search(&filtered) { Ok(found_map) => { print!("Highligted-Text: \n"); let full_content = self.read().unwrap(); let total_line = found_map.len(); let mut key_holder = Vec::new(); found_map.iter().for_each(|(key, _)| key_holder.push(key)); let mut count = 0; let mut crumps = full_content .lines() .into_iter() .enumerate() .map(|(idx, x)| { (idx as i64, text_processing::CrumpCluster::break_chunk(x)) }) .collect::<Vec<(i64, text_processing::CrumpCluster)>>(); while count < found_map.len() { // each_indx.iter().for_each(|x|) crumps.iter_mut().for_each(|(loc, crump)| { if loc == key_holder[count] { let locations = found_map.get(loc).unwrap(); locations.into_iter().for_each(|(start, end)| { crump.delete(*start, Some(*end)); crump.insert( *start, &format!("--->\"{}\"<---", quoted.clone().trim(),) .trim(), ); }); } }); count += 1; } let fully_merged = crumps .iter() .map(|(_, crump)| { let merged = crump.merge_crump(); return merged; }) .collect::<String>(); // display_txt(&fully_merged, "+/"); if total_line <= 1 { p(&"No word found in the text!")?; } else { display_txt(&fully_merged) .border("+", components::BorderWeight::Bold) .center_box() .display(); p(&format!( "->Number of line that contain word /{}/: {}", quoted, total_line ))?; p(&format!( "Total number of words /{}/ {}", quoted, count_found_map(found_map) ))?; } } Err(file_err) => err_collector.push(file_err), } } } "read" => { let result = self.read(); print_ln("Reading contains : ")?; match result { Ok(txt) => { display_txt(&filter_param(&self.parameters[1], &txt)) .border("+", components::BorderWeight::Bold) .center_box() .display(); } Err(file_err) => { err_collector.push(file_err); } } } _ => err_collector.push(FileError::new().set_message("Invalid operation!")), } if err_collector.len() > 0 { Err(err_collector.into_iter().next().unwrap()) } else { Ok(()) } } } type OriResult<T> = Result<T, FileError>; /*positions : [{ Number of line to modify / word to replace / newdoc }]*/ pub trait TextPos { fn modify(&self, content: String, new_str: &str) -> Vec<String>; } // [x1,x2,"string"] // replace all word within that target across all content impl TextPos for &str { fn modify(&self, content: String, new_str: &str) -> Vec<String> { if self.contains(" ") { let multi_tar = self.split_whitespace().collect::<Vec<&str>>(); let emp = multi_tar .iter() .map(|x| { let xt = content.replace(*x, new_str); if xt!= content { return xt; } else { "None".to_string() } }) .filter(|x| *x!= "None".to_string()) .collect::<Vec<String>>(); // println!("special emp {:#?}",emp); return emp; } else { let mut result: Vec<String> = Vec::new(); result.push(content.replace(self, new_str)); return result; } } } pub trait Operation { fn read(&self) -> OriResult<String>; fn update<T>(&self, new_content: &str, target: 
T) where T: TextPos; fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError>; } fn checkempty(result: &str) -> OriResult<String> { if result.is_empty() { let empty_err = FileError::new().set_message("The Folder is Empty inside"); Err(empty_err) } else { Ok(result.trim().to_string()) } } impl Operation for Fileconfig { fn read(&self) -> OriResult<String> { let file = File::open(&self.path)?; let mut buffer = io::BufReader::new(file); let mut result = String::new(); buffer.read_to_string(&mut result)?; checkempty(&result) } // use for string only fn update<T: TextPos>(&self, new_content: &str, target: T) { /* if target is multiple start spit out different result to different file! */ let existed_content = self.read().expect("Cannot open that file"); let mutation = target.modify(existed_content.to_string(), new_content); println!("muttip {:?}", mutation); let mut count = 0; for n in mutation { let new_path = format!("output -- {} [{}]", self.path, count); let mut newfile = File::create(new_path).unwrap(); newfile.write_all(n.as_bytes()).unwrap(); count += 1; } } // regex for search: ^"[a-zA-Z-\s]+" fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError> { let mut err_clt = String::new(); // let found_map = Vec::new(); let mut found_map: HashMap<i64, Vec<(usize, usize)>> = HashMap::new(); if self.parameters.is_empty() { err_clt.push_str("No params!") } let mut content = String::new(); match self.read() { Ok(ct) => content.push_str(&ct), Err(read_error) => err_clt.push_str(&read_error.message), } let mut count: i64 = 0; let mut line_found = Vec::new(); for (line_num, line) in content.lines().enumerate() { let each_line = line.trim(); let word_group = each_line.split_whitespace().collect::<Vec<&str>>(); let reg = regex::Regex::new(&format!(r"{}", target)).unwrap(); let mut indx_vec = Vec::new(); for found in reg.find_iter(line) { let key_indx = (found.start(), found.end()); indx_vec.push(key_indx); } if word_group.len() >= 1 && word_group.into_iter().any(|word| word.contains(target)) { line_found.push(line_num); found_map.insert(line_num as i64, indx_vec); count += 1; } } if err_clt.len() > 0 { let bruh = FileError::new().set_message(&err_clt.clone()); return Err(bruh); } else { return Ok(found_map); } /**/ } } impl Clone for Fileconfig { fn clone(&self) -> Self { return Fileconfig { name: self.name.clone(), access_at: self.access_at, query: self.query.clone(), parameters: self.parameters.clone(), // content:Option<String>, content: self.content.clone(), path: self.path.clone(), }; } } fn count_found_map(hsm: HashMap<i64, Vec<(usize, usize)>>) -> usize { let mut count: usize = 0; for (_, hs) in hsm { hs.iter().for_each(|_| count += 1); } return count; } #[test] fn test() { let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap(); let test = "[Apple sauce bananan ba;;;a]";
println!( "test {:?} ", (match_inside_brac.is_match(test), test.trim_matches(x)) ); }
println!("t {}", test); let x: &[_] = &['[', ']'];
random_line_split
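Note: the bracket-parameter handling (`parse_bracket` and the `#[test]` at the end of this record) rests on `trim_matches` with a `&[char]` pattern followed by a whitespace split. A minimal sketch of that parsing step, with an invented function boundary:

// Strip surrounding `[`, `]` (and stray quotes) with a char-slice pattern,
// then split the inside on whitespace, as `parse_bracket` does per item.
fn parse_bracket(raw: &str) -> Vec<String> {
    let strip: &[_] = &['[', ']', '"'];
    raw.trim_matches(strip)
        .split_whitespace()
        .map(str::to_string)
        .collect()
}

fn main() {
    assert_eq!(
        parse_bracket("[Apple sauce bananan ba;;;a]"),
        vec!["Apple", "sauce", "bananan", "ba;;;a"]
    );
}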
mod.rs
/* File_config_functionalities_pmz */ // conditions: /* exec trait: [alias:(file/any)] [operations] [path/name] [parameters/--options] Read:Content _ none Write:Content _ String : if use multi line content -> check len()::Enum -> i32|&str Update:Content _ String Delete:Content _ none Some file operations need parameters and some don't; */ mod parameters; pub use super::interface::{self, components, printer, template_engine, text_processing}; pub use super::utility::{self, ErrorHandler::FileError}; use parameters::filter_param; use printer::TermCfg; use std::{ fs::File, io::prelude::*, io::{self}, time::Duration, }; use template_engine::TemplateBuilder; use template_engine::TemplateEngine; type Params = [Vec<String>; 2]; use std::collections::hash_map::HashMap; #[derive(Debug, PartialOrd, PartialEq)] pub struct Fileconfig { name: String, access_at: Duration, query: String, parameters: Params, // content:Option<String>, content: String, path: String, } impl Fileconfig { pub fn new(param: &str, timestamp: fn() -> Duration) -> Result<Fileconfig, &'static str> { let mut command_chunk = Vec::new(); for res in param.trim().split_whitespace() { command_chunk.push(res.to_owned()); } if command_chunk.len() < 3 { return Err("Insufficient parameters to run file operations!"); } let capture = |index: usize| command_chunk.get(index).unwrap().to_owned(); let mut vc: [Vec<String>; 2] = [Vec::new(), Vec::new()]; if command_chunk.len() > 3 { let v_param = command_chunk[3..command_chunk.len()].to_owned(); let p_vec = v_param.into_iter().map(|p_str| String::from(p_str)); // let tup = (p_reg,quote_word); //^"[a-zA-Z-\s]+" let throw_reg_panic = |regex_err: regex::Error| panic!("Verification Errors! : {}", regex_err); //^<\w++>$ let p_reg = regex::Regex::new(r"^\--+[a-zA-Z]+").unwrap_or_else(|x| throw_reg_panic(x)); let quote_word = regex::Regex::new(r#"(["'])((\\{2})*|(.*?[^\\](\\{2})*))"#) .unwrap_or_else(|x| throw_reg_panic(x)); let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap(); p_vec.for_each(|x| { if match_inside_brac.is_match(&x) || quote_word.is_match(&x) { vc[0].push(x); } else if p_reg.is_match(&x) { vc[1].push(x); } }) } let result = Fileconfig { name: capture(2), query: capture(1), path: capture(2), access_at: timestamp(), parameters: vc, content: String::from("None"), }; Ok(result) } fn parse_quotation(&self, param: &Vec<String>) -> Vec<String> { let quoted = |st: &str| st.starts_with("\"") && st.ends_with("\""); param .into_iter() .filter(|st| quoted(st)) .map(|quote_par| { text_processing::CrumpCluster::break_chunk(&quote_par) .delete(0, Some(1)) .delete(quote_par.len() - 1, Some(quote_par.len())) .merge_crump() }) .collect::<Vec<String>>() } fn parse_bracket(&self, param: &Vec<String>) -> Vec<String> { let match_brack: &[_] = &['[', ']', '\"']; param .iter() //.filter(|general_param| match_inside_brac.is_match(general_param)) .flat_map(|bk_par| { let split_brack = bk_par .trim_matches(match_brack) .split_whitespace() .map(|f| f.to_string()) .collect::<Vec<String>>(); return split_brack; }) .collect::<Vec<String>>() // .filter(|bracketed|); } pub fn run(&self) -> Result<(), FileError> { let init_ptr = TermCfg::new() .set_attr(console::Attribute::Bold) .set_attr(console::Attribute::Italic); let print = init_ptr.gen_print(Some(console::Color::Blue)); let mut print_ln = init_ptr.gen_println(Some(console::Color::Blue)); let mut err_collector: Vec<FileError> = Vec::new(); let display_txt = |txt: &str| -> template_engine::Template { let mut tmp_engine = 
template_engine::TemplateFactory::init() .parse_in_template(txt) .create_movable() .collect(); let template = tmp_engine.padding(vec![1, 6, 6, 3]); template.to_owned() }; match self.query.as_str() { "update" => { // self.write(params[0], params[1].parse::<i32>().unwrap()); println!("what is your ct?"); let elim_quote = self.parse_bracket(&self.parameters[0]); self.update(&elim_quote[1], elim_quote[0].clone().as_str()); } "search" => { let unquote = self.parse_bracket(&self.parameters[0]); print_ln(&format!("<->statistics of word {:?}<->", unquote))?; let mut p = init_ptr.gen_println(Some(console::Color::Blue)); for quoted in unquote { let quoted = filter_param(&self.parameters[1], &quoted); let filtered = filter_param(&self.parameters[1], &quoted); match self.search(&filtered) { Ok(found_map) => { print!("Highligted-Text: \n"); let full_content = self.read().unwrap(); let total_line = found_map.len(); let mut key_holder = Vec::new(); found_map.iter().for_each(|(key, _)| key_holder.push(key)); let mut count = 0; let mut crumps = full_content .lines() .into_iter() .enumerate() .map(|(idx, x)| { (idx as i64, text_processing::CrumpCluster::break_chunk(x)) }) .collect::<Vec<(i64, text_processing::CrumpCluster)>>(); while count < found_map.len() { // each_indx.iter().for_each(|x|) crumps.iter_mut().for_each(|(loc, crump)| { if loc == key_holder[count] { let locations = found_map.get(loc).unwrap(); locations.into_iter().for_each(|(start, end)| { crump.delete(*start, Some(*end)); crump.insert( *start, &format!("--->\"{}\"<---", quoted.clone().trim(),) .trim(), ); }); } }); count += 1; } let fully_merged = crumps .iter() .map(|(_, crump)| { let merged = crump.merge_crump(); return merged; }) .collect::<String>(); // display_txt(&fully_merged, "+/"); if total_line <= 1 { p(&"No word found in the text!")?; } else { display_txt(&fully_merged) .border("+", components::BorderWeight::Bold) .center_box() .display(); p(&format!( "->Number of line that contain word /{}/: {}", quoted, total_line ))?; p(&format!( "Total number of words /{}/ {}", quoted, count_found_map(found_map) ))?; } } Err(file_err) => err_collector.push(file_err), } } } "read" => { let result = self.read(); print_ln("Reading contains : ")?; match result { Ok(txt) => { display_txt(&filter_param(&self.parameters[1], &txt)) .border("+", components::BorderWeight::Bold) .center_box() .display(); } Err(file_err) => { err_collector.push(file_err); } } } _ => err_collector.push(FileError::new().set_message("Invalid operation!")), } if err_collector.len() > 0 { Err(err_collector.into_iter().next().unwrap()) } else { Ok(()) } } } type OriResult<T> = Result<T, FileError>; /*positions : [{ Number of line to modify / word to replace / newdoc }]*/ pub trait TextPos { fn modify(&self, content: String, new_str: &str) -> Vec<String>; } // [x1,x2,"string"] // replace all word within that target across all content impl TextPos for &str { fn modify(&self, content: String, new_str: &str) -> Vec<String> { if self.contains(" ") { let multi_tar = self.split_whitespace().collect::<Vec<&str>>(); let emp = multi_tar .iter() .map(|x| { let xt = content.replace(*x, new_str); if xt!= content { return xt; } else { "None".to_string() } }) .filter(|x| *x!= "None".to_string()) .collect::<Vec<String>>(); // println!("special emp {:#?}",emp); return emp; } else { let mut result: Vec<String> = Vec::new(); result.push(content.replace(self, new_str)); return result; } } } pub trait Operation { fn read(&self) -> OriResult<String>; fn update<T>(&self, new_content: &str, target: 
T) where T: TextPos; fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError>; } fn checkempty(result: &str) -> OriResult<String> { if result.is_empty() { let empty_err = FileError::new().set_message("The Folder is Empty inside"); Err(empty_err) } else { Ok(result.trim().to_string()) } } impl Operation for Fileconfig { fn read(&self) -> OriResult<String> { let file = File::open(&self.path)?; let mut buffer = io::BufReader::new(file); let mut result = String::new(); buffer.read_to_string(&mut result)?; checkempty(&result) } // use for string only fn update<T: TextPos>(&self, new_content: &str, target: T) { /* if target is multiple start spit out different result to different file! */ let existed_content = self.read().expect("Cannot open that file"); let mutation = target.modify(existed_content.to_string(), new_content); println!("muttip {:?}", mutation); let mut count = 0; for n in mutation { let new_path = format!("output -- {} [{}]", self.path, count); let mut newfile = File::create(new_path).unwrap(); newfile.write_all(n.as_bytes()).unwrap(); count += 1; } } // regex for search: ^"[a-zA-Z-\s]+" fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError> { let mut err_clt = String::new(); // let found_map = Vec::new(); let mut found_map: HashMap<i64, Vec<(usize, usize)>> = HashMap::new(); if self.parameters.is_empty() { err_clt.push_str("No params!") } let mut content = String::new(); match self.read() { Ok(ct) => content.push_str(&ct), Err(read_error) => err_clt.push_str(&read_error.message), } let mut count: i64 = 0; let mut line_found = Vec::new(); for (line_num, line) in content.lines().enumerate() { let each_line = line.trim(); let word_group = each_line.split_whitespace().collect::<Vec<&str>>(); let reg = regex::Regex::new(&format!(r"{}", target)).unwrap(); let mut indx_vec = Vec::new(); for found in reg.find_iter(line) { let key_indx = (found.start(), found.end()); indx_vec.push(key_indx); } if word_group.len() >= 1 && word_group.into_iter().any(|word| word.contains(target)) { line_found.push(line_num); found_map.insert(line_num as i64, indx_vec); count += 1; } } if err_clt.len() > 0 { let bruh = FileError::new().set_message(&err_clt.clone()); return Err(bruh); } else { return Ok(found_map); } /**/ } } impl Clone for Fileconfig { fn clone(&self) -> Self { return Fileconfig { name: self.name.clone(), access_at: self.access_at, query: self.query.clone(), parameters: self.parameters.clone(), // content:Option<String>, content: self.content.clone(), path: self.path.clone(), }; } } fn count_found_map(hsm: HashMap<i64, Vec<(usize, usize)>>) -> usize { let mut count: usize = 0; for (_, hs) in hsm { hs.iter().for_each(|_| count += 1); } return count; } #[test] fn test()
{ let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap(); let test = "[Apple sauce bananan ba;;;a]"; println!("t {}", test); let x: &[_] = &['[', ']']; println!( "test {:?} ", (match_inside_brac.is_match(test), test.trim_matches(x)) ); }
identifier_body
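Note: the `TextPos for &str` impl in this record replaces each word of a space-separated target independently and keeps only the variants where a replacement actually happened. A simplified, behaviorally equivalent sketch (the original routes non-matches through a "None" string and filters them; filtering on inequality directly is the same effect):

// A multi-word target yields one replaced variant per word that occurs in
// the content; a single-word target yields exactly one variant.
fn modify(target: &str, content: &str, new_str: &str) -> Vec<String> {
    if target.contains(' ') {
        target
            .split_whitespace()
            .map(|word| content.replace(word, new_str))
            .filter(|replaced| replaced.as_str() != content) // keep real changes
            .collect()
    } else {
        vec![content.replace(target, new_str)]
    }
}

fn main() {
    let variants = modify("foo baz", "foo bar foo", "X");
    // "foo" matches (giving "X bar X"); "baz" does not occur, so it is dropped.
    assert_eq!(variants, vec!["X bar X".to_string()]);
}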
mod.rs
/* File_config_functionalities_pmz */ // conditions: /* exec trait: [alias:(file/any)] [operations] [path/name] [parameters/--options] Read:Content _ none Write:Content _ String : if use multi line content -> check len()::Enum -> i32|&str Update:Content _ String Delete:Content _ none Some file operations need parameters and some don't; */ mod parameters; pub use super::interface::{self, components, printer, template_engine, text_processing}; pub use super::utility::{self, ErrorHandler::FileError}; use parameters::filter_param; use printer::TermCfg; use std::{ fs::File, io::prelude::*, io::{self}, time::Duration, }; use template_engine::TemplateBuilder; use template_engine::TemplateEngine; type Params = [Vec<String>; 2]; use std::collections::hash_map::HashMap; #[derive(Debug, PartialOrd, PartialEq)] pub struct Fileconfig { name: String, access_at: Duration, query: String, parameters: Params, // content:Option<String>, content: String, path: String, } impl Fileconfig { pub fn new(param: &str, timestamp: fn() -> Duration) -> Result<Fileconfig, &'static str> { let mut command_chunk = Vec::new(); for res in param.trim().split_whitespace() { command_chunk.push(res.to_owned()); } if command_chunk.len() < 3 { return Err("Insufficient parameters to run file operations!"); } let capture = |index: usize| command_chunk.get(index).unwrap().to_owned(); let mut vc: [Vec<String>; 2] = [Vec::new(), Vec::new()]; if command_chunk.len() > 3 { let v_param = command_chunk[3..command_chunk.len()].to_owned(); let p_vec = v_param.into_iter().map(|p_str| String::from(p_str)); // let tup = (p_reg,quote_word); //^"[a-zA-Z-\s]+" let throw_reg_panic = |regex_err: regex::Error| panic!("Verification Errors! : {}", regex_err); //^<\w++>$ let p_reg = regex::Regex::new(r"^\--+[a-zA-Z]+").unwrap_or_else(|x| throw_reg_panic(x)); let quote_word = regex::Regex::new(r#"(["'])((\\{2})*|(.*?[^\\](\\{2})*))"#) .unwrap_or_else(|x| throw_reg_panic(x)); let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap(); p_vec.for_each(|x| { if match_inside_brac.is_match(&x) || quote_word.is_match(&x) { vc[0].push(x); } else if p_reg.is_match(&x) { vc[1].push(x); } }) } let result = Fileconfig { name: capture(2), query: capture(1), path: capture(2), access_at: timestamp(), parameters: vc, content: String::from("None"), }; Ok(result) } fn parse_quotation(&self, param: &Vec<String>) -> Vec<String> { let quoted = |st: &str| st.starts_with("\"") && st.ends_with("\""); param .into_iter() .filter(|st| quoted(st)) .map(|quote_par| { text_processing::CrumpCluster::break_chunk(&quote_par) .delete(0, Some(1)) .delete(quote_par.len() - 1, Some(quote_par.len())) .merge_crump() }) .collect::<Vec<String>>() } fn parse_bracket(&self, param: &Vec<String>) -> Vec<String> { let match_brack: &[_] = &['[', ']', '\"']; param .iter() //.filter(|general_param| match_inside_brac.is_match(general_param)) .flat_map(|bk_par| { let split_brack = bk_par .trim_matches(match_brack) .split_whitespace() .map(|f| f.to_string()) .collect::<Vec<String>>(); return split_brack; }) .collect::<Vec<String>>() // .filter(|bracketed|); } pub fn run(&self) -> Result<(), FileError> { let init_ptr = TermCfg::new() .set_attr(console::Attribute::Bold) .set_attr(console::Attribute::Italic); let print = init_ptr.gen_print(Some(console::Color::Blue)); let mut print_ln = init_ptr.gen_println(Some(console::Color::Blue)); let mut err_collector: Vec<FileError> = Vec::new(); let display_txt = |txt: &str| -> template_engine::Template { let mut tmp_engine = 
template_engine::TemplateFactory::init() .parse_in_template(txt) .create_movable() .collect(); let template = tmp_engine.padding(vec![1, 6, 6, 3]); template.to_owned() }; match self.query.as_str() { "update" => { // self.write(params[0], params[1].parse::<i32>().unwrap()); println!("what is your ct?"); let elim_quote = self.parse_bracket(&self.parameters[0]); self.update(&elim_quote[1], elim_quote[0].clone().as_str()); } "search" => { let unquote = self.parse_bracket(&self.parameters[0]); print_ln(&format!("<->statistics of word {:?}<->", unquote))?; let mut p = init_ptr.gen_println(Some(console::Color::Blue)); for quoted in unquote { let quoted = filter_param(&self.parameters[1], &quoted); let filtered = filter_param(&self.parameters[1], &quoted); match self.search(&filtered) { Ok(found_map) => { print!("Highligted-Text: \n"); let full_content = self.read().unwrap(); let total_line = found_map.len(); let mut key_holder = Vec::new(); found_map.iter().for_each(|(key, _)| key_holder.push(key)); let mut count = 0; let mut crumps = full_content .lines() .into_iter() .enumerate() .map(|(idx, x)| { (idx as i64, text_processing::CrumpCluster::break_chunk(x)) }) .collect::<Vec<(i64, text_processing::CrumpCluster)>>(); while count < found_map.len() { // each_indx.iter().for_each(|x|) crumps.iter_mut().for_each(|(loc, crump)| { if loc == key_holder[count] { let locations = found_map.get(loc).unwrap(); locations.into_iter().for_each(|(start, end)| { crump.delete(*start, Some(*end)); crump.insert( *start, &format!("--->\"{}\"<---", quoted.clone().trim(),) .trim(), ); }); } }); count += 1; } let fully_merged = crumps .iter() .map(|(_, crump)| { let merged = crump.merge_crump(); return merged; }) .collect::<String>(); // display_txt(&fully_merged, "+/"); if total_line <= 1 { p(&"No word found in the text!")?; } else { display_txt(&fully_merged) .border("+", components::BorderWeight::Bold) .center_box() .display(); p(&format!( "->Number of line that contain word /{}/: {}", quoted, total_line ))?; p(&format!( "Total number of words /{}/ {}", quoted, count_found_map(found_map) ))?; } } Err(file_err) => err_collector.push(file_err), } } } "read" => { let result = self.read(); print_ln("Reading contains : ")?; match result { Ok(txt) => { display_txt(&filter_param(&self.parameters[1], &txt)) .border("+", components::BorderWeight::Bold) .center_box() .display(); } Err(file_err) => { err_collector.push(file_err); } } } _ => err_collector.push(FileError::new().set_message("Invalid operation!")), } if err_collector.len() > 0 { Err(err_collector.into_iter().next().unwrap()) } else { Ok(()) } } } type OriResult<T> = Result<T, FileError>; /*positions : [{ Number of line to modify / word to replace / newdoc }]*/ pub trait TextPos { fn modify(&self, content: String, new_str: &str) -> Vec<String>; } // [x1,x2,"string"] // replace all word within that target across all content impl TextPos for &str { fn modify(&self, content: String, new_str: &str) -> Vec<String> { if self.contains(" ") { let multi_tar = self.split_whitespace().collect::<Vec<&str>>(); let emp = multi_tar .iter() .map(|x| { let xt = content.replace(*x, new_str); if xt!= content { return xt; } else { "None".to_string() } }) .filter(|x| *x!= "None".to_string()) .collect::<Vec<String>>(); // println!("special emp {:#?}",emp); return emp; } else { let mut result: Vec<String> = Vec::new(); result.push(content.replace(self, new_str)); return result; } } } pub trait Operation { fn read(&self) -> OriResult<String>; fn update<T>(&self, new_content: &str, target: 
T) where T: TextPos; fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError>; } fn
(result: &str) -> OriResult<String> { if result.is_empty() { let empty_err = FileError::new().set_message("The Folder is Empty inside"); Err(empty_err) } else { Ok(result.trim().to_string()) } } impl Operation for Fileconfig { fn read(&self) -> OriResult<String> { let file = File::open(&self.path)?; let mut buffer = io::BufReader::new(file); let mut result = String::new(); buffer.read_to_string(&mut result)?; checkempty(&result) } // use for string only fn update<T: TextPos>(&self, new_content: &str, target: T) { /* if target is multiple start spit out different result to different file! */ let existed_content = self.read().expect("Cannot open that file"); let mutation = target.modify(existed_content.to_string(), new_content); println!("muttip {:?}", mutation); let mut count = 0; for n in mutation { let new_path = format!("output -- {} [{}]", self.path, count); let mut newfile = File::create(new_path).unwrap(); newfile.write_all(n.as_bytes()).unwrap(); count += 1; } } // regex for search: ^"[a-zA-Z-\s]+" fn search(&self, target: &str) -> Result<HashMap<i64, Vec<(usize, usize)>>, FileError> { let mut err_clt = String::new(); // let found_map = Vec::new(); let mut found_map: HashMap<i64, Vec<(usize, usize)>> = HashMap::new(); if self.parameters.is_empty() { err_clt.push_str("No params!") } let mut content = String::new(); match self.read() { Ok(ct) => content.push_str(&ct), Err(read_error) => err_clt.push_str(&read_error.message), } let mut count: i64 = 0; let mut line_found = Vec::new(); for (line_num, line) in content.lines().enumerate() { let each_line = line.trim(); let word_group = each_line.split_whitespace().collect::<Vec<&str>>(); let reg = regex::Regex::new(&format!(r"{}", target)).unwrap(); let mut indx_vec = Vec::new(); for found in reg.find_iter(line) { let key_indx = (found.start(), found.end()); indx_vec.push(key_indx); } if word_group.len() >= 1 && word_group.into_iter().any(|word| word.contains(target)) { line_found.push(line_num); found_map.insert(line_num as i64, indx_vec); count += 1; } } if err_clt.len() > 0 { let bruh = FileError::new().set_message(&err_clt.clone()); return Err(bruh); } else { return Ok(found_map); } /**/ } } impl Clone for Fileconfig { fn clone(&self) -> Self { return Fileconfig { name: self.name.clone(), access_at: self.access_at, query: self.query.clone(), parameters: self.parameters.clone(), // content:Option<String>, content: self.content.clone(), path: self.path.clone(), }; } } fn count_found_map(hsm: HashMap<i64, Vec<(usize, usize)>>) -> usize { let mut count: usize = 0; for (_, hs) in hsm { hs.iter().for_each(|_| count += 1); } return count; } #[test] fn test() { let match_inside_brac = regex::Regex::new(r"^\[(.*)\]$").unwrap(); let test = "[Apple sauce bananan ba;;;a]"; println!("t {}", test); let x: &[_] = &['[', ']']; println!( "test {:?} ", (match_inside_brac.is_match(test), test.trim_matches(x)) ); }
checkempty
identifier_name
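Note: the `search` operation in this record maps line numbers to the byte spans at which a target matches, via `regex::find_iter`. A trimmed sketch of that core (it assumes the `regex` crate, as the source does, and omits the source's extra word-containment gate):

use std::collections::HashMap;

// Record, per line, the (start, end) byte spans where `target` matches.
// The target is used verbatim as a regex pattern, as in the code above.
fn search(content: &str, target: &str) -> HashMap<i64, Vec<(usize, usize)>> {
    let re = regex::Regex::new(target).expect("invalid pattern");
    let mut found = HashMap::new();
    for (line_num, line) in content.lines().enumerate() {
        let spans: Vec<_> = re.find_iter(line).map(|m| (m.start(), m.end())).collect();
        if !spans.is_empty() {
            found.insert(line_num as i64, spans);
        }
    }
    found
}

fn main() {
    let map = search("apple pie\nno match\napple apple", "apple");
    assert_eq!(map[&0], vec![(0, 5)]);
    assert_eq!(map[&2], vec![(0, 5), (6, 11)]);
    assert!(map.get(&1).is_none());
}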
main.rs
use crossterm::{ cursor, event::{self, DisableMouseCapture, EnableMouseCapture, Event}, queue, style::{self, Color::Rgb, Colors, Print, SetColors}, terminal, }; use serde::{Deserialize, Serialize}; use std::{ cmp::min, collections::HashMap, env, fs, io::{self, Write}, iter, process::exit, }; use unicode_width::UnicodeWidthChar; mod view; use view::{Page, Toc, View}; mod epub; fn wrap(text: &str, max_cols: usize) -> Vec<(usize, usize)> { let mut lines = Vec::new(); // bytes let mut start = 0; let mut end = 0; // cols after the break let mut after = 0; // cols of unbroken line let mut cols = 0; // are we breaking on whitespace? let mut space = false; // should probably use unicode_segmentation grapheme_indices for (i, c) in text.char_indices() { // https://github.com/unicode-rs/unicode-width/issues/6 let char_cols = c.width().unwrap_or(0); cols += char_cols; match c { '\n' => { after = 0; end = i; space = true; cols = max_cols + 1; } '' => { after = 0; end = i; space = true; } '-' | '—' if cols <= max_cols => { after = 0; end = i + c.len_utf8(); space = false; } _ => after += char_cols, } if cols > max_cols { // break a single long word if cols == after { after = char_cols; end = i; space = false; } lines.push((start, end)); start = end; if space { start += 1; } cols = after; } } lines } struct Se
dir: Direction, skip: bool, } #[derive(Clone)] enum Direction { Next, Prev, } pub struct Bk<'a> { quit: bool, chapters: Vec<epub::Chapter>, // position in the book chapter: usize, line: usize, mark: HashMap<char, (usize, usize)>, links: HashMap<String, (usize, usize)>, // layout colors: Colors, cols: u16, rows: usize, max_width: u16, // view state view: &'a dyn View, cursor: usize, dir: Direction, meta: Vec<String>, query: String, } impl Bk<'_> { fn new(epub: epub::Epub, args: Props) -> Self { let (cols, rows) = terminal::size().unwrap(); let width = min(cols, args.width) as usize; let meta = wrap(&epub.meta, width) .into_iter() .map(|(a, b)| String::from(&epub.meta[a..b])) .collect(); let mut chapters = epub.chapters; for c in &mut chapters { c.lines = wrap(&c.text, width); if c.title.chars().count() > width { c.title = c .title .chars() .take(width - 1) .chain(std::iter::once('…')) .collect(); } } let mut bk = Bk { quit: false, chapters, chapter: 0, line: 0, mark: HashMap::new(), links: epub.links, colors: args.colors, cols, rows: rows as usize, max_width: args.width, view: if args.toc { &Toc } else { &Page }, cursor: 0, dir: Direction::Next, meta, query: String::new(), }; bk.jump_byte(args.chapter, args.byte); bk.mark('\''); bk } fn run(&mut self) -> io::Result<()> { let mut stdout = io::stdout(); queue!( stdout, terminal::EnterAlternateScreen, cursor::Hide, EnableMouseCapture, )?; terminal::enable_raw_mode()?; let mut render = |bk: &Bk| { queue!( stdout, Print(style::Attribute::Reset), SetColors(bk.colors), terminal::Clear(terminal::ClearType::All), ) .unwrap(); for (i, line) in bk.view.render(bk).iter().enumerate() { queue!(stdout, cursor::MoveTo(bk.pad(), i as u16), Print(line)).unwrap(); } queue!(stdout, cursor::MoveTo(bk.pad(), bk.cursor as u16)).unwrap(); stdout.flush().unwrap(); }; render(self); loop { match event::read()? 
{ Event::Key(e) => self.view.on_key(self, e.code), Event::Mouse(e) => { // XXX idk seems lame if e.kind == event::MouseEventKind::Moved { continue; } self.view.on_mouse(self, e); } Event::Resize(cols, rows) => { self.rows = rows as usize; if cols!= self.cols { self.cols = cols; let width = min(cols, self.max_width) as usize; for c in &mut self.chapters { c.lines = wrap(&c.text, width); } } self.view.on_resize(self); // XXX marks aren't updated } } if self.quit { break; } render(self); } queue!( stdout, terminal::LeaveAlternateScreen, cursor::Show, DisableMouseCapture )?; terminal::disable_raw_mode() } fn jump(&mut self, (c, l): (usize, usize)) { self.mark('\''); self.chapter = c; self.line = l; } fn jump_byte(&mut self, c: usize, byte: usize) { self.chapter = c; self.line = match self.chapters[c] .lines .binary_search_by_key(&byte, |&(a, _)| a) { Ok(n) => n, Err(n) => n - 1, } } fn jump_reset(&mut self) { let &(c, l) = self.mark.get(&'\'').unwrap(); self.chapter = c; self.line = l; } fn mark(&mut self, c: char) { self.mark.insert(c, (self.chapter, self.line)); } fn pad(&self) -> u16 { self.cols.saturating_sub(self.max_width) / 2 } fn search(&mut self, args: SearchArgs) -> bool { let (start, end) = self.chapters[self.chapter].lines[self.line]; match args.dir { Direction::Next => { let byte = if args.skip { end } else { start }; let head = (self.chapter, byte); let tail = (self.chapter + 1..self.chapters.len() - 1).map(|n| (n, 0)); for (c, byte) in iter::once(head).chain(tail) { if let Some(index) = self.chapters[c].text[byte..].find(&self.query) { self.jump_byte(c, index + byte); return true; } } false } Direction::Prev => { let byte = if args.skip { start } else { end }; let head = (self.chapter, byte); let tail = (0..self.chapter) .rev() .map(|c| (c, self.chapters[c].text.len())); for (c, byte) in iter::once(head).chain(tail) { if let Some(index) = self.chapters[c].text[..byte].rfind(&self.query) { self.jump_byte(c, index); return true; } } false } } } } #[derive(argh::FromArgs)] /// read a book struct Args { #[argh(positional)] path: Option<String>, /// background color (eg 282a36) #[argh(option)] bg: Option<String>, /// foreground color (eg f8f8f2) #[argh(option)] fg: Option<String>, /// print metadata and exit #[argh(switch, short ='m')] meta: bool, /// start with table of contents open #[argh(switch, short = 't')] toc: bool, /// characters per line #[argh(option, short = 'w', default = "75")] width: u16, } struct Props { colors: Colors, chapter: usize, byte: usize, width: u16, toc: bool, } #[derive(Default, Deserialize, Serialize)] struct Save { last: String, files: HashMap<String, (usize, usize)>, } struct State { save: Save, save_path: String, path: String, meta: bool, bk: Props, } fn init() -> Result<State, Box<dyn std::error::Error>> { let save_path = if cfg!(windows) { format!("{}\\bk", env::var("APPDATA")?) } else { format!("{}/.local/share/bk", env::var("HOME")?) }; // XXX will silently create a new default save if ron errors but path arg works. // revisit if/when stabilizing. 
ez file format upgrades let save: io::Result<Save> = fs::read_to_string(&save_path).and_then(|s| { ron::from_str(&s) .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid save file")) }); let args: Args = argh::from_env(); let path = match args.path { Some(p) => Some(fs::canonicalize(p)?.to_str().unwrap().to_string()), None => None, }; let (path, save, chapter, byte) = match (save, path) { (Err(e), None) => return Err(Box::new(e)), (Err(_), Some(p)) => (p, Save::default(), 0, 0), (Ok(s), None) => { let &(chapter, byte) = s.files.get(&s.last).unwrap(); (s.last.clone(), s, chapter, byte) } (Ok(s), Some(p)) => { if s.files.contains_key(&p) { let &(chapter, byte) = s.files.get(&p).unwrap(); (p, s, chapter, byte) } else { (p, s, 0, 0) } } }; // XXX oh god what let fg = args .fg .map(|s| Rgb { r: u8::from_str_radix(&s[0..2], 16).unwrap(), g: u8::from_str_radix(&s[2..4], 16).unwrap(), b: u8::from_str_radix(&s[4..6], 16).unwrap(), }) .unwrap_or(style::Color::Reset); let bg = args .bg .map(|s| Rgb { r: u8::from_str_radix(&s[0..2], 16).unwrap(), g: u8::from_str_radix(&s[2..4], 16).unwrap(), b: u8::from_str_radix(&s[4..6], 16).unwrap(), }) .unwrap_or(style::Color::Reset); Ok(State { path, save, save_path, meta: args.meta, bk: Props { colors: Colors::new(fg, bg), chapter, byte, width: args.width, toc: args.toc, }, }) } fn main() { let mut state = init().unwrap_or_else(|e| { println!("init error: {}", e); exit(1); }); let epub = epub::Epub::new(&state.path, state.meta).unwrap_or_else(|e| { println!("epub error: {}", e); exit(1); }); if state.meta { println!("{}", epub.meta); exit(0); } let mut bk = Bk::new(epub, state.bk); bk.run().unwrap_or_else(|e| { println!("run error: {}", e); exit(1); }); let byte = bk.chapters[bk.chapter].lines[bk.line].0; state .save .files .insert(state.path.clone(), (bk.chapter, byte)); state.save.last = state.path; let serialized = ron::to_string(&state.save).unwrap(); fs::write(state.save_path, serialized).unwrap_or_else(|e| { println!("error saving state: {}", e); exit(1); }); }
archArgs {
identifier_name
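The `jump_byte` method above recovers a wrapped-line index from a byte offset by binary-searching the line start offsets. A minimal sketch of that lookup, using hypothetical `(start, end)` ranges in place of a real chapter (`line_for_byte` is an illustrative name, not part of the source):

```rust
fn line_for_byte(lines: &[(usize, usize)], byte: usize) -> usize {
    // Each entry is the (start, end) byte range of one wrapped line,
    // sorted by start. Ok(n) means `byte` is exactly a line start;
    // Err(n) is the insertion point, so the containing line is n - 1.
    // Like the original, n - 1 underflows if `byte` precedes the first start.
    match lines.binary_search_by_key(&byte, |&(start, _)| start) {
        Ok(n) => n,
        Err(n) => n - 1,
    }
}

fn main() {
    let lines = [(0, 10), (11, 20), (21, 30)]; // hypothetical wrap() output
    assert_eq!(line_for_byte(&lines, 0), 0);   // exactly a line start
    assert_eq!(line_for_byte(&lines, 15), 1);  // inside the second line
    assert_eq!(line_for_byte(&lines, 29), 2);  // inside the last line
}
```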
main.rs
use crossterm::{ cursor, event::{self, DisableMouseCapture, EnableMouseCapture, Event}, queue, style::{self, Color::Rgb, Colors, Print, SetColors}, terminal, }; use serde::{Deserialize, Serialize}; use std::{ cmp::min, collections::HashMap, env, fs, io::{self, Write}, iter, process::exit, }; use unicode_width::UnicodeWidthChar; mod view; use view::{Page, Toc, View}; mod epub; fn wrap(text: &str, max_cols: usize) -> Vec<(usize, usize)> { let mut lines = Vec::new(); // bytes let mut start = 0; let mut end = 0; // cols after the break let mut after = 0; // cols of unbroken line let mut cols = 0; // are we breaking on whitespace? let mut space = false; // should probably use unicode_segmentation grapheme_indices for (i, c) in text.char_indices() { // https://github.com/unicode-rs/unicode-width/issues/6 let char_cols = c.width().unwrap_or(0); cols += char_cols; match c { '\n' => { after = 0; end = i; space = true; cols = max_cols + 1; } '' => { after = 0; end = i; space = true; } '-' | '—' if cols <= max_cols => { after = 0; end = i + c.len_utf8(); space = false; } _ => after += char_cols, } if cols > max_cols { // break a single long word if cols == after { after = char_cols; end = i; space = false; } lines.push((start, end)); start = end; if space { start += 1; } cols = after; } } lines } struct SearchArgs { dir: Direction, skip: bool, } #[derive(Clone)] enum Direction { Next, Prev, } pub struct Bk<'a> { quit: bool, chapters: Vec<epub::Chapter>, // position in the book chapter: usize, line: usize, mark: HashMap<char, (usize, usize)>, links: HashMap<String, (usize, usize)>, // layout colors: Colors, cols: u16, rows: usize, max_width: u16, // view state view: &'a dyn View, cursor: usize, dir: Direction, meta: Vec<String>, query: String, } impl Bk<'_> { fn new(epub: epub::Epub, args: Props) -> Self { let (cols, rows) = terminal::size().unwrap(); let width = min(cols, args.width) as usize; let meta = wrap(&epub.meta, width) .into_iter() .map(|(a, b)| String::from(&epub.meta[a..b])) .collect(); let mut chapters = epub.chapters; for c in &mut chapters { c.lines = wrap(&c.text, width); if c.title.chars().count() > width { c.title = c .title .chars() .take(width - 1) .chain(std::iter::once('…')) .collect(); } } let mut bk = Bk { quit: false, chapters, chapter: 0, line: 0, mark: HashMap::new(), links: epub.links, colors: args.colors, cols, rows: rows as usize, max_width: args.width, view: if args.toc { &Toc } else { &Page }, cursor: 0, dir: Direction::Next, meta, query: String::new(), }; bk.jump_byte(args.chapter, args.byte); bk.mark('\''); bk } fn run(&mut self) -> io::Result<()> { let mut stdout = io::stdout(); queue!( stdout, terminal::EnterAlternateScreen, cursor::Hide, EnableMouseCapture, )?; terminal::enable_raw_mode()?; let mut render = |bk: &Bk| { queue!( stdout, Print(style::Attribute::Reset), SetColors(bk.colors), terminal::Clear(terminal::ClearType::All), ) .unwrap(); for (i, line) in bk.view.render(bk).iter().enumerate() { queue!(stdout, cursor::MoveTo(bk.pad(), i as u16), Print(line)).unwrap(); } queue!(stdout, cursor::MoveTo(bk.pad(), bk.cursor as u16)).unwrap(); stdout.flush().unwrap(); }; render(self); loop { match event::read()? 
{ Event::Key(e) => self.view.on_key(self, e.code), Event::Mouse(e) => { // XXX idk seems lame if e.kind == event::MouseEventKind::Moved { continue; } self.view.on_mouse(self, e); } Event::Resize(cols, rows) => { self.rows = rows as usize; if cols!= self.cols { self.cols = cols; let width = min(cols, self.max_width) as usize; for c in &mut self.chapters { c.lines = wrap(&c.text, width); } } self.view.on_resize(self); // XXX marks aren't updated } } if self.quit { break; } render(self); } queue!( stdout, terminal::LeaveAlternateScreen, cursor::Show, DisableMouseCapture )?; terminal::disable_raw_mode() } fn jump(&mut self, (c, l): (usize, usize)) { self.mark('\''); self.chapter = c; self.line = l; } fn jump_byte(&mut self, c: usize, byte: usize) { self.chapter = c; self.line = match self.chapters[c] .lines .binary_search_by_key(&byte, |&(a, _)| a) { Ok(n) => n, Err(n) => n - 1, } } fn jump_reset(&mut self) { let &(c, l) = self.mark.get(&'\'').unwrap(); self.chapter = c; self.line = l; } fn mark(&mut self, c: char) { self.mark.insert(c, (self.chapter, self.line)); } fn pad(&self) -> u16 { self.cols.saturating_sub(self.max_width) / 2 } fn search(&mut self, args: SearchArgs) -> bool { let (start, end) = self.chapters[self.chapter].lines[self.line]; match args.dir { Direction::Next => { let byte = if args.skip { end } else { start }; let head = (self.chapter, byte); let tail = (self.chapter + 1..self.chapters.len() - 1).map(|n| (n, 0)); for (c, byte) in iter::once(head).chain(tail) { if let Some(index) = self.chapters[c].text[byte..].find(&self.query) { self.jump_byte(c, index + byte); return true; } } false } Direction::Prev => { let byte = if args.skip { start } else { end }; let head = (self.chapter, byte); let tail = (0..self.chapter) .rev() .map(|c| (c, self.chapters[c].text.len())); for (c, byte) in iter::once(head).chain(tail) { if let Some(index) = self.chapters[c].text[..byte].rfind(&self.query) {
} false } } } } #[derive(argh::FromArgs)] /// read a book struct Args { #[argh(positional)] path: Option<String>, /// background color (eg 282a36) #[argh(option)] bg: Option<String>, /// foreground color (eg f8f8f2) #[argh(option)] fg: Option<String>, /// print metadata and exit #[argh(switch, short ='m')] meta: bool, /// start with table of contents open #[argh(switch, short = 't')] toc: bool, /// characters per line #[argh(option, short = 'w', default = "75")] width: u16, } struct Props { colors: Colors, chapter: usize, byte: usize, width: u16, toc: bool, } #[derive(Default, Deserialize, Serialize)] struct Save { last: String, files: HashMap<String, (usize, usize)>, } struct State { save: Save, save_path: String, path: String, meta: bool, bk: Props, } fn init() -> Result<State, Box<dyn std::error::Error>> { let save_path = if cfg!(windows) { format!("{}\\bk", env::var("APPDATA")?) } else { format!("{}/.local/share/bk", env::var("HOME")?) }; // XXX will silently create a new default save if ron errors but path arg works. // revisit if/when stabilizing. ez file format upgrades let save: io::Result<Save> = fs::read_to_string(&save_path).and_then(|s| { ron::from_str(&s) .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid save file")) }); let args: Args = argh::from_env(); let path = match args.path { Some(p) => Some(fs::canonicalize(p)?.to_str().unwrap().to_string()), None => None, }; let (path, save, chapter, byte) = match (save, path) { (Err(e), None) => return Err(Box::new(e)), (Err(_), Some(p)) => (p, Save::default(), 0, 0), (Ok(s), None) => { let &(chapter, byte) = s.files.get(&s.last).unwrap(); (s.last.clone(), s, chapter, byte) } (Ok(s), Some(p)) => { if s.files.contains_key(&p) { let &(chapter, byte) = s.files.get(&p).unwrap(); (p, s, chapter, byte) } else { (p, s, 0, 0) } } }; // XXX oh god what let fg = args .fg .map(|s| Rgb { r: u8::from_str_radix(&s[0..2], 16).unwrap(), g: u8::from_str_radix(&s[2..4], 16).unwrap(), b: u8::from_str_radix(&s[4..6], 16).unwrap(), }) .unwrap_or(style::Color::Reset); let bg = args .bg .map(|s| Rgb { r: u8::from_str_radix(&s[0..2], 16).unwrap(), g: u8::from_str_radix(&s[2..4], 16).unwrap(), b: u8::from_str_radix(&s[4..6], 16).unwrap(), }) .unwrap_or(style::Color::Reset); Ok(State { path, save, save_path, meta: args.meta, bk: Props { colors: Colors::new(fg, bg), chapter, byte, width: args.width, toc: args.toc, }, }) } fn main() { let mut state = init().unwrap_or_else(|e| { println!("init error: {}", e); exit(1); }); let epub = epub::Epub::new(&state.path, state.meta).unwrap_or_else(|e| { println!("epub error: {}", e); exit(1); }); if state.meta { println!("{}", epub.meta); exit(0); } let mut bk = Bk::new(epub, state.bk); bk.run().unwrap_or_else(|e| { println!("run error: {}", e); exit(1); }); let byte = bk.chapters[bk.chapter].lines[bk.line].0; state .save .files .insert(state.path.clone(), (bk.chapter, byte)); state.save.last = state.path; let serialized = ron::to_string(&state.save).unwrap(); fs::write(state.save_path, serialized).unwrap_or_else(|e| { println!("error saving state: {}", e); exit(1); }); }
self.jump_byte(c, index); return true; }
conditional_block
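In `search` above, `find` on `text[byte..]` returns a slice-relative index that must be shifted back by `byte`, while `rfind` on `text[..byte]` already yields an absolute offset. A small standalone check of that offset bookkeeping (the string and query are made up for illustration):

```rust
fn main() {
    let text = "abc def abc def";
    let query = "abc";

    // Forward search from byte 4: find() is relative to the slice,
    // so add the slice start back to get an absolute offset.
    let from = 4;
    let next = text[from..].find(query).map(|i| i + from);
    assert_eq!(next, Some(8));

    // Backward search before byte 8: rfind() on text[..8] is already
    // absolute, so no adjustment is needed.
    let prev = text[..8].rfind(query);
    assert_eq!(prev, Some(0));
}
```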
main.rs
use crossterm::{ cursor, event::{self, DisableMouseCapture, EnableMouseCapture, Event}, queue, style::{self, Color::Rgb, Colors, Print, SetColors}, terminal, }; use serde::{Deserialize, Serialize}; use std::{ cmp::min, collections::HashMap, env, fs, io::{self, Write}, iter, process::exit, }; use unicode_width::UnicodeWidthChar; mod view; use view::{Page, Toc, View}; mod epub; fn wrap(text: &str, max_cols: usize) -> Vec<(usize, usize)> { let mut lines = Vec::new(); // bytes let mut start = 0; let mut end = 0; // cols after the break let mut after = 0; // cols of unbroken line let mut cols = 0; // are we breaking on whitespace? let mut space = false; // should probably use unicode_segmentation grapheme_indices for (i, c) in text.char_indices() { // https://github.com/unicode-rs/unicode-width/issues/6 let char_cols = c.width().unwrap_or(0); cols += char_cols; match c { '\n' => { after = 0; end = i; space = true; cols = max_cols + 1; } '' => { after = 0; end = i; space = true; } '-' | '—' if cols <= max_cols => { after = 0; end = i + c.len_utf8(); space = false; } _ => after += char_cols, } if cols > max_cols { // break a single long word if cols == after { after = char_cols; end = i; space = false; } lines.push((start, end)); start = end; if space { start += 1; } cols = after; } } lines } struct SearchArgs { dir: Direction, skip: bool, } #[derive(Clone)] enum Direction { Next, Prev, } pub struct Bk<'a> { quit: bool, chapters: Vec<epub::Chapter>, // position in the book chapter: usize, line: usize, mark: HashMap<char, (usize, usize)>, links: HashMap<String, (usize, usize)>, // layout colors: Colors, cols: u16, rows: usize, max_width: u16, // view state view: &'a dyn View, cursor: usize, dir: Direction, meta: Vec<String>, query: String, } impl Bk<'_> { fn new(epub: epub::Epub, args: Props) -> Self { let (cols, rows) = terminal::size().unwrap(); let width = min(cols, args.width) as usize; let meta = wrap(&epub.meta, width) .into_iter() .map(|(a, b)| String::from(&epub.meta[a..b])) .collect(); let mut chapters = epub.chapters; for c in &mut chapters { c.lines = wrap(&c.text, width); if c.title.chars().count() > width { c.title = c .title .chars() .take(width - 1) .chain(std::iter::once('…')) .collect(); } } let mut bk = Bk { quit: false, chapters, chapter: 0, line: 0, mark: HashMap::new(), links: epub.links, colors: args.colors, cols, rows: rows as usize, max_width: args.width, view: if args.toc { &Toc } else { &Page }, cursor: 0, dir: Direction::Next, meta, query: String::new(), }; bk.jump_byte(args.chapter, args.byte); bk.mark('\''); bk } fn run(&mut self) -> io::Result<()> { let mut stdout = io::stdout(); queue!( stdout, terminal::EnterAlternateScreen, cursor::Hide, EnableMouseCapture, )?; terminal::enable_raw_mode()?; let mut render = |bk: &Bk| { queue!( stdout, Print(style::Attribute::Reset), SetColors(bk.colors), terminal::Clear(terminal::ClearType::All), ) .unwrap(); for (i, line) in bk.view.render(bk).iter().enumerate() { queue!(stdout, cursor::MoveTo(bk.pad(), i as u16), Print(line)).unwrap(); } queue!(stdout, cursor::MoveTo(bk.pad(), bk.cursor as u16)).unwrap(); stdout.flush().unwrap(); }; render(self); loop { match event::read()? 
{ Event::Key(e) => self.view.on_key(self, e.code), Event::Mouse(e) => { // XXX idk seems lame if e.kind == event::MouseEventKind::Moved { continue; } self.view.on_mouse(self, e); } Event::Resize(cols, rows) => { self.rows = rows as usize; if cols!= self.cols { self.cols = cols; let width = min(cols, self.max_width) as usize; for c in &mut self.chapters { c.lines = wrap(&c.text, width); } } self.view.on_resize(self); // XXX marks aren't updated } } if self.quit { break; } render(self); } queue!( stdout, terminal::LeaveAlternateScreen, cursor::Show, DisableMouseCapture )?; terminal::disable_raw_mode() } fn jump(&mut self, (c, l): (usize, usize)) { self.mark('\''); self.chapter = c; self.line = l; } fn jump_byte(&mut self, c: usize, byte: usize) { self.chapter = c; self.line = match self.chapters[c] .lines .binary_search_by_key(&byte, |&(a, _)| a) { Ok(n) => n, Err(n) => n - 1, } } fn jump_reset(&mut self) { let &(c, l) = self.mark.get(&'\'').unwrap(); self.chapter = c; self.line = l; } fn mark(&mut self, c: char) { self.mark.insert(c, (self.chapter, self.line)); } fn pad(&self) -> u16 { self.cols.saturating_sub(self.max_width) / 2 } fn search(&mut self, args: SearchArgs) -> bool { let (start, end) = self.chapters[self.chapter].lines[self.line]; match args.dir { Direction::Next => { let byte = if args.skip { end } else { start }; let head = (self.chapter, byte); let tail = (self.chapter + 1..self.chapters.len() - 1).map(|n| (n, 0)); for (c, byte) in iter::once(head).chain(tail) { if let Some(index) = self.chapters[c].text[byte..].find(&self.query) { self.jump_byte(c, index + byte); return true; } } false } Direction::Prev => { let byte = if args.skip { start } else { end }; let head = (self.chapter, byte); let tail = (0..self.chapter) .rev() .map(|c| (c, self.chapters[c].text.len())); for (c, byte) in iter::once(head).chain(tail) { if let Some(index) = self.chapters[c].text[..byte].rfind(&self.query) { self.jump_byte(c, index); return true; } } false } } } } #[derive(argh::FromArgs)] /// read a book struct Args { #[argh(positional)] path: Option<String>, /// background color (eg 282a36) #[argh(option)] bg: Option<String>, /// foreground color (eg f8f8f2) #[argh(option)] fg: Option<String>, /// print metadata and exit #[argh(switch, short ='m')] meta: bool, /// start with table of contents open #[argh(switch, short = 't')] toc: bool, /// characters per line #[argh(option, short = 'w', default = "75")] width: u16, } struct Props { colors: Colors, chapter: usize, byte: usize, width: u16, toc: bool, } #[derive(Default, Deserialize, Serialize)] struct Save { last: String, files: HashMap<String, (usize, usize)>, } struct State { save: Save, save_path: String, path: String, meta: bool, bk: Props, } fn init() -> Result<State, Box<dyn std::error::Error>> { let save_path = if cfg!(windows) { format!("{}\\bk", env::var("APPDATA")?) } else { format!("{}/.local/share/bk", env::var("HOME")?) }; // XXX will silently create a new default save if ron errors but path arg works. // revisit if/when stabilizing. 
ez file format upgrades let save: io::Result<Save> = fs::read_to_string(&save_path).and_then(|s| { ron::from_str(&s) .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid save file")) }); let args: Args = argh::from_env(); let path = match args.path { Some(p) => Some(fs::canonicalize(p)?.to_str().unwrap().to_string()), None => None, }; let (path, save, chapter, byte) = match (save, path) { (Err(e), None) => return Err(Box::new(e)), (Err(_), Some(p)) => (p, Save::default(), 0, 0), (Ok(s), None) => { let &(chapter, byte) = s.files.get(&s.last).unwrap(); (s.last.clone(), s, chapter, byte) } (Ok(s), Some(p)) => { if s.files.contains_key(&p) { let &(chapter, byte) = s.files.get(&p).unwrap(); (p, s, chapter, byte) } else { (p, s, 0, 0) } } }; // XXX oh god what let fg = args .fg .map(|s| Rgb { r: u8::from_str_radix(&s[0..2], 16).unwrap(), g: u8::from_str_radix(&s[2..4], 16).unwrap(), b: u8::from_str_radix(&s[4..6], 16).unwrap(), }) .unwrap_or(style::Color::Reset); let bg = args .bg .map(|s| Rgb { r: u8::from_str_radix(&s[0..2], 16).unwrap(), g: u8::from_str_radix(&s[2..4], 16).unwrap(), b: u8::from_str_radix(&s[4..6], 16).unwrap(), }) .unwrap_or(style::Color::Reset); Ok(State { path, save, save_path, meta: args.meta, bk: Props { colors: Colors::new(fg, bg), chapter, byte, width: args.width, toc: args.toc, }, }) } fn main() { let mut state = init().unwrap_or_else(|e| { println!("init error: {}", e); exit(1); }); let epub = epub::Epub::new(&state.path, state.meta).unwrap_or_else(|e| { println!("epub error: {}", e); exit(1); }); if state.meta { println!("{}", epub.meta); exit(0); } let mut bk = Bk::new(epub, state.bk); bk.run().unwrap_or_else(|e| { println!("run error: {}", e); exit(1); });
let byte = bk.chapters[bk.chapter].lines[bk.line].0; state .save .files .insert(state.path.clone(), (bk.chapter, byte)); state.save.last = state.path; let serialized = ron::to_string(&state.save).unwrap(); fs::write(state.save_path, serialized).unwrap_or_else(|e| { println!("error saving state: {}", e); exit(1); }); }
random_line_split
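The color handling in `init` slices the hex string and unwraps `u8::from_str_radix`, so a malformed `--fg`/`--bg` value panics. A checked sketch of the same parsing (`parse_rgb` is a hypothetical helper, not in the source):

```rust
// A checked variant of the hex parsing in init(); the original
// unwraps and will panic on input shorter than six ASCII bytes.
fn parse_rgb(s: &str) -> Option<(u8, u8, u8)> {
    if !s.is_ascii() || s.len() != 6 {
        return None;
    }
    let r = u8::from_str_radix(&s[0..2], 16).ok()?;
    let g = u8::from_str_radix(&s[2..4], 16).ok()?;
    let b = u8::from_str_radix(&s[4..6], 16).ok()?;
    Some((r, g, b))
}

fn main() {
    assert_eq!(parse_rgb("282a36"), Some((0x28, 0x2a, 0x36)));
    assert_eq!(parse_rgb("28"), None);      // too short
    assert_eq!(parse_rgb("28zz36"), None);  // not hex
}
```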
colors.rs
use std::io; use std::mem; use std::os::windows::io::AsRawHandle; use std::str::Bytes; use windows_sys::Win32::Foundation::HANDLE; use windows_sys::Win32::System::Console::{ GetConsoleScreenBufferInfo, SetConsoleTextAttribute, CONSOLE_SCREEN_BUFFER_INFO, FOREGROUND_BLUE as FG_BLUE, FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY, FOREGROUND_RED as FG_RED, }; use crate::Term; type WORD = u16; const FG_CYAN: WORD = FG_BLUE | FG_GREEN; const FG_MAGENTA: WORD = FG_BLUE | FG_RED; const FG_YELLOW: WORD = FG_GREEN | FG_RED; const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED; /// Query the given handle for information about the console's screen buffer. /// /// The given handle should represent a console. Otherwise, an error is /// returned. /// /// This corresponds to calling [`GetConsoleScreenBufferInfo`]. /// /// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo pub fn screen_buffer_info(h: HANDLE) -> io::Result<ScreenBufferInfo> { unsafe { let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed(); let rc = GetConsoleScreenBufferInfo(h, &mut info); if rc == 0 { return Err(io::Error::last_os_error()); } Ok(ScreenBufferInfo(info)) } } /// Set the text attributes of the console represented by the given handle. /// /// This corresponds to calling [`SetConsoleTextAttribute`]. /// /// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute pub fn set_text_attributes(h: HANDLE, attributes: u16) -> io::Result<()> { if unsafe { SetConsoleTextAttribute(h, attributes) } == 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } /// Represents console screen buffer information such as size, cursor position /// and styling attributes. /// /// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`]. /// /// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str #[derive(Clone)] pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO); impl ScreenBufferInfo { /// Returns the character attributes associated with this console. /// /// This corresponds to `wAttributes`. /// /// See [`char info`] for more details. /// /// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str pub fn attributes(&self) -> u16 { self.0.wAttributes } } /// A Windows console. /// /// This represents a very limited set of functionality available to a Windows /// console. In particular, it can only change text attributes such as color /// and intensity. This may grow over time. If you need more routines, please /// file an issue and/or PR. /// /// There is no way to "write" to this console. Simply write to /// stdout or stderr instead, while interleaving instructions to the console /// to change text attributes. /// /// A common pitfall when using a console is to forget to flush writes to /// stdout before setting new text attributes.
cur_attr: TextAttributes, } #[derive(Clone, Copy, Debug)] enum HandleKind { Stdout, Stderr, } impl HandleKind { fn handle(&self) -> HANDLE { match *self { HandleKind::Stdout => io::stdout().as_raw_handle() as HANDLE, HandleKind::Stderr => io::stderr().as_raw_handle() as HANDLE, } } } impl Console { /// Get a console for a standard I/O stream. fn create_for_stream(kind: HandleKind) -> io::Result<Console> { let h = kind.handle(); let info = screen_buffer_info(h)?; let attr = TextAttributes::from_word(info.attributes()); Ok(Console { kind: kind, start_attr: attr, cur_attr: attr, }) } /// Create a new Console to stdout. /// /// If there was a problem creating the console, then an error is returned. pub fn stdout() -> io::Result<Console> { Self::create_for_stream(HandleKind::Stdout) } /// Create a new Console to stderr. /// /// If there was a problem creating the console, then an error is returned. pub fn stderr() -> io::Result<Console> { Self::create_for_stream(HandleKind::Stderr) } /// Applies the current text attributes. fn set(&mut self) -> io::Result<()> { set_text_attributes(self.kind.handle(), self.cur_attr.to_word()) } /// Apply the given intensity and color attributes to the console /// foreground. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> { self.cur_attr.fg_color = color; self.cur_attr.fg_intense = intense; self.set() } /// Apply the given intensity and color attributes to the console /// background. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> { self.cur_attr.bg_color = color; self.cur_attr.bg_intense = intense; self.set() } /// Reset the console text attributes to their original settings. /// /// The original settings correspond to the text attributes on the console /// when this `Console` value was created. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn reset(&mut self) -> io::Result<()> { self.cur_attr = self.start_attr; self.set() } } /// A representation of text attributes for the Windows console. #[derive(Copy, Clone, Debug, Eq, PartialEq)] struct TextAttributes { fg_color: Color, fg_intense: Intense, bg_color: Color, bg_intense: Intense, } impl TextAttributes { fn to_word(&self) -> WORD { let mut w = 0; w |= self.fg_color.to_fg(); w |= self.fg_intense.to_fg(); w |= self.bg_color.to_bg(); w |= self.bg_intense.to_bg(); w } fn from_word(word: WORD) -> TextAttributes { TextAttributes { fg_color: Color::from_fg(word), fg_intense: Intense::from_fg(word), bg_color: Color::from_bg(word), bg_intense: Intense::from_bg(word), } } } /// Whether to use intense colors or not. #[allow(missing_docs)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Intense { Yes, No, } impl Intense { fn to_bg(&self) -> WORD { self.to_fg() << 4 } fn from_bg(word: WORD) -> Intense { Intense::from_fg(word >> 4) } fn to_fg(&self) -> WORD { match *self { Intense::No => 0, Intense::Yes => FG_INTENSITY, } } fn from_fg(word: WORD) -> Intense { if word & FG_INTENSITY > 0 { Intense::Yes } else { Intense::No } } } /// The set of available colors for use with a Windows console. 
#[allow(missing_docs)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Color { Black, Blue, Green, Red, Cyan, Magenta, Yellow, White, } impl Color { fn to_bg(&self) -> WORD { self.to_fg() << 4 } fn from_bg(word: WORD) -> Color { Color::from_fg(word >> 4) } fn to_fg(&self) -> WORD { match *self { Color::Black => 0, Color::Blue => FG_BLUE, Color::Green => FG_GREEN, Color::Red => FG_RED, Color::Cyan => FG_CYAN, Color::Magenta => FG_MAGENTA, Color::Yellow => FG_YELLOW, Color::White => FG_WHITE, } } fn from_fg(word: WORD) -> Color { match word & 0b111 { FG_BLUE => Color::Blue, FG_GREEN => Color::Green, FG_RED => Color::Red, FG_CYAN => Color::Cyan, FG_MAGENTA => Color::Magenta, FG_YELLOW => Color::Yellow, FG_WHITE => Color::White, _ => Color::Black, } } } pub fn console_colors(out: &Term, mut con: Console, bytes: &[u8]) -> io::Result<()> { use crate::ansi::AnsiCodeIterator; use std::str::from_utf8; let s = from_utf8(bytes).expect("data to be printed is not an ansi string"); let mut iter = AnsiCodeIterator::new(s); while!iter.rest_slice().is_empty() { if let Some((part, is_esc)) = iter.next() { if!is_esc { out.write_through_common(part.as_bytes())?; } else if part == "\x1b[0m" { con.reset()?; } else if let Some((intense, color, fg_bg)) = driver(parse_color, part) { match fg_bg { FgBg::Foreground => con.fg(intense, color), FgBg::Background => con.bg(intense, color), }?; } else if driver(parse_attr, part).is_none() { out.write_through_common(part.as_bytes())?; } } } Ok(()) } #[derive(Debug, PartialEq, Eq)] enum FgBg { Foreground, Background, } impl FgBg { fn new(byte: u8) -> Option<Self> { match byte { b'3' => Some(Self::Foreground), b'4' => Some(Self::Background), _ => None, } } } fn driver<Out>(parse: fn(Bytes<'_>) -> Option<Out>, part: &str) -> Option<Out> { let mut bytes = part.bytes(); loop { while bytes.next()?!= b'\x1b' {} if let ret @ Some(_) = (parse)(bytes.clone()) { return ret; } } } // `driver(parse_color, s)` parses the equivalent of the regex // \x1b\[(3|4)8;5;(8|9|1[0-5])m // for intense or // \x1b\[(3|4)([0-7])m // for normal fn parse_color(mut bytes: Bytes<'_>) -> Option<(Intense, Color, FgBg)> { parse_prefix(&mut bytes)?; let fg_bg = FgBg::new(bytes.next()?)?; let (intense, color) = match bytes.next()? { b @ b'0'..=b'7' => (Intense::No, normal_color_ansi_from_byte(b)?), b'8' => { if &[bytes.next()?, bytes.next()?, bytes.next()?]!= b";5;" { return None; } (Intense::Yes, parse_intense_color_ansi(&mut bytes)?) } _ => return None, }; parse_suffix(&mut bytes)?; Some((intense, color, fg_bg)) } // `driver(parse_attr, s)` parses the equivalent of the regex // \x1b\[([1-8])m fn parse_attr(mut bytes: Bytes<'_>) -> Option<u8> { parse_prefix(&mut bytes)?; let attr = match bytes.next()? { attr @ b'1'..=b'8' => attr, _ => return None, }; parse_suffix(&mut bytes)?; Some(attr) } fn parse_prefix(bytes: &mut Bytes<'_>) -> Option<()> { if bytes.next()? == b'[' { Some(()) } else { None } } fn parse_intense_color_ansi(bytes: &mut Bytes<'_>) -> Option<Color> { let color = match bytes.next()? { b'8' => Color::Black, b'9' => Color::Red, b'1' => match bytes.next()? 
{ b'0' => Color::Green, b'1' => Color::Yellow, b'2' => Color::Blue, b'3' => Color::Magenta, b'4' => Color::Cyan, b'5' => Color::White, _ => return None, }, _ => return None, }; Some(color) } fn normal_color_ansi_from_byte(b: u8) -> Option<Color> { let color = match b { b'0' => Color::Black, b'1' => Color::Red, b'2' => Color::Green, b'3' => Color::Yellow, b'4' => Color::Blue, b'5' => Color::Magenta, b'6' => Color::Cyan, b'7' => Color::White, _ => return None, }; Some(color) } fn parse_suffix(bytes: &mut Bytes<'_>) -> Option<()> { if bytes.next()? == b'm' { Some(()) } else { None } } #[cfg(test)] mod tests { use super::*; #[test] fn color_parsing() { let intense_color = "leading bytes \x1b[38;5;10m trailing bytes"; let parsed = driver(parse_color, intense_color).unwrap(); assert_eq!(parsed, (Intense::Yes, Color::Green, FgBg::Foreground)); let normal_color = "leading bytes \x1b[40m trailing bytes"; let parsed = driver(parse_color, normal_color).unwrap(); assert_eq!(parsed, (Intense::No, Color::Black, FgBg::Background)); } #[test] fn attr_parsing() { let attr = "leading bytes \x1b[1m trailing bytes"; let parsed = driver(parse_attr, attr).unwrap(); assert_eq!(parsed, b'1'); } }
#[derive(Debug)] pub struct Console { kind: HandleKind, start_attr: TextAttributes,
random_line_split
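`TextAttributes::to_word` above relies on the console attribute layout: the foreground occupies the low four bits and the background the next four, which is why every `to_bg` is simply `to_fg() << 4`. A self-contained check of that packing, with the constant values assumed to match the documented Windows API (`FOREGROUND_BLUE = 0x1`, and so on):

```rust
// Assumed values of the windows-sys FOREGROUND_* constants.
const FG_BLUE: u16 = 0x1;
const FG_GREEN: u16 = 0x2;
const FG_RED: u16 = 0x4;
const FG_INTENSITY: u16 = 0x8;

fn main() {
    let fg = FG_RED | FG_GREEN | FG_INTENSITY; // intense yellow foreground
    let bg = FG_BLUE << 4;                     // blue background
    let word = fg | bg;
    assert_eq!(word, 0x1E);

    // Unpacking reverses the shift, as in Color::from_bg/Intense::from_bg.
    assert_eq!(word & 0xF, fg);
    assert_eq!((word >> 4) & 0xF, FG_BLUE);
}
```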
colors.rs
use std::io; use std::mem; use std::os::windows::io::AsRawHandle; use std::str::Bytes; use windows_sys::Win32::Foundation::HANDLE; use windows_sys::Win32::System::Console::{ GetConsoleScreenBufferInfo, SetConsoleTextAttribute, CONSOLE_SCREEN_BUFFER_INFO, FOREGROUND_BLUE as FG_BLUE, FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY, FOREGROUND_RED as FG_RED, }; use crate::Term; type WORD = u16; const FG_CYAN: WORD = FG_BLUE | FG_GREEN; const FG_MAGENTA: WORD = FG_BLUE | FG_RED; const FG_YELLOW: WORD = FG_GREEN | FG_RED; const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED; /// Query the given handle for information about the console's screen buffer. /// /// The given handle should represent a console. Otherwise, an error is /// returned. /// /// This corresponds to calling [`GetConsoleScreenBufferInfo`]. /// /// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo pub fn screen_buffer_info(h: HANDLE) -> io::Result<ScreenBufferInfo> { unsafe { let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed(); let rc = GetConsoleScreenBufferInfo(h, &mut info); if rc == 0 { return Err(io::Error::last_os_error()); } Ok(ScreenBufferInfo(info)) } } /// Set the text attributes of the console represented by the given handle. /// /// This corresponds to calling [`SetConsoleTextAttribute`]. /// /// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute pub fn set_text_attributes(h: HANDLE, attributes: u16) -> io::Result<()> { if unsafe { SetConsoleTextAttribute(h, attributes) } == 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } /// Represents console screen buffer information such as size, cursor position /// and styling attributes. /// /// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`]. /// /// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str #[derive(Clone)] pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO); impl ScreenBufferInfo { /// Returns the character attributes associated with this console. /// /// This corresponds to `wAttributes`. /// /// See [`char info`] for more details. /// /// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str pub fn attributes(&self) -> u16 { self.0.wAttributes } } /// A Windows console. /// /// This represents a very limited set of functionality available to a Windows /// console. In particular, it can only change text attributes such as color /// and intensity. This may grow over time. If you need more routines, please /// file an issue and/or PR. /// /// There is no way to "write" to this console. Simply write to /// stdout or stderr instead, while interleaving instructions to the console /// to change text attributes. /// /// A common pitfall when using a console is to forget to flush writes to /// stdout before setting new text attributes. #[derive(Debug)] pub struct Console { kind: HandleKind, start_attr: TextAttributes, cur_attr: TextAttributes, } #[derive(Clone, Copy, Debug)] enum HandleKind { Stdout, Stderr, } impl HandleKind { fn handle(&self) -> HANDLE { match *self { HandleKind::Stdout => io::stdout().as_raw_handle() as HANDLE, HandleKind::Stderr => io::stderr().as_raw_handle() as HANDLE, } } } impl Console { /// Get a console for a standard I/O stream. 
fn create_for_stream(kind: HandleKind) -> io::Result<Console> { let h = kind.handle(); let info = screen_buffer_info(h)?; let attr = TextAttributes::from_word(info.attributes()); Ok(Console { kind: kind, start_attr: attr, cur_attr: attr, }) } /// Create a new Console to stdout. /// /// If there was a problem creating the console, then an error is returned. pub fn stdout() -> io::Result<Console> { Self::create_for_stream(HandleKind::Stdout) } /// Create a new Console to stderr. /// /// If there was a problem creating the console, then an error is returned. pub fn stderr() -> io::Result<Console> { Self::create_for_stream(HandleKind::Stderr) } /// Applies the current text attributes. fn set(&mut self) -> io::Result<()> { set_text_attributes(self.kind.handle(), self.cur_attr.to_word()) } /// Apply the given intensity and color attributes to the console /// foreground. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> { self.cur_attr.fg_color = color; self.cur_attr.fg_intense = intense; self.set() } /// Apply the given intensity and color attributes to the console /// background. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> { self.cur_attr.bg_color = color; self.cur_attr.bg_intense = intense; self.set() } /// Reset the console text attributes to their original settings. /// /// The original settings correspond to the text attributes on the console /// when this `Console` value was created. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn reset(&mut self) -> io::Result<()> { self.cur_attr = self.start_attr; self.set() } } /// A representation of text attributes for the Windows console. #[derive(Copy, Clone, Debug, Eq, PartialEq)] struct TextAttributes { fg_color: Color, fg_intense: Intense, bg_color: Color, bg_intense: Intense, } impl TextAttributes { fn to_word(&self) -> WORD { let mut w = 0; w |= self.fg_color.to_fg(); w |= self.fg_intense.to_fg(); w |= self.bg_color.to_bg(); w |= self.bg_intense.to_bg(); w } fn from_word(word: WORD) -> TextAttributes { TextAttributes { fg_color: Color::from_fg(word), fg_intense: Intense::from_fg(word), bg_color: Color::from_bg(word), bg_intense: Intense::from_bg(word), } } } /// Whether to use intense colors or not. #[allow(missing_docs)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Intense { Yes, No, } impl Intense { fn to_bg(&self) -> WORD { self.to_fg() << 4 } fn from_bg(word: WORD) -> Intense { Intense::from_fg(word >> 4) } fn to_fg(&self) -> WORD { match *self { Intense::No => 0, Intense::Yes => FG_INTENSITY, } } fn from_fg(word: WORD) -> Intense { if word & FG_INTENSITY > 0 { Intense::Yes } else { Intense::No } } } /// The set of available colors for use with a Windows console. 
#[allow(missing_docs)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Color { Black, Blue, Green, Red, Cyan, Magenta, Yellow, White, } impl Color { fn to_bg(&self) -> WORD { self.to_fg() << 4 } fn from_bg(word: WORD) -> Color { Color::from_fg(word >> 4) } fn to_fg(&self) -> WORD { match *self { Color::Black => 0, Color::Blue => FG_BLUE, Color::Green => FG_GREEN, Color::Red => FG_RED, Color::Cyan => FG_CYAN, Color::Magenta => FG_MAGENTA, Color::Yellow => FG_YELLOW, Color::White => FG_WHITE, } } fn from_fg(word: WORD) -> Color { match word & 0b111 { FG_BLUE => Color::Blue, FG_GREEN => Color::Green, FG_RED => Color::Red, FG_CYAN => Color::Cyan, FG_MAGENTA => Color::Magenta, FG_YELLOW => Color::Yellow, FG_WHITE => Color::White, _ => Color::Black, } } } pub fn console_colors(out: &Term, mut con: Console, bytes: &[u8]) -> io::Result<()> { use crate::ansi::AnsiCodeIterator; use std::str::from_utf8; let s = from_utf8(bytes).expect("data to be printed is not an ansi string"); let mut iter = AnsiCodeIterator::new(s); while!iter.rest_slice().is_empty() { if let Some((part, is_esc)) = iter.next() { if!is_esc { out.write_through_common(part.as_bytes())?; } else if part == "\x1b[0m" { con.reset()?; } else if let Some((intense, color, fg_bg)) = driver(parse_color, part) { match fg_bg { FgBg::Foreground => con.fg(intense, color), FgBg::Background => con.bg(intense, color), }?; } else if driver(parse_attr, part).is_none() { out.write_through_common(part.as_bytes())?; } } } Ok(()) } #[derive(Debug, PartialEq, Eq)] enum FgBg { Foreground, Background, } impl FgBg { fn new(byte: u8) -> Option<Self> { match byte { b'3' => Some(Self::Foreground), b'4' => Some(Self::Background), _ => None, } } } fn driver<Out>(parse: fn(Bytes<'_>) -> Option<Out>, part: &str) -> Option<Out> { let mut bytes = part.bytes(); loop { while bytes.next()?!= b'\x1b' {} if let ret @ Some(_) = (parse)(bytes.clone()) { return ret; } } } // `driver(parse_color, s)` parses the equivalent of the regex // \x1b\[(3|4)8;5;(8|9|1[0-5])m // for intense or // \x1b\[(3|4)([0-7])m // for normal fn parse_color(mut bytes: Bytes<'_>) -> Option<(Intense, Color, FgBg)> { parse_prefix(&mut bytes)?; let fg_bg = FgBg::new(bytes.next()?)?; let (intense, color) = match bytes.next()? { b @ b'0'..=b'7' => (Intense::No, normal_color_ansi_from_byte(b)?), b'8' => { if &[bytes.next()?, bytes.next()?, bytes.next()?]!= b";5;" { return None; } (Intense::Yes, parse_intense_color_ansi(&mut bytes)?) } _ => return None, }; parse_suffix(&mut bytes)?; Some((intense, color, fg_bg)) } // `driver(parse_attr, s)` parses the equivalent of the regex // \x1b\[([1-8])m fn parse_attr(mut bytes: Bytes<'_>) -> Option<u8> { parse_prefix(&mut bytes)?; let attr = match bytes.next()? { attr @ b'1'..=b'8' => attr, _ => return None, }; parse_suffix(&mut bytes)?; Some(attr) } fn parse_prefix(bytes: &mut Bytes<'_>) -> Option<()> { if bytes.next()? == b'[' { Some(()) } else { None } } fn parse_intense_color_ansi(bytes: &mut Bytes<'_>) -> Option<Color> { let color = match bytes.next()? { b'8' => Color::Black, b'9' => Color::Red, b'1' => match bytes.next()? 
{ b'0' => Color::Green, b'1' => Color::Yellow, b'2' => Color::Blue, b'3' => Color::Magenta, b'4' => Color::Cyan, b'5' => Color::White, _ => return None, }, _ => return None, }; Some(color) } fn normal_color_ansi_from_byte(b: u8) -> Option<Color> { let color = match b { b'0' => Color::Black, b'1' => Color::Red, b'2' => Color::Green, b'3' => Color::Yellow, b'4' => Color::Blue, b'5' => Color::Magenta, b'6' => Color::Cyan, b'7' => Color::White, _ => return None, }; Some(color) } fn parse_suffix(bytes: &mut Bytes<'_>) -> Option<()> { if bytes.next()? == b'm' { Some(()) } else { None } } #[cfg(test)] mod tests { use super::*; #[test] fn color_parsing() { let intense_color = "leading bytes \x1b[38;5;10m trailing bytes"; let parsed = driver(parse_color, intense_color).unwrap(); assert_eq!(parsed, (Intense::Yes, Color::Green, FgBg::Foreground)); let normal_color = "leading bytes \x1b[40m trailing bytes"; let parsed = driver(parse_color, normal_color).unwrap(); assert_eq!(parsed, (Intense::No, Color::Black, FgBg::Background)); } #[test] fn attr_parsing()
}
{ let attr = "leading bytes \x1b[1m trailing bytes"; let parsed = driver(parse_attr, attr).unwrap(); assert_eq!(parsed, b'1'); }
identifier_body
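The comments above describe the escape grammar as regexes, e.g. `\x1b\[(3|4)([0-7])m` for normal colors. A simplified standalone recognizer for just that normal-color form (the intense `38;5;` branch is omitted for brevity, and `parse_normal_color` is an illustrative name, not the crate's API):

```rust
// Recognize "\x1b[3<d>m" (foreground) or "\x1b[4<d>m" (background),
// where <d> is an ASCII digit 0-7, mirroring the documented regex.
fn parse_normal_color(s: &str) -> Option<(bool, u8)> {
    let b = s.as_bytes();
    if b.len() != 5 || b[0] != 0x1b || b[1] != b'[' || b[4] != b'm' {
        return None;
    }
    let is_fg = match b[2] {
        b'3' => true,
        b'4' => false,
        _ => return None,
    };
    match b[3] {
        c @ b'0'..=b'7' => Some((is_fg, c - b'0')),
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_normal_color("\x1b[32m"), Some((true, 2)));  // fg green
    assert_eq!(parse_normal_color("\x1b[41m"), Some((false, 1))); // bg red
    assert_eq!(parse_normal_color("\x1b[90m"), None); // bright codes not handled
}
```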
colors.rs
use std::io; use std::mem; use std::os::windows::io::AsRawHandle; use std::str::Bytes; use windows_sys::Win32::Foundation::HANDLE; use windows_sys::Win32::System::Console::{ GetConsoleScreenBufferInfo, SetConsoleTextAttribute, CONSOLE_SCREEN_BUFFER_INFO, FOREGROUND_BLUE as FG_BLUE, FOREGROUND_GREEN as FG_GREEN, FOREGROUND_INTENSITY as FG_INTENSITY, FOREGROUND_RED as FG_RED, }; use crate::Term; type WORD = u16; const FG_CYAN: WORD = FG_BLUE | FG_GREEN; const FG_MAGENTA: WORD = FG_BLUE | FG_RED; const FG_YELLOW: WORD = FG_GREEN | FG_RED; const FG_WHITE: WORD = FG_BLUE | FG_GREEN | FG_RED; /// Query the given handle for information about the console's screen buffer. /// /// The given handle should represent a console. Otherwise, an error is /// returned. /// /// This corresponds to calling [`GetConsoleScreenBufferInfo`]. /// /// [`GetConsoleScreenBufferInfo`]: https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo pub fn screen_buffer_info(h: HANDLE) -> io::Result<ScreenBufferInfo> { unsafe { let mut info: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed(); let rc = GetConsoleScreenBufferInfo(h, &mut info); if rc == 0 { return Err(io::Error::last_os_error()); } Ok(ScreenBufferInfo(info)) } } /// Set the text attributes of the console represented by the given handle. /// /// This corresponds to calling [`SetConsoleTextAttribute`]. /// /// [`SetConsoleTextAttribute`]: https://docs.microsoft.com/en-us/windows/console/setconsoletextattribute pub fn set_text_attributes(h: HANDLE, attributes: u16) -> io::Result<()> { if unsafe { SetConsoleTextAttribute(h, attributes) } == 0 { Err(io::Error::last_os_error()) } else { Ok(()) } } /// Represents console screen buffer information such as size, cursor position /// and styling attributes. /// /// This wraps a [`CONSOLE_SCREEN_BUFFER_INFO`]. /// /// [`CONSOLE_SCREEN_BUFFER_INFO`]: https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str #[derive(Clone)] pub struct ScreenBufferInfo(CONSOLE_SCREEN_BUFFER_INFO); impl ScreenBufferInfo { /// Returns the character attributes associated with this console. /// /// This corresponds to `wAttributes`. /// /// See [`char info`] for more details. /// /// [`char info`]: https://docs.microsoft.com/en-us/windows/console/char-info-str pub fn attributes(&self) -> u16 { self.0.wAttributes } } /// A Windows console. /// /// This represents a very limited set of functionality available to a Windows /// console. In particular, it can only change text attributes such as color /// and intensity. This may grow over time. If you need more routines, please /// file an issue and/or PR. /// /// There is no way to "write" to this console. Simply write to /// stdout or stderr instead, while interleaving instructions to the console /// to change text attributes. /// /// A common pitfall when using a console is to forget to flush writes to /// stdout before setting new text attributes. #[derive(Debug)] pub struct Console { kind: HandleKind, start_attr: TextAttributes, cur_attr: TextAttributes, } #[derive(Clone, Copy, Debug)] enum HandleKind { Stdout, Stderr, } impl HandleKind { fn handle(&self) -> HANDLE { match *self { HandleKind::Stdout => io::stdout().as_raw_handle() as HANDLE, HandleKind::Stderr => io::stderr().as_raw_handle() as HANDLE, } } } impl Console { /// Get a console for a standard I/O stream. 
fn create_for_stream(kind: HandleKind) -> io::Result<Console> { let h = kind.handle(); let info = screen_buffer_info(h)?; let attr = TextAttributes::from_word(info.attributes()); Ok(Console { kind, start_attr: attr, cur_attr: attr, }) } /// Create a new Console to stdout. /// /// If there was a problem creating the console, then an error is returned. pub fn
() -> io::Result<Console> { Self::create_for_stream(HandleKind::Stdout) } /// Create a new Console to stderr. /// /// If there was a problem creating the console, then an error is returned. pub fn stderr() -> io::Result<Console> { Self::create_for_stream(HandleKind::Stderr) } /// Applies the current text attributes. fn set(&mut self) -> io::Result<()> { set_text_attributes(self.kind.handle(), self.cur_attr.to_word()) } /// Apply the given intensity and color attributes to the console /// foreground. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn fg(&mut self, intense: Intense, color: Color) -> io::Result<()> { self.cur_attr.fg_color = color; self.cur_attr.fg_intense = intense; self.set() } /// Apply the given intensity and color attributes to the console /// background. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn bg(&mut self, intense: Intense, color: Color) -> io::Result<()> { self.cur_attr.bg_color = color; self.cur_attr.bg_intense = intense; self.set() } /// Reset the console text attributes to their original settings. /// /// The original settings correspond to the text attributes on the console /// when this `Console` value was created. /// /// If there was a problem setting attributes on the console, then an error /// is returned. pub fn reset(&mut self) -> io::Result<()> { self.cur_attr = self.start_attr; self.set() } } /// A representation of text attributes for the Windows console. #[derive(Copy, Clone, Debug, Eq, PartialEq)] struct TextAttributes { fg_color: Color, fg_intense: Intense, bg_color: Color, bg_intense: Intense, } impl TextAttributes { fn to_word(&self) -> WORD { let mut w = 0; w |= self.fg_color.to_fg(); w |= self.fg_intense.to_fg(); w |= self.bg_color.to_bg(); w |= self.bg_intense.to_bg(); w } fn from_word(word: WORD) -> TextAttributes { TextAttributes { fg_color: Color::from_fg(word), fg_intense: Intense::from_fg(word), bg_color: Color::from_bg(word), bg_intense: Intense::from_bg(word), } } } /// Whether to use intense colors or not. #[allow(missing_docs)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Intense { Yes, No, } impl Intense { fn to_bg(&self) -> WORD { self.to_fg() << 4 } fn from_bg(word: WORD) -> Intense { Intense::from_fg(word >> 4) } fn to_fg(&self) -> WORD { match *self { Intense::No => 0, Intense::Yes => FG_INTENSITY, } } fn from_fg(word: WORD) -> Intense { if word & FG_INTENSITY > 0 { Intense::Yes } else { Intense::No } } } /// The set of available colors for use with a Windows console. 
#[allow(missing_docs)] #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum Color { Black, Blue, Green, Red, Cyan, Magenta, Yellow, White, } impl Color { fn to_bg(&self) -> WORD { self.to_fg() << 4 } fn from_bg(word: WORD) -> Color { Color::from_fg(word >> 4) } fn to_fg(&self) -> WORD { match *self { Color::Black => 0, Color::Blue => FG_BLUE, Color::Green => FG_GREEN, Color::Red => FG_RED, Color::Cyan => FG_CYAN, Color::Magenta => FG_MAGENTA, Color::Yellow => FG_YELLOW, Color::White => FG_WHITE, } } fn from_fg(word: WORD) -> Color { match word & 0b111 { FG_BLUE => Color::Blue, FG_GREEN => Color::Green, FG_RED => Color::Red, FG_CYAN => Color::Cyan, FG_MAGENTA => Color::Magenta, FG_YELLOW => Color::Yellow, FG_WHITE => Color::White, _ => Color::Black, } } } pub fn console_colors(out: &Term, mut con: Console, bytes: &[u8]) -> io::Result<()> { use crate::ansi::AnsiCodeIterator; use std::str::from_utf8; let s = from_utf8(bytes).expect("data to be printed is not an ansi string"); let mut iter = AnsiCodeIterator::new(s); while!iter.rest_slice().is_empty() { if let Some((part, is_esc)) = iter.next() { if!is_esc { out.write_through_common(part.as_bytes())?; } else if part == "\x1b[0m" { con.reset()?; } else if let Some((intense, color, fg_bg)) = driver(parse_color, part) { match fg_bg { FgBg::Foreground => con.fg(intense, color), FgBg::Background => con.bg(intense, color), }?; } else if driver(parse_attr, part).is_none() { out.write_through_common(part.as_bytes())?; } } } Ok(()) } #[derive(Debug, PartialEq, Eq)] enum FgBg { Foreground, Background, } impl FgBg { fn new(byte: u8) -> Option<Self> { match byte { b'3' => Some(Self::Foreground), b'4' => Some(Self::Background), _ => None, } } } fn driver<Out>(parse: fn(Bytes<'_>) -> Option<Out>, part: &str) -> Option<Out> { let mut bytes = part.bytes(); loop { while bytes.next()?!= b'\x1b' {} if let ret @ Some(_) = (parse)(bytes.clone()) { return ret; } } } // `driver(parse_color, s)` parses the equivalent of the regex // \x1b\[(3|4)8;5;(8|9|1[0-5])m // for intense or // \x1b\[(3|4)([0-7])m // for normal fn parse_color(mut bytes: Bytes<'_>) -> Option<(Intense, Color, FgBg)> { parse_prefix(&mut bytes)?; let fg_bg = FgBg::new(bytes.next()?)?; let (intense, color) = match bytes.next()? { b @ b'0'..=b'7' => (Intense::No, normal_color_ansi_from_byte(b)?), b'8' => { if &[bytes.next()?, bytes.next()?, bytes.next()?]!= b";5;" { return None; } (Intense::Yes, parse_intense_color_ansi(&mut bytes)?) } _ => return None, }; parse_suffix(&mut bytes)?; Some((intense, color, fg_bg)) } // `driver(parse_attr, s)` parses the equivalent of the regex // \x1b\[([1-8])m fn parse_attr(mut bytes: Bytes<'_>) -> Option<u8> { parse_prefix(&mut bytes)?; let attr = match bytes.next()? { attr @ b'1'..=b'8' => attr, _ => return None, }; parse_suffix(&mut bytes)?; Some(attr) } fn parse_prefix(bytes: &mut Bytes<'_>) -> Option<()> { if bytes.next()? == b'[' { Some(()) } else { None } } fn parse_intense_color_ansi(bytes: &mut Bytes<'_>) -> Option<Color> { let color = match bytes.next()? { b'8' => Color::Black, b'9' => Color::Red, b'1' => match bytes.next()? 
{ b'0' => Color::Green, b'1' => Color::Yellow, b'2' => Color::Blue, b'3' => Color::Magenta, b'4' => Color::Cyan, b'5' => Color::White, _ => return None, }, _ => return None, }; Some(color) } fn normal_color_ansi_from_byte(b: u8) -> Option<Color> { let color = match b { b'0' => Color::Black, b'1' => Color::Red, b'2' => Color::Green, b'3' => Color::Yellow, b'4' => Color::Blue, b'5' => Color::Magenta, b'6' => Color::Cyan, b'7' => Color::White, _ => return None, }; Some(color) } fn parse_suffix(bytes: &mut Bytes<'_>) -> Option<()> { if bytes.next()? == b'm' { Some(()) } else { None } } #[cfg(test)] mod tests { use super::*; #[test] fn color_parsing() { let intense_color = "leading bytes \x1b[38;5;10m trailing bytes"; let parsed = driver(parse_color, intense_color).unwrap(); assert_eq!(parsed, (Intense::Yes, Color::Green, FgBg::Foreground)); let normal_color = "leading bytes \x1b[40m trailing bytes"; let parsed = driver(parse_color, normal_color).unwrap(); assert_eq!(parsed, (Intense::No, Color::Black, FgBg::Background)); } #[test] fn attr_parsing() { let attr = "leading bytes \x1b[1m trailing bytes"; let parsed = driver(parse_attr, attr).unwrap(); assert_eq!(parsed, b'1'); } }
stdout
identifier_name
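A usage sketch of the `Console` API defined above, showing the flush-before-attribute pitfall its doc comment warns about. This is a hypothetical caller: it assumes it sits alongside the `Console`, `Intense`, and `Color` items from this file and, like them, only compiles on Windows:

```rust
use std::io::{self, Write};

// Hypothetical caller; not part of the source file.
fn demo(con: &mut Console) -> io::Result<()> {
    let mut out = io::stdout();
    write!(out, "plain ")?;
    out.flush()?; // flush buffered text BEFORE changing attributes
    con.fg(Intense::Yes, Color::Green)?;
    write!(out, "green")?;
    out.flush()?; // and again before resetting, or "green" prints un-colored
    con.reset()
}
```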
appendlist.rs
use std::cell::{Cell, UnsafeCell}; use std::fmt::{self, Debug}; use std::iter::FromIterator; use std::ops::Index; use crate::common::{chunk_size, chunk_start, index_chunk}; /// A list that can be appended to while elements are borrowed /// /// This looks like a fairly bare-bones list API, except that it has a `push` /// method that works on non-`mut` lists. It is safe to hold references to /// values inside this list and push a new value onto the end. /// /// Additionally, the list has O(1) index and O(1) push (not amortized!). /// /// For example, this would be illegal with a `Vec`: /// /// ``` /// use appendlist::AppendList; /// /// let list = AppendList::new(); /// /// list.push(1); /// let first_item = &list[0]; /// list.push(2); /// let second_item = &list[1]; /// /// assert_eq!(*first_item, list[0]); /// assert_eq!(*second_item, list[1]); /// ``` /// /// # Implementation details /// /// This section is not necessary to use the API, it just describes the underlying /// allocation and indexing strategies. /// /// The list is a `Vec` of *chunks*. Each chunk is itself a `Vec<T>`. The list /// will fill up a chunk, then allocate a new chunk with its full capacity. /// Because the capacity of a given chunk never changes, the underlying `Vec<T>` /// never reallocates, so references to that chunk are never invalidated. Each /// chunk is twice the size of the previous chunk, so there will never be more /// than O(log(n)) chunks. /// /// Constant-time indexing is achieved because the chunk ID of a particular index /// can be quickly calculated: if the first chunk has size c, index i will be /// located in chunk floor(log2(i + c) - log2(c)). If c is a power of 2, this /// is equivalent to floor(log2(i + c)) - floor(log2(c)), and a very fast floor /// log2 algorithm can be derived from `usize::leading_zeros()`. pub struct AppendList<T> { chunks: UnsafeCell<Vec<Vec<T>>>, len: Cell<usize>, } impl<T> AppendList<T> { /// Wrapper to get the list of chunks immutably fn chunks(&self) -> &[Vec<T>] { unsafe { &*self.chunks.get() } } /// In test builds, check all of the unsafe invariants /// /// In release builds, no-op fn check_invariants(&self)
self.len.get() - chunk_start(self.chunks().len() - 1) ); } else { // No chunks assert_eq!(0, self.chunks().len()); } } } /// Create a new `AppendList` pub fn new() -> Self { Self { chunks: UnsafeCell::new(Vec::new()), len: Cell::new(0), } } /// Append an item to the end /// /// Note that this does not require `mut`. pub fn push(&self, item: T) { self.check_invariants(); // Unsafe code alert! // // Preserve the following invariants: // - Only the last chunk may be modified // - A chunk cannot ever be reallocated // - len must reflect the length // // Invariants are checked in the check_invariants method let mut_chunks = unsafe { &mut *self.chunks.get() }; let new_index = self.len.get(); let chunk_id = index_chunk(new_index); if chunk_id < mut_chunks.len() { // We should always be inserting into the last chunk debug_assert_eq!(chunk_id, mut_chunks.len() - 1); // Insert into the appropriate chunk let chunk = &mut mut_chunks[chunk_id]; // The chunk must not be reallocated! Save the pre-insertion capacity // so we can check it later (debug builds only) #[cfg(test)] let prev_capacity = chunk.capacity(); // Do the insertion chunk.push(item); // Check that the capacity didn't change (debug builds only) #[cfg(test)] assert_eq!(prev_capacity, chunk.capacity()); } else { // Need to allocate a new chunk // New chunk should be the immediate next chunk debug_assert_eq!(chunk_id, mut_chunks.len()); // New chunk must be big enough let mut new_chunk = Vec::with_capacity(chunk_size(chunk_id)); debug_assert!(new_chunk.capacity() >= chunk_size(chunk_id)); new_chunk.push(item); mut_chunks.push(new_chunk); } // Increment the length self.len.set(self.len.get() + 1); self.check_invariants(); } /// Get the length of the list pub fn len(&self) -> usize { self.check_invariants(); self.len.get() } /// Get an item from the list, if it is in bounds /// /// Returns `None` if the `index` is out-of-bounds. Note that you can also /// index with `[]`, which will panic on out-of-bounds. 
pub fn get(&self, index: usize) -> Option<&T> { self.check_invariants(); if index >= self.len() { return None; } let chunk_id = index_chunk(index); let chunk_start = chunk_start(chunk_id); return Some(&self.chunks()[chunk_id][index - chunk_start]); } /// Get an iterator over the list pub fn iter(&self) -> Iter<T> { self.check_invariants(); Iter { list: &self, index: 0, } } } impl<T> Default for AppendList<T> { fn default() -> Self { Self::new() } } impl<T> Index<usize> for AppendList<T> { type Output = T; fn index(&self, index: usize) -> &Self::Output { self.get(index) .expect("AppendList indexed beyond its length") } } impl<T> FromIterator<T> for AppendList<T> { fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self { let list = Self::new(); for item in iter { list.push(item); } list } } impl<'l, T> IntoIterator for &'l AppendList<T> { type Item = &'l T; type IntoIter = Iter<'l, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<T: PartialEq> PartialEq for AppendList<T> { fn eq(&self, other: &AppendList<T>) -> bool { let mut s = self.iter(); let mut o = other.iter(); loop { match (s.next(), o.next()) { (Some(a), Some(b)) if a == b => {}, (None, None) => return true, _ => return false, } } } } impl<T: Debug> Debug for AppendList<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_list().entries(self.iter()).finish() } } pub struct Iter<'l, T> { list: &'l AppendList<T>, index: usize, } impl<'l, T> Iterator for Iter<'l, T> { type Item = &'l T; fn next(&mut self) -> Option<Self::Item> { let item = self.list.get(self.index); self.index += 1; item } fn size_hint(&self) -> (usize, Option<usize>) { let remaining = self.list.len() - self.index; (remaining, Some(remaining)) } } #[cfg(test)] mod test { use super::*; #[test] fn from_iterator() { let l: AppendList<i32> = (0..100).collect(); for i in 0..100 { assert_eq!(l[i], i as i32); } } #[test] fn iterator() { let l: AppendList<i32> = (0..100).collect(); let mut i1 = l.iter(); let mut i2 = l.into_iter(); for item in 0..100 { assert_eq!(i1.next(), Some(&item)); assert_eq!(i2.next(), Some(&item)); } assert_eq!(i1.next(), None); assert_eq!(i2.next(), None); } #[test] fn equality() { let a = AppendList::new(); let b = AppendList::new(); assert_eq!(a, b); a.push("foo"); assert_ne!(a, b); b.push("foo"); assert_eq!(a, b); a.push("bar"); a.push("baz"); assert_ne!(a, b); } #[test] fn iterator_size_hint() { let l: AppendList<i32> = AppendList::new(); let mut i = l.iter(); assert_eq!(i.size_hint(), (0, Some(0))); l.push(1); assert_eq!(i.size_hint(), (1, Some(1))); l.push(2); assert_eq!(i.size_hint(), (2, Some(2))); i.next(); assert_eq!(i.size_hint(), (1, Some(1))); l.push(3); assert_eq!(i.size_hint(), (2, Some(2))); i.next(); assert_eq!(i.size_hint(), (1, Some(1))); i.next(); assert_eq!(i.size_hint(), (0, Some(0))); } #[test] fn empty_list() { let n: AppendList<usize> = AppendList::new(); assert_eq!(n.len(), 0); assert_eq!(n.get(0), None); let d: AppendList<usize> = AppendList::default(); assert_eq!(d.len(), 0); assert_eq!(d.get(0), None); } #[test] fn thousand_item_list() { test_big_list(1_000); } #[test] #[ignore] fn million_item_list() { test_big_list(1_000_000); } fn test_big_list(size: usize) { let l = AppendList::new(); let mut refs = Vec::new(); for i in 0..size { assert_eq!(l.len(), i); l.push(i); refs.push(l[i]); assert_eq!(l.len(), i + 1); } for i in 0..size { assert_eq!(Some(&refs[i]), l.get(i)); } } }
{ #[cfg(test)] { if self.len.get() > 0 { // Correct number of chunks assert_eq!(index_chunk(self.len.get() - 1), self.chunks().len() - 1); // Every chunk holds enough items for chunk_id in 0..self.chunks().len() { assert!(chunk_size(chunk_id) <= self.chunks()[chunk_id].capacity()); } // Intermediate chunks are full for chunk_id in 0..self.chunks().len() - 1 { assert_eq!(chunk_size(chunk_id), self.chunks()[chunk_id].len()); } // Last chunk is correct length assert_eq!( self.chunks().last().unwrap().len(),
identifier_body
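The `check_invariants` body above encodes the whole allocation story: the chunk count matches the length, intermediate chunks are full, and only the last chunk is partially filled. A minimal usage sketch of how those invariants play out; the comment assumes a first-chunk capacity of 16 (that constant lives in `crate::common` and is not shown in this excerpt):

```rust
use appendlist::AppendList;

fn main() {
    let list = AppendList::new();
    for i in 0..20 {
        list.push(i);
    }
    // Assuming a first-chunk capacity of 16, these 20 items span two chunks:
    // chunk 0 is full with 16 items, and the last chunk holds
    // len - chunk_start(1) == 20 - 16 == 4 items.
    assert_eq!(list.len(), 20);
    assert_eq!(list.get(19), Some(&19));
    assert_eq!(list.get(20), None);
}
```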
appendlist.rs
use std::cell::{Cell, UnsafeCell}; use std::fmt::{self, Debug}; use std::iter::FromIterator; use std::ops::Index; use crate::common::{chunk_size, chunk_start, index_chunk}; /// A list that can be appended to while elements are borrowed /// /// This looks like a fairly bare-bones list API, except that it has a `push` /// method that works on non-`mut` lists. It is safe to hold references to /// values inside this list and push a new value onto the end. /// /// Additionally, the list has O(1) index and O(1) push (not amortized!). /// /// For example, this would be illegal with a `Vec`: /// /// ``` /// use appendlist::AppendList; /// /// let list = AppendList::new(); /// /// list.push(1); /// let first_item = &list[0]; /// list.push(2); /// let second_item = &list[1]; /// /// assert_eq!(*first_item, list[0]); /// assert_eq!(*second_item, list[1]); /// ``` /// /// # Implementation details /// /// This section is not necessary to use the API, it just describes the underlying /// allocation and indexing strategies. /// /// The list is a `Vec` of *chunks*. Each chunk is itself a `Vec<T>`. The list /// will fill up a chunk, then allocate a new chunk with its full capacity. /// Because the capacity of a given chunk never changes, the underlying `Vec<T>` /// never reallocates, so references to that chunk are never invalidated. Each /// chunk is twice the size of the previous chunk, so there will never be more /// than O(log(n)) chunks. /// /// Constant-time indexing is achieved because the chunk ID of a particular index /// can be quickly calculated: if the first chunk has size c, index i will be /// located in chunk floor(log2(i + c) - log2(c)). If c is a power of 2, this /// is equivalent to floor(log2(i + c)) - floor(log2(c)), and a very fast floor /// log2 algorithm can be derived from `usize::leading_zeros()`. pub struct AppendList<T> { chunks: UnsafeCell<Vec<Vec<T>>>, len: Cell<usize>, } impl<T> AppendList<T> { /// Wrapper to get the list of chunks immutably fn chunks(&self) -> &[Vec<T>] { unsafe { &*self.chunks.get() } } /// In test builds, check all of the unsafe invariants /// /// In release builds, no-op fn check_invariants(&self) { #[cfg(test)] { if self.len.get() > 0 { // Correct number of chunks assert_eq!(index_chunk(self.len.get() - 1), self.chunks().len() - 1); // Every chunk holds enough items for chunk_id in 0..self.chunks().len() { assert!(chunk_size(chunk_id) <= self.chunks()[chunk_id].capacity()); } // Intermediate chunks are full for chunk_id in 0..self.chunks().len() - 1 { assert_eq!(chunk_size(chunk_id), self.chunks()[chunk_id].len()); } // Last chunk is correct length assert_eq!( self.chunks().last().unwrap().len(), self.len.get() - chunk_start(self.chunks().len() - 1) ); } else { // No chunks assert_eq!(0, self.chunks().len()); } } } /// Create a new `AppendList` pub fn new() -> Self { Self { chunks: UnsafeCell::new(Vec::new()), len: Cell::new(0), } } /// Append an item to the end /// /// Note that this does not require `mut`. pub fn push(&self, item: T) { self.check_invariants(); // Unsafe code alert! 
// // Preserve the following invariants: // - Only the last chunk may be modified // - A chunk cannot ever be reallocated // - len must reflect the length // // Invariants are checked in the check_invariants method let mut_chunks = unsafe { &mut *self.chunks.get() }; let new_index = self.len.get(); let chunk_id = index_chunk(new_index); if chunk_id < mut_chunks.len() { // We should always be inserting into the last chunk debug_assert_eq!(chunk_id, mut_chunks.len() - 1); // Insert into the appropriate chunk let chunk = &mut mut_chunks[chunk_id]; // The chunk must not be reallocated! Save the pre-insertion capacity // so we can check it later (test builds only) #[cfg(test)] let prev_capacity = chunk.capacity(); // Do the insertion chunk.push(item); // Check that the capacity didn't change (test builds only) #[cfg(test)] assert_eq!(prev_capacity, chunk.capacity()); } else { // Need to allocate a new chunk // New chunk should be the immediate next chunk debug_assert_eq!(chunk_id, mut_chunks.len()); // New chunk must be big enough let mut new_chunk = Vec::with_capacity(chunk_size(chunk_id)); debug_assert!(new_chunk.capacity() >= chunk_size(chunk_id)); new_chunk.push(item); mut_chunks.push(new_chunk); } // Increment the length self.len.set(self.len.get() + 1); self.check_invariants(); } /// Get the length of the list pub fn
(&self) -> usize { self.check_invariants(); self.len.get() } /// Get an item from the list, if it is in bounds /// /// Returns `None` if the `index` is out-of-bounds. Note that you can also /// index with `[]`, which will panic on out-of-bounds. pub fn get(&self, index: usize) -> Option<&T> { self.check_invariants(); if index >= self.len() { return None; } let chunk_id = index_chunk(index); let chunk_start = chunk_start(chunk_id); return Some(&self.chunks()[chunk_id][index - chunk_start]); } /// Get an iterator over the list pub fn iter(&self) -> Iter<T> { self.check_invariants(); Iter { list: &self, index: 0, } } } impl<T> Default for AppendList<T> { fn default() -> Self { Self::new() } } impl<T> Index<usize> for AppendList<T> { type Output = T; fn index(&self, index: usize) -> &Self::Output { self.get(index) .expect("AppendList indexed beyond its length") } } impl<T> FromIterator<T> for AppendList<T> { fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self { let list = Self::new(); for item in iter { list.push(item); } list } } impl<'l, T> IntoIterator for &'l AppendList<T> { type Item = &'l T; type IntoIter = Iter<'l, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<T: PartialEq> PartialEq for AppendList<T> { fn eq(&self, other: &AppendList<T>) -> bool { let mut s = self.iter(); let mut o = other.iter(); loop { match (s.next(), o.next()) { (Some(a), Some(b)) if a == b => {}, (None, None) => return true, _ => return false, } } } } impl<T: Debug> Debug for AppendList<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_list().entries(self.iter()).finish() } } pub struct Iter<'l, T> { list: &'l AppendList<T>, index: usize, } impl<'l, T> Iterator for Iter<'l, T> { type Item = &'l T; fn next(&mut self) -> Option<Self::Item> { let item = self.list.get(self.index); self.index += 1; item } fn size_hint(&self) -> (usize, Option<usize>) { let remaining = self.list.len() - self.index; (remaining, Some(remaining)) } } #[cfg(test)] mod test { use super::*; #[test] fn from_iterator() { let l: AppendList<i32> = (0..100).collect(); for i in 0..100 { assert_eq!(l[i], i as i32); } } #[test] fn iterator() { let l: AppendList<i32> = (0..100).collect(); let mut i1 = l.iter(); let mut i2 = l.into_iter(); for item in 0..100 { assert_eq!(i1.next(), Some(&item)); assert_eq!(i2.next(), Some(&item)); } assert_eq!(i1.next(), None); assert_eq!(i2.next(), None); } #[test] fn equality() { let a = AppendList::new(); let b = AppendList::new(); assert_eq!(a, b); a.push("foo"); assert_ne!(a, b); b.push("foo"); assert_eq!(a, b); a.push("bar"); a.push("baz"); assert_ne!(a, b); } #[test] fn iterator_size_hint() { let l: AppendList<i32> = AppendList::new(); let mut i = l.iter(); assert_eq!(i.size_hint(), (0, Some(0))); l.push(1); assert_eq!(i.size_hint(), (1, Some(1))); l.push(2); assert_eq!(i.size_hint(), (2, Some(2))); i.next(); assert_eq!(i.size_hint(), (1, Some(1))); l.push(3); assert_eq!(i.size_hint(), (2, Some(2))); i.next(); assert_eq!(i.size_hint(), (1, Some(1))); i.next(); assert_eq!(i.size_hint(), (0, Some(0))); } #[test] fn empty_list() { let n: AppendList<usize> = AppendList::new(); assert_eq!(n.len(), 0); assert_eq!(n.get(0), None); let d: AppendList<usize> = AppendList::default(); assert_eq!(d.len(), 0); assert_eq!(d.get(0), None); } #[test] fn thousand_item_list() { test_big_list(1_000); } #[test] #[ignore] fn million_item_list() { test_big_list(1_000_000); } fn test_big_list(size: usize) { let l = AppendList::new(); let mut refs = Vec::new(); for i in 0..size { 
assert_eq!(l.len(), i); l.push(i); refs.push(l[i]); assert_eq!(l.len(), i + 1); } for i in 0..size { assert_eq!(Some(&refs[i]), l.get(i)); } } }
len
identifier_name
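The doc comment above spells out the indexing math, but the `crate::common` helpers it relies on are not part of this excerpt. A hedged reconstruction consistent with that description, assuming a first chunk size of c = 16 (any power of two works the same way; the real crate's constant may differ):

```rust
// Hypothetical reconstruction of the `crate::common` helpers.
const FIRST_CHUNK_SIZE: usize = 16;

/// floor(log2(x)), derived from leading_zeros() as the doc comment suggests.
fn floor_log2(x: usize) -> u32 {
    usize::BITS - 1 - x.leading_zeros()
}

/// Capacity of chunk `chunk_id`: 16, 32, 64, ... (each chunk doubles).
pub fn chunk_size(chunk_id: usize) -> usize {
    FIRST_CHUNK_SIZE << chunk_id
}

/// Index of the first element stored in chunk `chunk_id`.
pub fn chunk_start(chunk_id: usize) -> usize {
    chunk_size(chunk_id) - FIRST_CHUNK_SIZE
}

/// Chunk holding `index`: floor(log2(index + c)) - floor(log2(c)), for c = 16.
pub fn index_chunk(index: usize) -> usize {
    (floor_log2(index + FIRST_CHUNK_SIZE) - floor_log2(FIRST_CHUNK_SIZE)) as usize
}
```

For example, `index_chunk(15) == 0`, `index_chunk(16) == 1`, and `index_chunk(48) == 2`, matching chunk capacities of 16, 32, and 64.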
appendlist.rs
use std::cell::{Cell, UnsafeCell}; use std::fmt::{self, Debug}; use std::iter::FromIterator; use std::ops::Index; use crate::common::{chunk_size, chunk_start, index_chunk}; /// A list that can be appended to while elements are borrowed /// /// This looks like a fairly bare-bones list API, except that it has a `push` /// method that works on non-`mut` lists. It is safe to hold references to /// values inside this list and push a new value onto the end. /// /// Additionally, the list has O(1) index and O(1) push (not amortized!). /// /// For example, this would be illegal with a `Vec`: /// /// ``` /// use appendlist::AppendList; /// /// let list = AppendList::new(); /// /// list.push(1); /// let first_item = &list[0]; /// list.push(2); /// let second_item = &list[1]; /// /// assert_eq!(*first_item, list[0]); /// assert_eq!(*second_item, list[1]); /// ``` /// /// # Implementation details /// /// This section is not necessary to use the API, it just describes the underlying /// allocation and indexing strategies. /// /// The list is a `Vec` of *chunks*. Each chunk is itself a `Vec<T>`. The list /// will fill up a chunk, then allocate a new chunk with its full capacity. /// Because the capacity of a given chunk never changes, the underlying `Vec<T>` /// never reallocates, so references to that chunk are never invalidated. Each /// chunk is twice the size of the previous chunk, so there will never be more /// than O(log(n)) chunks. /// /// Constant-time indexing is achieved because the chunk ID of a particular index /// can be quickly calculated: if the first chunk has size c, index i will be /// located in chunk floor(log2(i + c) - log2(c)). If c is a power of 2, this /// is equivalent to floor(log2(i + c)) - floor(log2(c)), and a very fast floor /// log2 algorithm can be derived from `usize::leading_zeros()`. pub struct AppendList<T> { chunks: UnsafeCell<Vec<Vec<T>>>, len: Cell<usize>, } impl<T> AppendList<T> { /// Wrapper to get the list of chunks immutably fn chunks(&self) -> &[Vec<T>] { unsafe { &*self.chunks.get() } } /// In test builds, check all of the unsafe invariants /// /// In release builds, no-op fn check_invariants(&self) { #[cfg(test)] { if self.len.get() > 0 { // Correct number of chunks assert_eq!(index_chunk(self.len.get() - 1), self.chunks().len() - 1); // Every chunk holds enough items for chunk_id in 0..self.chunks().len() { assert!(chunk_size(chunk_id) <= self.chunks()[chunk_id].capacity()); } // Intermediate chunks are full for chunk_id in 0..self.chunks().len() - 1 { assert_eq!(chunk_size(chunk_id), self.chunks()[chunk_id].len()); } // Last chunk is correct length assert_eq!( self.chunks().last().unwrap().len(), self.len.get() - chunk_start(self.chunks().len() - 1) ); } else { // No chunks assert_eq!(0, self.chunks().len()); } } } /// Create a new `AppendList` pub fn new() -> Self { Self { chunks: UnsafeCell::new(Vec::new()), len: Cell::new(0), } } /// Append an item to the end /// /// Note that this does not require `mut`. pub fn push(&self, item: T) { self.check_invariants(); // Unsafe code alert! 
// // Preserve the following invariants: // - Only the last chunk may be modified // - A chunk cannot ever be reallocated // - len must reflect the length // // Invariants are checked in the check_invariants method let mut_chunks = unsafe { &mut *self.chunks.get() }; let new_index = self.len.get(); let chunk_id = index_chunk(new_index); if chunk_id < mut_chunks.len() { // We should always be inserting into the last chunk debug_assert_eq!(chunk_id, mut_chunks.len() - 1); // Insert into the appropriate chunk let chunk = &mut mut_chunks[chunk_id]; // The chunk must not be reallocated! Save the pre-insertion capacity // so we can check it later (test builds only) #[cfg(test)] let prev_capacity = chunk.capacity(); // Do the insertion chunk.push(item); // Check that the capacity didn't change (test builds only) #[cfg(test)] assert_eq!(prev_capacity, chunk.capacity()); } else { // Need to allocate a new chunk // New chunk should be the immediate next chunk debug_assert_eq!(chunk_id, mut_chunks.len()); // New chunk must be big enough let mut new_chunk = Vec::with_capacity(chunk_size(chunk_id)); debug_assert!(new_chunk.capacity() >= chunk_size(chunk_id)); new_chunk.push(item); mut_chunks.push(new_chunk); } // Increment the length self.len.set(self.len.get() + 1); self.check_invariants(); } /// Get the length of the list pub fn len(&self) -> usize { self.check_invariants(); self.len.get() } /// Get an item from the list, if it is in bounds /// /// Returns `None` if the `index` is out-of-bounds. Note that you can also /// index with `[]`, which will panic on out-of-bounds. pub fn get(&self, index: usize) -> Option<&T> { self.check_invariants(); if index >= self.len() { return None; } let chunk_id = index_chunk(index); let chunk_start = chunk_start(chunk_id); return Some(&self.chunks()[chunk_id][index - chunk_start]); } /// Get an iterator over the list pub fn iter(&self) -> Iter<T> { self.check_invariants(); Iter { list: &self, index: 0, } } } impl<T> Default for AppendList<T> { fn default() -> Self { Self::new() } } impl<T> Index<usize> for AppendList<T> { type Output = T; fn index(&self, index: usize) -> &Self::Output { self.get(index) .expect("AppendList indexed beyond its length") } } impl<T> FromIterator<T> for AppendList<T> { fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self { let list = Self::new(); for item in iter { list.push(item); } list } } impl<'l, T> IntoIterator for &'l AppendList<T> { type Item = &'l T; type IntoIter = Iter<'l, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<T: PartialEq> PartialEq for AppendList<T> { fn eq(&self, other: &AppendList<T>) -> bool { let mut s = self.iter(); let mut o = other.iter(); loop { match (s.next(), o.next()) { (Some(a), Some(b)) if a == b => {}, (None, None) => return true, _ => return false, } } } } impl<T: Debug> Debug for AppendList<T> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_list().entries(self.iter()).finish() } } pub struct Iter<'l, T> { list: &'l AppendList<T>,
impl<'l, T> Iterator for Iter<'l, T> { type Item = &'l T; fn next(&mut self) -> Option<Self::Item> { let item = self.list.get(self.index); self.index += 1; item } fn size_hint(&self) -> (usize, Option<usize>) { let remaining = self.list.len() - self.index; (remaining, Some(remaining)) } } #[cfg(test)] mod test { use super::*; #[test] fn from_iterator() { let l: AppendList<i32> = (0..100).collect(); for i in 0..100 { assert_eq!(l[i], i as i32); } } #[test] fn iterator() { let l: AppendList<i32> = (0..100).collect(); let mut i1 = l.iter(); let mut i2 = l.into_iter(); for item in 0..100 { assert_eq!(i1.next(), Some(&item)); assert_eq!(i2.next(), Some(&item)); } assert_eq!(i1.next(), None); assert_eq!(i2.next(), None); } #[test] fn equality() { let a = AppendList::new(); let b = AppendList::new(); assert_eq!(a, b); a.push("foo"); assert_ne!(a, b); b.push("foo"); assert_eq!(a, b); a.push("bar"); a.push("baz"); assert_ne!(a, b); } #[test] fn iterator_size_hint() { let l: AppendList<i32> = AppendList::new(); let mut i = l.iter(); assert_eq!(i.size_hint(), (0, Some(0))); l.push(1); assert_eq!(i.size_hint(), (1, Some(1))); l.push(2); assert_eq!(i.size_hint(), (2, Some(2))); i.next(); assert_eq!(i.size_hint(), (1, Some(1))); l.push(3); assert_eq!(i.size_hint(), (2, Some(2))); i.next(); assert_eq!(i.size_hint(), (1, Some(1))); i.next(); assert_eq!(i.size_hint(), (0, Some(0))); } #[test] fn empty_list() { let n: AppendList<usize> = AppendList::new(); assert_eq!(n.len(), 0); assert_eq!(n.get(0), None); let d: AppendList<usize> = AppendList::default(); assert_eq!(d.len(), 0); assert_eq!(d.get(0), None); } #[test] fn thousand_item_list() { test_big_list(1_000); } #[test] #[ignore] fn million_item_list() { test_big_list(1_000_000); } fn test_big_list(size: usize) { let l = AppendList::new(); let mut refs = Vec::new(); for i in 0..size { assert_eq!(l.len(), i); l.push(i); refs.push(l[i]); assert_eq!(l.len(), i + 1); } for i in 0..size { assert_eq!(Some(&refs[i]), l.get(i)); } } }
index: usize, }
random_line_split
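The `FromIterator` and `IntoIterator` impls above make the list usable with ordinary iterator plumbing. A small usage sketch:

```rust
use appendlist::AppendList;

fn main() {
    // `collect` builds the list through `FromIterator`...
    let list: AppendList<u32> = (1..=3).collect();

    // ...and `&list` works directly in a `for` loop through
    // `IntoIterator for &AppendList<T>`.
    let mut sum = 0;
    for item in &list {
        sum += *item;
    }
    assert_eq!(sum, 6);
}
```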
unified.rs
(Self::Sapling, Self::Sapling) | (Self::P2sh, Self::P2sh) | (Self::P2pkh, Self::P2pkh) => cmp::Ordering::Equal, // We don't know for certain the preference order of unknown receivers, but it // is likely that the higher typecode has higher preference. The exact order // doesn't really matter, as unknown receivers have lower preference than // known receivers. (Self::Unknown(a), Self::Unknown(b)) => b.cmp(a), // For the remaining cases, we rely on `match` always choosing the first arm // with a matching pattern. Patterns below are listed in priority order: (Self::Orchard, _) => cmp::Ordering::Less, (_, Self::Orchard) => cmp::Ordering::Greater, (Self::Sapling, _) => cmp::Ordering::Less, (_, Self::Sapling) => cmp::Ordering::Greater, (Self::P2sh, _) => cmp::Ordering::Less, (_, Self::P2sh) => cmp::Ordering::Greater, (Self::P2pkh, _) => cmp::Ordering::Less, (_, Self::P2pkh) => cmp::Ordering::Greater, } } } impl PartialOrd for Typecode { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TryFrom<u32> for Typecode { type Error = ParseError; fn try_from(typecode: u32) -> Result<Self, Self::Error> { match typecode { 0x00 => Ok(Typecode::P2pkh), 0x01 => Ok(Typecode::P2sh), 0x02 => Ok(Typecode::Sapling), 0x03 => Ok(Typecode::Orchard), 0x04..=0x02000000 => Ok(Typecode::Unknown(typecode)), 0x02000001..=u32::MAX => Err(ParseError::InvalidTypecodeValue(typecode as u64)), } } } impl From<Typecode> for u32 { fn from(t: Typecode) -> Self { match t { Typecode::P2pkh => 0x00, Typecode::P2sh => 0x01, Typecode::Sapling => 0x02, Typecode::Orchard => 0x03, Typecode::Unknown(typecode) => typecode, } } } impl Typecode { fn is_transparent(&self) -> bool { // Unknown typecodes are treated as not transparent for the purpose of disallowing // only-transparent UAs, which can be represented with existing address encodings. matches!(self, Typecode::P2pkh | Typecode::P2sh) } } /// An error while attempting to parse a string as a Zcash address. #[derive(Debug, PartialEq)] pub enum ParseError { /// The unified address contains both P2PKH and P2SH receivers. BothP2phkAndP2sh, /// The unified address contains a duplicated typecode. DuplicateTypecode(Typecode), /// The parsed typecode exceeds the maximum allowed CompactSize value. InvalidTypecodeValue(u64), /// The string is an invalid encoding. InvalidEncoding(String), /// The unified address only contains transparent receivers. OnlyTransparent, } impl fmt::Display for ParseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ParseError::BothP2phkAndP2sh => write!(f, "UA contains both P2PKH and P2SH receivers"), ParseError::DuplicateTypecode(c) => write!(f, "Duplicate typecode {}", u32::from(*c)), ParseError::InvalidTypecodeValue(v) => write!(f, "Typecode value out of range {}", v), ParseError::InvalidEncoding(msg) => write!(f, "Invalid encoding: {}", msg), ParseError::OnlyTransparent => write!(f, "UA only contains transparent receivers"), } } } impl Error for ParseError {} /// The set of known Receivers for Unified Addresses. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum Receiver { Orchard([u8; 43]), Sapling(kind::sapling::Data), P2pkh(kind::p2pkh::Data), P2sh(kind::p2sh::Data), Unknown { typecode: u32, data: Vec<u8> }, } impl cmp::Ord for Receiver { fn cmp(&self, other: &Self) -> cmp::Ordering { match self.typecode().cmp(&other.typecode()) { cmp::Ordering::Equal => self.addr().cmp(other.addr()), res => res, } } } impl cmp::PartialOrd for Receiver { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TryFrom<(u32, &[u8])> for Receiver { type Error = ParseError; fn try_from((typecode, addr): (u32, &[u8])) -> Result<Self, Self::Error> { match typecode.try_into()? { Typecode::P2pkh => addr.try_into().map(Receiver::P2pkh), Typecode::P2sh => addr.try_into().map(Receiver::P2sh), Typecode::Sapling => addr.try_into().map(Receiver::Sapling), Typecode::Orchard => addr.try_into().map(Receiver::Orchard), Typecode::Unknown(_) => Ok(Receiver::Unknown { typecode, data: addr.to_vec(), }), } .map_err(|e| { ParseError::InvalidEncoding(format!("Invalid address for typecode {}: {}", typecode, e)) }) } } impl Receiver { fn typecode(&self) -> Typecode { match self { Receiver::P2pkh(_) => Typecode::P2pkh, Receiver::P2sh(_) => Typecode::P2sh, Receiver::Sapling(_) => Typecode::Sapling, Receiver::Orchard(_) => Typecode::Orchard, Receiver::Unknown { typecode,.. } => Typecode::Unknown(*typecode), } } fn addr(&self) -> &[u8] { match self { Receiver::P2pkh(data) => data, Receiver::P2sh(data) => data, Receiver::Sapling(data) => data, Receiver::Orchard(data) => data, Receiver::Unknown { data,.. } => data, } } } /// A Unified Address. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Address(pub(crate) Vec<Receiver>); impl TryFrom<(&str, &[u8])> for Address { type Error = ParseError; fn try_from((hrp, buf): (&str, &[u8])) -> Result<Self, Self::Error> { fn read_receiver(mut cursor: &mut std::io::Cursor<&[u8]>) -> Result<Receiver, ParseError> { let typecode = CompactSize::read(&mut cursor) .map(|v| u32::try_from(v).expect("CompactSize::read enforces MAX_SIZE limit")) .map_err(|e| { ParseError::InvalidEncoding(format!( "Failed to deserialize CompactSize-encoded typecode {}", e )) })?; let length = CompactSize::read(&mut cursor).map_err(|e| { ParseError::InvalidEncoding(format!( "Failed to deserialize CompactSize-encoded length {}", e )) })?; let addr_end = cursor.position().checked_add(length).ok_or_else(|| { ParseError::InvalidEncoding(format!( "Length value {} caused an overflow error", length )) })?; let buf = cursor.get_ref(); if (buf.len() as u64) < addr_end { return Err(ParseError::InvalidEncoding(format!( "Truncated: unable to read {} bytes of address data", length ))); } let result = Receiver::try_from(( typecode, &buf[cursor.position() as usize..addr_end as usize], )); cursor.set_position(addr_end); result } let encoded = f4jumble::f4jumble_inv(buf) .ok_or_else(|| ParseError::InvalidEncoding("F4Jumble decoding failed".to_owned()))?; // Validate and strip trailing padding bytes. 
if hrp.len() > 16 { return Err(ParseError::InvalidEncoding( "Invalid human-readable part".to_owned(), )); } let mut expected_padding = [0; PADDING_LEN]; expected_padding[0..hrp.len()].copy_from_slice(hrp.as_bytes()); let encoded = match encoded.split_at(encoded.len() - PADDING_LEN) { (encoded, tail) if tail == expected_padding => Ok(encoded), _ => Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned(), )), }?; let mut cursor = std::io::Cursor::new(encoded); let mut result = vec![]; while cursor.position() < encoded.len().try_into().unwrap() { result.push(read_receiver(&mut cursor)?); } assert_eq!(cursor.position(), encoded.len().try_into().unwrap()); result.try_into() } } impl TryFrom<Vec<Receiver>> for Address { type Error = ParseError; fn try_from(receivers: Vec<Receiver>) -> Result<Self, Self::Error> { let mut typecodes = HashSet::with_capacity(receivers.len()); for receiver in &receivers { let t = receiver.typecode(); if typecodes.contains(&t) { return Err(ParseError::DuplicateTypecode(t)); } else if (t == Typecode::P2pkh && typecodes.contains(&Typecode::P2sh)) || (t == Typecode::P2sh && typecodes.contains(&Typecode::P2pkh))
else { typecodes.insert(t); } } if typecodes.iter().all(|t| t.is_transparent()) { Err(ParseError::OnlyTransparent) } else { // All checks pass! Ok(Address(receivers)) } } } impl Address { /// Returns the raw encoding of this Unified Address. pub(crate) fn to_bytes(&self, hrp: &str) -> Vec<u8> { assert!(hrp.len() <= PADDING_LEN); let mut writer = std::io::Cursor::new(Vec::new()); for receiver in &self.0 { let addr = receiver.addr(); CompactSize::write( &mut writer, <u32>::from(receiver.typecode()).try_into().unwrap(), ) .unwrap(); CompactSize::write(&mut writer, addr.len()).unwrap(); writer.write_all(addr).unwrap(); } let mut padding = [0u8; PADDING_LEN]; padding[0..hrp.len()].copy_from_slice(&hrp.as_bytes()); writer.write_all(&padding).unwrap(); f4jumble::f4jumble(&writer.into_inner()).unwrap() } /// Returns the receivers contained within this address, sorted in preference order. pub fn receivers(&self) -> Vec<Receiver> { let mut receivers = self.0.clone(); // Unstable sorting is fine, because all receivers are guaranteed by construction // to have distinct typecodes. receivers.sort_unstable_by_key(|r| r.typecode()); receivers } /// Returns the receivers contained within this address, in the order they were /// parsed from the string encoding. /// /// This API is for advanced usage; in most cases you should use `Address::receivers`. pub fn receivers_as_parsed(&self) -> &[Receiver] { &self.0 } } #[cfg(test)] pub(crate) mod test_vectors; #[cfg(test)] mod tests { use assert_matches::assert_matches; use std::convert::TryFrom; use proptest::{ array::{uniform11, uniform20, uniform32}, prelude::*, }; use super::{Address, ParseError, Receiver, Typecode, MAINNET, REGTEST, TESTNET}; prop_compose! { fn uniform43()(a in uniform11(0u8..), b in uniform32(0u8..)) -> [u8; 43] { let mut c = [0; 43]; c[..11].copy_from_slice(&a); c[11..].copy_from_slice(&b); c } } fn arb_shielded_receiver() -> BoxedStrategy<Receiver> { prop_oneof![ uniform43().prop_map(Receiver::Sapling), uniform43().prop_map(Receiver::Orchard), ] .boxed() } fn arb_transparent_receiver() -> BoxedStrategy<Receiver> { prop_oneof![ uniform20(0u8..).prop_map(Receiver::P2pkh), uniform20(0u8..).prop_map(Receiver::P2sh), ] .boxed() } prop_compose! { fn arb_unified_address()( shielded in prop::collection::hash_set(arb_shielded_receiver(), 1..2), transparent in prop::option::of(arb_transparent_receiver()), ) -> Address { Address(shielded.into_iter().chain(transparent).collect()) } } proptest! { #[test] fn ua_roundtrip( hrp in prop_oneof![MAINNET, TESTNET, REGTEST], ua in arb_unified_address(), ) { let bytes = ua.to_bytes(&hrp); let decoded = Address::try_from((hrp.as_str(), &bytes[..])); prop_assert_eq!(decoded, Ok(ua)); } } #[test] fn padding() { // The test cases below use `Address(vec![Receiver::Orchard([1; 43])])` as base. 
// Invalid padding ([0xff; 16] instead of [b'u', 0x00, 0x00, 0x00...]) let invalid_padding = [ 0xe6, 0x59, 0xd1, 0xed, 0xf7, 0x4b, 0xe3, 0x5e, 0x5a, 0x54, 0x0e, 0x41, 0x5d, 0x2f, 0x0c, 0x0d, 0x33, 0x42, 0xbd, 0xbe, 0x9f, 0x82, 0x62, 0x01, 0xc1, 0x1b, 0xd4, 0x1e, 0x42, 0x47, 0x86, 0x23, 0x05, 0x4b, 0x98, 0xd7, 0x76, 0x86, 0xa5, 0xe3, 0x1b, 0xd3, 0x03, 0xca, 0x24, 0x44, 0x8e, 0x72, 0xc1, 0x4a, 0xc6, 0xbf, 0x3f, 0x2b, 0xce, 0xa7, 0x7b, 0x28, 0x69, 0xc9, 0x84, ]; assert_eq!( Address::try_from((MAINNET, &invalid_padding[..])), Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned() )) ); // Short padding (padded to 15 bytes instead of 16) let truncated_padding = [ 0x9a, 0x56, 0x12, 0xa3, 0x43, 0x45, 0xe0, 0x82, 0x6c, 0xac, 0x24, 0x8b, 0x3b, 0x45, 0x72, 0x9a, 0x53, 0xd5, 0xf8, 0xda, 0xec, 0x07, 0x7c, 0xba, 0x9f, 0xa8, 0xd2, 0x97, 0x5b, 0xda, 0x73, 0x1b, 0xd2, 0xd1, 0x32, 0x6b, 0x7b, 0x36, 0xdd, 0x57, 0x84, 0x2a, 0xa0, 0x21, 0x23, 0x89, 0x73, 0x85, 0xe1, 0x4b, 0x3e, 0x95, 0xb7, 0xd4, 0x67, 0xbc, 0x4b, 0x31, 0xee, 0x5a, ]; assert_eq!( Address::try_from((MAINNET, &truncated_padding[..])), Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned() )) ); } #[test] fn truncated() { // The test cases below start from an encoding of // `Address(vec![Receiver::Orchard([1; 43]), Receiver::Sapling([2; 43])])` // with the receiver data truncated, but valid padding. // - Missing the last data byte of the Sapling receiver. let truncated_sapling_data = [ 0xaa, 0xb0, 0x6e, 0x7b, 0x26, 0x7a, 0x22, 0x17, 0x39, 0xfa, 0x07, 0x69, 0xe9, 0x32, 0x2b, 0xac, 0x8c, 0x9e, 0x5e, 0x8a, 0xd9, 0x24, 0x06, 0x5a, 0x13, 0x79, 0x3a, 0x8d, 0xb4, 0x52, 0xfa, 0x18, 0x4e, 0x33, 0x4d, 0x8c, 0x17, 0x77, 0x4d, 0x63, 0x69, 0x34, 0x22, 0x70, 0x3a, 0xea, 0x30, 0x82, 0x5a, 0x6b, 0x37, 0xd1, 0x0d, 0xbe, 0x20, 0xab, 0x82, 0x86, 0x98, 0x34, 0x6a, 0xd8, 0x45, 0x40, 0xd0, 0x25, 0x60, 0xbf, 0x1e, 0xb6, 0xeb, 0x06, 0x85, 0x70, 0x4c, 0x42, 0xbc, 0x19, 0x14, 0xef, 0x7a, 0x05, 0xa0, 0x71, 0xb2, 0x63, 0x80, 0xbb, 0xdc, 0x12, 0x08, 0x48, 0x28, 0x8f, 0x1c, 0x9e, 0xc3, 0x42, 0xc6, 0x5e, 0x68, 0xa2, 0x78, 0x6c, 0x9e, ]; assert_matches!( Address::try_from((MAINNET, &truncated_sapling_data[..])), Err(ParseError::InvalidEncoding(_)) ); // - Truncated after the typecode of the Sapling receiver. let truncated_after_sapling_typecode = [ 0x87, 0x7a, 0xdf, 0x79, 0x6b, 0xe3, 0xb3, 0x40, 0xef, 0xe4, 0x5d, 0xc2, 0x91, 0xa2, 0x81, 0xfc, 0x7d, 0x76, 0xbb, 0xb0, 0x58, 0x98, 0x53, 0x59, 0xd3, 0x3f, 0xbc, 0x4b, 0x86, 0x59, 0x66, 0x62, 0x75, 0x92, 0xba, 0xcc, 0x31, 0x1e, 0x60, 0x02, 0x3b, 0xd8, 0x4c, 0xdf, 0x36, 0xa1, 0xac, 0x82, 0x57, 0xed, 0x0c, 0x98, 0x49, 0x8f, 0x49, 0x7e, 0xe6, 0x70, 0x36, 0x5b, 0x7b, 0x9e, ]; assert_matches!( Address::try_from((MAINNET, &truncated_after_sapling_typecode[..])), Err(ParseError::InvalidEncoding(_)) ); } #[test] fn duplicate_typecode() { // Construct and serialize an invalid UA. let ua = Address(vec![Receiver::Sapling([1; 43]), Receiver::Sapling([2; 43])]); let encoded = ua.to_bytes(MAINNET); assert_eq!( Address::try_from((MAINNET, &encoded[..])), Err(ParseError::DuplicateTypecode(Typecode::Sapling)) ); } #[test] fn p2pkh_and_p2sh() { // Construct and serialize an invalid UA. let ua = Address(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]); let encoded = ua.to_bytes(MAINNET);
{ return Err(ParseError::BothP2phkAndP2sh); }
conditional_block
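The conditional block above is one of three validation rules enforced by the `TryFrom<Vec<Receiver>>` impl. A sketch of how each rule surfaces to a caller, using only types and constructors that appear in this file:

```rust
use std::convert::TryFrom;

fn demo() {
    // Only transparent receivers: rejected.
    assert_eq!(
        Address::try_from(vec![Receiver::P2pkh([0; 20])]),
        Err(ParseError::OnlyTransparent),
    );

    // Both P2PKH and P2SH receivers: rejected by the conditional block above.
    assert_eq!(
        Address::try_from(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]),
        Err(ParseError::BothP2phkAndP2sh),
    );

    // At least one shielded receiver makes the set valid.
    assert!(Address::try_from(vec![Receiver::Orchard([1; 43])]).is_ok());
}
```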
unified.rs
(Self::Sapling, Self::Sapling) | (Self::P2sh, Self::P2sh) | (Self::P2pkh, Self::P2pkh) => cmp::Ordering::Equal, // We don't know for certain the preference order of unknown receivers, but it // is likely that the higher typecode has higher preference. The exact order // doesn't really matter, as unknown receivers have lower preference than // known receivers. (Self::Unknown(a), Self::Unknown(b)) => b.cmp(a), // For the remaining cases, we rely on `match` always choosing the first arm // with a matching pattern. Patterns below are listed in priority order: (Self::Orchard, _) => cmp::Ordering::Less, (_, Self::Orchard) => cmp::Ordering::Greater, (Self::Sapling, _) => cmp::Ordering::Less, (_, Self::Sapling) => cmp::Ordering::Greater, (Self::P2sh, _) => cmp::Ordering::Less, (_, Self::P2sh) => cmp::Ordering::Greater, (Self::P2pkh, _) => cmp::Ordering::Less, (_, Self::P2pkh) => cmp::Ordering::Greater, } } } impl PartialOrd for Typecode { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TryFrom<u32> for Typecode { type Error = ParseError; fn try_from(typecode: u32) -> Result<Self, Self::Error> { match typecode { 0x00 => Ok(Typecode::P2pkh), 0x01 => Ok(Typecode::P2sh), 0x02 => Ok(Typecode::Sapling), 0x03 => Ok(Typecode::Orchard), 0x04..=0x02000000 => Ok(Typecode::Unknown(typecode)), 0x02000001..=u32::MAX => Err(ParseError::InvalidTypecodeValue(typecode as u64)), } } } impl From<Typecode> for u32 { fn from(t: Typecode) -> Self { match t { Typecode::P2pkh => 0x00, Typecode::P2sh => 0x01, Typecode::Sapling => 0x02, Typecode::Orchard => 0x03, Typecode::Unknown(typecode) => typecode, } } } impl Typecode { fn is_transparent(&self) -> bool { // Unknown typecodes are treated as not transparent for the purpose of disallowing // only-transparent UAs, which can be represented with existing address encodings. matches!(self, Typecode::P2pkh | Typecode::P2sh) } } /// An error while attempting to parse a string as a Zcash address. #[derive(Debug, PartialEq)] pub enum
{ /// The unified address contains both P2PKH and P2SH receivers. BothP2phkAndP2sh, /// The unified address contains a duplicated typecode. DuplicateTypecode(Typecode), /// The parsed typecode exceeds the maximum allowed CompactSize value. InvalidTypecodeValue(u64), /// The string is an invalid encoding. InvalidEncoding(String), /// The unified address only contains transparent receivers. OnlyTransparent, } impl fmt::Display for ParseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ParseError::BothP2phkAndP2sh => write!(f, "UA contains both P2PKH and P2SH receivers"), ParseError::DuplicateTypecode(c) => write!(f, "Duplicate typecode {}", u32::from(*c)), ParseError::InvalidTypecodeValue(v) => write!(f, "Typecode value out of range {}", v), ParseError::InvalidEncoding(msg) => write!(f, "Invalid encoding: {}", msg), ParseError::OnlyTransparent => write!(f, "UA only contains transparent receivers"), } } } impl Error for ParseError {} /// The set of known Receivers for Unified Addresses. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum Receiver { Orchard([u8; 43]), Sapling(kind::sapling::Data), P2pkh(kind::p2pkh::Data), P2sh(kind::p2sh::Data), Unknown { typecode: u32, data: Vec<u8> }, } impl cmp::Ord for Receiver { fn cmp(&self, other: &Self) -> cmp::Ordering { match self.typecode().cmp(&other.typecode()) { cmp::Ordering::Equal => self.addr().cmp(other.addr()), res => res, } } } impl cmp::PartialOrd for Receiver { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TryFrom<(u32, &[u8])> for Receiver { type Error = ParseError; fn try_from((typecode, addr): (u32, &[u8])) -> Result<Self, Self::Error> { match typecode.try_into()? { Typecode::P2pkh => addr.try_into().map(Receiver::P2pkh), Typecode::P2sh => addr.try_into().map(Receiver::P2sh), Typecode::Sapling => addr.try_into().map(Receiver::Sapling), Typecode::Orchard => addr.try_into().map(Receiver::Orchard), Typecode::Unknown(_) => Ok(Receiver::Unknown { typecode, data: addr.to_vec(), }), } .map_err(|e| { ParseError::InvalidEncoding(format!("Invalid address for typecode {}: {}", typecode, e)) }) } } impl Receiver { fn typecode(&self) -> Typecode { match self { Receiver::P2pkh(_) => Typecode::P2pkh, Receiver::P2sh(_) => Typecode::P2sh, Receiver::Sapling(_) => Typecode::Sapling, Receiver::Orchard(_) => Typecode::Orchard, Receiver::Unknown { typecode,.. } => Typecode::Unknown(*typecode), } } fn addr(&self) -> &[u8] { match self { Receiver::P2pkh(data) => data, Receiver::P2sh(data) => data, Receiver::Sapling(data) => data, Receiver::Orchard(data) => data, Receiver::Unknown { data,.. } => data, } } } /// A Unified Address. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Address(pub(crate) Vec<Receiver>); impl TryFrom<(&str, &[u8])> for Address { type Error = ParseError; fn try_from((hrp, buf): (&str, &[u8])) -> Result<Self, Self::Error> { fn read_receiver(mut cursor: &mut std::io::Cursor<&[u8]>) -> Result<Receiver, ParseError> { let typecode = CompactSize::read(&mut cursor) .map(|v| u32::try_from(v).expect("CompactSize::read enforces MAX_SIZE limit")) .map_err(|e| { ParseError::InvalidEncoding(format!( "Failed to deserialize CompactSize-encoded typecode {}", e )) })?; let length = CompactSize::read(&mut cursor).map_err(|e| { ParseError::InvalidEncoding(format!( "Failed to deserialize CompactSize-encoded length {}", e )) })?; let addr_end = cursor.position().checked_add(length).ok_or_else(|| { ParseError::InvalidEncoding(format!( "Length value {} caused an overflow error", length )) })?; let buf = cursor.get_ref(); if (buf.len() as u64) < addr_end { return Err(ParseError::InvalidEncoding(format!( "Truncated: unable to read {} bytes of address data", length ))); } let result = Receiver::try_from(( typecode, &buf[cursor.position() as usize..addr_end as usize], )); cursor.set_position(addr_end); result } let encoded = f4jumble::f4jumble_inv(buf) .ok_or_else(|| ParseError::InvalidEncoding("F4Jumble decoding failed".to_owned()))?; // Validate and strip trailing padding bytes. if hrp.len() > 16 { return Err(ParseError::InvalidEncoding( "Invalid human-readable part".to_owned(), )); } let mut expected_padding = [0; PADDING_LEN]; expected_padding[0..hrp.len()].copy_from_slice(hrp.as_bytes()); let encoded = match encoded.split_at(encoded.len() - PADDING_LEN) { (encoded, tail) if tail == expected_padding => Ok(encoded), _ => Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned(), )), }?; let mut cursor = std::io::Cursor::new(encoded); let mut result = vec![]; while cursor.position() < encoded.len().try_into().unwrap() { result.push(read_receiver(&mut cursor)?); } assert_eq!(cursor.position(), encoded.len().try_into().unwrap()); result.try_into() } } impl TryFrom<Vec<Receiver>> for Address { type Error = ParseError; fn try_from(receivers: Vec<Receiver>) -> Result<Self, Self::Error> { let mut typecodes = HashSet::with_capacity(receivers.len()); for receiver in &receivers { let t = receiver.typecode(); if typecodes.contains(&t) { return Err(ParseError::DuplicateTypecode(t)); } else if (t == Typecode::P2pkh && typecodes.contains(&Typecode::P2sh)) || (t == Typecode::P2sh && typecodes.contains(&Typecode::P2pkh)) { return Err(ParseError::BothP2phkAndP2sh); } else { typecodes.insert(t); } } if typecodes.iter().all(|t| t.is_transparent()) { Err(ParseError::OnlyTransparent) } else { // All checks pass! Ok(Address(receivers)) } } } impl Address { /// Returns the raw encoding of this Unified Address. pub(crate) fn to_bytes(&self, hrp: &str) -> Vec<u8> { assert!(hrp.len() <= PADDING_LEN); let mut writer = std::io::Cursor::new(Vec::new()); for receiver in &self.0 { let addr = receiver.addr(); CompactSize::write( &mut writer, <u32>::from(receiver.typecode()).try_into().unwrap(), ) .unwrap(); CompactSize::write(&mut writer, addr.len()).unwrap(); writer.write_all(addr).unwrap(); } let mut padding = [0u8; PADDING_LEN]; padding[0..hrp.len()].copy_from_slice(&hrp.as_bytes()); writer.write_all(&padding).unwrap(); f4jumble::f4jumble(&writer.into_inner()).unwrap() } /// Returns the receivers contained within this address, sorted in preference order. 
pub fn receivers(&self) -> Vec<Receiver> { let mut receivers = self.0.clone(); // Unstable sorting is fine, because all receivers are guaranteed by construction // to have distinct typecodes. receivers.sort_unstable_by_key(|r| r.typecode()); receivers } /// Returns the receivers contained within this address, in the order they were /// parsed from the string encoding. /// /// This API is for advanced usage; in most cases you should use `Address::receivers`. pub fn receivers_as_parsed(&self) -> &[Receiver] { &self.0 } } #[cfg(test)] pub(crate) mod test_vectors; #[cfg(test)] mod tests { use assert_matches::assert_matches; use std::convert::TryFrom; use proptest::{ array::{uniform11, uniform20, uniform32}, prelude::*, }; use super::{Address, ParseError, Receiver, Typecode, MAINNET, REGTEST, TESTNET}; prop_compose! { fn uniform43()(a in uniform11(0u8..), b in uniform32(0u8..)) -> [u8; 43] { let mut c = [0; 43]; c[..11].copy_from_slice(&a); c[11..].copy_from_slice(&b); c } } fn arb_shielded_receiver() -> BoxedStrategy<Receiver> { prop_oneof![ uniform43().prop_map(Receiver::Sapling), uniform43().prop_map(Receiver::Orchard), ] .boxed() } fn arb_transparent_receiver() -> BoxedStrategy<Receiver> { prop_oneof![ uniform20(0u8..).prop_map(Receiver::P2pkh), uniform20(0u8..).prop_map(Receiver::P2sh), ] .boxed() } prop_compose! { fn arb_unified_address()( shielded in prop::collection::hash_set(arb_shielded_receiver(), 1..2), transparent in prop::option::of(arb_transparent_receiver()), ) -> Address { Address(shielded.into_iter().chain(transparent).collect()) } } proptest! { #[test] fn ua_roundtrip( hrp in prop_oneof![MAINNET, TESTNET, REGTEST], ua in arb_unified_address(), ) { let bytes = ua.to_bytes(&hrp); let decoded = Address::try_from((hrp.as_str(), &bytes[..])); prop_assert_eq!(decoded, Ok(ua)); } } #[test] fn padding() { // The test cases below use `Address(vec![Receiver::Orchard([1; 43])])` as base. // Invalid padding ([0xff; 16] instead of [b'u', 0x00, 0x00, 0x00...]) let invalid_padding = [ 0xe6, 0x59, 0xd1, 0xed, 0xf7, 0x4b, 0xe3, 0x5e, 0x5a, 0x54, 0x0e, 0x41, 0x5d, 0x2f, 0x0c, 0x0d, 0x33, 0x42, 0xbd, 0xbe, 0x9f, 0x82, 0x62, 0x01, 0xc1, 0x1b, 0xd4, 0x1e, 0x42, 0x47, 0x86, 0x23, 0x05, 0x4b, 0x98, 0xd7, 0x76, 0x86, 0xa5, 0xe3, 0x1b, 0xd3, 0x03, 0xca, 0x24, 0x44, 0x8e, 0x72, 0xc1, 0x4a, 0xc6, 0xbf, 0x3f, 0x2b, 0xce, 0xa7, 0x7b, 0x28, 0x69, 0xc9, 0x84, ]; assert_eq!( Address::try_from((MAINNET, &invalid_padding[..])), Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned() )) ); // Short padding (padded to 15 bytes instead of 16) let truncated_padding = [ 0x9a, 0x56, 0x12, 0xa3, 0x43, 0x45, 0xe0, 0x82, 0x6c, 0xac, 0x24, 0x8b, 0x3b, 0x45, 0x72, 0x9a, 0x53, 0xd5, 0xf8, 0xda, 0xec, 0x07, 0x7c, 0xba, 0x9f, 0xa8, 0xd2, 0x97, 0x5b, 0xda, 0x73, 0x1b, 0xd2, 0xd1, 0x32, 0x6b, 0x7b, 0x36, 0xdd, 0x57, 0x84, 0x2a, 0xa0, 0x21, 0x23, 0x89, 0x73, 0x85, 0xe1, 0x4b, 0x3e, 0x95, 0xb7, 0xd4, 0x67, 0xbc, 0x4b, 0x31, 0xee, 0x5a, ]; assert_eq!( Address::try_from((MAINNET, &truncated_padding[..])), Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned() )) ); } #[test] fn truncated() { // The test cases below start from an encoding of // `Address(vec![Receiver::Orchard([1; 43]), Receiver::Sapling([2; 43])])` // with the receiver data truncated, but valid padding. // - Missing the last data byte of the Sapling receiver. 
let truncated_sapling_data = [ 0xaa, 0xb0, 0x6e, 0x7b, 0x26, 0x7a, 0x22, 0x17, 0x39, 0xfa, 0x07, 0x69, 0xe9, 0x32, 0x2b, 0xac, 0x8c, 0x9e, 0x5e, 0x8a, 0xd9, 0x24, 0x06, 0x5a, 0x13, 0x79, 0x3a, 0x8d, 0xb4, 0x52, 0xfa, 0x18, 0x4e, 0x33, 0x4d, 0x8c, 0x17, 0x77, 0x4d, 0x63, 0x69, 0x34, 0x22, 0x70, 0x3a, 0xea, 0x30, 0x82, 0x5a, 0x6b, 0x37, 0xd1, 0x0d, 0xbe, 0x20, 0xab, 0x82, 0x86, 0x98, 0x34, 0x6a, 0xd8, 0x45, 0x40, 0xd0, 0x25, 0x60, 0xbf, 0x1e, 0xb6, 0xeb, 0x06, 0x85, 0x70, 0x4c, 0x42, 0xbc, 0x19, 0x14, 0xef, 0x7a, 0x05, 0xa0, 0x71, 0xb2, 0x63, 0x80, 0xbb, 0xdc, 0x12, 0x08, 0x48, 0x28, 0x8f, 0x1c, 0x9e, 0xc3, 0x42, 0xc6, 0x5e, 0x68, 0xa2, 0x78, 0x6c, 0x9e, ]; assert_matches!( Address::try_from((MAINNET, &truncated_sapling_data[..])), Err(ParseError::InvalidEncoding(_)) ); // - Truncated after the typecode of the Sapling receiver. let truncated_after_sapling_typecode = [ 0x87, 0x7a, 0xdf, 0x79, 0x6b, 0xe3, 0xb3, 0x40, 0xef, 0xe4, 0x5d, 0xc2, 0x91, 0xa2, 0x81, 0xfc, 0x7d, 0x76, 0xbb, 0xb0, 0x58, 0x98, 0x53, 0x59, 0xd3, 0x3f, 0xbc, 0x4b, 0x86, 0x59, 0x66, 0x62, 0x75, 0x92, 0xba, 0xcc, 0x31, 0x1e, 0x60, 0x02, 0x3b, 0xd8, 0x4c, 0xdf, 0x36, 0xa1, 0xac, 0x82, 0x57, 0xed, 0x0c, 0x98, 0x49, 0x8f, 0x49, 0x7e, 0xe6, 0x70, 0x36, 0x5b, 0x7b, 0x9e, ]; assert_matches!( Address::try_from((MAINNET, &truncated_after_sapling_typecode[..])), Err(ParseError::InvalidEncoding(_)) ); } #[test] fn duplicate_typecode() { // Construct and serialize an invalid UA. let ua = Address(vec![Receiver::Sapling([1; 43]), Receiver::Sapling([2; 43])]); let encoded = ua.to_bytes(MAINNET); assert_eq!( Address::try_from((MAINNET, &encoded[..])), Err(ParseError::DuplicateTypecode(Typecode::Sapling)) ); } #[test] fn p2pkh_and_p2sh() { // Construct and serialize an invalid UA. let ua = Address(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]); let encoded = ua.to_bytes(MAINNET);
ParseError
identifier_name
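The surrounding code defines `Typecode`'s ordering, which determines receiver preference. A sketch of the resulting sort order, relying on the `Ord` impl whose `cmp` body appears above:

```rust
fn demo() {
    let mut codes = vec![
        Typecode::P2pkh,
        Typecode::Unknown(0x05),
        Typecode::Sapling,
        Typecode::Orchard,
        Typecode::P2sh,
        Typecode::Unknown(0x06),
    ];
    codes.sort();
    // Known receivers sort by preference; unknown typecodes sort after all
    // known ones, with higher codes first among themselves.
    assert_eq!(
        codes,
        vec![
            Typecode::Orchard,
            Typecode::Sapling,
            Typecode::P2sh,
            Typecode::P2pkh,
            Typecode::Unknown(0x06),
            Typecode::Unknown(0x05),
        ]
    );
}
```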
unified.rs
| (Self::Sapling, Self::Sapling) | (Self::P2sh, Self::P2sh) | (Self::P2pkh, Self::P2pkh) => cmp::Ordering::Equal, // We don't know for certain the preference order of unknown receivers, but it // is likely that the higher typecode has higher preference. The exact order // doesn't really matter, as unknown receivers have lower preference than // known receivers. (Self::Unknown(a), Self::Unknown(b)) => b.cmp(a), // For the remaining cases, we rely on `match` always choosing the first arm // with a matching pattern. Patterns below are listed in priority order: (Self::Orchard, _) => cmp::Ordering::Less, (_, Self::Orchard) => cmp::Ordering::Greater, (Self::Sapling, _) => cmp::Ordering::Less, (_, Self::Sapling) => cmp::Ordering::Greater, (Self::P2sh, _) => cmp::Ordering::Less, (_, Self::P2sh) => cmp::Ordering::Greater, (Self::P2pkh, _) => cmp::Ordering::Less, (_, Self::P2pkh) => cmp::Ordering::Greater, } } } impl PartialOrd for Typecode { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TryFrom<u32> for Typecode { type Error = ParseError; fn try_from(typecode: u32) -> Result<Self, Self::Error> { match typecode { 0x00 => Ok(Typecode::P2pkh), 0x01 => Ok(Typecode::P2sh), 0x02 => Ok(Typecode::Sapling), 0x03 => Ok(Typecode::Orchard), 0x04..=0x02000000 => Ok(Typecode::Unknown(typecode)), 0x02000001..=u32::MAX => Err(ParseError::InvalidTypecodeValue(typecode as u64)), } } } impl From<Typecode> for u32 { fn from(t: Typecode) -> Self { match t { Typecode::P2pkh => 0x00, Typecode::P2sh => 0x01, Typecode::Sapling => 0x02, Typecode::Orchard => 0x03, Typecode::Unknown(typecode) => typecode, } } }
// Unknown typecodes are treated as not transparent for the purpose of disallowing // only-transparent UAs, which can be represented with existing address encodings. matches!(self, Typecode::P2pkh | Typecode::P2sh) } } /// An error while attempting to parse a string as a Zcash address. #[derive(Debug, PartialEq)] pub enum ParseError { /// The unified address contains both P2PKH and P2SH receivers. BothP2phkAndP2sh, /// The unified address contains a duplicated typecode. DuplicateTypecode(Typecode), /// The parsed typecode exceeds the maximum allowed CompactSize value. InvalidTypecodeValue(u64), /// The string is an invalid encoding. InvalidEncoding(String), /// The unified address only contains transparent receivers. OnlyTransparent, } impl fmt::Display for ParseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ParseError::BothP2phkAndP2sh => write!(f, "UA contains both P2PKH and P2SH receivers"), ParseError::DuplicateTypecode(c) => write!(f, "Duplicate typecode {}", u32::from(*c)), ParseError::InvalidTypecodeValue(v) => write!(f, "Typecode value out of range {}", v), ParseError::InvalidEncoding(msg) => write!(f, "Invalid encoding: {}", msg), ParseError::OnlyTransparent => write!(f, "UA only contains transparent receivers"), } } } impl Error for ParseError {} /// The set of known Receivers for Unified Addresses. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum Receiver { Orchard([u8; 43]), Sapling(kind::sapling::Data), P2pkh(kind::p2pkh::Data), P2sh(kind::p2sh::Data), Unknown { typecode: u32, data: Vec<u8> }, } impl cmp::Ord for Receiver { fn cmp(&self, other: &Self) -> cmp::Ordering { match self.typecode().cmp(&other.typecode()) { cmp::Ordering::Equal => self.addr().cmp(other.addr()), res => res, } } } impl cmp::PartialOrd for Receiver { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { Some(self.cmp(other)) } } impl TryFrom<(u32, &[u8])> for Receiver { type Error = ParseError; fn try_from((typecode, addr): (u32, &[u8])) -> Result<Self, Self::Error> { match typecode.try_into()? { Typecode::P2pkh => addr.try_into().map(Receiver::P2pkh), Typecode::P2sh => addr.try_into().map(Receiver::P2sh), Typecode::Sapling => addr.try_into().map(Receiver::Sapling), Typecode::Orchard => addr.try_into().map(Receiver::Orchard), Typecode::Unknown(_) => Ok(Receiver::Unknown { typecode, data: addr.to_vec(), }), } .map_err(|e| { ParseError::InvalidEncoding(format!("Invalid address for typecode {}: {}", typecode, e)) }) } } impl Receiver { fn typecode(&self) -> Typecode { match self { Receiver::P2pkh(_) => Typecode::P2pkh, Receiver::P2sh(_) => Typecode::P2sh, Receiver::Sapling(_) => Typecode::Sapling, Receiver::Orchard(_) => Typecode::Orchard, Receiver::Unknown { typecode,.. } => Typecode::Unknown(*typecode), } } fn addr(&self) -> &[u8] { match self { Receiver::P2pkh(data) => data, Receiver::P2sh(data) => data, Receiver::Sapling(data) => data, Receiver::Orchard(data) => data, Receiver::Unknown { data,.. } => data, } } } /// A Unified Address. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Address(pub(crate) Vec<Receiver>); impl TryFrom<(&str, &[u8])> for Address { type Error = ParseError; fn try_from((hrp, buf): (&str, &[u8])) -> Result<Self, Self::Error> { fn read_receiver(mut cursor: &mut std::io::Cursor<&[u8]>) -> Result<Receiver, ParseError> { let typecode = CompactSize::read(&mut cursor) .map(|v| u32::try_from(v).expect("CompactSize::read enforces MAX_SIZE limit")) .map_err(|e| { ParseError::InvalidEncoding(format!( "Failed to deserialize CompactSize-encoded typecode {}", e )) })?; let length = CompactSize::read(&mut cursor).map_err(|e| { ParseError::InvalidEncoding(format!( "Failed to deserialize CompactSize-encoded length {}", e )) })?; let addr_end = cursor.position().checked_add(length).ok_or_else(|| { ParseError::InvalidEncoding(format!( "Length value {} caused an overflow error", length )) })?; let buf = cursor.get_ref(); if (buf.len() as u64) < addr_end { return Err(ParseError::InvalidEncoding(format!( "Truncated: unable to read {} bytes of address data", length ))); } let result = Receiver::try_from(( typecode, &buf[cursor.position() as usize..addr_end as usize], )); cursor.set_position(addr_end); result } let encoded = f4jumble::f4jumble_inv(buf) .ok_or_else(|| ParseError::InvalidEncoding("F4Jumble decoding failed".to_owned()))?; // Validate and strip trailing padding bytes. if hrp.len() > 16 { return Err(ParseError::InvalidEncoding( "Invalid human-readable part".to_owned(), )); } let mut expected_padding = [0; PADDING_LEN]; expected_padding[0..hrp.len()].copy_from_slice(hrp.as_bytes()); let encoded = match encoded.split_at(encoded.len() - PADDING_LEN) { (encoded, tail) if tail == expected_padding => Ok(encoded), _ => Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned(), )), }?; let mut cursor = std::io::Cursor::new(encoded); let mut result = vec![]; while cursor.position() < encoded.len().try_into().unwrap() { result.push(read_receiver(&mut cursor)?); } assert_eq!(cursor.position(), encoded.len().try_into().unwrap()); result.try_into() } } impl TryFrom<Vec<Receiver>> for Address { type Error = ParseError; fn try_from(receivers: Vec<Receiver>) -> Result<Self, Self::Error> { let mut typecodes = HashSet::with_capacity(receivers.len()); for receiver in &receivers { let t = receiver.typecode(); if typecodes.contains(&t) { return Err(ParseError::DuplicateTypecode(t)); } else if (t == Typecode::P2pkh && typecodes.contains(&Typecode::P2sh)) || (t == Typecode::P2sh && typecodes.contains(&Typecode::P2pkh)) { return Err(ParseError::BothP2phkAndP2sh); } else { typecodes.insert(t); } } if typecodes.iter().all(|t| t.is_transparent()) { Err(ParseError::OnlyTransparent) } else { // All checks pass! Ok(Address(receivers)) } } } impl Address { /// Returns the raw encoding of this Unified Address. pub(crate) fn to_bytes(&self, hrp: &str) -> Vec<u8> { assert!(hrp.len() <= PADDING_LEN); let mut writer = std::io::Cursor::new(Vec::new()); for receiver in &self.0 { let addr = receiver.addr(); CompactSize::write( &mut writer, <u32>::from(receiver.typecode()).try_into().unwrap(), ) .unwrap(); CompactSize::write(&mut writer, addr.len()).unwrap(); writer.write_all(addr).unwrap(); } let mut padding = [0u8; PADDING_LEN]; padding[0..hrp.len()].copy_from_slice(&hrp.as_bytes()); writer.write_all(&padding).unwrap(); f4jumble::f4jumble(&writer.into_inner()).unwrap() } /// Returns the receivers contained within this address, sorted in preference order. 
pub fn receivers(&self) -> Vec<Receiver> { let mut receivers = self.0.clone(); // Unstable sorting is fine, because all receivers are guaranteed by construction // to have distinct typecodes. receivers.sort_unstable_by_key(|r| r.typecode()); receivers } /// Returns the receivers contained within this address, in the order they were /// parsed from the string encoding. /// /// This API is for advanced usage; in most cases you should use `Address::receivers`. pub fn receivers_as_parsed(&self) -> &[Receiver] { &self.0 } } #[cfg(test)] pub(crate) mod test_vectors; #[cfg(test)] mod tests { use assert_matches::assert_matches; use std::convert::TryFrom; use proptest::{ array::{uniform11, uniform20, uniform32}, prelude::*, }; use super::{Address, ParseError, Receiver, Typecode, MAINNET, REGTEST, TESTNET}; prop_compose! { fn uniform43()(a in uniform11(0u8..), b in uniform32(0u8..)) -> [u8; 43] { let mut c = [0; 43]; c[..11].copy_from_slice(&a); c[11..].copy_from_slice(&b); c } } fn arb_shielded_receiver() -> BoxedStrategy<Receiver> { prop_oneof![ uniform43().prop_map(Receiver::Sapling), uniform43().prop_map(Receiver::Orchard), ] .boxed() } fn arb_transparent_receiver() -> BoxedStrategy<Receiver> { prop_oneof![ uniform20(0u8..).prop_map(Receiver::P2pkh), uniform20(0u8..).prop_map(Receiver::P2sh), ] .boxed() } prop_compose! { fn arb_unified_address()( shielded in prop::collection::hash_set(arb_shielded_receiver(), 1..2), transparent in prop::option::of(arb_transparent_receiver()), ) -> Address { Address(shielded.into_iter().chain(transparent).collect()) } } proptest! { #[test] fn ua_roundtrip( hrp in prop_oneof![MAINNET, TESTNET, REGTEST], ua in arb_unified_address(), ) { let bytes = ua.to_bytes(&hrp); let decoded = Address::try_from((hrp.as_str(), &bytes[..])); prop_assert_eq!(decoded, Ok(ua)); } } #[test] fn padding() { // The test cases below use `Address(vec![Receiver::Orchard([1; 43])])` as base. // Invalid padding ([0xff; 16] instead of [b'u', 0x00, 0x00, 0x00...]) let invalid_padding = [ 0xe6, 0x59, 0xd1, 0xed, 0xf7, 0x4b, 0xe3, 0x5e, 0x5a, 0x54, 0x0e, 0x41, 0x5d, 0x2f, 0x0c, 0x0d, 0x33, 0x42, 0xbd, 0xbe, 0x9f, 0x82, 0x62, 0x01, 0xc1, 0x1b, 0xd4, 0x1e, 0x42, 0x47, 0x86, 0x23, 0x05, 0x4b, 0x98, 0xd7, 0x76, 0x86, 0xa5, 0xe3, 0x1b, 0xd3, 0x03, 0xca, 0x24, 0x44, 0x8e, 0x72, 0xc1, 0x4a, 0xc6, 0xbf, 0x3f, 0x2b, 0xce, 0xa7, 0x7b, 0x28, 0x69, 0xc9, 0x84, ]; assert_eq!( Address::try_from((MAINNET, &invalid_padding[..])), Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned() )) ); // Short padding (padded to 15 bytes instead of 16) let truncated_padding = [ 0x9a, 0x56, 0x12, 0xa3, 0x43, 0x45, 0xe0, 0x82, 0x6c, 0xac, 0x24, 0x8b, 0x3b, 0x45, 0x72, 0x9a, 0x53, 0xd5, 0xf8, 0xda, 0xec, 0x07, 0x7c, 0xba, 0x9f, 0xa8, 0xd2, 0x97, 0x5b, 0xda, 0x73, 0x1b, 0xd2, 0xd1, 0x32, 0x6b, 0x7b, 0x36, 0xdd, 0x57, 0x84, 0x2a, 0xa0, 0x21, 0x23, 0x89, 0x73, 0x85, 0xe1, 0x4b, 0x3e, 0x95, 0xb7, 0xd4, 0x67, 0xbc, 0x4b, 0x31, 0xee, 0x5a, ]; assert_eq!( Address::try_from((MAINNET, &truncated_padding[..])), Err(ParseError::InvalidEncoding( "Invalid padding bytes".to_owned() )) ); } #[test] fn truncated() { // The test cases below start from an encoding of // `Address(vec![Receiver::Orchard([1; 43]), Receiver::Sapling([2; 43])])` // with the receiver data truncated, but valid padding. // - Missing the last data byte of the Sapling receiver. 
let truncated_sapling_data = [ 0xaa, 0xb0, 0x6e, 0x7b, 0x26, 0x7a, 0x22, 0x17, 0x39, 0xfa, 0x07, 0x69, 0xe9, 0x32, 0x2b, 0xac, 0x8c, 0x9e, 0x5e, 0x8a, 0xd9, 0x24, 0x06, 0x5a, 0x13, 0x79, 0x3a, 0x8d, 0xb4, 0x52, 0xfa, 0x18, 0x4e, 0x33, 0x4d, 0x8c, 0x17, 0x77, 0x4d, 0x63, 0x69, 0x34, 0x22, 0x70, 0x3a, 0xea, 0x30, 0x82, 0x5a, 0x6b, 0x37, 0xd1, 0x0d, 0xbe, 0x20, 0xab, 0x82, 0x86, 0x98, 0x34, 0x6a, 0xd8, 0x45, 0x40, 0xd0, 0x25, 0x60, 0xbf, 0x1e, 0xb6, 0xeb, 0x06, 0x85, 0x70, 0x4c, 0x42, 0xbc, 0x19, 0x14, 0xef, 0x7a, 0x05, 0xa0, 0x71, 0xb2, 0x63, 0x80, 0xbb, 0xdc, 0x12, 0x08, 0x48, 0x28, 0x8f, 0x1c, 0x9e, 0xc3, 0x42, 0xc6, 0x5e, 0x68, 0xa2, 0x78, 0x6c, 0x9e, ]; assert_matches!( Address::try_from((MAINNET, &truncated_sapling_data[..])), Err(ParseError::InvalidEncoding(_)) ); // - Truncated after the typecode of the Sapling receiver. let truncated_after_sapling_typecode = [ 0x87, 0x7a, 0xdf, 0x79, 0x6b, 0xe3, 0xb3, 0x40, 0xef, 0xe4, 0x5d, 0xc2, 0x91, 0xa2, 0x81, 0xfc, 0x7d, 0x76, 0xbb, 0xb0, 0x58, 0x98, 0x53, 0x59, 0xd3, 0x3f, 0xbc, 0x4b, 0x86, 0x59, 0x66, 0x62, 0x75, 0x92, 0xba, 0xcc, 0x31, 0x1e, 0x60, 0x02, 0x3b, 0xd8, 0x4c, 0xdf, 0x36, 0xa1, 0xac, 0x82, 0x57, 0xed, 0x0c, 0x98, 0x49, 0x8f, 0x49, 0x7e, 0xe6, 0x70, 0x36, 0x5b, 0x7b, 0x9e, ]; assert_matches!( Address::try_from((MAINNET, &truncated_after_sapling_typecode[..])), Err(ParseError::InvalidEncoding(_)) ); } #[test] fn duplicate_typecode() { // Construct and serialize an invalid UA. let ua = Address(vec![Receiver::Sapling([1; 43]), Receiver::Sapling([2; 43])]); let encoded = ua.to_bytes(MAINNET); assert_eq!( Address::try_from((MAINNET, &encoded[..])), Err(ParseError::DuplicateTypecode(Typecode::Sapling)) ); } #[test] fn p2pkh_and_p2sh() { // Construct and serialize an invalid UA. let ua = Address(vec![Receiver::P2pkh([0; 20]), Receiver::P2sh([0; 20])]); let encoded = ua.to_bytes(MAINNET);
impl Typecode { fn is_transparent(&self) -> bool {
random_line_split
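As a side note on the API above: the difference between `receivers` and `receivers_as_parsed` is only the ordering. Below is a minimal sketch of that contract; it assumes the `Address` and `Receiver` types from the listing, that `Receiver` implements `Clone`, `Debug`, and `PartialEq`, and that transparent typecodes sort before shielded ones (as in ZIP 316). The byte arrays are placeholders.

```rust
// Sketch only, under the assumptions stated above.
let ua = Address(vec![
    Receiver::Sapling([1; 43]), // parsed first
    Receiver::P2pkh([0; 20]),   // parsed second
]);

// `receivers_as_parsed` preserves the original encoding order...
assert_eq!(ua.receivers_as_parsed()[0], Receiver::Sapling([1; 43]));

// ...while `receivers` returns a canonical, typecode-sorted order, so the
// transparent (lower-typecode) receiver comes first.
assert_eq!(ua.receivers()[0], Receiver::P2pkh([0; 20]));
```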
game.rs
use std::ops::Add; use super::rand::{thread_rng, Rng}; use super::direction::Direction; /// A mask with a single section of 16 bits set to 1. /// Used to extract a "horizontal slice" out of a 64 bit integer. pub static ROW_MASK: u64 = 0xFFFF; /// A `u64` mask with 4 sections each starting after the n * 16th bit. /// Used to extract a "vertical slice" out of a 64 bit integer. pub static COL_MASK: u64 = 0x000F_000F_000F_000F_u64; /// Struct that contains all available moves per row for up, down, right and left. /// Also stores the score for a given row. /// /// Moves are stored as power values for tiles. /// If a power value is `> 0`, print the tile value using `1 << tile` where tile is any 4-bit /// "nybble", otherwise print a `0` instead. struct Moves { pub left: Vec<u64>, pub right: Vec<u64>, pub down: Vec<u64>, pub up: Vec<u64>, pub scores: Vec<u64> } impl Moves { /// Spreads the four nybbles of a 16-bit row into the low nybble of each 16-bit section, /// producing a single "column" of the board (masked by `COL_MASK`). pub fn column_from(board: u64) -> u64 { (board | (board << 12) | (board << 24) | (board << 36)) & COL_MASK } } lazy_static! { /// Constructs a new `tfe::Moves`. /// /// `Moves` stores `right`, `left`, `up`, and `down` moves per row. /// e.g. left: `0x0011 -> 0x2000` and right: `0x0011 -> 0x0002`. /// /// Also stores the `scores` per row. /// The score of a row is the sum of the tile and all intermediate tile merges. /// e.g. row `0x0002` has a score of `4` and row `0x0003` has a score of `16`. static ref MOVES: Moves = { // initialization of move tables let mut left_moves = vec![0; 65536]; let mut right_moves = vec![0; 65536]; let mut up_moves = vec![0; 65536]; let mut down_moves = vec![0; 65536]; let mut scores = vec![0; 65536]; for row in 0..65536 { // break row into cells let mut line = [ (row >> 0) & 0xF, (row >> 4) & 0xF, (row >> 8) & 0xF, (row >> 12) & 0xF ]; // calculate score for given row: a tile of rank > 1 contributes // (rank - 1) * (1 << rank), i.e. its value plus all intermediate merges, // which matches the documented examples above (0x0002 -> 4, 0x0003 -> 16) let mut s = 0; for i in 0..4 { if line[i] > 1 { s += (line[i] - 1) * (1 << line[i]) } } scores[row as usize] = s; let mut i = 0; // perform a move to the left using current {row} as board // generates 4 output moves for up, down, left and right by transposing and reversing // this result. while i < 3 { // initial counter for the cell next to the current one (j) let mut j = i + 1; // find the next non-zero cell index while j < 4 { if line[j] != 0 { break }; j += 1; }; // if j is out of bounds (> 3), all other cells are empty and we are done looping if j == 4 { break }; // this is the part responsible for skipping empty (0 value) cells // if the current cell is zero, shift the next non-zero cell to position i // and retry this entry until line[i] becomes non-zero if line[i] == 0 { line[i] = line[j]; line[j] = 0; continue; // otherwise, if the current cell and next cell are the same, merge them } else if line[i] == line[j] { if line[i] != 0xF { line[i] += 1 }; line[j] = 0; } // finally, move to the next (or current, if i was 0) cell i += 1; } // put the new row after merging back together into a "merged" row let result = (line[0] << 0) | (line[1] << 4) | (line[2] << 8) | (line[3] << 12); // right and down use normal row and result variables. // for left and up, we create a reverse of the row and result. let rev_row = (row >> 12) & 0x000F | (row >> 4) & 0x00F0 | (row << 4) & 0x0F00 | (row << 12) & 0xF000; let rev_res = (result >> 12) & 0x000F | (result >> 4) & 0x00F0 | (result << 4) & 0x0F00 | (result << 12) & 0xF000; // results are keyed by row / reverse row index. 
let row_idx = row as usize; let rev_idx = rev_row as usize; right_moves[row_idx] = row ^ result; left_moves[rev_idx] = rev_row ^ rev_res; up_moves[rev_idx] = Moves::column_from(rev_row) ^ Moves::column_from(rev_res); down_moves[row_idx] = Moves::column_from(row) ^ Moves::column_from(result); }; Moves { left: left_moves, right: right_moves, down: down_moves, up: up_moves, scores: scores } }; } /// Struct used to play a single game of 2048. /// /// `tfe::Game` uses a single `u64` as board value. /// The board itself is divided into rows (4x 16-bit "rows" per "board") which are /// divided into tiles (4x 4-bit "nybbles" per "row"). /// /// All manipulations are done using bit-shifts and a precomputed table of moves and scores. /// Every move is stored as four lookups total, one for each row. The result of XOR'ing each row /// back into the board at the right position is the output board. pub struct Game { pub board: u64 } impl Game { /// Constructs a new `tfe::Game`. /// /// `Game` stores a board internally as a `u64`. /// /// # Examples /// /// Simple example: /// /// ``` /// use tfe::Game; /// /// let mut game = Game::new(); /// # println!("{:016x}", game.board); /// ``` /// /// Accessing board value: /// /// ``` /// use tfe::Game; /// /// let mut game = Game::new(); /// println!("{:016x}", game.board); /// ``` pub fn new() -> Self { let mut game = Game { board: 0x0000_0000_0000_0000_u64 }; game.board |= Self::spawn_tile(game.board); game.board |= Self::spawn_tile(game.board); game } /// Like `new` but takes a closure that accepts two parameters and returns /// a `Direction`. The parameters passed to the closure: /// /// - `u64`: The current board /// - `&Vec<Direction>`: A list of attempted moves that had no effect. /// Gets cleared when a move succeeds. /// /// # Examples /// /// Simple example: /// /// ``` /// use tfe::{Game, Direction}; /// /// let game = Game::play(|_board, failed| Direction::sample_without(failed)); /// ``` /// /// In this example, the variable `game` will have a value of a single `Game` played to /// completion. A game is over when it has no moves left. This is true when all possible /// moves return the same resulting board as before the move was executed. /// /// The `failed: &Vec<Direction>` will contain **at most** 3 items; when the 4th item is added, /// the game ends automatically without calling the closure again. pub fn play<F: Fn(u64, &Vec<Direction>) -> Direction>(mv: F) -> Self { let mut game = Self::new(); let mut attempted: Vec<Direction> = Vec::with_capacity(4); loop { let mv = mv(game.board, &attempted); if !attempted.iter().any(|dir| dir == &mv) { let result_board = Self::execute(game.board, &mv); if game.board == result_board { if attempted.len() == 3 { break } attempted.push(mv); } else { game.board = result_board | Self::spawn_tile(result_board); attempted.clear(); } } } game } /// Returns `board` moved in given `direction`. 
/// /// - When `Direction::Left`, return board moved left /// - When `Direction::Right`, return board moved right /// - When `Direction::Down`, return board moved down /// - When `Direction::Up`, return board moved up /// /// # Examples /// /// Simple example: /// /// ``` /// use tfe::{Game, Direction}; /// /// let board = 0x0000_0000_0022_1100; /// let moved = Game::execute(board, &Direction::Left); /// /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 4 | 4 | | 8 | 0 | 0 | 0 | /// // | 2 | 2 | 0 | 0 | | 4 | 0 | 0 | 0 | /// /// assert_eq!(board, 0x0000_0000_0022_1100); /// assert_eq!(moved, 0x0000_0000_3000_2000); /// ``` pub fn execute(board: u64, direction: &Direction) -> u64 { match direction { Direction::Left => Self::move_left(board), Direction::Right => Self::move_right(board), Direction::Down => Self::move_down(board), Direction::Up => Self::move_up(board) } } /// Returns a transposed board where rows are transformed into columns and vice versa. /// /// ``` /// use tfe::Game;
/// /// // | F | E | D | C | | F | B | 7 | 3 | /// // | B | A | 9 | 8 | => | E | A | 6 | 2 | /// // | 7 | 6 | 5 | 4 | | D | 9 | 5 | 1 | /// // | 3 | 2 | 1 | 0 | | C | 8 | 4 | 0 | /// /// assert_eq!(Game::transpose(0xFEDC_BA98_7654_3210), 0xFB73_EA62_D951_C840); /// ``` pub fn transpose(board: u64) -> u64 { let a1 = board & 0xF0F0_0F0F_F0F0_0F0F_u64; let a2 = board & 0x0000_F0F0_0000_F0F0_u64; let a3 = board & 0x0F0F_0000_0F0F_0000_u64; let a = a1 | (a2 << 12) | (a3 >> 12); let b1 = a & 0xFF00_FF00_00FF_00FF_u64; let b2 = a & 0x00FF_00FF_0000_0000_u64; let b3 = a & 0x0000_0000_FF00_FF00_u64; b1 | (b2 >> 24) | (b3 << 24) } /// Returns a `u64` board moved up. /// This is the same as calling `Game::execute(board, &Direction::Up)`; /// /// # Examples /// /// ``` /// use tfe::Game; /// /// let board = 0x0000_0000_0000_0011_u64; /// let result = Game::move_up(board); /// /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 1 | 1 | /// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 1 | 1 | | 0 | 0 | 0 | 0 | /// /// assert_eq!(result, 0x0011_0000_0000_0000); /// ``` pub fn move_up(board: u64) -> u64 { let mut result = board; let transposed = Self::transpose(board); result ^= MOVES.up[((transposed >> 0) & ROW_MASK) as usize] << 0; result ^= MOVES.up[((transposed >> 16) & ROW_MASK) as usize] << 4; result ^= MOVES.up[((transposed >> 32) & ROW_MASK) as usize] << 8; result ^= MOVES.up[((transposed >> 48) & ROW_MASK) as usize] << 12; result } /// Returns a `u64` board moved down. /// This is the same as calling `Game::execute(board, &Direction::Down)`; /// /// # Examples /// /// ``` /// use tfe::Game; /// /// let board = 0x0011_0000_0000_0011_u64; /// let result = Game::move_down(board); /// /// // | 0 | 0 | 1 | 1 | | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 1 | 1 | | 0 | 0 | 2 | 2 | /// /// assert_eq!(result, 0x0000_0000_0000_0022); /// ``` pub fn move_down(board: u64) -> u64 { let mut result = board; let transposed = Self::transpose(board); result ^= MOVES.down[((transposed >> 0) & ROW_MASK) as usize] << 0; result ^= MOVES.down[((transposed >> 16) & ROW_MASK) as usize] << 4; result ^= MOVES.down[((transposed >> 32) & ROW_MASK) as usize] << 8; result ^= MOVES.down[((transposed >> 48) & ROW_MASK) as usize] << 12; result } /// Returns a `u64` board moved right. /// This is the same as calling `Game::execute(board, &Direction::Right)`; /// /// # Examples /// /// ``` /// use tfe::Game; /// /// let board = 0x0000_0000_0000_2211_u64; /// let result = Game::move_right(board); /// /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 2 | 2 | 1 | 1 | | 0 | 0 | 3 | 2 | /// /// assert_eq!(result, 0x0000_0000_0000_0032); /// ``` pub fn move_right(board: u64) -> u64 { let mut result = board; result ^= MOVES.right[((board >> 0) & ROW_MASK) as usize] << 0; result ^= MOVES.right[((board >> 16) & ROW_MASK) as usize] << 16; result ^= MOVES.right[((board >> 32) & ROW_MASK) as usize] << 32; result ^= MOVES.right[((board >> 48) & ROW_MASK) as usize] << 48; result } /// Returns a `u64` board moved left. 
/// This is the same as calling `Game::execute(board, &Direction::Left)`; /// /// # Examples /// /// ``` /// use tfe::Game; /// /// let board = 0x0000_0000_0000_2211_u64; /// let result = Game::move_left(board); /// /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | => | 0 | 0 | 0 | 0 | /// // | 0 | 0 | 0 | 0 | | 0 | 0 | 0 | 0 | /// // | 2 | 2 | 1 | 1 | | 3 | 2 | 0 | 0 | /// /// assert_eq!(result, 0x0000_0000_0000_3200); /// ``` pub fn move_left(board: u64) -> u64 { let mut result: u64 = board; result ^= MOVES.left[((board >> 0) & ROW_MASK) as usize] << 0; result ^= MOVES.left[((board >> 16) & ROW_MASK) as usize] << 16; result ^= MOVES.left[((board >> 32) & ROW_MASK) as usize] << 32; result ^= MOVES.left[((board >> 48) & ROW_MASK) as usize] << 48; result } /// Returns the count of tiles with a value of `0`. /// /// # Examples /// /// ``` /// use tfe::Game; /// /// let board = 0x0000_0000_0000_2211_u64; /// let result = Game::count_empty(board); /// /// assert_eq!(result, 12); /// ``` pub fn count_empty(board: u64) -> u32 { let mut empty = 0; for i in 0..16 { if ((board >> (i * 4)) & 0xF) == 0 { empty += 1 } } empty } /// Returns the sum of 4 lookups in `table` for each "row" in `board`. pub fn table_helper<T: Clone + Add<Output = T>>(board: u64, table: &Vec<T>) -> T { table[((board >> 0) & ROW_MASK) as usize].clone() + table[((board >> 16) & ROW_MASK) as usize].clone() + table[((board >> 32) & ROW_MASK) as usize].clone() + table[((board >> 48) & ROW_MASK) as usize].clone() } /// Returns the score of a given `board`. /// The score of a single tile is the sum of the tile value and all intermediate merged tiles. pub fn score(board: u64) -> u64 { Self::table_helper(board, &MOVES.scores) } /// Returns tile power `1` (a 2) with 90% chance and `2` (a 4) with 10% chance. pub fn tile() -> u64 { // gen_range(0, 10) yields 0..=9, so compare against 0 for the 10% case; // the original `== 10` could never match, so a 4 would never spawn. if thread_rng().gen_range(0, 10) == 0 { 2 } else { 1 } } /// Returns a new tile power (`1` or `2`) shifted to the position of a random empty nybble in `board`. pub fn spawn_tile(board: u64) -> u64 { // NOTE: assumes at least one empty nybble; gen_range panics on an empty range. let mut tmp = board; let mut idx = thread_rng().gen_range(0, Self::count_empty(board)); let mut t = Self::tile(); loop { while (tmp & 0xF) != 0 { tmp >>= 4; t <<= 4; } if idx == 0 { break } else { idx -= 1 } tmp >>= 4; t <<= 4 } t } }
random_line_split
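A short usage sketch for the `Game::play` API above (not part of the crate's docs): it drives a whole game with a fixed move priority. It assumes only what the listing shows, i.e. that `Direction` has the four variants and implements `PartialEq`.

```rust
use tfe::{Game, Direction};

fn main() {
    // Fixed-priority strategy: prefer Left, then Down, Right, Up, skipping
    // directions that already failed on the current board. `play` clears
    // `failed` whenever a move actually changes the board.
    let game = Game::play(|_board, failed| {
        for dir in vec![Direction::Left, Direction::Down, Direction::Right, Direction::Up] {
            if !failed.contains(&dir) {
                return dir;
            }
        }
        // Unreachable in practice: `play` ends the game once all four moves fail.
        Direction::Left
    });

    println!("final board: {:016x}", game.board);
    println!("score: {}", Game::score(game.board));
}
```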
git.rs
//---------------------------------------------------------------------------// // Copyright (c) 2017-2023 Ismael Gutiérrez González. All rights reserved. // // This file is part of the Rusted PackFile Manager (RPFM) project, // which can be found here: https://github.com/Frodo45127/rpfm. // // This file is licensed under the MIT license, which can be found here: // https://github.com/Frodo45127/rpfm/blob/master/LICENSE. //---------------------------------------------------------------------------// //! This module contains the code for the limited Git support. use git2::{Reference, ReferenceFormat, Repository, Signature, StashFlags, build::CheckoutBuilder}; use std::fs::{DirBuilder, File}; use std::io::{BufWriter, Write}; use std::path::{Path, PathBuf}; use std::process::Command as SystemCommand; use crate::error::{RLibError, Result}; //-------------------------------------------------------------------------------// // Enums & Structs //-------------------------------------------------------------------------------// /// Struct containing the data needed to perform a fetch/pull from a repo. #[derive(Debug)] pub struct GitIntegration { /// Local Path of the repo. local_path: PathBuf, /// URL of the repo. url: String, /// Branch to fetch/pull. branch: String, /// Remote to fetch/pull from. remote: String, } /// Possible responses we can get from a fetch/pull. #[derive(Debug)] pub enum GitResponse { NewUpdate, NoUpdate, NoLocalFiles, Diverged, } //---------------------------------------------------------------------------// // Enum & Structs Implementations //---------------------------------------------------------------------------// impl GitIntegration { /// This function creates a new GitIntegration struct with data for a git operation. pub fn new(local_path: &Path, url: &str, branch: &str, remote: &str) -> Self { Self { local_path: local_path.to_owned(), url: url.to_owned(), branch: branch.to_owned(), remote: remote.to_owned(), } } /// This function tries to initialize a git repo. pub fn init(&self) -> Result<Repository> { Repository::init(&self.local_path).map_err(From::from) } /// This function generates a gitignore file for the git repo. /// /// If it already exists, it'll replace the existing file. pub fn add_gitignore(&self, contents: &str) -> Result<()> { let mut file = BufWriter::new(File::create(self.local_path.join(".gitignore"))?); file.write_all(contents.as_bytes()).map_err(From::from) } /// This function switches the branch of a `GitIntegration` to the provided refspec. pub fn checkout_branch(&self, repo: &Repository, refs: &str) -> Result<()> { let head = repo.head().unwrap(); let oid = head.target().unwrap(); let commit = repo.find_commit(oid)?; let branch_name = refs.splitn(3, '/').collect::<Vec<_>>()[2].to_owned(); let _ = repo.branch(&branch_name, &commit, false); let branch_object = repo.revparse_single(refs)?; repo.checkout_tree(&branch_object, None)?; repo.set_head(refs)?; Ok(()) } /// This function checks if there is a new update for the current repo. pub fn check_update(&self) -> Result<GitResponse> { let mut repo = match Repository::open(&self.local_path) { Ok(repo) => repo, // If this fails, it means either we don't have the repo downloaded, or we have a folder without the .git folder. Err(_) => return Ok(GitResponse::NoLocalFiles), }; // Just in case there are loose changes, stash them. // Ignore a fail on this, as it's possible we don't have contents to stash. 
let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase(); let master_refname = format!("refs/heads/{}", self.branch); let signature = Signature::now("RPFM Updater", "-")?; let stash_id = repo.stash_save(&signature, &format!("Stashed changes before checking for updates from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED)); // In case we're not in master, checkout the master branch. if current_branch_name != master_refname { self.checkout_branch(&repo, &master_refname)?; } // Fetch the info of the master branch. repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?; let analysis = { let fetch_head = repo.find_reference("FETCH_HEAD")?; let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?; repo.merge_analysis(&[&fetch_commit])? }; // Reset the repo to its original state after the check if current_branch_name != master_refname { self.checkout_branch(&repo, &current_branch_name)?; } if stash_id.is_ok() { let _ = repo.stash_pop(0, None); } if analysis.0.is_up_to_date() { Ok(GitResponse::NoUpdate) } // If the branch is a fast-forward, or has diverged, ask for an update. else if analysis.0.is_fast_forward() || analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() { Ok(GitResponse::NewUpdate) } // Otherwise, it means the branches diverged. In this case, return a diverged. else { Ok(GitResponse::Diverged) } } /// This function downloads the latest revision of the current repository. pub fn update_repo(&self) -> Result<()> { let mut new_repo = false; let mut repo = match Repository::open(&self.local_path) { Ok(repo) => repo, Err(_) => { // If it fails to open, it means either we don't have the .git folder, or we don't have a folder at all. // In either case, recreate it and redownload the repo. No more steps are needed here. // On windows, remove the read-only flags before doing anything else, or this will fail. if cfg!(target_os = "windows") { let path = self.local_path.to_string_lossy().to_string() + "\\*.*"; let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output(); } let _ = std::fs::remove_dir_all(&self.local_path); DirBuilder::new().recursive(true).create(&self.local_path)?; match Repository::clone(&self.url, &self.local_path) { Ok(repo) => { new_repo = true; repo }, Err(_) => return Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())), } } }; // Just in case there are loose changes, stash them. // Ignore a fail on this, as it's possible we don't have contents to stash. let current_branch_name = Reference::normalize_name(repo.head()?.name().unwrap(), ReferenceFormat::ALLOW_ONELEVEL)?.to_lowercase(); let master_refname = format!("refs/heads/{}", self.branch); let signature = Signature::now("RPFM Updater", "-")?; let stash_id = repo.stash_save(&signature, &format!("Stashed changes before update from branch {current_branch_name}"), Some(StashFlags::INCLUDE_UNTRACKED)); // In case we're not in master, checkout the master branch. if current_branch_name != master_refname { self.checkout_branch(&repo, &master_refname)?; } // If we just cloned a new repo and changed branches, return. if new_repo { return Ok(()); } // If it worked, now we have to do a pull from master. Sadly, git2-rs does not support pull. // Instead, we kind of force a fast-forward, based on a StackOverflow answer. 
repo.find_remote(&self.remote)?.fetch(&[&self.branch], None, None)?; let (analysis, fetch_commit_id) = { let fetch_head = repo.find_reference("FETCH_HEAD")?; let fetch_commit = repo.reference_to_annotated_commit(&fetch_head)?; (repo.merge_analysis(&[&fetch_commit])?, fetch_commit.id()) }; // If we're up to date, nothing more is needed. if analysis.0.is_up_to_date() { // Reset the repo to its original state after the check if current_branch_name != master_refname { self.checkout_branch(&repo, &current_branch_name)?; } if stash_id.is_ok() { let _ = repo.stash_pop(0, None); } Err(RLibError::GitErrorNoUpdatesAvailable(self.url.to_owned())) } // If we can do a fast-forward, we do it. This is the preferred option. else if analysis.0.is_fast_forward() { let mut reference = repo.find_reference(&master_refname)?; reference.set_target(fetch_commit_id, "Fast-Forward")?; repo.set_head(&master_refname)?; repo.checkout_head(Some(CheckoutBuilder::default().force())).map_err(From::from) } // If not, we face multiple problems: // - If there are uncommitted changes: covered by the stash. // - If we're not in the branch: covered by the branch switch. // - If the branches diverged: this one... the cleanest way to deal with it should be to redownload the repo. else if analysis.0.is_normal() || analysis.0.is_none() || analysis.0.is_unborn() { // On windows, remove the read-only flags before doing anything else, or this will fail. if cfg!(target_os = "windows") {
let _ = std::fs::remove_dir_all(&self.local_path); self.update_repo() } else { // Reset the repo to its original state after the check if current_branch_name != master_refname { self.checkout_branch(&repo, &current_branch_name)?; } if stash_id.is_ok() { let _ = repo.stash_pop(0, None); } Err(RLibError::GitErrorDownloadFromRepo(self.url.to_owned())) } } }
let path = self.local_path.to_string_lossy().to_string() + "\\*.*"; let _ = SystemCommand::new("attrib").arg("-r").arg(path).arg("/s").output(); }
conditional_block
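To make the intended call sequence concrete, here is a hypothetical caller for the `GitIntegration` API above; the path and URL are placeholders, not the project's real values, and `Result` is the crate's alias from `crate::error`.

```rust
use std::path::Path;

// Keep a local clone of some repository up to date (sketch only).
fn update_schemas() -> Result<()> {
    let git = GitIntegration::new(
        Path::new("schemas"),                 // local checkout directory (placeholder)
        "https://github.com/example/schemas", // repo URL (placeholder)
        "master",
        "origin",
    );

    match git.check_update()? {
        // No local clone yet, new commits available, or diverged history:
        // download or redownload the repo.
        GitResponse::NoLocalFiles | GitResponse::NewUpdate | GitResponse::Diverged => {
            git.update_repo()
        }
        GitResponse::NoUpdate => Ok(()),
    }
}
```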
main.rs
extern crate bible_reference_rs; extern crate chrono; extern crate futures; extern crate hyper; extern crate postgres; extern crate serde; extern crate url; #[macro_use] extern crate serde_json; mod models; use bible_reference_rs::*; use futures::future::{Future, FutureResult}; use hyper::service::{NewService, Service}; use hyper::{header, Body, Method, Request, Response, Server, StatusCode}; use models::*; use postgres::{Connection, TlsMode}; use serde_json::Value; use std::env; use std::fmt; const DEFAULT_URL: &'static str = "postgres://docker:docker@localhost:5432/bible"; #[derive(Debug)] enum ServiceError { NoInput, NoDatabaseConnection(String), } impl std::error::Error for ServiceError {} impl fmt::Display for ServiceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { ServiceError::NoInput => write!(f, "No input provided"), ServiceError::NoDatabaseConnection(details) => write!(f, "DB: {}", details), } } } fn connect_db() -> Result<Connection, ServiceError> { let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL)); println!("Connecting: {}", &url); match Connection::connect(url, TlsMode::None) { Ok(connection) => Ok(connection), Err(error) => { println!("Connection: {}", error); Err(ServiceError::NoDatabaseConnection(format!("{}", error))) } } } fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> { db.query( "SELECT row_to_json(rst_bible) FROM rst_bible WHERE book_id = $1 AND chapter = ANY($2)", &[&id, &chapters], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn verses_in_chapter_by_verses( db: &Connection, id: i16, chapter: i16, verses: Vec<i16>, ) -> Vec<Value> { db.query( "SELECT row_to_json(rst_bible) FROM rst_bible WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)", &[&id, &chapter, &verses], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> { if refs.is_empty() { return vec![]; } let valid: Vec<BookRef> = refs .iter() .flat_map(|r| { let statement = db .prepare( "SELECT id, book as title, alt, abbr FROM rst_bible_books WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1 LIMIT 1", ).unwrap(); let rows = statement.query(&[&r.book]).unwrap(); if rows.is_empty() { None } else { let row = rows.iter().next().unwrap(); Some(BookRef { id: row.get(0), name: row.get(1), alt: row.get(2), locations: r.locations.clone(), }) } }).collect(); valid .iter() .map(|reference| { let book_id = reference.id; let book_title = &reference.name; let book_alt = &reference.alt; let texts = reference .locations .iter() .flat_map( move |location| match (&location.chapters, &location.verses) { // Fetch verses by chapters (chapters, None) => { let ch = chapters.into_iter().map(|v| *v as i16).collect(); Some(verses_by_chapters(&db, book_id, ch)) } // Fetch verses by chapter and verses (chapters, Some(verses)) if chapters.len() == 1 => { let ch = chapters[0] as i16; let vs = verses.into_iter().map(|v| *v as i16).collect(); Some(verses_in_chapter_by_verses(&db, book_id, ch, vs)) } _ => None, }, ).collect::<Vec<_>>(); json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts }) }).collect::<Vec<_>>() } fn fetch_daily_verses(db: &Connection) -> Vec<String> { use chrono::{Datelike, Utc}; let now = Utc::now(); let month = now.month() as i16; let day = now.day() as i16; db.query( "SELECT verses FROM rst_bible_daily WHERE month = $1 AND day = $2", &[&month, &day], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn parse_query(query: 
Option<&str>) -> FutureResult<String, ServiceError> { use std::collections::HashMap; let query = &query.unwrap_or(""); let args = url::form_urlencoded::parse(&query.as_bytes()) .into_owned() .collect::<HashMap<String, String>>(); match args .get("q") .map(|v| v.to_string()) .filter(|s| !s.is_empty()) { Some(value) => futures::future::ok(value), None => futures::future::err(ServiceError::NoInput), } } #[derive(Debug)] struct SearchPaginate { text: String, page: i16, } fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> { use std::collections::HashMap; let query = &query.unwrap_or(""); let args = url::form_urlencoded::parse(&query.as_bytes()) .into_owned() .collect::<HashMap<String, String>>(); let q = args .get("q") .map(|v| v.to_string()) .filter(|s| !s.is_empty()); let p = args .get("p") .map(|v| v.parse::<i16>().unwrap_or(1)) .unwrap_or(1); match (q, p) { (Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }), _ => futures::future::err(ServiceError::NoInput), } } // Verse Of the Day fn vod_response_body(db: &Connection) -> Body { let results = fetch_daily_verses(&db) .into_iter() .flat_map(|daily| { let refs = parse(daily.as_str()); let results = fetch_results(&db, refs); if results.is_empty() { None } else { Some(results) } }).flatten() .collect::<Vec<_>>(); Body::from(json!({ "results": results }).to_string()) } fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> { let refs = parse(query.as_str()); futures::future::ok(Body::from( json!({ "results": fetch_results(&db, refs) }).to_string(), )) } fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) { let page = if page <= 0 { 1 } else { page }; let count_rows = db .query( "SELECT COUNT(book_id) FROM rst_bible WHERE text ~* $1", &[&text], ).unwrap(); // No matches at all: return an empty result set and zero pages. if count_rows.is_empty() { return (vec![json!([])], 0); } let total: i64 = count_rows.get(0).get("count"); let offset = ((page - 1) * 10) as i64; let rows = db .query( "SELECT row_to_json(t) FROM ( SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id) WHERE text ~* $1 ) t LIMIT 10 OFFSET $2", &[&text, &offset], ).unwrap(); let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>(); (vec![json!(results)], (total as f64 / 10_f64).ceil() as i64) } fn search_text(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> { let text = &query.text; let results = fetch_search_results(text.to_string(), query.page, db); futures::future::ok(Body::from( json!({ "meta": { "text": text, "page": query.page, "total": results.1 }, "results": results.0 }).to_string(), )) } fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError>
struct SearchService; impl NewService for SearchService { type ReqBody = Body; type ResBody = Body; type Error = ServiceError; type Service = SearchService; type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>; type InitError = ServiceError; fn new_service(&self) -> Self::Future { Box::new(futures::future::ok(SearchService)) } } impl Service for SearchService { type ReqBody = Body; type ResBody = Body; type Error = ServiceError; type Future = Box<Future<Item = Response<Self::ResBody>, Error = Self::Error> + Send>; fn call(&mut self, request: Request<Self::ReqBody>) -> Self::Future { let db_connection = match connect_db() { Ok(db) => db, Err(_) => { return Box::new(futures::future::ok( Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(Body::empty()) .unwrap(), )) } }; match (request.method(), request.uri().path()) { (&Method::GET, "/refs") => Box::new( parse_query(request.uri().query()) .and_then(move |query| search_results(query, &db_connection)) .and_then(success_response) .or_else(|_| { futures::future::ok( Response::builder() .status(StatusCode::BAD_REQUEST) .body(Body::empty()) .unwrap(), ) }), ), (&Method::GET, "/search") => Box::new( parse_query_paginate(request.uri().query()) .and_then(move |query| search_text(query, &db_connection)) .and_then(success_response) .or_else(|_| { futures::future::ok( Response::builder() .status(StatusCode::BAD_REQUEST) .body(Body::empty()) .unwrap(), ) }), ), (&Method::GET, "/daily") => { Box::new(success_response(vod_response_body(&db_connection))) } _ => Box::new(futures::future::ok( Response::builder() .status(StatusCode::NOT_FOUND) .body(Body::empty()) .unwrap(), )), } } } fn main() { let addr = "127.0.0.1:8080".parse().unwrap(); let server = Server::bind(&addr) .serve(SearchService) .map_err(|e| eprintln!("Server error: {}", e)); println!("Listening {}", addr); hyper::rt::run(server); } #[cfg(test)] mod tests { use super::*; #[test] fn test_fetch_chapter() { let db = connect_db().unwrap(); let refs = parse("Быт 1"); let verses = fetch_results(&db, refs); assert_eq!(verses.len(), 1); } }
{ futures::future::ok( Response::builder() .header(header::CONTENT_TYPE, "application/json") .header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET") .header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type") .body(body) .unwrap(), ) }
identifier_body
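The record above masks the body of `success_response` (fim_type `identifier_body`); concatenating its prefix, middle, and suffix fields reproduces the original `main.rs`. A minimal, self-contained sketch of that reassembly, using a toy snippet rather than the real record fields:

// Sketch only: reassembling a FIM record. For an `identifier_body` split,
// `middle` holds the body of one function; prefix + middle + suffix is the file.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut src = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    src.push_str(prefix);
    src.push_str(middle);
    src.push_str(suffix);
    src
}

fn main() {
    let prefix = "fn answer() -> i32 "; // toy stand-ins, not the dataset fields
    let middle = "{ 42 }";
    let suffix = "\n";
    assert_eq!(reassemble(prefix, middle, suffix), "fn answer() -> i32 { 42 }\n");
}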
main.rs
extern crate bible_reference_rs; extern crate chrono; extern crate futures; extern crate hyper; extern crate postgres; extern crate serde; extern crate url; #[macro_use] extern crate serde_json; mod models; use bible_reference_rs::*; use futures::future::{Future, FutureResult}; use hyper::service::{NewService, Service}; use hyper::{header, Body, Method, Request, Response, Server, StatusCode}; use models::*; use postgres::{Connection, TlsMode}; use serde_json::Value; use std::env; use std::fmt; const DEFAULT_URL: &'static str = "postgres://docker:docker@localhost:5432/bible"; #[derive(Debug)] enum ServiceError { NoInput, NoDatabaseConnection(String), } impl std::error::Error for ServiceError {} impl fmt::Display for ServiceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { ServiceError::NoInput => write!(f, "No input provided"), ServiceError::NoDatabaseConnection(details) => write!(f, "DB: {}", details), } } } fn connect_db() -> Result<Connection, ServiceError> { let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL)); println!("Connecting: {}", &url); match Connection::connect(url, TlsMode::None) { Ok(connection) => Ok(connection), Err(error) => { println!("Connection: {}", error); Err(ServiceError::NoDatabaseConnection(format!("{}", error))) } } } fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> { db.query( "SELECT row_to_json(rst_bible) FROM rst_bible WHERE book_id = $1 AND chapter = ANY($2)", &[&id, &chapters], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn verses_in_chapter_by_verses( db: &Connection, id: i16, chapter: i16, verses: Vec<i16>, ) -> Vec<Value> { db.query( "SELECT row_to_json(rst_bible) FROM rst_bible WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)", &[&id, &chapter, &verses], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> { if refs.is_empty() { return vec![]; } let valid: Vec<BookRef> = refs .iter() .flat_map(|r| { let statement = db .prepare( "SELECT id, book as title, alt, abbr FROM rst_bible_books WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1 LIMIT 1", ).unwrap(); let rows = statement.query(&[&r.book]).unwrap(); if rows.is_empty() { None } else { let row = rows.iter().next().unwrap(); Some(BookRef { id: row.get(0), name: row.get(1), alt: row.get(2), locations: r.locations.clone(), }) } }).collect(); valid .iter() .map(|reference| { let book_id = reference.id; let book_title = &reference.name; let book_alt = &reference.alt; let texts = reference .locations .iter() .flat_map( move |location| match (&location.chapters, &location.verses) { // Fetch verses by chapters (chapters, None) => { let ch = chapters.into_iter().map(|v| *v as i16).collect(); Some(verses_by_chapters(&db, book_id, ch)) } // Fetch verses by chapter and verses (chapters, Some(verses)) if chapters.len() == 1 => { let ch = chapters[0] as i16; let vs = verses.into_iter().map(|v| *v as i16).collect(); Some(verses_in_chapter_by_verses(&db, book_id, ch, vs)) } _ => None, }, ).collect::<Vec<_>>(); json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts }) }).collect::<Vec<_>>() } fn fetch_daily_verses(db: &Connection) -> Vec<String> { use chrono::{Datelike, Utc}; let now = Utc::now(); let month = now.month() as i16; let day = now.day() as i16; db.query( "SELECT verses FROM rst_bible_daily WHERE month = $1 AND day = $2", &[&month, &day], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn parse_query(query: 
Option<&str>) -> FutureResult<String, ServiceError> { use std::collections::HashMap; let query = &query.unwrap_or(""); let args = url::form_urlencoded::parse(&query.as_bytes()) .into_owned() .collect::<HashMap<String, String>>(); match args .get("q") .map(|v| v.to_string()) .filter(|s|!s.is_empty()) { Some(value) => futures::future::ok(value), None => futures::future::err(ServiceError::NoInput), } } #[derive(Debug)] struct SearchPaginate { text: String, page: i16, } fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> { use std::collections::HashMap; let query = &query.unwrap_or(""); let args = url::form_urlencoded::parse(&query.as_bytes()) .into_owned() .collect::<HashMap<String, String>>(); let q = args .get("q") .map(|v| v.to_string()) .filter(|s|!s.is_empty()); let p = args .get("p") .map(|v| v.parse::<i16>().unwrap_or(1)) .unwrap_or(1); match (q, p) { (Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }), _ => futures::future::err(ServiceError::NoInput), } } // Verse Of the Day fn vod_response_body(db: &Connection) -> Body { let results = fetch_daily_verses(&db) .into_iter() .flat_map(|daily| { let refs = parse(daily.as_str()); let results = fetch_results(&db, refs); if results.is_empty() { None } else { Some(results) } }).flatten() .collect::<Vec<_>>(); Body::from(json!({ "results": results }).to_string()) } fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> { let refs = parse(query.as_str()); futures::future::ok(Body::from( json!({ "results": fetch_results(&db, refs) }).to_string(), )) } fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) { let page = if page <= 0 { 1 } else { page }; let count_rows = db .query( "SELECT COUNT(book_id) FROM rst_bible WHERE text ~* $1", &[&text], ).unwrap(); let mut total: i64 = 0; if count_rows.is_empty() { return (vec![json!([])], total); } else { total = count_rows.get(0).get("count"); } let offset = ((page - 1) * 10) as i64; let rows = db .query( "SELECT row_to_json(t) FROM ( SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id) WHERE text ~* $1 ) t LIMIT 10 OFFSET $2", &[&text, &offset], ).unwrap(); let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>(); (vec![json!(results)], (total as f64 / 10_f64).ceil() as i64) } fn
(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> { let text = &query.text; let results = fetch_search_results(text.to_string(), query.page, db); futures::future::ok(Body::from( json!({ "meta": { "text": text, "page": query.page, "total": results.1 }, "results": results.0 }).to_string(), )) } fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> { futures::future::ok( Response::builder() .header(header::CONTENT_TYPE, "application/json") .header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*") .header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET") .header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type") .body(body) .unwrap(), ) } struct SearchService; impl NewService for SearchService { type ReqBody = Body; type ResBody = Body; type Error = ServiceError; type Service = SearchService; type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>; type InitError = ServiceError; fn new_service(&self) -> Self::Future { Box::new(futures::future::ok(SearchService)) } } impl Service for SearchService { type ReqBody = Body; type ResBody = Body; type Error = ServiceError; type Future = Box<Future<Item = Response<Self::ResBody>, Error = Self::Error> + Send>; fn call(&mut self, request: Request<Self::ReqBody>) -> Self::Future { let db_connection = match connect_db() { Ok(db) => db, Err(_) => { return Box::new(futures::future::ok( Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(Body::empty()) .unwrap(), )) } }; match (request.method(), request.uri().path()) { (&Method::GET, "/refs") => Box::new( parse_query(request.uri().query()) .and_then(move |query| search_results(query, &db_connection)) .and_then(success_response) .or_else(|_| { futures::future::ok( Response::builder() .status(StatusCode::BAD_REQUEST) .body(Body::empty()) .unwrap(), ) }), ), (&Method::GET, "/search") => Box::new( parse_query_paginate(request.uri().query()) .and_then(move |query| search_text(query, &db_connection)) .and_then(success_response) .or_else(|_| { futures::future::ok( Response::builder() .status(StatusCode::BAD_REQUEST) .body(Body::empty()) .unwrap(), ) }), ), (&Method::GET, "/daily") => { Box::new(success_response(vod_response_body(&db_connection))) } _ => Box::new(futures::future::ok( Response::builder() .status(StatusCode::NOT_FOUND) .body(Body::empty()) .unwrap(), )), } } } fn main() { let addr = "127.0.0.1:8080".parse().unwrap(); let server = Server::bind(&addr) .serve(SearchService) .map_err(|e| eprintln!("Server error: {}", e)); println!("Listening {}", addr); hyper::rt::run(server); } #[cfg(test)] mod tests { use super::*; #[test] fn test_fetch_chapter() { let db = connect_db().unwrap(); let refs = parse("Быт 1"); let verses = fetch_results(&db, refs); assert_eq!(verses.len(), 1); } }
search_text
identifier_name
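This record masks only the identifier `search_text`. The `fetch_search_results` function surrounding it pages results ten rows at a time; its clamping, OFFSET, and page-count arithmetic can be checked in isolation. A small sketch mirroring those exact expressions:

// Pagination helpers mirroring `fetch_search_results` above:
// pages are 1-based, 10 rows per page, total pages = ceil(total / 10).
fn offset_for_page(page: i16) -> i64 {
    let page = if page <= 0 { 1 } else { page }; // same clamping as the service
    ((page - 1) * 10) as i64
}

fn total_pages(total_rows: i64) -> i64 {
    (total_rows as f64 / 10_f64).ceil() as i64
}

fn main() {
    assert_eq!(offset_for_page(0), 0); // invalid pages clamp to page 1
    assert_eq!(offset_for_page(3), 20);
    assert_eq!(total_pages(25), 3);
}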
main.rs
extern crate bible_reference_rs; extern crate chrono; extern crate futures; extern crate hyper; extern crate postgres; extern crate serde; extern crate url; #[macro_use] extern crate serde_json; mod models; use bible_reference_rs::*; use futures::future::{Future, FutureResult}; use hyper::service::{NewService, Service}; use hyper::{header, Body, Method, Request, Response, Server, StatusCode}; use models::*; use postgres::{Connection, TlsMode}; use serde_json::Value; use std::env; use std::fmt; const DEFAULT_URL: &'static str = "postgres://docker:docker@localhost:5432/bible"; #[derive(Debug)] enum ServiceError { NoInput, NoDatabaseConnection(String), } impl std::error::Error for ServiceError {} impl fmt::Display for ServiceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { ServiceError::NoInput => write!(f, "No input provided"), ServiceError::NoDatabaseConnection(details) => write!(f, "DB: {}", details), } } } fn connect_db() -> Result<Connection, ServiceError> { let url = env::var("DATABASE_URL").unwrap_or(String::from(DEFAULT_URL)); println!("Connecting: {}", &url); match Connection::connect(url, TlsMode::None) { Ok(connection) => Ok(connection), Err(error) => { println!("Connection: {}", error); Err(ServiceError::NoDatabaseConnection(format!("{}", error))) } } } fn verses_by_chapters(db: &Connection, id: i16, chapters: Vec<i16>) -> Vec<Value> { db.query( "SELECT row_to_json(rst_bible) FROM rst_bible WHERE book_id = $1 AND chapter = ANY($2)", &[&id, &chapters], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn verses_in_chapter_by_verses( db: &Connection, id: i16, chapter: i16, verses: Vec<i16>, ) -> Vec<Value> { db.query( "SELECT row_to_json(rst_bible) FROM rst_bible WHERE book_id = $1 AND chapter = $2 AND verse = ANY($3)", &[&id, &chapter, &verses], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn fetch_results(db: &Connection, refs: Vec<BibleReference>) -> Vec<Value> { if refs.is_empty() { return vec![]; } let valid: Vec<BookRef> = refs .iter() .flat_map(|r| { let statement = db .prepare( "SELECT id, book as title, alt, abbr FROM rst_bible_books WHERE book ~* $1 OR alt ~* $1 OR abbr ~* $1 LIMIT 1", ).unwrap(); let rows = statement.query(&[&r.book]).unwrap(); if rows.is_empty() { None } else { let row = rows.iter().next().unwrap(); Some(BookRef { id: row.get(0), name: row.get(1), alt: row.get(2), locations: r.locations.clone(), }) } }).collect(); valid .iter() .map(|reference| { let book_id = reference.id; let book_title = &reference.name; let book_alt = &reference.alt; let texts = reference .locations .iter() .flat_map( move |location| match (&location.chapters, &location.verses) { // Fetch verses by chapters (chapters, None) => { let ch = chapters.into_iter().map(|v| *v as i16).collect(); Some(verses_by_chapters(&db, book_id, ch)) } // Fetch verses by chapter and verses (chapters, Some(verses)) if chapters.len() == 1 => { let ch = chapters[0] as i16; let vs = verses.into_iter().map(|v| *v as i16).collect(); Some(verses_in_chapter_by_verses(&db, book_id, ch, vs)) } _ => None, }, ).collect::<Vec<_>>(); json!({ "reference": { "title": book_title, "alt": book_alt }, "texts": texts }) }).collect::<Vec<_>>() } fn fetch_daily_verses(db: &Connection) -> Vec<String> { use chrono::{Datelike, Utc}; let now = Utc::now(); let month = now.month() as i16; let day = now.day() as i16; db.query( "SELECT verses FROM rst_bible_daily WHERE month = $1 AND day = $2", &[&month, &day], ).unwrap() .iter() .map(|row| row.get(0)) .collect() } fn parse_query(query: 
Option<&str>) -> FutureResult<String, ServiceError> { use std::collections::HashMap; let query = &query.unwrap_or(""); let args = url::form_urlencoded::parse(&query.as_bytes()) .into_owned() .collect::<HashMap<String, String>>(); match args .get("q") .map(|v| v.to_string()) .filter(|s|!s.is_empty()) { Some(value) => futures::future::ok(value), None => futures::future::err(ServiceError::NoInput), } } #[derive(Debug)] struct SearchPaginate { text: String, page: i16, } fn parse_query_paginate(query: Option<&str>) -> FutureResult<SearchPaginate, ServiceError> { use std::collections::HashMap; let query = &query.unwrap_or(""); let args = url::form_urlencoded::parse(&query.as_bytes()) .into_owned() .collect::<HashMap<String, String>>(); let q = args .get("q") .map(|v| v.to_string()) .filter(|s|!s.is_empty()); let p = args .get("p") .map(|v| v.parse::<i16>().unwrap_or(1)) .unwrap_or(1); match (q, p) { (Some(q), p) => futures::future::ok(SearchPaginate { text: q, page: p }), _ => futures::future::err(ServiceError::NoInput), } } // Verse Of the Day fn vod_response_body(db: &Connection) -> Body { let results = fetch_daily_verses(&db) .into_iter() .flat_map(|daily| { let refs = parse(daily.as_str()); let results = fetch_results(&db, refs); if results.is_empty() { None } else { Some(results) } }).flatten() .collect::<Vec<_>>(); Body::from(json!({ "results": results }).to_string()) } fn search_results(query: String, db: &Connection) -> FutureResult<Body, ServiceError> { let refs = parse(query.as_str()); futures::future::ok(Body::from( json!({ "results": fetch_results(&db, refs) }).to_string(), )) } fn fetch_search_results(text: String, page: i16, db: &Connection) -> (Vec<Value>, i64) { let page = if page <= 0 { 1 } else { page }; let count_rows = db .query( "SELECT COUNT(book_id) FROM rst_bible WHERE text ~* $1", &[&text], ).unwrap(); let mut total: i64 = 0; if count_rows.is_empty() { return (vec![json!([])], total); } else { total = count_rows.get(0).get("count"); } let offset = ((page - 1) * 10) as i64; let rows = db .query( "SELECT row_to_json(t) FROM ( SELECT v.book_id, v.text, v.chapter, v.verse, b.book as book_name, b.alt as book_alt from rst_bible v LEFT OUTER JOIN rst_bible_books b on (v.book_id = b.id) WHERE text ~* $1 ) t LIMIT 10 OFFSET $2", &[&text, &offset], ).unwrap(); let results = rows.into_iter().map(|r| r.get(0)).collect::<Vec<Value>>(); (vec![json!(results)], (total as f64 / 10_f64).ceil() as i64) } fn search_text(query: SearchPaginate, db: &Connection) -> FutureResult<Body, ServiceError> { let text = &query.text; let results = fetch_search_results(text.to_string(), query.page, db); futures::future::ok(Body::from( json!({ "meta": { "text": text, "page": query.page, "total": results.1 }, "results": results.0 }).to_string(), )) } fn success_response(body: Body) -> FutureResult<Response<Body>, ServiceError> { futures::future::ok( Response::builder() .header(header::CONTENT_TYPE, "application/json") .header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")
.unwrap(), ) } struct SearchService; impl NewService for SearchService { type ReqBody = Body; type ResBody = Body; type Error = ServiceError; type Service = SearchService; type Future = Box<Future<Item = Self::Service, Error = Self::Error> + Send>; type InitError = ServiceError; fn new_service(&self) -> Self::Future { Box::new(futures::future::ok(SearchService)) } } impl Service for SearchService { type ReqBody = Body; type ResBody = Body; type Error = ServiceError; type Future = Box<Future<Item = Response<Self::ResBody>, Error = Self::Error> + Send>; fn call(&mut self, request: Request<Self::ReqBody>) -> Self::Future { let db_connection = match connect_db() { Ok(db) => db, Err(_) => { return Box::new(futures::future::ok( Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(Body::empty()) .unwrap(), )) } }; match (request.method(), request.uri().path()) { (&Method::GET, "/refs") => Box::new( parse_query(request.uri().query()) .and_then(move |query| search_results(query, &db_connection)) .and_then(success_response) .or_else(|_| { futures::future::ok( Response::builder() .status(StatusCode::BAD_REQUEST) .body(Body::empty()) .unwrap(), ) }), ), (&Method::GET, "/search") => Box::new( parse_query_paginate(request.uri().query()) .and_then(move |query| search_text(query, &db_connection)) .and_then(success_response) .or_else(|_| { futures::future::ok( Response::builder() .status(StatusCode::BAD_REQUEST) .body(Body::empty()) .unwrap(), ) }), ), (&Method::GET, "/daily") => { Box::new(success_response(vod_response_body(&db_connection))) } _ => Box::new(futures::future::ok( Response::builder() .status(StatusCode::NOT_FOUND) .body(Body::empty()) .unwrap(), )), } } } fn main() { let addr = "127.0.0.1:8080".parse().unwrap(); let server = Server::bind(&addr) .serve(SearchService) .map_err(|e| eprintln!("Server error: {}", e)); println!("Listening {}", addr); hyper::rt::run(server); } #[cfg(test)] mod tests { use super::*; #[test] fn test_fetch_chapter() { let db = connect_db().unwrap(); let refs = parse("Быт 1"); let verses = fetch_results(&db, refs); assert_eq!(verses.len(), 1); } }
.header(header::ACCESS_CONTROL_ALLOW_METHODS, "GET") .header(header::ACCESS_CONTROL_ALLOW_HEADERS, "Content-Type") .body(body)
random_line_split
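For `random_line_split` records like this one, the masked middle appears to be a contiguous run of whole lines rather than a named syntactic span. A minimal sketch of such a split round-tripping, under that assumption (the helper and sample text are illustrative):

// Sketch: split a source string into (prefix, middle, suffix) on line
// boundaries, keeping newlines so concatenation reproduces the input.
fn split_lines(src: &str, start: usize, end: usize) -> (String, String, String) {
    let lines: Vec<&str> = src.split_inclusive('\n').collect();
    (lines[..start].concat(), lines[start..end].concat(), lines[end..].concat())
}

fn main() {
    let src = "a\nb\nc\nd\n";
    let (p, m, s) = split_lines(src, 1, 3);
    assert_eq!((p.as_str(), m.as_str(), s.as_str()), ("a\n", "b\nc\n", "d\n"));
    assert_eq!(format!("{}{}{}", p, m, s), src); // lossless round trip
}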
borrow_set.rs
use crate::borrow_check::place_ext::PlaceExt; use crate::borrow_check::nll::ToRegionVid; use crate::borrow_check::path_utils::allow_two_phase_borrow; use crate::dataflow::indexes::BorrowIndex; use crate::dataflow::move_paths::MoveData; use rustc::mir::traversal; use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext}; use rustc::mir::{self, Location, Body, Local}; use rustc::ty::{RegionVid, TyCtxt}; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_index::vec::IndexVec; use rustc_index::bit_set::BitSet; use std::fmt; use std::ops::Index; crate struct BorrowSet<'tcx> { /// The fundamental map relating bitvector indexes to the borrows /// in the MIR. crate borrows: IndexVec<BorrowIndex, BorrowData<'tcx>>, /// Each borrow is also uniquely identified in the MIR by the /// `Location` of the assignment statement in which it appears on /// the right hand side; we map each such location to the /// corresponding `BorrowIndex`. crate location_map: FxHashMap<Location, BorrowIndex>, /// Locations which activate borrows. /// NOTE: a given location may activate more than one borrow in the future /// when more general two-phase borrow support is introduced, but for now we /// only need to store one borrow index. crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>, /// Map from local to all the borrows on that local. crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>, crate locals_state_at_exit: LocalsStateAtExit, } impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> { type Output = BorrowData<'tcx>; fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> { &self.borrows[index] } } /// Location where a two-phase borrow is activated, if a borrow /// is in fact a two-phase borrow. #[derive(Copy, Clone, PartialEq, Eq, Debug)] crate enum TwoPhaseActivation { NotTwoPhase, NotActivated, ActivatedAt(Location), } #[derive(Debug, Clone)] crate struct BorrowData<'tcx> { /// Location where the borrow reservation starts. /// In many cases, this will be equal to the activation location but not always. crate reserve_location: Location, /// Location where the borrow is activated. crate activation_location: TwoPhaseActivation, /// What kind of borrow this is crate kind: mir::BorrowKind, /// The region for which this borrow is live crate region: RegionVid, /// Place from which we are borrowing crate borrowed_place: mir::Place<'tcx>, /// Place to which the borrow was stored crate assigned_place: mir::Place<'tcx>, } impl<'tcx> fmt::Display for BorrowData<'tcx> { fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { let kind = match self.kind { mir::BorrowKind::Shared => "", mir::BorrowKind::Shallow => "shallow ", mir::BorrowKind::Unique => "uniq ", mir::BorrowKind::Mut {.. } => "mut ", }; write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place) } } crate enum LocalsStateAtExit { AllAreInvalidated, SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> } } impl LocalsStateAtExit { fn
( locals_are_invalidated_at_exit: bool, body: &Body<'tcx>, move_data: &MoveData<'tcx> ) -> Self { struct HasStorageDead(BitSet<Local>); impl<'tcx> Visitor<'tcx> for HasStorageDead { fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) { if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) { self.0.insert(*local); } } } if locals_are_invalidated_at_exit { LocalsStateAtExit::AllAreInvalidated } else { let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len())); has_storage_dead.visit_body(body); let mut has_storage_dead_or_moved = has_storage_dead.0; for move_out in &move_data.moves { if let Some(index) = move_data.base_local(move_out.path) { has_storage_dead_or_moved.insert(index); } } LocalsStateAtExit::SomeAreInvalidated{ has_storage_dead_or_moved } } } } impl<'tcx> BorrowSet<'tcx> { pub fn build( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, locals_are_invalidated_at_exit: bool, move_data: &MoveData<'tcx>, ) -> Self { let mut visitor = GatherBorrows { tcx, body, idx_vec: IndexVec::new(), location_map: Default::default(), activation_map: Default::default(), local_map: Default::default(), pending_activations: Default::default(), locals_state_at_exit: LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data), }; for (block, block_data) in traversal::preorder(body) { visitor.visit_basic_block_data(block, block_data); } BorrowSet { borrows: visitor.idx_vec, location_map: visitor.location_map, activation_map: visitor.activation_map, local_map: visitor.local_map, locals_state_at_exit: visitor.locals_state_at_exit, } } crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] { self.activation_map .get(&location) .map(|activations| &activations[..]) .unwrap_or(&[]) } } struct GatherBorrows<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>, location_map: FxHashMap<Location, BorrowIndex>, activation_map: FxHashMap<Location, Vec<BorrowIndex>>, local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>, /// When we encounter a 2-phase borrow statement, it will always /// be assigning into a temporary TEMP: /// /// TEMP = &foo /// /// We add TEMP into this map with `b`, where `b` is the index of /// the borrow. When we find a later use of this activation, we /// remove from the map (and add to the "tombstone" set below). 
pending_activations: FxHashMap<mir::Local, BorrowIndex>, locals_state_at_exit: LocalsStateAtExit, } impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> { fn visit_assign( &mut self, assigned_place: &mir::Place<'tcx>, rvalue: &mir::Rvalue<'tcx>, location: mir::Location, ) { if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue { if borrowed_place.ignore_borrow( self.tcx, self.body, &self.locals_state_at_exit) { return; } let region = region.to_region_vid(); let borrow = BorrowData { kind, region, reserve_location: location, activation_location: TwoPhaseActivation::NotTwoPhase, borrowed_place: borrowed_place.clone(), assigned_place: assigned_place.clone(), }; let idx = self.idx_vec.push(borrow); self.location_map.insert(location, idx); self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx); if let mir::PlaceBase::Local(local) = borrowed_place.base { self.local_map.entry(local).or_default().insert(idx); } } self.super_assign(assigned_place, rvalue, location) } fn visit_local( &mut self, temp: &Local, context: PlaceContext, location: Location, ) { if!context.is_use() { return; } // We found a use of some temporary TMP // check whether we (earlier) saw a 2-phase borrow like // // TMP = &mut place if let Some(&borrow_index) = self.pending_activations.get(temp) { let borrow_data = &mut self.idx_vec[borrow_index]; // Watch out: the use of TMP in the borrow itself // doesn't count as an activation. =) if borrow_data.reserve_location == location && context == PlaceContext::MutatingUse(MutatingUseContext::Store) { return; } if let TwoPhaseActivation::ActivatedAt(other_location) = borrow_data.activation_location { span_bug!( self.body.source_info(location).span, "found two uses for 2-phase borrow temporary {:?}: \ {:?} and {:?}", temp, location, other_location, ); } // Otherwise, this is the unique later use that we expect. // Double check: This borrow is indeed a two-phase borrow (that is, // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and // we've not found any other activations (checked above). assert_eq!( borrow_data.activation_location, TwoPhaseActivation::NotActivated, "never found an activation for this borrow!", ); self.activation_map .entry(location) .or_default() .push(borrow_index); borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location); } } fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) { if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue { // double-check that we already registered a BorrowData for this let borrow_index = self.location_map[&location]; let borrow_data = &self.idx_vec[borrow_index]; assert_eq!(borrow_data.reserve_location, location); assert_eq!(borrow_data.kind, kind); assert_eq!(borrow_data.region, region.to_region_vid()); assert_eq!(borrow_data.borrowed_place, *place); } return self.super_rvalue(rvalue, location); } } impl<'a, 'tcx> GatherBorrows<'a, 'tcx> { /// If this is a two-phase borrow, then we will record it /// as "pending" until we find the activating use. 
fn insert_as_pending_if_two_phase( &mut self, start_location: Location, assigned_place: &mir::Place<'tcx>, kind: mir::BorrowKind, borrow_index: BorrowIndex, ) { debug!( "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})", start_location, assigned_place, borrow_index, ); if!allow_two_phase_borrow(kind) { debug!(" -> {:?}", start_location); return; } // When we encounter a 2-phase borrow statement, it will always // be assigning into a temporary TEMP: // // TEMP = &foo // // so extract `temp`. let temp = if let &mir::Place { base: mir::PlaceBase::Local(temp), projection: box [], } = assigned_place { temp } else { span_bug!( self.body.source_info(start_location).span, "expected 2-phase borrow to assign to a local, not `{:?}`", assigned_place, ); }; // Consider the borrow not activated to start. When we find an activation, we'll update // this field. { let borrow_data = &mut self.idx_vec[borrow_index]; borrow_data.activation_location = TwoPhaseActivation::NotActivated; } // Insert `temp` into the list of pending activations. From // now on, we'll be on the lookout for a use of it. Note that // we are guaranteed that this use will come after the // assignment. let old_value = self.pending_activations.insert(temp, borrow_index); if let Some(old_index) = old_value { span_bug!(self.body.source_info(start_location).span, "found already pending activation for temp: {:?} \ at borrow_index: {:?} with associated data {:?}", temp, old_index, self.idx_vec[old_index]); } } }
build
identifier_name
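The `BorrowSet`/`GatherBorrows` machinery in this file exists to support two-phase borrows: a mutable borrow assigned to a temporary is first reserved and only activated at its later use, so shared reads in between remain legal. A minimal program exercising exactly the pattern the pending-activation map tracks:

// Two-phase borrow in action: `v.push(v.len())` desugars roughly to
//     TEMP = &mut v;        // reservation (NotActivated)
//     len = Vec::len(&v);   // shared use, allowed during the reservation
//     Vec::push(TEMP, len); // activation (ActivatedAt)
fn main() {
    let mut v: Vec<usize> = vec![0, 1];
    v.push(v.len());
    assert_eq!(v, vec![0, 1, 2]);
}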
borrow_set.rs
use crate::borrow_check::place_ext::PlaceExt; use crate::borrow_check::nll::ToRegionVid; use crate::borrow_check::path_utils::allow_two_phase_borrow; use crate::dataflow::indexes::BorrowIndex; use crate::dataflow::move_paths::MoveData; use rustc::mir::traversal; use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext}; use rustc::mir::{self, Location, Body, Local}; use rustc::ty::{RegionVid, TyCtxt}; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_index::vec::IndexVec; use rustc_index::bit_set::BitSet; use std::fmt; use std::ops::Index; crate struct BorrowSet<'tcx> { /// The fundamental map relating bitvector indexes to the borrows /// in the MIR. crate borrows: IndexVec<BorrowIndex, BorrowData<'tcx>>, /// Each borrow is also uniquely identified in the MIR by the /// `Location` of the assignment statement in which it appears on /// the right hand side; we map each such location to the /// corresponding `BorrowIndex`. crate location_map: FxHashMap<Location, BorrowIndex>, /// Locations which activate borrows. /// NOTE: a given location may activate more than one borrow in the future /// when more general two-phase borrow support is introduced, but for now we /// only need to store one borrow index. crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>, /// Map from local to all the borrows on that local. crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>, crate locals_state_at_exit: LocalsStateAtExit, } impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> { type Output = BorrowData<'tcx>; fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> { &self.borrows[index] } } /// Location where a two-phase borrow is activated, if a borrow /// is in fact a two-phase borrow. #[derive(Copy, Clone, PartialEq, Eq, Debug)] crate enum TwoPhaseActivation { NotTwoPhase, NotActivated, ActivatedAt(Location), } #[derive(Debug, Clone)] crate struct BorrowData<'tcx> { /// Location where the borrow reservation starts. /// In many cases, this will be equal to the activation location but not always. crate reserve_location: Location, /// Location where the borrow is activated. crate activation_location: TwoPhaseActivation, /// What kind of borrow this is crate kind: mir::BorrowKind, /// The region for which this borrow is live crate region: RegionVid, /// Place from which we are borrowing crate borrowed_place: mir::Place<'tcx>, /// Place to which the borrow was stored crate assigned_place: mir::Place<'tcx>, } impl<'tcx> fmt::Display for BorrowData<'tcx> { fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { let kind = match self.kind { mir::BorrowKind::Shared => "", mir::BorrowKind::Shallow => "shallow ", mir::BorrowKind::Unique => "uniq ", mir::BorrowKind::Mut {.. } => "mut ", }; write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place) } } crate enum LocalsStateAtExit { AllAreInvalidated, SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> } } impl LocalsStateAtExit { fn build( locals_are_invalidated_at_exit: bool, body: &Body<'tcx>, move_data: &MoveData<'tcx> ) -> Self { struct HasStorageDead(BitSet<Local>); impl<'tcx> Visitor<'tcx> for HasStorageDead { fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) { if ctx == PlaceContext::NonUse(NonUseContext::StorageDead)
} } if locals_are_invalidated_at_exit { LocalsStateAtExit::AllAreInvalidated } else { let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len())); has_storage_dead.visit_body(body); let mut has_storage_dead_or_moved = has_storage_dead.0; for move_out in &move_data.moves { if let Some(index) = move_data.base_local(move_out.path) { has_storage_dead_or_moved.insert(index); } } LocalsStateAtExit::SomeAreInvalidated{ has_storage_dead_or_moved } } } } impl<'tcx> BorrowSet<'tcx> { pub fn build( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, locals_are_invalidated_at_exit: bool, move_data: &MoveData<'tcx>, ) -> Self { let mut visitor = GatherBorrows { tcx, body, idx_vec: IndexVec::new(), location_map: Default::default(), activation_map: Default::default(), local_map: Default::default(), pending_activations: Default::default(), locals_state_at_exit: LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data), }; for (block, block_data) in traversal::preorder(body) { visitor.visit_basic_block_data(block, block_data); } BorrowSet { borrows: visitor.idx_vec, location_map: visitor.location_map, activation_map: visitor.activation_map, local_map: visitor.local_map, locals_state_at_exit: visitor.locals_state_at_exit, } } crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] { self.activation_map .get(&location) .map(|activations| &activations[..]) .unwrap_or(&[]) } } struct GatherBorrows<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>, location_map: FxHashMap<Location, BorrowIndex>, activation_map: FxHashMap<Location, Vec<BorrowIndex>>, local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>, /// When we encounter a 2-phase borrow statement, it will always /// be assigning into a temporary TEMP: /// /// TEMP = &foo /// /// We add TEMP into this map with `b`, where `b` is the index of /// the borrow. When we find a later use of this activation, we /// remove from the map (and add to the "tombstone" set below). pending_activations: FxHashMap<mir::Local, BorrowIndex>, locals_state_at_exit: LocalsStateAtExit, } impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> { fn visit_assign( &mut self, assigned_place: &mir::Place<'tcx>, rvalue: &mir::Rvalue<'tcx>, location: mir::Location, ) { if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue { if borrowed_place.ignore_borrow( self.tcx, self.body, &self.locals_state_at_exit) { return; } let region = region.to_region_vid(); let borrow = BorrowData { kind, region, reserve_location: location, activation_location: TwoPhaseActivation::NotTwoPhase, borrowed_place: borrowed_place.clone(), assigned_place: assigned_place.clone(), }; let idx = self.idx_vec.push(borrow); self.location_map.insert(location, idx); self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx); if let mir::PlaceBase::Local(local) = borrowed_place.base { self.local_map.entry(local).or_default().insert(idx); } } self.super_assign(assigned_place, rvalue, location) } fn visit_local( &mut self, temp: &Local, context: PlaceContext, location: Location, ) { if!context.is_use() { return; } // We found a use of some temporary TMP // check whether we (earlier) saw a 2-phase borrow like // // TMP = &mut place if let Some(&borrow_index) = self.pending_activations.get(temp) { let borrow_data = &mut self.idx_vec[borrow_index]; // Watch out: the use of TMP in the borrow itself // doesn't count as an activation. 
=) if borrow_data.reserve_location == location && context == PlaceContext::MutatingUse(MutatingUseContext::Store) { return; } if let TwoPhaseActivation::ActivatedAt(other_location) = borrow_data.activation_location { span_bug!( self.body.source_info(location).span, "found two uses for 2-phase borrow temporary {:?}: \ {:?} and {:?}", temp, location, other_location, ); } // Otherwise, this is the unique later use that we expect. // Double check: This borrow is indeed a two-phase borrow (that is, // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and // we've not found any other activations (checked above). assert_eq!( borrow_data.activation_location, TwoPhaseActivation::NotActivated, "never found an activation for this borrow!", ); self.activation_map .entry(location) .or_default() .push(borrow_index); borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location); } } fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) { if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue { // double-check that we already registered a BorrowData for this let borrow_index = self.location_map[&location]; let borrow_data = &self.idx_vec[borrow_index]; assert_eq!(borrow_data.reserve_location, location); assert_eq!(borrow_data.kind, kind); assert_eq!(borrow_data.region, region.to_region_vid()); assert_eq!(borrow_data.borrowed_place, *place); } return self.super_rvalue(rvalue, location); } } impl<'a, 'tcx> GatherBorrows<'a, 'tcx> { /// If this is a two-phase borrow, then we will record it /// as "pending" until we find the activating use. fn insert_as_pending_if_two_phase( &mut self, start_location: Location, assigned_place: &mir::Place<'tcx>, kind: mir::BorrowKind, borrow_index: BorrowIndex, ) { debug!( "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})", start_location, assigned_place, borrow_index, ); if!allow_two_phase_borrow(kind) { debug!(" -> {:?}", start_location); return; } // When we encounter a 2-phase borrow statement, it will always // be assigning into a temporary TEMP: // // TEMP = &foo // // so extract `temp`. let temp = if let &mir::Place { base: mir::PlaceBase::Local(temp), projection: box [], } = assigned_place { temp } else { span_bug!( self.body.source_info(start_location).span, "expected 2-phase borrow to assign to a local, not `{:?}`", assigned_place, ); }; // Consider the borrow not activated to start. When we find an activation, we'll update // this field. { let borrow_data = &mut self.idx_vec[borrow_index]; borrow_data.activation_location = TwoPhaseActivation::NotActivated; } // Insert `temp` into the list of pending activations. From // now on, we'll be on the lookout for a use of it. Note that // we are guaranteed that this use will come after the // assignment. let old_value = self.pending_activations.insert(temp, borrow_index); if let Some(old_index) = old_value { span_bug!(self.body.source_info(start_location).span, "found already pending activation for temp: {:?} \ at borrow_index: {:?} with associated data {:?}", temp, old_index, self.idx_vec[old_index]); } } }
{ self.0.insert(*local); }
conditional_block
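The `conditional_block` middle above is the body of `HasStorageDead::visit_local`, which records each local whose storage dies. A standalone sketch of the same idea using std types in place of rustc's `Visitor` and `BitSet` (the event list and names are illustrative):

use std::collections::HashSet;

// Walk a flat list of (local, is_storage_dead) events and collect the
// locals marked dead, analogous to the MIR visitor filling its bit set.
fn collect_storage_dead(events: &[(u32, bool)]) -> HashSet<u32> {
    let mut dead = HashSet::new();
    for &(local, is_storage_dead) in events {
        if is_storage_dead {
            dead.insert(local);
        }
    }
    dead
}

fn main() {
    let dead = collect_storage_dead(&[(0, false), (1, true), (2, true)]);
    assert!(dead.contains(&1) && dead.contains(&2) && !dead.contains(&0));
}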
borrow_set.rs
use crate::borrow_check::place_ext::PlaceExt; use crate::borrow_check::nll::ToRegionVid; use crate::borrow_check::path_utils::allow_two_phase_borrow; use crate::dataflow::indexes::BorrowIndex; use crate::dataflow::move_paths::MoveData; use rustc::mir::traversal; use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext}; use rustc::mir::{self, Location, Body, Local}; use rustc::ty::{RegionVid, TyCtxt}; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_index::vec::IndexVec; use rustc_index::bit_set::BitSet; use std::fmt; use std::ops::Index; crate struct BorrowSet<'tcx> { /// The fundamental map relating bitvector indexes to the borrows /// in the MIR. crate borrows: IndexVec<BorrowIndex, BorrowData<'tcx>>, /// Each borrow is also uniquely identified in the MIR by the /// `Location` of the assignment statement in which it appears on /// the right hand side; we map each such location to the /// corresponding `BorrowIndex`. crate location_map: FxHashMap<Location, BorrowIndex>, /// Locations which activate borrows. /// NOTE: a given location may activate more than one borrow in the future /// when more general two-phase borrow support is introduced, but for now we /// only need to store one borrow index. crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>, /// Map from local to all the borrows on that local. crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>, crate locals_state_at_exit: LocalsStateAtExit, } impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> { type Output = BorrowData<'tcx>; fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> { &self.borrows[index] } } /// Location where a two-phase borrow is activated, if a borrow /// is in fact a two-phase borrow. #[derive(Copy, Clone, PartialEq, Eq, Debug)] crate enum TwoPhaseActivation { NotTwoPhase, NotActivated, ActivatedAt(Location), } #[derive(Debug, Clone)] crate struct BorrowData<'tcx> { /// Location where the borrow reservation starts. /// In many cases, this will be equal to the activation location but not always. crate reserve_location: Location, /// Location where the borrow is activated. crate activation_location: TwoPhaseActivation, /// What kind of borrow this is crate kind: mir::BorrowKind, /// The region for which this borrow is live crate region: RegionVid, /// Place from which we are borrowing crate borrowed_place: mir::Place<'tcx>, /// Place to which the borrow was stored crate assigned_place: mir::Place<'tcx>, } impl<'tcx> fmt::Display for BorrowData<'tcx> { fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { let kind = match self.kind { mir::BorrowKind::Shared => "", mir::BorrowKind::Shallow => "shallow ", mir::BorrowKind::Unique => "uniq ", mir::BorrowKind::Mut {.. 
} => "mut ", }; write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place) } } crate enum LocalsStateAtExit { AllAreInvalidated, SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> } } impl LocalsStateAtExit { fn build( locals_are_invalidated_at_exit: bool, body: &Body<'tcx>, move_data: &MoveData<'tcx> ) -> Self { struct HasStorageDead(BitSet<Local>); impl<'tcx> Visitor<'tcx> for HasStorageDead { fn visit_local(&mut self, local: &Local, ctx: PlaceContext, _: Location) { if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) { self.0.insert(*local); } } } if locals_are_invalidated_at_exit { LocalsStateAtExit::AllAreInvalidated } else { let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len())); has_storage_dead.visit_body(body); let mut has_storage_dead_or_moved = has_storage_dead.0; for move_out in &move_data.moves { if let Some(index) = move_data.base_local(move_out.path) { has_storage_dead_or_moved.insert(index); } }
} } impl<'tcx> BorrowSet<'tcx> { pub fn build( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, locals_are_invalidated_at_exit: bool, move_data: &MoveData<'tcx>, ) -> Self { let mut visitor = GatherBorrows { tcx, body, idx_vec: IndexVec::new(), location_map: Default::default(), activation_map: Default::default(), local_map: Default::default(), pending_activations: Default::default(), locals_state_at_exit: LocalsStateAtExit::build(locals_are_invalidated_at_exit, body, move_data), }; for (block, block_data) in traversal::preorder(body) { visitor.visit_basic_block_data(block, block_data); } BorrowSet { borrows: visitor.idx_vec, location_map: visitor.location_map, activation_map: visitor.activation_map, local_map: visitor.local_map, locals_state_at_exit: visitor.locals_state_at_exit, } } crate fn activations_at_location(&self, location: Location) -> &[BorrowIndex] { self.activation_map .get(&location) .map(|activations| &activations[..]) .unwrap_or(&[]) } } struct GatherBorrows<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, idx_vec: IndexVec<BorrowIndex, BorrowData<'tcx>>, location_map: FxHashMap<Location, BorrowIndex>, activation_map: FxHashMap<Location, Vec<BorrowIndex>>, local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>, /// When we encounter a 2-phase borrow statement, it will always /// be assigning into a temporary TEMP: /// /// TEMP = &foo /// /// We add TEMP into this map with `b`, where `b` is the index of /// the borrow. When we find a later use of this activation, we /// remove from the map (and add to the "tombstone" set below). pending_activations: FxHashMap<mir::Local, BorrowIndex>, locals_state_at_exit: LocalsStateAtExit, } impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> { fn visit_assign( &mut self, assigned_place: &mir::Place<'tcx>, rvalue: &mir::Rvalue<'tcx>, location: mir::Location, ) { if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue { if borrowed_place.ignore_borrow( self.tcx, self.body, &self.locals_state_at_exit) { return; } let region = region.to_region_vid(); let borrow = BorrowData { kind, region, reserve_location: location, activation_location: TwoPhaseActivation::NotTwoPhase, borrowed_place: borrowed_place.clone(), assigned_place: assigned_place.clone(), }; let idx = self.idx_vec.push(borrow); self.location_map.insert(location, idx); self.insert_as_pending_if_two_phase(location, &assigned_place, kind, idx); if let mir::PlaceBase::Local(local) = borrowed_place.base { self.local_map.entry(local).or_default().insert(idx); } } self.super_assign(assigned_place, rvalue, location) } fn visit_local( &mut self, temp: &Local, context: PlaceContext, location: Location, ) { if!context.is_use() { return; } // We found a use of some temporary TMP // check whether we (earlier) saw a 2-phase borrow like // // TMP = &mut place if let Some(&borrow_index) = self.pending_activations.get(temp) { let borrow_data = &mut self.idx_vec[borrow_index]; // Watch out: the use of TMP in the borrow itself // doesn't count as an activation. =) if borrow_data.reserve_location == location && context == PlaceContext::MutatingUse(MutatingUseContext::Store) { return; } if let TwoPhaseActivation::ActivatedAt(other_location) = borrow_data.activation_location { span_bug!( self.body.source_info(location).span, "found two uses for 2-phase borrow temporary {:?}: \ {:?} and {:?}", temp, location, other_location, ); } // Otherwise, this is the unique later use that we expect. 
// Double check: This borrow is indeed a two-phase borrow (that is, // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and // we've not found any other activations (checked above). assert_eq!( borrow_data.activation_location, TwoPhaseActivation::NotActivated, "never found an activation for this borrow!", ); self.activation_map .entry(location) .or_default() .push(borrow_index); borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location); } } fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) { if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue { // double-check that we already registered a BorrowData for this let borrow_index = self.location_map[&location]; let borrow_data = &self.idx_vec[borrow_index]; assert_eq!(borrow_data.reserve_location, location); assert_eq!(borrow_data.kind, kind); assert_eq!(borrow_data.region, region.to_region_vid()); assert_eq!(borrow_data.borrowed_place, *place); } return self.super_rvalue(rvalue, location); } } impl<'a, 'tcx> GatherBorrows<'a, 'tcx> { /// If this is a two-phase borrow, then we will record it /// as "pending" until we find the activating use. fn insert_as_pending_if_two_phase( &mut self, start_location: Location, assigned_place: &mir::Place<'tcx>, kind: mir::BorrowKind, borrow_index: BorrowIndex, ) { debug!( "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})", start_location, assigned_place, borrow_index, ); if!allow_two_phase_borrow(kind) { debug!(" -> {:?}", start_location); return; } // When we encounter a 2-phase borrow statement, it will always // be assigning into a temporary TEMP: // // TEMP = &foo // // so extract `temp`. let temp = if let &mir::Place { base: mir::PlaceBase::Local(temp), projection: box [], } = assigned_place { temp } else { span_bug!( self.body.source_info(start_location).span, "expected 2-phase borrow to assign to a local, not `{:?}`", assigned_place, ); }; // Consider the borrow not activated to start. When we find an activation, we'll update // this field. { let borrow_data = &mut self.idx_vec[borrow_index]; borrow_data.activation_location = TwoPhaseActivation::NotActivated; } // Insert `temp` into the list of pending activations. From // now on, we'll be on the lookout for a use of it. Note that // we are guaranteed that this use will come after the // assignment. let old_value = self.pending_activations.insert(temp, borrow_index); if let Some(old_index) = old_value { span_bug!(self.body.source_info(start_location).span, "found already pending activation for temp: {:?} \ at borrow_index: {:?} with associated data {:?}", temp, old_index, self.idx_vec[old_index]); } } }
LocalsStateAtExit::SomeAreInvalidated{ has_storage_dead_or_moved } }
random_line_split
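One invariant `insert_as_pending_if_two_phase` relies on: `HashMap::insert` returns the previous value for a key, so a `Some` result means the temporary already had a pending activation, which the real code reports via `span_bug!`. The std behavior in isolation:

use std::collections::HashMap;

// `insert` hands back the old value, which is how the gatherer detects
// an already-pending two-phase borrow for the same temporary.
fn main() {
    let mut pending: HashMap<u32, usize> = HashMap::new(); // temp local -> borrow index
    assert_eq!(pending.insert(7, 0), None);    // first reservation for temp 7
    assert_eq!(pending.insert(7, 1), Some(0)); // a duplicate: the real code would span_bug!
}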
zigzag_graph.rs
forward: vec![None; cache_entries as usize], reverse: vec![None; cache_entries as usize], cache_entries, } } pub fn contains_forward(&self, node: u32) -> bool { assert!(node < self.cache_entries); self.forward[node as usize].is_some() } pub fn contains_reverse(&self, node: u32) -> bool { assert!(node < self.cache_entries); self.reverse[node as usize].is_some() } pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T where F: FnMut(Option<&Vec<u32>>) -> T, { assert!(node < self.cache_entries); cb(self.forward[node as usize].as_ref()) } pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T where F: FnMut(Option<&Vec<u32>>) -> T, { assert!(node < self.cache_entries); cb(self.reverse[node as usize].as_ref()) } pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) { assert!(node < self.cache_entries); let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents)); debug_assert_eq!(old_value, None); // We shouldn't be rewriting entries (with most likely the same values), // this would be a clear indication of a bug. } pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) { assert!(node < self.cache_entries); let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents)); debug_assert_eq!(old_value, None); // We shouldn't be rewriting entries (with most likely the same values), // this would be a clear indication of a bug. } } #[derive(Debug, Clone)] pub struct ZigZagGraph<H, G> where H: Hasher, G: Graph<H> +'static, { expansion_degree: usize, base_graph: G, pub reversed: bool, feistel_precomputed: FeistelPrecomputed, id: String, use_cache: bool, _h: PhantomData<H>, } pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>; impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata +'static, { } impl<H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { pub fn new( base_graph: Option<G>, nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self { if!cfg!(feature = "unchecked-degrees") { assert_eq!(base_degree, BASE_DEGREE); assert_eq!(expansion_degree, EXP_DEGREE); } let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching; let base_graph = match base_graph { Some(graph) => graph, None => G::new(nodes, base_degree, 0, seed), }; let bg_id = base_graph.identifier(); let res = ZigZagGraph { base_graph, id: format!( "zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}", expansion_degree, bg_id, ), expansion_degree, use_cache, reversed: false, feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index), _h: PhantomData, }; if use_cache { info!("using parents cache of unlimited size",); assert!(nodes <= std::u32::MAX as usize); if!PARENT_CACHE.read().unwrap().contains_key(&res.id) { PARENT_CACHE .write() .unwrap() .insert(res.id.clone(), ParentCache::new(nodes as u32)); } } res } } impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { fn identifier(&self) -> String { self.id.clone() } fn sector_size(&self) -> u64 { self.base_graph.sector_size() } } pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq { type BaseHasher: Hasher; type BaseGraph: Graph<Self::BaseHasher>; /// zigzag returns a new graph with expansion component inverted and a distinct /// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes). /// The name is 'weird', but so is the operation -- hence the choice. 
fn zigzag(&self) -> Self; /// Constructs a new graph. fn base_graph(&self) -> Self::BaseGraph; fn expansion_degree(&self) -> usize; fn reversed(&self) -> bool; fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T where F: FnMut(&Vec<u32>) -> T; fn real_index(&self, i: usize) -> usize; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self; } impl<Z: ZigZag> Graph<Z::BaseHasher> for Z { fn size(&self) -> usize { self.base_graph().size() } fn degree(&self) -> usize { self.base_graph().degree() + self.expansion_degree() } #[inline] fn parents(&self, raw_node: usize, parents: &mut [usize]) { // If graph is reversed, use real_index to convert index to reversed index. // So we convert a raw reversed node to an unreversed node, calculate its parents, // then convert the parents to reversed. self.base_graph() .parents(self.real_index(raw_node), parents); for parent in parents.iter_mut().take(self.base_graph().degree()) { *parent = self.real_index(*parent); } // expanded_parents takes raw_node self.expanded_parents(raw_node, |expanded_parents| { for (ii, value) in expanded_parents.iter().enumerate() { parents[ii + self.base_graph().degree()] = *value as usize } // Pad so all nodes have correct degree. let current_length = self.base_graph().degree() + expanded_parents.len(); for ii in 0..(self.degree() - current_length) { if self.reversed() { parents[ii + current_length] = self.size() - 1 } else { parents[ii + current_length] = 0 } } }); assert!(parents.len() == self.degree()); if self.forward() { parents.sort(); } else { // Sort in reverse order. parents.sort_by(|a, b| a.cmp(b).reverse()); } assert!(parents.iter().all(|p| if self.forward() { *p <= raw_node } else { *p >= raw_node })); } fn seed(&self) -> [u32; 7] { self.base_graph().seed() } fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self { Z::new_zigzag(nodes, base_degree, expansion_degree, seed) } fn forward(&self) -> bool { !self.reversed() } } impl<'a, H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { // Assign `expansion_degree` parents to `node` using an invertible function. That // means we can't just generate random values between `[0, size())`, we need to // expand the search space (domain) to accommodate every unique parent assignment // generated here. This can be visualized more clearly as a matrix where the each // new parent of each new node is assigned a unique `index`: // // // | Parent 1 | Parent 2 | Parent 3 | // // | Node 1 | 0 | 1 | 2 | // // | Node 2 | 3 | 4 | 5 | // // | Node 3 | 6 | 7 | 8 | // // | Node 4 | 9 | A | B | // // This starting `index` will be shuffled to another position to generate a // parent-child relationship, e.g., if generating the parents for the second node, // `permute` would be called with values `[3; 4; 5]` that would be mapped to other // indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would // correspond to nodes numbered `[4; 1, 2]` which will become the parents of the // second node. In a later pass invalid parents like 2, self-referencing, and parents // with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the // inverse), will be removed. 
// // Since `permute` is a bijective function which has the inverse `invert_permute`, // it is guaranteed that when looking for the parents in the `reversed` direction // the child `node` used earlier will now actually be the parent of the output // parents generated before (inverting the relationship). Following the example, // in the reverse direction, when looking for the parents of, say, node 1, // `invert_permute` (that maps back the output of `permute` to its input) would // receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back // to the index `4` that generated it earlier, corresponding to the node 2, inverting // in fact the child-parent relationship. fn correspondent(&self, node: usize, i: usize) -> usize { let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index; let feistel_keys = &[1, 2, 3, 4]; let transformed = if self.reversed { feistel::invert_permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) } else { feistel::permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) }; transformed as usize / self.expansion_degree // Collapse the output in the matrix search space to the row of the corresponding // node (losing the column information, that will be regenerated later when calling // back this function in the `reversed` direction). } // Read the `node` entry in the parents cache (which may not exist) for // the current direction set in the graph and return a copy of it (or // `None` to signal a cache miss). fn contains_parents_cache(&self, node: usize) -> bool { if self.use_cache { if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) { if self.forward() { cache.contains_forward(node as u32) } else { cache.contains_reverse(node as u32) } } else { false } } else { false } } fn generate_expanded_parents(&self, node: usize) -> Vec<u32> { (0..self.expansion_degree) .filter_map(|i| { let other = self.correspondent(node, i); if self.reversed { if other > node { Some(other as u32) } else { None } } else if other < node { Some(other as u32) } else { None } }) .collect() } } impl<'a, H, G> ZigZag for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { type BaseHasher = H; type BaseGraph = G; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self { Self::new(None, nodes, base_degree, expansion_degree, seed) } /// To zigzag a graph, we just toggle its reversed field. /// All the real work happens when we calculate node parents on-demand. // We always share the two caches (forward/reversed) between // ZigZag graphs even if each graph will use only one of those // caches (depending of its direction). This allows to propagate // the caches across different layers, where consecutive even+odd // layers have inverse directions. fn zigzag(&self) -> Self { let mut zigzag = self.clone(); zigzag.reversed =!zigzag.reversed; zigzag } fn base_graph(&self) -> Self::BaseGraph { self.base_graph.clone() } fn expansion_degree(&self) -> usize { self.expansion_degree } fn reversed(&self) -> bool { self.reversed } // TODO: Optimization: Evaluate providing an `all_parents` (and hence // `all_expanded_parents`) method that would return the entire cache // in a single lock operation, or at least (if the cache is not big enough) // it would allow to batch parents calculations with that single lock. 
    // Also, since there is a reciprocity between forward and reversed parents,
    // we would only need to compute the parents in one direction and use
    // that to fill both caches.
    #[inline]
    fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T
    where
        F: FnMut(&Vec<u32>) -> T,
    {
        if !self.use_cache {
            // No cache usage, generate on demand.
            return cb(&self.generate_expanded_parents(node));
        }

        // Check if we need to fill the cache.
        if !self.contains_parents_cache(node) {
            // Cache is empty so we need to generate the parents.
            let parents = self.generate_expanded_parents(node);

            // Store the newly generated cached value.
            let mut cache_lock = PARENT_CACHE.write().unwrap();
            let cache = cache_lock
                .get_mut(&self.id)
                .expect("Invalid cache construction");
            if self.forward() {
                cache.write_forward(node as u32, parents);
            } else {
                cache.write_reverse(node as u32, parents);
            }
        }

        // We made sure the cache is filled above, now we can return the value.
        let cache_lock = PARENT_CACHE.read().unwrap();
        let cache = cache_lock
            .get(&self.id)
            .expect("Invalid cache construction");
        if self.forward() {
            cache.read_forward(node as u32, |parents| cb(parents.unwrap()))
        } else
} #[inline] fn real_index(&self, i: usize) -> usize { if self.reversed { (self.size() - 1) - i } else { i } } } impl<H, G> PartialEq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { fn eq(&self, other: &ZigZagGraph<H, G>) -> bool { self.base_graph == other.base_graph && self.expansion_degree == other.expansion_degree && self.reversed == other.reversed } } impl<H, G> Eq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { } #[cfg(test)] mod tests { use super::*; use std::collections::{HashMap, HashSet}; use crate::drgraph::{new_seed, BASE_DEGREE}; use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher}; fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == 0 { assert!(p == i); } else { assert!(p < i); } } } } fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == g.size() - 1 { assert!(p == i); } else { assert!(p > i); } } } } #[test] fn zigzag_graph_zigzags_pedersen() { test_zigzag_graph_zigzags::<PedersenHasher>(); } #[test] fn zigzag_graph_zigzags_sha256() { test_zigzag_graph_zigzags::<Sha256Hasher>(); } #[test] fn zigzag_graph_zigzags_blake2s() { test_zigzag_graph_zigzags::<Blake2sHasher>(); } fn test_zigzag_graph_zigzags<H:'static + Hasher>() { let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed()); let gz = g.zigzag(); assert_graph_ascending(g); assert_graph_descending(gz); } #[test] fn expansion_pedersen() { test_expansion::<PedersenHasher>(); } #[test] fn expansion_sha256() { test_expansion::<Sha256Hasher>(); } #[test] fn expansion_blake2s() { test_expansion::<Blake2sHasher>(); } fn test_expansion<H:'static + Hasher>() { // We need a graph. let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed()); // We're going to fully realize the expansion-graph component, in a HashMap. let gcache = get_all_expanded_parents(&g); // Here's the zigzag version of the graph. let gz = g.zigzag(); // And a HashMap to hold the expanded parents. let gzcache = get_all_expanded_parents(&gz); for i in 0..gz.size() { let parents = gzcache.get(&i).unwrap(); // Check to make sure all (expanded) node-parent relationships also exist in reverse, // in the original graph's Hashmap. for p in parents { assert!(gcache[&(*p as usize)].contains(&(i as u32))); } } // And then do the same check to make sure all (expanded) node-parent relationships from the original // are present in the zigzag, just reversed. for i in 0..g.size() { g.expanded_parents(i, |parents| { for p in parents.iter() { assert!(gzcache[&(*p as usize)].contains(&(i as u32))); } });
{ cache.read_reverse(node as u32, |parents| cb(parents.unwrap())) }
conditional_block
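The `correspondent` comment in this record leans on one property: `permute` and `invert_permute` form a bijection over the expanded index space, so the same machinery can be run in either direction. Below is a self-contained toy sketch of that round-trip property using a minimal balanced Feistel network; it is not the crate's `feistel` module, and the constants and mixing function here are made up purely for illustration.

```rust
// Illustrative only: a tiny Feistel-style bijection over [0, 2^(2 * HALF_BITS)),
// showing the permute/invert_permute round-trip that `correspondent` relies on.
const HALF_BITS: u32 = 16;
const HALF_MASK: u64 = (1 << HALF_BITS) - 1;

// Any deterministic keyed mixing function works as a Feistel round function.
fn round(x: u64, key: u64) -> u64 {
    x.wrapping_mul(0x9e37_79b9).wrapping_add(key) & HALF_MASK
}

fn permute(index: u64, keys: &[u64]) -> u64 {
    let (mut left, mut right) = (index >> HALF_BITS, index & HALF_MASK);
    for &key in keys {
        // Classic Feistel step: L' = R, R' = L ^ F(R, k).
        let next = left ^ round(right, key);
        left = right;
        right = next;
    }
    (left << HALF_BITS) | right
}

fn invert_permute(index: u64, keys: &[u64]) -> u64 {
    let (mut left, mut right) = (index >> HALF_BITS, index & HALF_MASK);
    for &key in keys.iter().rev() {
        // Undo one step: L = R' ^ F(L', k), R = L'.
        let prev = right ^ round(left, key);
        right = left;
        left = prev;
    }
    (left << HALF_BITS) | right
}

fn main() {
    let keys = [1, 2, 3, 4];
    for i in [0u64, 1, 42, 65_535, 1 << 20] {
        let p = permute(i, &keys);
        assert_eq!(invert_permute(p, &keys), i); // bijectivity round-trip
    }
    println!("permute/invert_permute round-trip holds");
}
```

A production implementation additionally has to constrain outputs to an arbitrary domain size (the crate's `feistel::permute(num_elements, ...)` signature suggests it does this internally, e.g. by cycle-walking), which this sketch skips by working over a power-of-two domain.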
zigzag_graph.rs
            forward: vec![None; cache_entries as usize],
            reverse: vec![None; cache_entries as usize],
            cache_entries,
        }
    }

    pub fn contains_forward(&self, node: u32) -> bool {
        assert!(node < self.cache_entries);
        self.forward[node as usize].is_some()
    }

    pub fn contains_reverse(&self, node: u32) -> bool {
        assert!(node < self.cache_entries);
        self.reverse[node as usize].is_some()
    }

    pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T
    where
        F: FnMut(Option<&Vec<u32>>) -> T,
    {
        assert!(node < self.cache_entries);
        cb(self.forward[node as usize].as_ref())
    }

    pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T
    where
        F: FnMut(Option<&Vec<u32>>) -> T,
    {
        assert!(node < self.cache_entries);
        cb(self.reverse[node as usize].as_ref())
    }

    pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) {
        assert!(node < self.cache_entries);
        let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents));
        debug_assert_eq!(old_value, None);
        // We shouldn't be rewriting entries (most likely with the same values);
        // that would be a clear indication of a bug.
    }

    pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) {
        assert!(node < self.cache_entries);
        let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents));
        debug_assert_eq!(old_value, None);
        // We shouldn't be rewriting entries (most likely with the same values);
        // that would be a clear indication of a bug.
    }
}

#[derive(Debug, Clone)]
pub struct ZigZagGraph<H, G>
where
    H: Hasher,
    G: Graph<H> + 'static,
{
    expansion_degree: usize,
    base_graph: G,
    pub reversed: bool,
    feistel_precomputed: FeistelPrecomputed,
    id: String,
    use_cache: bool,
    _h: PhantomData<H>,
}

pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>;

impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G>
where
    H: Hasher,
    G: Graph<H> + ParameterSetMetadata + 'static,
{
}

impl<H, G> ZigZagGraph<H, G>
where
    H: Hasher,
    G: Graph<H> + ParameterSetMetadata,
{
    pub fn new(
        base_graph: Option<G>,
        nodes: usize,
        base_degree: usize,
        expansion_degree: usize,
        seed: [u32; 7],
    ) -> Self {
        if !cfg!(feature = "unchecked-degrees") {
            assert_eq!(base_degree, BASE_DEGREE);
            assert_eq!(expansion_degree, EXP_DEGREE);
        }

        let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;

        let base_graph = match base_graph {
            Some(graph) => graph,
            None => G::new(nodes, base_degree, 0, seed),
        };
        let bg_id = base_graph.identifier();

        let res = ZigZagGraph {
            base_graph,
            id: format!(
                "zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}",
                expansion_degree, bg_id,
            ),
            expansion_degree,
            use_cache,
            reversed: false,
            feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),
            _h: PhantomData,
        };

        if use_cache {
            info!("using parents cache of unlimited size");
            assert!(nodes <= std::u32::MAX as usize);
            if !PARENT_CACHE.read().unwrap().contains_key(&res.id) {
                PARENT_CACHE
                    .write()
                    .unwrap()
                    .insert(res.id.clone(), ParentCache::new(nodes as u32));
            }
        }

        res
    }
}

impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G>
where
    H: Hasher,
    G: Graph<H> + ParameterSetMetadata,
{
    fn identifier(&self) -> String {
        self.id.clone()
    }

    fn sector_size(&self) -> u64 {
        self.base_graph.sector_size()
    }
}

pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq {
    type BaseHasher: Hasher;
    type BaseGraph: Graph<Self::BaseHasher>;

    /// zigzag returns a new graph with the expansion component inverted and a distinct
    /// base DRG graph -- with the direction of drg connections reversed (i.e. from
    /// high-to-low nodes). The name is 'weird', but so is the operation -- hence the choice.
fn zigzag(&self) -> Self; /// Constructs a new graph. fn base_graph(&self) -> Self::BaseGraph; fn expansion_degree(&self) -> usize; fn reversed(&self) -> bool; fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T where F: FnMut(&Vec<u32>) -> T; fn real_index(&self, i: usize) -> usize; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self; } impl<Z: ZigZag> Graph<Z::BaseHasher> for Z { fn size(&self) -> usize { self.base_graph().size() } fn degree(&self) -> usize { self.base_graph().degree() + self.expansion_degree() } #[inline] fn parents(&self, raw_node: usize, parents: &mut [usize]) { // If graph is reversed, use real_index to convert index to reversed index. // So we convert a raw reversed node to an unreversed node, calculate its parents, // then convert the parents to reversed. self.base_graph() .parents(self.real_index(raw_node), parents); for parent in parents.iter_mut().take(self.base_graph().degree()) { *parent = self.real_index(*parent); } // expanded_parents takes raw_node self.expanded_parents(raw_node, |expanded_parents| { for (ii, value) in expanded_parents.iter().enumerate() { parents[ii + self.base_graph().degree()] = *value as usize } // Pad so all nodes have correct degree. let current_length = self.base_graph().degree() + expanded_parents.len(); for ii in 0..(self.degree() - current_length) { if self.reversed() { parents[ii + current_length] = self.size() - 1 } else { parents[ii + current_length] = 0 } } }); assert!(parents.len() == self.degree()); if self.forward() { parents.sort(); } else { // Sort in reverse order. parents.sort_by(|a, b| a.cmp(b).reverse()); } assert!(parents.iter().all(|p| if self.forward() { *p <= raw_node } else { *p >= raw_node })); } fn seed(&self) -> [u32; 7] { self.base_graph().seed() } fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self { Z::new_zigzag(nodes, base_degree, expansion_degree, seed) } fn forward(&self) -> bool { !self.reversed() } } impl<'a, H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { // Assign `expansion_degree` parents to `node` using an invertible function. That // means we can't just generate random values between `[0, size())`, we need to // expand the search space (domain) to accommodate every unique parent assignment // generated here. This can be visualized more clearly as a matrix where the each // new parent of each new node is assigned a unique `index`: // // // | Parent 1 | Parent 2 | Parent 3 | // // | Node 1 | 0 | 1 | 2 | // // | Node 2 | 3 | 4 | 5 | // // | Node 3 | 6 | 7 | 8 | // // | Node 4 | 9 | A | B | // // This starting `index` will be shuffled to another position to generate a // parent-child relationship, e.g., if generating the parents for the second node, // `permute` would be called with values `[3; 4; 5]` that would be mapped to other // indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would // correspond to nodes numbered `[4; 1, 2]` which will become the parents of the // second node. In a later pass invalid parents like 2, self-referencing, and parents // with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the // inverse), will be removed. 
// // Since `permute` is a bijective function which has the inverse `invert_permute`, // it is guaranteed that when looking for the parents in the `reversed` direction // the child `node` used earlier will now actually be the parent of the output // parents generated before (inverting the relationship). Following the example, // in the reverse direction, when looking for the parents of, say, node 1, // `invert_permute` (that maps back the output of `permute` to its input) would // receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back // to the index `4` that generated it earlier, corresponding to the node 2, inverting // in fact the child-parent relationship. fn correspondent(&self, node: usize, i: usize) -> usize { let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index; let feistel_keys = &[1, 2, 3, 4]; let transformed = if self.reversed { feistel::invert_permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) } else { feistel::permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) }; transformed as usize / self.expansion_degree // Collapse the output in the matrix search space to the row of the corresponding // node (losing the column information, that will be regenerated later when calling // back this function in the `reversed` direction). } // Read the `node` entry in the parents cache (which may not exist) for // the current direction set in the graph and return a copy of it (or // `None` to signal a cache miss). fn contains_parents_cache(&self, node: usize) -> bool { if self.use_cache { if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) { if self.forward() { cache.contains_forward(node as u32) } else { cache.contains_reverse(node as u32) } } else { false } } else { false } } fn generate_expanded_parents(&self, node: usize) -> Vec<u32> { (0..self.expansion_degree) .filter_map(|i| { let other = self.correspondent(node, i); if self.reversed { if other > node { Some(other as u32) } else { None } } else if other < node { Some(other as u32) } else { None } }) .collect() } } impl<'a, H, G> ZigZag for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { type BaseHasher = H; type BaseGraph = G; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self { Self::new(None, nodes, base_degree, expansion_degree, seed) } /// To zigzag a graph, we just toggle its reversed field. /// All the real work happens when we calculate node parents on-demand. // We always share the two caches (forward/reversed) between // ZigZag graphs even if each graph will use only one of those // caches (depending of its direction). This allows to propagate // the caches across different layers, where consecutive even+odd // layers have inverse directions. fn zigzag(&self) -> Self { let mut zigzag = self.clone(); zigzag.reversed =!zigzag.reversed; zigzag } fn base_graph(&self) -> Self::BaseGraph { self.base_graph.clone() } fn
(&self) -> usize { self.expansion_degree } fn reversed(&self) -> bool { self.reversed } // TODO: Optimization: Evaluate providing an `all_parents` (and hence // `all_expanded_parents`) method that would return the entire cache // in a single lock operation, or at least (if the cache is not big enough) // it would allow to batch parents calculations with that single lock. Also, // since there is a reciprocity between forward and reversed parents, // we would only need to compute the parents in one direction and with // that fill both caches. #[inline] fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T where F: FnMut(&Vec<u32>) -> T, { if!self.use_cache { // No cache usage, generate on demand. return cb(&self.generate_expanded_parents(node)); } // Check if we need to fill the cache. if!self.contains_parents_cache(node) { // Cache is empty so we need to generate the parents. let parents = self.generate_expanded_parents(node); // Store the newly generated cached value. let mut cache_lock = PARENT_CACHE.write().unwrap(); let cache = cache_lock .get_mut(&self.id) .expect("Invalid cache construction"); if self.forward() { cache.write_forward(node as u32, parents); } else { cache.write_reverse(node as u32, parents); } } // We made sure the cache is filled above, now we can return the value. let cache_lock = PARENT_CACHE.read().unwrap(); let cache = cache_lock .get(&self.id) .expect("Invalid cache construction"); if self.forward() { cache.read_forward(node as u32, |parents| cb(parents.unwrap())) } else { cache.read_reverse(node as u32, |parents| cb(parents.unwrap())) } } #[inline] fn real_index(&self, i: usize) -> usize { if self.reversed { (self.size() - 1) - i } else { i } } } impl<H, G> PartialEq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { fn eq(&self, other: &ZigZagGraph<H, G>) -> bool { self.base_graph == other.base_graph && self.expansion_degree == other.expansion_degree && self.reversed == other.reversed } } impl<H, G> Eq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { } #[cfg(test)] mod tests { use super::*; use std::collections::{HashMap, HashSet}; use crate::drgraph::{new_seed, BASE_DEGREE}; use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher}; fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == 0 { assert!(p == i); } else { assert!(p < i); } } } } fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == g.size() - 1 { assert!(p == i); } else { assert!(p > i); } } } } #[test] fn zigzag_graph_zigzags_pedersen() { test_zigzag_graph_zigzags::<PedersenHasher>(); } #[test] fn zigzag_graph_zigzags_sha256() { test_zigzag_graph_zigzags::<Sha256Hasher>(); } #[test] fn zigzag_graph_zigzags_blake2s() { test_zigzag_graph_zigzags::<Blake2sHasher>(); } fn test_zigzag_graph_zigzags<H:'static + Hasher>() { let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed()); let gz = g.zigzag(); assert_graph_ascending(g); assert_graph_descending(gz); } #[test] fn expansion_pedersen() { test_expansion::<PedersenHasher>(); } #[test] fn expansion_sha256() { test_expansion::<Sha256Hasher>(); } #[test] fn expansion_blake2s() { test_expansion::<Blake2sHasher>(); } fn test_expansion<H:'static + Hasher>() { // We need a graph. 
let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed()); // We're going to fully realize the expansion-graph component, in a HashMap. let gcache = get_all_expanded_parents(&g); // Here's the zigzag version of the graph. let gz = g.zigzag(); // And a HashMap to hold the expanded parents. let gzcache = get_all_expanded_parents(&gz); for i in 0..gz.size() { let parents = gzcache.get(&i).unwrap(); // Check to make sure all (expanded) node-parent relationships also exist in reverse, // in the original graph's Hashmap. for p in parents { assert!(gcache[&(*p as usize)].contains(&(i as u32))); } } // And then do the same check to make sure all (expanded) node-parent relationships from the original // are present in the zigzag, just reversed. for i in 0..g.size() { g.expanded_parents(i, |parents| { for p in parents.iter() { assert!(gzcache[&(*p as usize)].contains(&(i as u32))); } });
expansion_degree
identifier_name
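The `expanded_parents` body in this record shows a common read-mostly caching shape: try a shared `read()` lock first, and only take the exclusive `write()` lock on a miss, then re-read. The following is a minimal, self-contained sketch of that pattern; `cache`, `generate_parents`, and the key/value types are illustrative stand-ins, not the crate's `PARENT_CACHE`.

```rust
use std::collections::HashMap;
use std::sync::{OnceLock, RwLock};

// Hypothetical global cache keyed by node id (stand-in for PARENT_CACHE).
fn cache() -> &'static RwLock<HashMap<u32, Vec<u32>>> {
    static CACHE: OnceLock<RwLock<HashMap<u32, Vec<u32>>>> = OnceLock::new();
    CACHE.get_or_init(|| RwLock::new(HashMap::new()))
}

// Stand-in for the expensive generate_expanded_parents computation.
fn generate_parents(node: u32) -> Vec<u32> {
    (0..3).map(|i| node.wrapping_mul(31).wrapping_add(i) % 100).collect()
}

fn expanded_parents<T>(node: u32, cb: impl FnOnce(&Vec<u32>) -> T) -> T {
    // Fast path: shared read lock; many threads may hold it concurrently.
    if let Some(parents) = cache().read().unwrap().get(&node) {
        return cb(parents);
    }
    // Miss: take the exclusive write lock and fill the entry. `entry` keeps
    // this correct even if another thread filled it between the two locks.
    let mut map = cache().write().unwrap();
    let parents = map.entry(node).or_insert_with(|| generate_parents(node));
    cb(parents)
}

fn main() {
    let sum: u32 = expanded_parents(7, |ps| ps.iter().sum());
    println!("parents of 7 sum to {}", sum);
}
```

The design point mirrored from the record: writes are rare (each node is filled once), so the cheap shared lock dominates, and the brief write lock only pays off on first access.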
zigzag_graph.rs
forward: vec![None; cache_entries as usize], reverse: vec![None; cache_entries as usize], cache_entries, } } pub fn contains_forward(&self, node: u32) -> bool { assert!(node < self.cache_entries); self.forward[node as usize].is_some() } pub fn contains_reverse(&self, node: u32) -> bool { assert!(node < self.cache_entries); self.reverse[node as usize].is_some() } pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T where F: FnMut(Option<&Vec<u32>>) -> T, { assert!(node < self.cache_entries); cb(self.forward[node as usize].as_ref()) } pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T where F: FnMut(Option<&Vec<u32>>) -> T, { assert!(node < self.cache_entries); cb(self.reverse[node as usize].as_ref()) } pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) { assert!(node < self.cache_entries); let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents)); debug_assert_eq!(old_value, None); // We shouldn't be rewriting entries (with most likely the same values), // this would be a clear indication of a bug. } pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) { assert!(node < self.cache_entries); let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents)); debug_assert_eq!(old_value, None); // We shouldn't be rewriting entries (with most likely the same values), // this would be a clear indication of a bug. } } #[derive(Debug, Clone)] pub struct ZigZagGraph<H, G> where H: Hasher, G: Graph<H> +'static, { expansion_degree: usize, base_graph: G, pub reversed: bool, feistel_precomputed: FeistelPrecomputed, id: String, use_cache: bool, _h: PhantomData<H>, } pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>; impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata +'static, { } impl<H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { pub fn new( base_graph: Option<G>, nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self { if!cfg!(feature = "unchecked-degrees") { assert_eq!(base_degree, BASE_DEGREE); assert_eq!(expansion_degree, EXP_DEGREE); } let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching; let base_graph = match base_graph { Some(graph) => graph, None => G::new(nodes, base_degree, 0, seed), }; let bg_id = base_graph.identifier(); let res = ZigZagGraph { base_graph, id: format!( "zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}", expansion_degree, bg_id, ), expansion_degree, use_cache, reversed: false, feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index), _h: PhantomData, }; if use_cache { info!("using parents cache of unlimited size",); assert!(nodes <= std::u32::MAX as usize); if!PARENT_CACHE.read().unwrap().contains_key(&res.id) { PARENT_CACHE .write() .unwrap() .insert(res.id.clone(), ParentCache::new(nodes as u32)); } } res } } impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { fn identifier(&self) -> String { self.id.clone() } fn sector_size(&self) -> u64 { self.base_graph.sector_size() } } pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq { type BaseHasher: Hasher; type BaseGraph: Graph<Self::BaseHasher>; /// zigzag returns a new graph with expansion component inverted and a distinct /// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes). /// The name is 'weird', but so is the operation -- hence the choice. 
fn zigzag(&self) -> Self; /// Constructs a new graph. fn base_graph(&self) -> Self::BaseGraph; fn expansion_degree(&self) -> usize; fn reversed(&self) -> bool; fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T where F: FnMut(&Vec<u32>) -> T; fn real_index(&self, i: usize) -> usize; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self; } impl<Z: ZigZag> Graph<Z::BaseHasher> for Z { fn size(&self) -> usize { self.base_graph().size() } fn degree(&self) -> usize { self.base_graph().degree() + self.expansion_degree() } #[inline] fn parents(&self, raw_node: usize, parents: &mut [usize]) { // If graph is reversed, use real_index to convert index to reversed index. // So we convert a raw reversed node to an unreversed node, calculate its parents, // then convert the parents to reversed. self.base_graph() .parents(self.real_index(raw_node), parents); for parent in parents.iter_mut().take(self.base_graph().degree()) { *parent = self.real_index(*parent); } // expanded_parents takes raw_node self.expanded_parents(raw_node, |expanded_parents| { for (ii, value) in expanded_parents.iter().enumerate() { parents[ii + self.base_graph().degree()] = *value as usize } // Pad so all nodes have correct degree. let current_length = self.base_graph().degree() + expanded_parents.len(); for ii in 0..(self.degree() - current_length) { if self.reversed() { parents[ii + current_length] = self.size() - 1 } else { parents[ii + current_length] = 0 } } }); assert!(parents.len() == self.degree()); if self.forward() { parents.sort(); } else { // Sort in reverse order. parents.sort_by(|a, b| a.cmp(b).reverse()); } assert!(parents.iter().all(|p| if self.forward() { *p <= raw_node } else { *p >= raw_node })); } fn seed(&self) -> [u32; 7] { self.base_graph().seed() } fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self { Z::new_zigzag(nodes, base_degree, expansion_degree, seed) } fn forward(&self) -> bool { !self.reversed() } } impl<'a, H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { // Assign `expansion_degree` parents to `node` using an invertible function. That // means we can't just generate random values between `[0, size())`, we need to // expand the search space (domain) to accommodate every unique parent assignment // generated here. This can be visualized more clearly as a matrix where the each // new parent of each new node is assigned a unique `index`: // // // | Parent 1 | Parent 2 | Parent 3 | // // | Node 1 | 0 | 1 | 2 | // // | Node 2 | 3 | 4 | 5 | // // | Node 3 | 6 | 7 | 8 | // // | Node 4 | 9 | A | B | // // This starting `index` will be shuffled to another position to generate a // parent-child relationship, e.g., if generating the parents for the second node, // `permute` would be called with values `[3; 4; 5]` that would be mapped to other // indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would // correspond to nodes numbered `[4; 1, 2]` which will become the parents of the // second node. In a later pass invalid parents like 2, self-referencing, and parents // with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the // inverse), will be removed. 
// // Since `permute` is a bijective function which has the inverse `invert_permute`, // it is guaranteed that when looking for the parents in the `reversed` direction // the child `node` used earlier will now actually be the parent of the output // parents generated before (inverting the relationship). Following the example, // in the reverse direction, when looking for the parents of, say, node 1, // `invert_permute` (that maps back the output of `permute` to its input) would // receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back // to the index `4` that generated it earlier, corresponding to the node 2, inverting // in fact the child-parent relationship. fn correspondent(&self, node: usize, i: usize) -> usize { let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index; let feistel_keys = &[1, 2, 3, 4]; let transformed = if self.reversed { feistel::invert_permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) } else { feistel::permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) }; transformed as usize / self.expansion_degree // Collapse the output in the matrix search space to the row of the corresponding // node (losing the column information, that will be regenerated later when calling // back this function in the `reversed` direction). } // Read the `node` entry in the parents cache (which may not exist) for // the current direction set in the graph and return a copy of it (or // `None` to signal a cache miss). fn contains_parents_cache(&self, node: usize) -> bool { if self.use_cache { if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) { if self.forward() { cache.contains_forward(node as u32) } else { cache.contains_reverse(node as u32) } } else { false } } else { false } } fn generate_expanded_parents(&self, node: usize) -> Vec<u32> { (0..self.expansion_degree) .filter_map(|i| { let other = self.correspondent(node, i); if self.reversed { if other > node { Some(other as u32) } else { None } } else if other < node { Some(other as u32) } else { None } }) .collect() } } impl<'a, H, G> ZigZag for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { type BaseHasher = H; type BaseGraph = G; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self
/// To zigzag a graph, we just toggle its reversed field. /// All the real work happens when we calculate node parents on-demand. // We always share the two caches (forward/reversed) between // ZigZag graphs even if each graph will use only one of those // caches (depending of its direction). This allows to propagate // the caches across different layers, where consecutive even+odd // layers have inverse directions. fn zigzag(&self) -> Self { let mut zigzag = self.clone(); zigzag.reversed =!zigzag.reversed; zigzag } fn base_graph(&self) -> Self::BaseGraph { self.base_graph.clone() } fn expansion_degree(&self) -> usize { self.expansion_degree } fn reversed(&self) -> bool { self.reversed } // TODO: Optimization: Evaluate providing an `all_parents` (and hence // `all_expanded_parents`) method that would return the entire cache // in a single lock operation, or at least (if the cache is not big enough) // it would allow to batch parents calculations with that single lock. Also, // since there is a reciprocity between forward and reversed parents, // we would only need to compute the parents in one direction and with // that fill both caches. #[inline] fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T where F: FnMut(&Vec<u32>) -> T, { if!self.use_cache { // No cache usage, generate on demand. return cb(&self.generate_expanded_parents(node)); } // Check if we need to fill the cache. if!self.contains_parents_cache(node) { // Cache is empty so we need to generate the parents. let parents = self.generate_expanded_parents(node); // Store the newly generated cached value. let mut cache_lock = PARENT_CACHE.write().unwrap(); let cache = cache_lock .get_mut(&self.id) .expect("Invalid cache construction"); if self.forward() { cache.write_forward(node as u32, parents); } else { cache.write_reverse(node as u32, parents); } } // We made sure the cache is filled above, now we can return the value. 
let cache_lock = PARENT_CACHE.read().unwrap(); let cache = cache_lock .get(&self.id) .expect("Invalid cache construction"); if self.forward() { cache.read_forward(node as u32, |parents| cb(parents.unwrap())) } else { cache.read_reverse(node as u32, |parents| cb(parents.unwrap())) } } #[inline] fn real_index(&self, i: usize) -> usize { if self.reversed { (self.size() - 1) - i } else { i } } } impl<H, G> PartialEq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { fn eq(&self, other: &ZigZagGraph<H, G>) -> bool { self.base_graph == other.base_graph && self.expansion_degree == other.expansion_degree && self.reversed == other.reversed } } impl<H, G> Eq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { } #[cfg(test)] mod tests { use super::*; use std::collections::{HashMap, HashSet}; use crate::drgraph::{new_seed, BASE_DEGREE}; use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher}; fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == 0 { assert!(p == i); } else { assert!(p < i); } } } } fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == g.size() - 1 { assert!(p == i); } else { assert!(p > i); } } } } #[test] fn zigzag_graph_zigzags_pedersen() { test_zigzag_graph_zigzags::<PedersenHasher>(); } #[test] fn zigzag_graph_zigzags_sha256() { test_zigzag_graph_zigzags::<Sha256Hasher>(); } #[test] fn zigzag_graph_zigzags_blake2s() { test_zigzag_graph_zigzags::<Blake2sHasher>(); } fn test_zigzag_graph_zigzags<H:'static + Hasher>() { let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed()); let gz = g.zigzag(); assert_graph_ascending(g); assert_graph_descending(gz); } #[test] fn expansion_pedersen() { test_expansion::<PedersenHasher>(); } #[test] fn expansion_sha256() { test_expansion::<Sha256Hasher>(); } #[test] fn expansion_blake2s() { test_expansion::<Blake2sHasher>(); } fn test_expansion<H:'static + Hasher>() { // We need a graph. let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed()); // We're going to fully realize the expansion-graph component, in a HashMap. let gcache = get_all_expanded_parents(&g); // Here's the zigzag version of the graph. let gz = g.zigzag(); // And a HashMap to hold the expanded parents. let gzcache = get_all_expanded_parents(&gz); for i in 0..gz.size() { let parents = gzcache.get(&i).unwrap(); // Check to make sure all (expanded) node-parent relationships also exist in reverse, // in the original graph's Hashmap. for p in parents { assert!(gcache[&(*p as usize)].contains(&(i as u32))); } } // And then do the same check to make sure all (expanded) node-parent relationships from the original // are present in the zigzag, just reversed. for i in 0..g.size() { g.expanded_parents(i, |parents| { for p in parents.iter() { assert!(gzcache[&(*p as usize)].contains(&(i as u32))); } });
{ Self::new(None, nodes, base_degree, expansion_degree, seed) }
identifier_body
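The `zigzag`/`real_index` pair in this record can be stated as two small laws: toggling `reversed` twice is the identity, and index mirroring with `size - 1 - i` is self-inverse. A minimal sketch makes both checkable; the `Zig` type below is hypothetical, not the crate's graph.

```rust
// Illustrative stand-in for the direction-toggling part of ZigZagGraph.
#[derive(Clone, Debug, PartialEq)]
struct Zig {
    size: usize,
    reversed: bool,
}

impl Zig {
    // Toggling the direction is all `zigzag` does; parents are derived lazily.
    fn zigzag(&self) -> Self {
        let mut z = self.clone();
        z.reversed = !z.reversed;
        z
    }

    // Map an index into the current direction's numbering.
    fn real_index(&self, i: usize) -> usize {
        if self.reversed { self.size - 1 - i } else { i }
    }
}

fn main() {
    let g = Zig { size: 10, reversed: false };
    let gz = g.zigzag();
    assert_eq!(gz.real_index(0), 9); // first node maps to last
    assert_eq!(gz.zigzag(), g); // involution: two zigzags cancel
    assert_eq!(gz.real_index(gz.real_index(3)), 3); // mirroring is self-inverse
    println!("zigzag toggling behaves as an involution");
}
```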
zigzag_graph.rs
forward: vec![None; cache_entries as usize], reverse: vec![None; cache_entries as usize], cache_entries, } } pub fn contains_forward(&self, node: u32) -> bool { assert!(node < self.cache_entries); self.forward[node as usize].is_some() } pub fn contains_reverse(&self, node: u32) -> bool { assert!(node < self.cache_entries); self.reverse[node as usize].is_some() } pub fn read_forward<F, T>(&self, node: u32, mut cb: F) -> T where F: FnMut(Option<&Vec<u32>>) -> T, { assert!(node < self.cache_entries); cb(self.forward[node as usize].as_ref()) } pub fn read_reverse<F, T>(&self, node: u32, mut cb: F) -> T where F: FnMut(Option<&Vec<u32>>) -> T, { assert!(node < self.cache_entries); cb(self.reverse[node as usize].as_ref()) } pub fn write_forward(&mut self, node: u32, parents: Vec<u32>) { assert!(node < self.cache_entries); let old_value = std::mem::replace(&mut self.forward[node as usize], Some(parents)); debug_assert_eq!(old_value, None); // We shouldn't be rewriting entries (with most likely the same values), // this would be a clear indication of a bug. } pub fn write_reverse(&mut self, node: u32, parents: Vec<u32>) { assert!(node < self.cache_entries); let old_value = std::mem::replace(&mut self.reverse[node as usize], Some(parents)); debug_assert_eq!(old_value, None); // We shouldn't be rewriting entries (with most likely the same values), // this would be a clear indication of a bug. } } #[derive(Debug, Clone)] pub struct ZigZagGraph<H, G> where H: Hasher, G: Graph<H> +'static, { expansion_degree: usize, base_graph: G, pub reversed: bool, feistel_precomputed: FeistelPrecomputed, id: String, use_cache: bool, _h: PhantomData<H>, } pub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>; impl<'a, H, G> Layerable<H> for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata +'static, { } impl<H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { pub fn new( base_graph: Option<G>, nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self { if!cfg!(feature = "unchecked-degrees") { assert_eq!(base_degree, BASE_DEGREE); assert_eq!(expansion_degree, EXP_DEGREE); } let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching; let base_graph = match base_graph { Some(graph) => graph, None => G::new(nodes, base_degree, 0, seed), }; let bg_id = base_graph.identifier(); let res = ZigZagGraph { base_graph, id: format!( "zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}", expansion_degree, bg_id, ), expansion_degree, use_cache, reversed: false, feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index), _h: PhantomData, }; if use_cache { info!("using parents cache of unlimited size",); assert!(nodes <= std::u32::MAX as usize); if!PARENT_CACHE.read().unwrap().contains_key(&res.id) { PARENT_CACHE .write() .unwrap() .insert(res.id.clone(), ParentCache::new(nodes as u32)); } } res } } impl<H, G> ParameterSetMetadata for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { fn identifier(&self) -> String { self.id.clone() } fn sector_size(&self) -> u64 { self.base_graph.sector_size() } } pub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq { type BaseHasher: Hasher; type BaseGraph: Graph<Self::BaseHasher>; /// zigzag returns a new graph with expansion component inverted and a distinct /// base DRG graph -- with the direction of drg connections reversed. (i.e. from high-to-low nodes). /// The name is 'weird', but so is the operation -- hence the choice. 
fn zigzag(&self) -> Self; /// Constructs a new graph. fn base_graph(&self) -> Self::BaseGraph; fn expansion_degree(&self) -> usize; fn reversed(&self) -> bool; fn expanded_parents<F, T>(&self, node: usize, cb: F) -> T where F: FnMut(&Vec<u32>) -> T; fn real_index(&self, i: usize) -> usize; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self; } impl<Z: ZigZag> Graph<Z::BaseHasher> for Z { fn size(&self) -> usize { self.base_graph().size() } fn degree(&self) -> usize { self.base_graph().degree() + self.expansion_degree() } #[inline] fn parents(&self, raw_node: usize, parents: &mut [usize]) { // If graph is reversed, use real_index to convert index to reversed index. // So we convert a raw reversed node to an unreversed node, calculate its parents, // then convert the parents to reversed. self.base_graph() .parents(self.real_index(raw_node), parents); for parent in parents.iter_mut().take(self.base_graph().degree()) { *parent = self.real_index(*parent); } // expanded_parents takes raw_node self.expanded_parents(raw_node, |expanded_parents| { for (ii, value) in expanded_parents.iter().enumerate() { parents[ii + self.base_graph().degree()] = *value as usize } // Pad so all nodes have correct degree. let current_length = self.base_graph().degree() + expanded_parents.len(); for ii in 0..(self.degree() - current_length) { if self.reversed() { parents[ii + current_length] = self.size() - 1 } else { parents[ii + current_length] = 0 } } }); assert!(parents.len() == self.degree()); if self.forward() { parents.sort(); } else { // Sort in reverse order. parents.sort_by(|a, b| a.cmp(b).reverse()); } assert!(parents.iter().all(|p| if self.forward() { *p <= raw_node } else { *p >= raw_node })); } fn seed(&self) -> [u32; 7] { self.base_graph().seed() } fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self { Z::new_zigzag(nodes, base_degree, expansion_degree, seed) } fn forward(&self) -> bool { !self.reversed() } } impl<'a, H, G> ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { // Assign `expansion_degree` parents to `node` using an invertible function. That // means we can't just generate random values between `[0, size())`, we need to // expand the search space (domain) to accommodate every unique parent assignment // generated here. This can be visualized more clearly as a matrix where the each // new parent of each new node is assigned a unique `index`: // // // | Parent 1 | Parent 2 | Parent 3 | // // | Node 1 | 0 | 1 | 2 | // // | Node 2 | 3 | 4 | 5 | // // | Node 3 | 6 | 7 | 8 | // // | Node 4 | 9 | A | B | // // This starting `index` will be shuffled to another position to generate a // parent-child relationship, e.g., if generating the parents for the second node, // `permute` would be called with values `[3; 4; 5]` that would be mapped to other // indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would // correspond to nodes numbered `[4; 1, 2]` which will become the parents of the // second node. In a later pass invalid parents like 2, self-referencing, and parents // with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the // inverse), will be removed. 
// // Since `permute` is a bijective function which has the inverse `invert_permute`, // it is guaranteed that when looking for the parents in the `reversed` direction // the child `node` used earlier will now actually be the parent of the output // parents generated before (inverting the relationship). Following the example, // in the reverse direction, when looking for the parents of, say, node 1, // `invert_permute` (that maps back the output of `permute` to its input) would // receive the indexes `[0; 1; 2]`, where the index `0` is guaranteed to map back // to the index `4` that generated it earlier, corresponding to the node 2, inverting // in fact the child-parent relationship. fn correspondent(&self, node: usize, i: usize) -> usize { let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index; let feistel_keys = &[1, 2, 3, 4]; let transformed = if self.reversed { feistel::invert_permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) } else { feistel::permute( self.size() as feistel::Index * self.expansion_degree as feistel::Index, a, feistel_keys, self.feistel_precomputed, ) }; transformed as usize / self.expansion_degree // Collapse the output in the matrix search space to the row of the corresponding // node (losing the column information, that will be regenerated later when calling // back this function in the `reversed` direction). } // Read the `node` entry in the parents cache (which may not exist) for // the current direction set in the graph and return a copy of it (or // `None` to signal a cache miss). fn contains_parents_cache(&self, node: usize) -> bool { if self.use_cache { if let Some(ref cache) = PARENT_CACHE.read().unwrap().get(&self.id) { if self.forward() { cache.contains_forward(node as u32) } else { cache.contains_reverse(node as u32) } } else { false } } else { false } } fn generate_expanded_parents(&self, node: usize) -> Vec<u32> { (0..self.expansion_degree) .filter_map(|i| { let other = self.correspondent(node, i); if self.reversed { if other > node { Some(other as u32) } else { None } } else if other < node { Some(other as u32) } else { None } }) .collect() } } impl<'a, H, G> ZigZag for ZigZagGraph<H, G> where H: Hasher, G: Graph<H> + ParameterSetMetadata, { type BaseHasher = H; type BaseGraph = G; fn new_zigzag( nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7], ) -> Self { Self::new(None, nodes, base_degree, expansion_degree, seed) } /// To zigzag a graph, we just toggle its reversed field. /// All the real work happens when we calculate node parents on-demand. // We always share the two caches (forward/reversed) between // ZigZag graphs even if each graph will use only one of those // caches (depending of its direction). This allows to propagate // the caches across different layers, where consecutive even+odd // layers have inverse directions. fn zigzag(&self) -> Self { let mut zigzag = self.clone(); zigzag.reversed =!zigzag.reversed; zigzag } fn base_graph(&self) -> Self::BaseGraph { self.base_graph.clone() } fn expansion_degree(&self) -> usize { self.expansion_degree } fn reversed(&self) -> bool { self.reversed } // TODO: Optimization: Evaluate providing an `all_parents` (and hence // `all_expanded_parents`) method that would return the entire cache // in a single lock operation, or at least (if the cache is not big enough) // it would allow to batch parents calculations with that single lock. 
Also, // since there is a reciprocity between forward and reversed parents, // we would only need to compute the parents in one direction and with // that fill both caches. #[inline] fn expanded_parents<F, T>(&self, node: usize, mut cb: F) -> T where
F: FnMut(&Vec<u32>) -> T, { if!self.use_cache { // No cache usage, generate on demand. return cb(&self.generate_expanded_parents(node)); } // Check if we need to fill the cache. if!self.contains_parents_cache(node) { // Cache is empty so we need to generate the parents. let parents = self.generate_expanded_parents(node); // Store the newly generated cached value. let mut cache_lock = PARENT_CACHE.write().unwrap(); let cache = cache_lock .get_mut(&self.id) .expect("Invalid cache construction"); if self.forward() { cache.write_forward(node as u32, parents); } else { cache.write_reverse(node as u32, parents); } } // We made sure the cache is filled above, now we can return the value. let cache_lock = PARENT_CACHE.read().unwrap(); let cache = cache_lock .get(&self.id) .expect("Invalid cache construction"); if self.forward() { cache.read_forward(node as u32, |parents| cb(parents.unwrap())) } else { cache.read_reverse(node as u32, |parents| cb(parents.unwrap())) } } #[inline] fn real_index(&self, i: usize) -> usize { if self.reversed { (self.size() - 1) - i } else { i } } } impl<H, G> PartialEq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { fn eq(&self, other: &ZigZagGraph<H, G>) -> bool { self.base_graph == other.base_graph && self.expansion_degree == other.expansion_degree && self.reversed == other.reversed } } impl<H, G> Eq for ZigZagGraph<H, G> where H: Hasher, G: Graph<H>, { } #[cfg(test)] mod tests { use super::*; use std::collections::{HashMap, HashSet}; use crate::drgraph::{new_seed, BASE_DEGREE}; use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher}; fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == 0 { assert!(p == i); } else { assert!(p < i); } } } } fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) { for i in 0..g.size() { let mut parents = vec![0; g.degree()]; g.parents(i, &mut parents); for p in parents { if i == g.size() - 1 { assert!(p == i); } else { assert!(p > i); } } } } #[test] fn zigzag_graph_zigzags_pedersen() { test_zigzag_graph_zigzags::<PedersenHasher>(); } #[test] fn zigzag_graph_zigzags_sha256() { test_zigzag_graph_zigzags::<Sha256Hasher>(); } #[test] fn zigzag_graph_zigzags_blake2s() { test_zigzag_graph_zigzags::<Blake2sHasher>(); } fn test_zigzag_graph_zigzags<H:'static + Hasher>() { let g = ZigZagBucketGraph::<H>::new_zigzag(50, BASE_DEGREE, EXP_DEGREE, new_seed()); let gz = g.zigzag(); assert_graph_ascending(g); assert_graph_descending(gz); } #[test] fn expansion_pedersen() { test_expansion::<PedersenHasher>(); } #[test] fn expansion_sha256() { test_expansion::<Sha256Hasher>(); } #[test] fn expansion_blake2s() { test_expansion::<Blake2sHasher>(); } fn test_expansion<H:'static + Hasher>() { // We need a graph. let g = ZigZagBucketGraph::<H>::new_zigzag(25, BASE_DEGREE, EXP_DEGREE, new_seed()); // We're going to fully realize the expansion-graph component, in a HashMap. let gcache = get_all_expanded_parents(&g); // Here's the zigzag version of the graph. let gz = g.zigzag(); // And a HashMap to hold the expanded parents. let gzcache = get_all_expanded_parents(&gz); for i in 0..gz.size() { let parents = gzcache.get(&i).unwrap(); // Check to make sure all (expanded) node-parent relationships also exist in reverse, // in the original graph's Hashmap. 
for p in parents { assert!(gcache[&(*p as usize)].contains(&(i as u32))); } } // And then do the same check to make sure all (expanded) node-parent relationships from the original // are present in the zigzag, just reversed. for i in 0..g.size() { g.expanded_parents(i, |parents| { for p in parents.iter() { assert!(gzcache[&(*p as usize)].contains(&(i as u32))); } });
random_line_split
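`generate_expanded_parents` keeps a candidate parent only when it respects the current direction (strictly smaller than the node going forward, strictly larger in reverse), which is exactly the property `assert_graph_ascending` and `assert_graph_descending` check. A tiny stand-alone sketch of that filter follows; the function name is illustrative.

```rust
// Keep only candidates that are valid parents for the current direction.
fn filter_parents(node: usize, candidates: &[usize], reversed: bool) -> Vec<usize> {
    candidates
        .iter()
        .copied()
        .filter(|&other| if reversed { other > node } else { other < node })
        .collect()
}

fn main() {
    let candidates = [2, 5, 7, 9];
    // Forward pass: parents must come strictly before the node.
    assert_eq!(filter_parents(6, &candidates, false), vec![2, 5]);
    // Reversed pass: parents must come strictly after the node.
    assert_eq!(filter_parents(6, &candidates, true), vec![7, 9]);
    println!("direction filter keeps parents monotone in each pass");
}
```

Note that this filtering also drops self-references (`other == node`) in both directions, which is what keeps each layer's dependency graph acyclic.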
diagnostic_server.rs
//! A small TCP server to handle collection of diagnostics information in a //! cross-platform way for the `cargo fix` command. use std::collections::HashSet; use std::io::{BufReader, Read, Write}; use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream}; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread::{self, JoinHandle}; use anyhow::{Context, Error}; use cargo_util::ProcessBuilder; use serde::{Deserialize, Serialize}; use tracing::warn; use crate::core::Edition; use crate::util::errors::CargoResult; use crate::util::Config; const DIAGNOSTICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER"; #[derive(Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] pub enum Message { Migrating { file: String, from_edition: Edition, to_edition: Edition, }, Fixing { file: String, }, Fixed { file: String, fixes: u32, }, FixFailed { files: Vec<String>, krate: Option<String>, errors: Vec<String>, abnormal_exit: Option<String>, }, ReplaceFailed { file: String, message: String, }, EditionAlreadyEnabled { message: String, edition: Edition, }, } impl Message { pub fn post(&self, config: &Config) -> Result<(), Error> { let addr = config .get_env(DIAGNOSTICS_SERVER_VAR) .context("diagnostics collector misconfigured")?; let mut client = TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?; let s = serde_json::to_string(self).context("failed to serialize message")?; client .write_all(s.as_bytes()) .context("failed to write message to diagnostics target")?; client .shutdown(Shutdown::Write) .context("failed to shutdown")?; client .read_to_end(&mut Vec::new()) .context("failed to receive a disconnect")?; Ok(()) } } /// A printer that will print diagnostics messages to the shell. pub struct DiagnosticPrinter<'a> { /// The config to get the shell to print to. config: &'a Config, /// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates. /// This is used to get the correct bug report URL. For instance, /// if `clippy-driver` is set as the value for the wrapper, /// then the correct bug report URL for `clippy` can be obtained. workspace_wrapper: &'a Option<PathBuf>, // A set of messages that have already been printed. 
    dedupe: HashSet<Message>,
}

impl<'a> DiagnosticPrinter<'a> {
    pub fn new(
        config: &'a Config,
        workspace_wrapper: &'a Option<PathBuf>,
    ) -> DiagnosticPrinter<'a> {
        DiagnosticPrinter {
            config,
            workspace_wrapper,
            dedupe: HashSet::new(),
        }
    }

    pub fn print(&mut self, msg: &Message) -> CargoResult<()> {
        match msg {
            Message::Migrating {
                file,
                from_edition,
                to_edition,
            } => {
                if !self.dedupe.insert(msg.clone()) {
                    return Ok(());
                }
                self.config.shell().status(
                    "Migrating",
                    &format!("{} from {} edition to {}", file, from_edition, to_edition),
                )
            }
            Message::Fixing { file } => self
                .config
                .shell()
                .verbose(|shell| shell.status("Fixing", file)),
            Message::Fixed { file, fixes } => {
                let msg = if *fixes == 1 { "fix" } else { "fixes" };
                let msg = format!("{} ({} {})", file, fixes, msg);
                self.config.shell().status("Fixed", msg)
            }
            Message::ReplaceFailed { file, message } => {
                let msg = format!("error applying suggestions to `{}`\n", file);
                self.config.shell().warn(&msg)?;
                write!(
                    self.config.shell().err(),
                    "The full error message was:\n\n> {}\n\n",
                    message,
                )?;
                let issue_link = get_bug_report_url(self.workspace_wrapper);
                write!(
                    self.config.shell().err(),
                    "{}",
                    gen_please_report_this_bug_text(issue_link)
                )?;
                Ok(())
            }
            Message::FixFailed {
                files,
                krate,
                errors,
                abnormal_exit,
            } => {
                if let Some(ref krate) = *krate {
                    self.config.shell().warn(&format!(
                        "failed to automatically apply fixes suggested by rustc \
                         to crate `{}`",
                        krate,
                    ))?;
                } else {
                    self.config
                        .shell()
                        .warn("failed to automatically apply fixes suggested by rustc")?;
                }
                if !files.is_empty() {
                    writeln!(
                        self.config.shell().err(),
                        "\nafter fixes were automatically applied the compiler \
                         reported errors within these files:\n"
                    )?;
                    for file in files {
                        writeln!(self.config.shell().err(), " * {}", file)?;
                    }
                    writeln!(self.config.shell().err())?;
                }
                let issue_link = get_bug_report_url(self.workspace_wrapper);
                write!(
                    self.config.shell().err(),
                    "{}",
                    gen_please_report_this_bug_text(issue_link)
                )?;
                if !errors.is_empty() {
                    writeln!(
                        self.config.shell().err(),
                        "The following errors were reported:"
                    )?;
                    for error in errors {
                        write!(self.config.shell().err(), "{}", error)?;
                        if !error.ends_with('\n') {
                            writeln!(self.config.shell().err())?;
                        }
                    }
                }
                if let Some(exit) = abnormal_exit {
                    writeln!(
                        self.config.shell().err(),
                        "rustc exited abnormally: {}",
                        exit
                    )?;
                }
                writeln!(
                    self.config.shell().err(),
                    "Original diagnostics will follow.\n"
                )?;
                Ok(())
            }
            Message::EditionAlreadyEnabled { message, edition } => {
                if !self.dedupe.insert(msg.clone()) {
                    return Ok(());
                }
                // Don't give a really verbose warning if it has already been issued.
                if self.dedupe.insert(Message::EditionAlreadyEnabled {
                    message: "".to_string(), // Dummy, so that this only long-warns once.
                    edition: *edition,
                }) {
                    self.config.shell().warn(&format!("\
{}

If you are trying to migrate from the previous edition ({prev_edition}), the
process requires following these steps:

1. Start with `edition = \"{prev_edition}\"` in `Cargo.toml`
2. Run `cargo fix --edition`
3. Modify `Cargo.toml` to set `edition = \"{this_edition}\"`
4. Run `cargo build` or `cargo test` to verify the fixes worked

More details may be found at
https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html
",
                        message, this_edition=edition, prev_edition=edition.previous().unwrap()
                    ))
                } else {
                    self.config.shell().warn(message)
                }
            }
        }
    }
}

fn gen_please_report_this_bug_text(url: &str) -> String {
    format!(
        "This likely indicates a bug in either rustc or cargo itself,\n\
         and we would appreciate a bug report!
You're likely to see \n\ a number of compiler warnings after this message which cargo\n\ attempted to fix but failed. If you could open an issue at\n\ {}\n\ quoting the full output of this command we'd be very appreciative!\n\ Note that you may be able to make some more progress in the near-term\n\ fixing code with the `--broken-code` flag\n\n\ ", url ) } fn get_bug_report_url(rustc_workspace_wrapper: &Option<PathBuf>) -> &str { let clippy = std::ffi::OsStr::new("clippy-driver"); let issue_link = match rustc_workspace_wrapper.as_ref().and_then(|x| x.file_stem()) { Some(wrapper) if wrapper == clippy => "https://github.com/rust-lang/rust-clippy/issues", _ => "https://github.com/rust-lang/rust/issues", }; issue_link } #[derive(Debug)] pub struct
{
    listener: TcpListener,
    addr: SocketAddr,
}

pub struct StartedServer {
    addr: SocketAddr,
    done: Arc<AtomicBool>,
    thread: Option<JoinHandle<()>>,
}

impl RustfixDiagnosticServer {
    pub fn new() -> Result<Self, Error> {
        let listener = TcpListener::bind("127.0.0.1:0")
            .with_context(|| "failed to bind TCP listener to manage locking")?;
        let addr = listener.local_addr()?;

        Ok(RustfixDiagnosticServer { listener, addr })
    }

    pub fn configure(&self, process: &mut ProcessBuilder) {
        process.env(DIAGNOSTICS_SERVER_VAR, self.addr.to_string());
    }

    pub fn start<F>(self, on_message: F) -> Result<StartedServer, Error>
    where
        F: Fn(Message) + Send + 'static,
    {
        let addr = self.addr;
        let done = Arc::new(AtomicBool::new(false));
        let done2 = done.clone();
        let thread = thread::spawn(move || {
            self.run(&on_message, &done2);
        });

        Ok(StartedServer {
            addr,
            thread: Some(thread),
            done,
        })
    }

    fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) {
        while let Ok((client, _)) = self.listener.accept() {
            if done.load(Ordering::SeqCst) {
                break;
            }
            let mut client = BufReader::new(client);
            let mut s = String::new();
            if let Err(e) = client.read_to_string(&mut s) {
                warn!("diagnostic server failed to read: {}", e);
            } else {
                match serde_json::from_str(&s) {
                    Ok(message) => on_message(message),
                    Err(e) => warn!("invalid diagnostics message: {}", e),
                }
            }
            // The client should be kept alive until after `on_message` is
            // called to ensure that the client doesn't exit too soon (and
            // Message::Finish getting posted before Message::FixDiagnostic).
            drop(client);
        }
    }
}

impl Drop for StartedServer {
    fn drop(&mut self) {
        self.done.store(true, Ordering::SeqCst);
        // Ignore errors here as this is largely best-effort.
        if TcpStream::connect(&self.addr).is_err() {
            return;
        }
        drop(self.thread.take().unwrap().join());
    }
}
RustfixDiagnosticServer
identifier_name
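`Message::post` and `RustfixDiagnosticServer::run` cooperate through a TCP half-close: the sender writes one JSON message, shuts down its write half so the server's `read_to_string` hits EOF, then blocks on `read_to_end` until the server drops the socket. Below is a std-only sketch of that handshake under those assumptions; the hand-written payload string stands in for `serde_json::to_string(&Message::Fixing { .. })`.

```rust
use std::io::{Read, Write};
use std::net::{Shutdown, TcpListener, TcpStream};
use std::thread;

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0")?; // port 0: OS picks a free port
    let addr = listener.local_addr()?;

    let server = thread::spawn(move || {
        let (mut client, _) = listener.accept().unwrap();
        let mut msg = String::new();
        // Returns only once the client half-closes its write side.
        client.read_to_string(&mut msg).unwrap();
        println!("server received: {msg}");
        // Dropping `client` here is the "disconnect" the sender waits for.
    });

    let mut stream = TcpStream::connect(addr)?;
    stream.write_all(b"{\"Fixing\":{\"file\":\"src/lib.rs\"}}")?;
    stream.shutdown(Shutdown::Write)?; // signal end-of-message
    stream.read_to_end(&mut Vec::new())?; // wait for the server to finish
    server.join().unwrap();
    Ok(())
}
```

The half-close is what lets a single connection act as both a message frame and a synchronization point, with no length prefix or delimiter protocol needed.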
diagnostic_server.rs
//! A small TCP server to handle collection of diagnostics information in a //! cross-platform way for the `cargo fix` command. use std::collections::HashSet; use std::io::{BufReader, Read, Write}; use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream}; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread::{self, JoinHandle}; use anyhow::{Context, Error}; use cargo_util::ProcessBuilder; use serde::{Deserialize, Serialize}; use tracing::warn; use crate::core::Edition; use crate::util::errors::CargoResult; use crate::util::Config; const DIAGNOSTICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER"; #[derive(Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] pub enum Message { Migrating { file: String, from_edition: Edition, to_edition: Edition, }, Fixing { file: String, }, Fixed { file: String, fixes: u32, }, FixFailed { files: Vec<String>, krate: Option<String>, errors: Vec<String>, abnormal_exit: Option<String>, }, ReplaceFailed { file: String, message: String, }, EditionAlreadyEnabled { message: String, edition: Edition, }, } impl Message { pub fn post(&self, config: &Config) -> Result<(), Error> { let addr = config .get_env(DIAGNOSTICS_SERVER_VAR) .context("diagnostics collector misconfigured")?; let mut client = TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?; let s = serde_json::to_string(self).context("failed to serialize message")?; client .write_all(s.as_bytes()) .context("failed to write message to diagnostics target")?; client .shutdown(Shutdown::Write) .context("failed to shutdown")?; client .read_to_end(&mut Vec::new()) .context("failed to receive a disconnect")?; Ok(()) } } /// A printer that will print diagnostics messages to the shell. pub struct DiagnosticPrinter<'a> { /// The config to get the shell to print to. config: &'a Config, /// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates. /// This is used to get the correct bug report URL. For instance, /// if `clippy-driver` is set as the value for the wrapper, /// then the correct bug report URL for `clippy` can be obtained. workspace_wrapper: &'a Option<PathBuf>, // A set of messages that have already been printed. 
dedupe: HashSet<Message>, } impl<'a> DiagnosticPrinter<'a> { pub fn new( config: &'a Config, workspace_wrapper: &'a Option<PathBuf>, ) -> DiagnosticPrinter<'a> { DiagnosticPrinter { config, workspace_wrapper, dedupe: HashSet::new(), } } pub fn print(&mut self, msg: &Message) -> CargoResult<()> { match msg { Message::Migrating { file, from_edition, to_edition, } => { if!self.dedupe.insert(msg.clone()) { return Ok(()); } self.config.shell().status( "Migrating", &format!("{} from {} edition to {}", file, from_edition, to_edition), ) } Message::Fixing { file } => self .config .shell() .verbose(|shell| shell.status("Fixing", file)), Message::Fixed { file, fixes } => { let msg = if *fixes == 1 { "fix" } else { "fixes" }; let msg = format!("{} ({} {})", file, fixes, msg); self.config.shell().status("Fixed", msg) } Message::ReplaceFailed { file, message } => { let msg = format!("error applying suggestions to `{}`\n", file); self.config.shell().warn(&msg)?; write!( self.config.shell().err(), "The full error message was:\n\n> {}\n\n", message, )?; let issue_link = get_bug_report_url(self.workspace_wrapper); write!( self.config.shell().err(), "{}", gen_please_report_this_bug_text(issue_link) )?; Ok(()) } Message::FixFailed { files, krate, errors, abnormal_exit, } => { if let Some(ref krate) = *krate { self.config.shell().warn(&format!( "failed to automatically apply fixes suggested by rustc \ to crate `{}`", krate, ))?; } else
if!files.is_empty() { writeln!( self.config.shell().err(), "\nafter fixes were automatically applied the compiler \ reported errors within these files:\n" )?; for file in files { writeln!(self.config.shell().err(), " * {}", file)?; } writeln!(self.config.shell().err())?; } let issue_link = get_bug_report_url(self.workspace_wrapper); write!( self.config.shell().err(), "{}", gen_please_report_this_bug_text(issue_link) )?; if!errors.is_empty() { writeln!( self.config.shell().err(), "The following errors were reported:" )?; for error in errors { write!(self.config.shell().err(), "{}", error)?; if!error.ends_with('\n') { writeln!(self.config.shell().err())?; } } } if let Some(exit) = abnormal_exit { writeln!( self.config.shell().err(), "rustc exited abnormally: {}", exit )?; } writeln!( self.config.shell().err(), "Original diagnostics will follow.\n" )?; Ok(()) } Message::EditionAlreadyEnabled { message, edition } => { if!self.dedupe.insert(msg.clone()) { return Ok(()); } // Don't give a really verbose warning if it has already been issued. if self.dedupe.insert(Message::EditionAlreadyEnabled { message: "".to_string(), // Dummy, so that this only long-warns once. edition: *edition, }) { self.config.shell().warn(&format!("\ {} If you are trying to migrate from the previous edition ({prev_edition}), the process requires following these steps: 1. Start with `edition = \"{prev_edition}\"` in `Cargo.toml` 2. Run `cargo fix --edition` 3. Modify `Cargo.toml` to set `edition = \"{this_edition}\"` 4. Run `cargo build` or `cargo test` to verify the fixes worked More details may be found at https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html ", message, this_edition=edition, prev_edition=edition.previous().unwrap() )) } else { self.config.shell().warn(message) } } } } } fn gen_please_report_this_bug_text(url: &str) -> String { format!( "This likely indicates a bug in either rustc or cargo itself,\n\ and we would appreciate a bug report! You're likely to see \n\ a number of compiler warnings after this message which cargo\n\ attempted to fix but failed. 
If you could open an issue at\n\ {}\n\ quoting the full output of this command we'd be very appreciative!\n\ Note that you may be able to make some more progress in the near-term\n\ fixing code with the `--broken-code` flag\n\n\ ", url ) } fn get_bug_report_url(rustc_workspace_wrapper: &Option<PathBuf>) -> &str { let clippy = std::ffi::OsStr::new("clippy-driver"); let issue_link = match rustc_workspace_wrapper.as_ref().and_then(|x| x.file_stem()) { Some(wrapper) if wrapper == clippy => "https://github.com/rust-lang/rust-clippy/issues", _ => "https://github.com/rust-lang/rust/issues", }; issue_link } #[derive(Debug)] pub struct RustfixDiagnosticServer { listener: TcpListener, addr: SocketAddr, } pub struct StartedServer { addr: SocketAddr, done: Arc<AtomicBool>, thread: Option<JoinHandle<()>>, } impl RustfixDiagnosticServer { pub fn new() -> Result<Self, Error> { let listener = TcpListener::bind("127.0.0.1:0") .with_context(|| "failed to bind TCP listener to manage locking")?; let addr = listener.local_addr()?; Ok(RustfixDiagnosticServer { listener, addr }) } pub fn configure(&self, process: &mut ProcessBuilder) { process.env(DIAGNOSTICS_SERVER_VAR, self.addr.to_string()); } pub fn start<F>(self, on_message: F) -> Result<StartedServer, Error> where F: Fn(Message) + Send +'static, { let addr = self.addr; let done = Arc::new(AtomicBool::new(false)); let done2 = done.clone(); let thread = thread::spawn(move || { self.run(&on_message, &done2); }); Ok(StartedServer { addr, thread: Some(thread), done, }) } fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) { while let Ok((client, _)) = self.listener.accept() { if done.load(Ordering::SeqCst) { break; } let mut client = BufReader::new(client); let mut s = String::new(); if let Err(e) = client.read_to_string(&mut s) { warn!("diagnostic server failed to read: {}", e); } else { match serde_json::from_str(&s) { Ok(message) => on_message(message), Err(e) => warn!("invalid diagnostics message: {}", e), } } // The client should be kept alive until after `on_message` is // called to ensure that the client doesn't exit too soon (and // Message::Finish getting posted before Message::FixDiagnostic). drop(client); } } } impl Drop for StartedServer { fn drop(&mut self) { self.done.store(true, Ordering::SeqCst); // Ignore errors here as this is largely best-effort if TcpStream::connect(&self.addr).is_err() { return; } drop(self.thread.take().unwrap().join()); } }
{
    self.config
        .shell()
        .warn("failed to automatically apply fixes suggested by rustc")?;
}
conditional_block
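One detail of the `StartedServer` drop logic in this record is worth spelling out: the accept loop blocks inside `listener.accept()`, so setting the `done` flag alone would never stop it. The `Drop` impl therefore makes one throwaway connection to its own address purely to wake the loop. A condensed, runnable sketch of that shutdown handshake, with `Stopper` as a hypothetical reduction of the original struct:

```rust
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::{self, JoinHandle};

struct Stopper {
    addr: SocketAddr,
    done: Arc<AtomicBool>,
    thread: Option<JoinHandle<()>>,
}

impl Drop for Stopper {
    fn drop(&mut self) {
        // Publish the flag first so the woken accept loop observes it.
        self.done.store(true, Ordering::SeqCst);
        // A throwaway connection unblocks the blocking `accept()` call.
        if TcpStream::connect(self.addr).is_err() {
            return; // best effort: never hang inside Drop
        }
        drop(self.thread.take().unwrap().join());
    }
}

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let addr = listener.local_addr()?;
    let done = Arc::new(AtomicBool::new(false));
    let done2 = done.clone();
    let thread = thread::spawn(move || {
        while let Ok(_) = listener.accept() {
            if done2.load(Ordering::SeqCst) {
                break; // the wake-up connection from Drop lands here
            }
        }
    });
    let server = Stopper { addr, done, thread: Some(thread) };
    drop(server); // triggers the handshake above and joins cleanly
    Ok(())
}
```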
diagnostic_server.rs
//! A small TCP server to handle collection of diagnostics information in a //! cross-platform way for the `cargo fix` command. use std::collections::HashSet; use std::io::{BufReader, Read, Write}; use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream}; use std::path::PathBuf; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread::{self, JoinHandle}; use anyhow::{Context, Error}; use cargo_util::ProcessBuilder; use serde::{Deserialize, Serialize}; use tracing::warn; use crate::core::Edition; use crate::util::errors::CargoResult; use crate::util::Config; const DIAGNOSTICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER"; #[derive(Deserialize, Serialize, Hash, Eq, PartialEq, Clone)] pub enum Message { Migrating { file: String, from_edition: Edition, to_edition: Edition, }, Fixing { file: String, }, Fixed { file: String, fixes: u32, }, FixFailed { files: Vec<String>, krate: Option<String>, errors: Vec<String>, abnormal_exit: Option<String>, }, ReplaceFailed { file: String, message: String, }, EditionAlreadyEnabled { message: String, edition: Edition, }, } impl Message { pub fn post(&self, config: &Config) -> Result<(), Error> { let addr = config .get_env(DIAGNOSTICS_SERVER_VAR) .context("diagnostics collector misconfigured")?; let mut client = TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?; let s = serde_json::to_string(self).context("failed to serialize message")?; client .write_all(s.as_bytes()) .context("failed to write message to diagnostics target")?; client .shutdown(Shutdown::Write) .context("failed to shutdown")?; client .read_to_end(&mut Vec::new()) .context("failed to receive a disconnect")?; Ok(()) } } /// A printer that will print diagnostics messages to the shell. pub struct DiagnosticPrinter<'a> { /// The config to get the shell to print to. config: &'a Config, /// An optional wrapper to be used in addition to `rustc.wrapper` for workspace crates. /// This is used to get the correct bug report URL. For instance, /// if `clippy-driver` is set as the value for the wrapper, /// then the correct bug report URL for `clippy` can be obtained. workspace_wrapper: &'a Option<PathBuf>, // A set of messages that have already been printed. 
dedupe: HashSet<Message>, } impl<'a> DiagnosticPrinter<'a> { pub fn new( config: &'a Config, workspace_wrapper: &'a Option<PathBuf>, ) -> DiagnosticPrinter<'a> { DiagnosticPrinter { config, workspace_wrapper, dedupe: HashSet::new(), } } pub fn print(&mut self, msg: &Message) -> CargoResult<()> { match msg { Message::Migrating { file, from_edition, to_edition, } => { if!self.dedupe.insert(msg.clone()) { return Ok(()); } self.config.shell().status( "Migrating", &format!("{} from {} edition to {}", file, from_edition, to_edition), ) } Message::Fixing { file } => self .config .shell() .verbose(|shell| shell.status("Fixing", file)), Message::Fixed { file, fixes } => { let msg = if *fixes == 1 { "fix" } else { "fixes" }; let msg = format!("{} ({} {})", file, fixes, msg); self.config.shell().status("Fixed", msg) } Message::ReplaceFailed { file, message } => { let msg = format!("error applying suggestions to `{}`\n", file); self.config.shell().warn(&msg)?; write!( self.config.shell().err(), "The full error message was:\n\n> {}\n\n", message, )?; let issue_link = get_bug_report_url(self.workspace_wrapper); write!( self.config.shell().err(), "{}", gen_please_report_this_bug_text(issue_link) )?; Ok(()) } Message::FixFailed { files, krate, errors, abnormal_exit, } => { if let Some(ref krate) = *krate { self.config.shell().warn(&format!( "failed to automatically apply fixes suggested by rustc \ to crate `{}`", krate, ))?; } else { self.config .shell() .warn("failed to automatically apply fixes suggested by rustc")?; } if!files.is_empty() { writeln!( self.config.shell().err(), "\nafter fixes were automatically applied the compiler \ reported errors within these files:\n" )?; for file in files { writeln!(self.config.shell().err(), " * {}", file)?; } writeln!(self.config.shell().err())?; } let issue_link = get_bug_report_url(self.workspace_wrapper); write!( self.config.shell().err(), "{}", gen_please_report_this_bug_text(issue_link) )?; if!errors.is_empty() { writeln!( self.config.shell().err(), "The following errors were reported:" )?; for error in errors { write!(self.config.shell().err(), "{}", error)?; if!error.ends_with('\n') { writeln!(self.config.shell().err())?; } } } if let Some(exit) = abnormal_exit { writeln!( self.config.shell().err(), "rustc exited abnormally: {}", exit )?; } writeln!( self.config.shell().err(), "Original diagnostics will follow.\n" )?; Ok(()) } Message::EditionAlreadyEnabled { message, edition } => { if!self.dedupe.insert(msg.clone()) { return Ok(()); } // Don't give a really verbose warning if it has already been issued. if self.dedupe.insert(Message::EditionAlreadyEnabled {
self.config.shell().warn(&format!("\ {} If you are trying to migrate from the previous edition ({prev_edition}), the process requires following these steps: 1. Start with `edition = \"{prev_edition}\"` in `Cargo.toml` 2. Run `cargo fix --edition` 3. Modify `Cargo.toml` to set `edition = \"{this_edition}\"` 4. Run `cargo build` or `cargo test` to verify the fixes worked More details may be found at https://doc.rust-lang.org/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html ", message, this_edition=edition, prev_edition=edition.previous().unwrap() )) } else { self.config.shell().warn(message) } } } } } fn gen_please_report_this_bug_text(url: &str) -> String { format!( "This likely indicates a bug in either rustc or cargo itself,\n\ and we would appreciate a bug report! You're likely to see \n\ a number of compiler warnings after this message which cargo\n\ attempted to fix but failed. If you could open an issue at\n\ {}\n\ quoting the full output of this command we'd be very appreciative!\n\ Note that you may be able to make some more progress in the near-term\n\ fixing code with the `--broken-code` flag\n\n\ ", url ) } fn get_bug_report_url(rustc_workspace_wrapper: &Option<PathBuf>) -> &str { let clippy = std::ffi::OsStr::new("clippy-driver"); let issue_link = match rustc_workspace_wrapper.as_ref().and_then(|x| x.file_stem()) { Some(wrapper) if wrapper == clippy => "https://github.com/rust-lang/rust-clippy/issues", _ => "https://github.com/rust-lang/rust/issues", }; issue_link } #[derive(Debug)] pub struct RustfixDiagnosticServer { listener: TcpListener, addr: SocketAddr, } pub struct StartedServer { addr: SocketAddr, done: Arc<AtomicBool>, thread: Option<JoinHandle<()>>, } impl RustfixDiagnosticServer { pub fn new() -> Result<Self, Error> { let listener = TcpListener::bind("127.0.0.1:0") .with_context(|| "failed to bind TCP listener to manage locking")?; let addr = listener.local_addr()?; Ok(RustfixDiagnosticServer { listener, addr }) } pub fn configure(&self, process: &mut ProcessBuilder) { process.env(DIAGNOSTICS_SERVER_VAR, self.addr.to_string()); } pub fn start<F>(self, on_message: F) -> Result<StartedServer, Error> where F: Fn(Message) + Send +'static, { let addr = self.addr; let done = Arc::new(AtomicBool::new(false)); let done2 = done.clone(); let thread = thread::spawn(move || { self.run(&on_message, &done2); }); Ok(StartedServer { addr, thread: Some(thread), done, }) } fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) { while let Ok((client, _)) = self.listener.accept() { if done.load(Ordering::SeqCst) { break; } let mut client = BufReader::new(client); let mut s = String::new(); if let Err(e) = client.read_to_string(&mut s) { warn!("diagnostic server failed to read: {}", e); } else { match serde_json::from_str(&s) { Ok(message) => on_message(message), Err(e) => warn!("invalid diagnostics message: {}", e), } } // The client should be kept alive until after `on_message` is // called to ensure that the client doesn't exit too soon (and // Message::Finish getting posted before Message::FixDiagnostic). drop(client); } } } impl Drop for StartedServer { fn drop(&mut self) { self.done.store(true, Ordering::SeqCst); // Ignore errors here as this is largely best-effort if TcpStream::connect(&self.addr).is_err() { return; } drop(self.thread.take().unwrap().join()); } }
message: "".to_string(), // Dummy, so that this only long-warns once. edition: *edition, }) {
random_line_split
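The `EditionAlreadyEnabled` arm above uses a two-level dedupe that is easy to misread: the full message is inserted once to suppress exact repeats, and a second dummy entry keyed on an empty message plus the edition ensures the long migration text prints only once per edition, with later distinct messages demoted to the short warning. A self-contained sketch of the trick, with a hypothetical `EditionWarning` standing in for `Message`:

```rust
use std::collections::HashSet;

// Hypothetical key type standing in for `Message::EditionAlreadyEnabled`.
#[derive(Hash, Eq, PartialEq, Clone)]
struct EditionWarning {
    message: String,
    edition: u32,
}

fn main() {
    let mut dedupe: HashSet<EditionWarning> = HashSet::new();
    for (msg, edition) in [("a already enabled", 2021), ("b already enabled", 2021)] {
        let full = EditionWarning { message: msg.to_string(), edition };
        if !dedupe.insert(full) {
            continue; // exact duplicate: skip entirely
        }
        // Keying a second entry on an empty message collapses all distinct
        // messages for one edition into a single "long form" emission.
        let long_form_key = EditionWarning { message: String::new(), edition };
        if dedupe.insert(long_form_key) {
            println!("(long migration instructions for edition {edition})");
        } else {
            println!("short warning: {msg}");
        }
    }
}
```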
asset.rs
0} contains confidential data EpochSealConfidential(NodeId), /// nurn & replace seal definition for node {0} contains confidential data BurnSealConfidential(NodeId), /// inflation assignment (seal or state) for node {0} contains confidential /// data InflationAssignmentConfidential(NodeId), /// Internal data inconsistency, as returned by the [`rgb::GraphAPI`] /// methods #[display(inner)] #[from] Inconsistency(rgb::ConsistencyError), /// not of all epochs referenced in burn or burn & replace operation /// history are known from the consignment NotAllEpochsExposed, } /// Detailed RGB20 asset information /// /// Structure presents complete set of RGB20 asset-related data which can be /// extracted from the genesis or a consignment. It is not the source of the /// truth, and the presence of the data in the structure does not imply their /// validity, since the structure constructor does not validates blockchain or /// LN-based transaction commitments or satisfaction of schema requirements. /// /// The main reason of the structure is: /// 1) to persist *cached* copy of the asset data without the requirement to /// parse all stash transition each time in order to extract allocation /// information; /// 2) to present data from asset genesis or consignment for UI in convenient /// form. /// 3) to orchestrate generation of new state transitions taking into account /// known asset information. /// /// (1) is important for wallets, (2) is for more generic software, like /// client-side-validated data explorers, developer & debugging tools etc and /// (3) for asset-management software. /// /// In both (2) and (3) case there is no need to persist the structure; genesis /// /consignment should be persisted instead and the structure must be /// reconstructed each time from that data upon the launch #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), serde(crate = "serde_crate", rename_all = "camelCase") )] #[derive( Clone, Getters, PartialEq, Debug, Display, StrictEncode, StrictDecode, )] #[display("{genesis_nomination} ({id})")] pub struct Asset { /// Bech32-representation of the asset genesis genesis: String, /// Asset ID, which is equal to Contract ID and genesis ID /// /// It can be used as a unique primary kep id: ContractId, /// Chain with which the asset is issued #[cfg_attr(feature = "serde", serde(with = "As::<DisplayFromStr>"))] chain: Chain, /// Asset creation data date: DateTime<Utc>, /// Names assigned to the asset at the issue time /// /// Nomination is a set of asset metadata assigned by the issuer, which /// define core asset properties: ticker, name, decimal precision, contract /// text. #[cfg_attr(feature = "serde", serde(flatten))] genesis_nomination: Nomination, /// List of all known renominations. /// /// This list does not include genesis nomination, which can be accessed /// via [`Asset::genesis_nomination`]. The last item in the list contains /// [`Asset::last_nomination`] data as a part of its renomination operation /// details. known_renominations: Vec<Renomination>, /// All issues known from the available data (stash and/or provided /// consignments) /// /// Primary issue is always the first one; the rest are provided in /// arbitrary order known_issues: Vec<Issue>, /// Single-use-seal controlling the beginning of the first epoch epoch_opening_seal: Option<OutPoint>, /// Burn & replacement epochs, organized according to the witness txid. /// /// Witness transaction must be mined for the epoch to be real. 
/// One of the inputs of this transaction MUST spend UTXO defined as a /// seal closed by this epoch ([`Epoch::closes`]) epochs: Vec<Epoch>, /// Detailed information about the asset supply (aggregated from the issue /// and burning information kept inside the epochs data) #[cfg_attr(feature = "serde", serde(flatten))] supply: Supply, /// Specifies outpoints controlling certain amounts of assets. /// /// NB: Information here does not imply that the outputs are owned by the /// current user or the owning transactions are mined/exist; this must be /// determined by the wallet and depends on specific medium (blockchain, /// LN) known_allocations: Vec<Allocation>, } impl Asset { /// Current asset ticker /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn ticker(&self) -> &str { &self.active_nomination().ticker() } /// Current asset name /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn name(&self) -> &str { &self.active_nomination().ticker() } /// Current version of the asset contract, represented in Ricardian form /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn ricardian_contract(&self) -> &str { &self.active_nomination().ticker() } /// Current decimal precision of the asset value /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn decimal_precision(&self) -> u8 { *self.active_nomination().decimal_precision() } /// Returns information (in atomic value units) about specific measure of /// the asset supply, if known, or `None` otherwise pub fn precise_supply( &self, measure: SupplyMeasure, ) -> Option<AtomicValue> { Some(match measure { SupplyMeasure::KnownCirculating => *self.supply.known_circulating(), SupplyMeasure::TotalCirculating => { match self.supply.total_circulating() { None => return None, Some(supply) => supply, } } SupplyMeasure::IssueLimit => *self.supply.issue_limit(), }) } /// Returns information in form of a float number about specific measure of /// the asset supply, if known, or [`f64::NAN`] otherwise pub fn fractional_supply( &self, measure: SupplyMeasure, ) -> FractionalAmount { let value = match self.precise_supply(measure) { None => return FractionalAmount::NAN, Some(supply) => supply, }; PreciseAmount::transmutate_into(value, self.decimal_precision()) } /// Nomination resulting from the last known renomination /// /// NB: the returned result may not 
match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn last_nomination(&self) -> Option<&Nomination> { self.known_renominations.last().map(|o| o.nomination()) } /// Active nomination data. /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn active_nomination(&self) -> &Nomination { self.last_nomination().unwrap_or(&self.genesis_nomination) } /// Returns sum of all known allocations, in atomic value units #[inline] pub fn known_value(&self) -> AtomicValue { self.known_allocations.iter().map(Allocation::value).sum() } /// Returns sum of known allocation after applying `filter` function. Useful /// for filtering UTXOs owned by the current wallet. The returned value is /// in atomic units (see [`AtomicValue`] pub fn known_filtered_value<F>(&self, filter: F) -> AtomicValue where F: Fn(&Allocation) -> bool, { self.known_allocations .iter() .filter(|allocation| filter(*allocation)) .map(Allocation::value) .sum() } /// Returns sum of all known allocations, as a floating point value (see /// [`FractionalAmount`]) pub fn known_amount(&self) -> FractionalAmount { self.known_allocations .iter() .map(Allocation::value) .map(|atomic| { PreciseAmount::transmutate_into( atomic, self.decimal_precision(), ) }) .sum() } /// Returns sum of known allocation after applying `filter` function. Useful /// for filtering UTXOs owned by the current wallet. The returned amount is /// a floating point number (see [`FractionalAmount`]) pub fn known_filtered_amount<F>(&self, filter: F) -> FractionalAmount where F: Fn(&Allocation) -> bool, { self.known_allocations .iter() .filter(|allocation| filter(*allocation)) .map(Allocation::value) .map(|atomic| { PreciseAmount::transmutate_into( atomic, self.decimal_precision(), ) }) .sum() } /// Returns outpoints which when spent may indicate inflation happening /// up to specific amount. /// /// NB: Not of all inflation controlling points may be known pub fn known_inflation( &self, ) -> BTreeMap<OutPoint, (AtomicValue, Vec<u16>)> { let mut inflation_list = BTreeMap::new(); for issue in self.known_issues() { for (seal, data) in issue.inflation_assignments() { inflation_list.insert(*seal, data.clone()); } } inflation_list } #[inline] /// Lists all known allocations for the given bitcoin transaction /// [`OutPoint`] pub fn outpoint_allocations(&self, outpoint: OutPoint) -> Vec<Allocation> { self.known_allocations .iter() .filter(|a| *a.outpoint() == outpoint) .copied() .collect() } /// Adds new allocation to the list of known allocations pub fn add_allocation( &mut self, outpoint: OutPoint, node_id: NodeId, index: u16, value: value::Revealed, ) -> bool { let new_allocation = Allocation::with(node_id, index, outpoint, value); if!self.known_allocations.contains(&new_allocation) { self.known_allocations.push(new_allocation); true } else { false } } /// Adds issue to the list of known issues. This is an internal function /// which should not be used directly; instead construct the asset structure /// from the [`Consignment`] using [`Asset::try_from`] method. 
fn add_issue( &mut self, consignment: &Consignment, transition: &Transition, witness: Txid, ) -> Result<(), Error> { let closed_seals = consignment.seals_closed_with( transition.node_id(), OwnedRightType::Inflation, witness, )?; let issue = Issue::with(self.id, closed_seals, transition, witness)?; self.known_issues.push(issue); Ok(()) } /// Adds an epoch to the list of known epochs. This is an internal function /// which should not be used directly; instead construct the asset structure /// from the [`Consignment`] using [`Asset::try_from`] method. fn add_epoch( &mut self, consignment: &Consignment, transition: &Transition, no: usize, operations: Vec<BurnReplace>, witness: Txid, ) -> Result<(), Error> { let id = transition.node_id(); // 1. It must correctly extend known state, i.e. close UTXO for a seal // defined by a state transition already belonging to the asset let closed_seal = consignment .seals_closed_with(id, OwnedRightType::OpenEpoch, witness)? .into_iter() .next() .ok_or(Error::Inconsistency( rgb::ConsistencyError::NoSealsClosed( OwnedRightType::OpenEpoch.into(), id, ), ))?; let epoch = Epoch::with( self.id, no, closed_seal, transition, operations, witness, )?; self.epochs.insert(no as usize, epoch); Ok(()) } } impl TryFrom<Genesis> for Asset { type Error = Error; fn try_from(genesis: Genesis) -> Result<Self, Self::Error> { if genesis.schema_id()!= schema::schema().schema_id() { Err(Error::WrongSchemaId)?; } let genesis_meta = genesis.metadata();
let supply = *genesis_meta .u64(FieldType::IssuedSupply) .first() .ok_or(Error::UnsatisfiedSchemaRequirement)?; let mut issue_limit = 0; // Check if issue limit can be known for assignment in genesis.owned_rights_by_type(OwnedRightType::Inflation.into()) { for state in assignment.to_data_assignment_vec() { match state { Assignment::Revealed { assigned_state,.. } | Assignment::ConfidentialSeal { assigned_state,.. } => { if issue_limit < core::u64::MAX { issue_limit += assigned_state .u64() .ok_or(Error::UnsatisfiedSchemaRequirement)? }; } _ => issue_limit = core::u64::MAX, } } } let epoch_opening_seal = genesis .revealed_seals_by_type(OwnedRightType::OpenEpoch.into()) .map_err(|_| Error::EpochSealConfidential(genesis.node_id()))? .first() .copied() .map(|seal| seal.try_into()) .transpose()?; let issue = Issue::try_from(&genesis)?; let node_id = NodeId::from_inner(genesis.contract_id().into_inner()); let mut known_allocations = Vec::<Allocation>::new(); for assignment in genesis.owned_rights_by_type(OwnedRightType::Assets.into()) { assignment .to_value_assignment_vec() .into_iter() .enumerate() .for_each(|(index, assign)| { if let Assignment::Revealed { seal_definition: seal::Revealed::TxOutpoint(outpoint_reveal), assigned_state, } = assign { known_allocations.push(Allocation::with( node_id, index as u16, outpoint_reveal.into(), assigned_state, )) } }); } Ok(Asset { genesis: genesis.to_string(), id: genesis.contract_id(), chain: genesis.chain().clone(), genesis_nomination: Nomination::try_from(&genesis)?, supply: Supply::with(supply, None, issue_limit), date: DateTime::from_utc( NaiveDateTime::from_timestamp( *genesis_meta .i64(FieldType::Timestamp) .first() .ok_or(Error::UnsatisfiedSchemaRequirement)?, 0, ), Utc, ), known_renominations: empty!(), known_issues: vec![issue], // we assume that each genesis allocation with revealed amount // and known seal (they are always revealed together) belongs to us known_allocations, epochs: empty!(), epoch_opening_seal, }) } } impl TryFrom<Consignment> for Asset { type Error = Error; fn try_from(consignment: Consignment) -> Result<Self, Self::Error> { // 1. Parse genesis let mut asset: Asset = consignment.genesis.clone().try_into()?; // 2. Parse burn & replacement operations let mut epoch_operations: BTreeMap<NodeId, Vec<BurnReplace>> = empty!(); for transition in consignment.endpoint_transitions_by_types(&[ TransitionType::BurnAndReplace.into(), TransitionType::Burn.into(), ]) { let mut ops = consignment .chain_iter( transition.node_id(), OwnedRightType::BurnReplace.into(), ) .collect::<Vec<_>>(); ops.reverse(); if let Some((epoch, _)) = ops.pop() { let epoch_id = epoch.node_id(); let mut operations = vec![]; for (no, (transition, witness)) in ops.into_iter().enumerate() { let id = transition.node_id(); let closed_seal = consignment .seals_closed_with( id, OwnedRightType::BurnReplace, witness, )? .into_iter() .next() .ok_or(Error::Inconsistency( rgb::ConsistencyError::NoSealsClosed( OwnedRightType::BurnReplace.into(), id, ), ))?; operations.push(BurnReplace::with( asset.id, epoch_id, no, closed_seal, transition, witness, )?) } epoch_operations.insert(epoch_id, operations); } } // 3. Parse epochs let epoch_transition = consignment .endpoint_transitions_by_type(TransitionType::Epoch.into()) .into_iter() .next();
random_line_split
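In the asset.rs record above, `TryFrom<Genesis>` accumulates `issue_limit` across inflation assignments, clamping at `u64::MAX` when any assignment is confidential. The manual `if issue_limit < core::u64::MAX` guard can still overflow inside a single addition, so here is a sketch of the same clamp using `saturating_add`, with `Option<u64>` standing in for revealed versus confidential assignments:

```rust
/// Stand-in for an inflation assignment: `Some(v)` when the allowed
/// amount is revealed, `None` when it is confidential (unknowable).
fn issue_limit(assignments: &[Option<u64>]) -> u64 {
    let mut limit: u64 = 0;
    for a in assignments {
        match a {
            // `saturating_add` pins the sum at u64::MAX instead of
            // wrapping, matching the intent of the manual guard above.
            Some(v) => limit = limit.saturating_add(*v),
            // Any confidential assignment makes the limit unknowable,
            // which the parser models as "maximal".
            None => limit = u64::MAX,
        }
    }
    limit
}

fn main() {
    assert_eq!(issue_limit(&[Some(100), Some(50)]), 150);
    assert_eq!(issue_limit(&[Some(100), None]), u64::MAX);
    println!("ok");
}
```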
asset.rs
contains confidential data EpochSealConfidential(NodeId), /// nurn & replace seal definition for node {0} contains confidential data BurnSealConfidential(NodeId), /// inflation assignment (seal or state) for node {0} contains confidential /// data InflationAssignmentConfidential(NodeId), /// Internal data inconsistency, as returned by the [`rgb::GraphAPI`] /// methods #[display(inner)] #[from] Inconsistency(rgb::ConsistencyError), /// not of all epochs referenced in burn or burn & replace operation /// history are known from the consignment NotAllEpochsExposed, } /// Detailed RGB20 asset information /// /// Structure presents complete set of RGB20 asset-related data which can be /// extracted from the genesis or a consignment. It is not the source of the /// truth, and the presence of the data in the structure does not imply their /// validity, since the structure constructor does not validates blockchain or /// LN-based transaction commitments or satisfaction of schema requirements. /// /// The main reason of the structure is: /// 1) to persist *cached* copy of the asset data without the requirement to /// parse all stash transition each time in order to extract allocation /// information; /// 2) to present data from asset genesis or consignment for UI in convenient /// form. /// 3) to orchestrate generation of new state transitions taking into account /// known asset information. /// /// (1) is important for wallets, (2) is for more generic software, like /// client-side-validated data explorers, developer & debugging tools etc and /// (3) for asset-management software. /// /// In both (2) and (3) case there is no need to persist the structure; genesis /// /consignment should be persisted instead and the structure must be /// reconstructed each time from that data upon the launch #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), serde(crate = "serde_crate", rename_all = "camelCase") )] #[derive( Clone, Getters, PartialEq, Debug, Display, StrictEncode, StrictDecode, )] #[display("{genesis_nomination} ({id})")] pub struct Asset { /// Bech32-representation of the asset genesis genesis: String, /// Asset ID, which is equal to Contract ID and genesis ID /// /// It can be used as a unique primary kep id: ContractId, /// Chain with which the asset is issued #[cfg_attr(feature = "serde", serde(with = "As::<DisplayFromStr>"))] chain: Chain, /// Asset creation data date: DateTime<Utc>, /// Names assigned to the asset at the issue time /// /// Nomination is a set of asset metadata assigned by the issuer, which /// define core asset properties: ticker, name, decimal precision, contract /// text. #[cfg_attr(feature = "serde", serde(flatten))] genesis_nomination: Nomination, /// List of all known renominations. /// /// This list does not include genesis nomination, which can be accessed /// via [`Asset::genesis_nomination`]. The last item in the list contains /// [`Asset::last_nomination`] data as a part of its renomination operation /// details. known_renominations: Vec<Renomination>, /// All issues known from the available data (stash and/or provided /// consignments) /// /// Primary issue is always the first one; the rest are provided in /// arbitrary order known_issues: Vec<Issue>, /// Single-use-seal controlling the beginning of the first epoch epoch_opening_seal: Option<OutPoint>, /// Burn & replacement epochs, organized according to the witness txid. /// /// Witness transaction must be mined for the epoch to be real. 
/// One of the inputs of this transaction MUST spend UTXO defined as a /// seal closed by this epoch ([`Epoch::closes`]) epochs: Vec<Epoch>, /// Detailed information about the asset supply (aggregated from the issue /// and burning information kept inside the epochs data) #[cfg_attr(feature = "serde", serde(flatten))] supply: Supply, /// Specifies outpoints controlling certain amounts of assets. /// /// NB: Information here does not imply that the outputs are owned by the /// current user or the owning transactions are mined/exist; this must be /// determined by the wallet and depends on specific medium (blockchain, /// LN) known_allocations: Vec<Allocation>, } impl Asset { /// Current asset ticker /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn ticker(&self) -> &str { &self.active_nomination().ticker() } /// Current asset name /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn name
lf) -> &str { &self.active_nomination().ticker() } /// Current version of the asset contract, represented in Ricardian form /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn ricardian_contract(&self) -> &str { &self.active_nomination().ticker() } /// Current decimal precision of the asset value /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn decimal_precision(&self) -> u8 { *self.active_nomination().decimal_precision() } /// Returns information (in atomic value units) about specific measure of /// the asset supply, if known, or `None` otherwise pub fn precise_supply( &self, measure: SupplyMeasure, ) -> Option<AtomicValue> { Some(match measure { SupplyMeasure::KnownCirculating => *self.supply.known_circulating(), SupplyMeasure::TotalCirculating => { match self.supply.total_circulating() { None => return None, Some(supply) => supply, } } SupplyMeasure::IssueLimit => *self.supply.issue_limit(), }) } /// Returns information in form of a float number about specific measure of /// the asset supply, if known, or [`f64::NAN`] otherwise pub fn fractional_supply( &self, measure: SupplyMeasure, ) -> FractionalAmount { let value = match self.precise_supply(measure) { None => return FractionalAmount::NAN, Some(supply) => supply, }; PreciseAmount::transmutate_into(value, self.decimal_precision()) } /// Nomination resulting from the last known renomination /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn last_nomination(&self) -> Option<&Nomination> { self.known_renominations.last().map(|o| o.nomination()) } /// Active nomination data. /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn active_nomination(&self) -> &Nomination { self.last_nomination().unwrap_or(&self.genesis_nomination) } /// Returns sum of all known allocations, in atomic value units #[inline] pub fn known_value(&self) -> AtomicValue { self.known_allocations.iter().map(Allocation::value).sum() } /// Returns sum of known allocation after applying `filter` function. Useful /// for filtering UTXOs owned by the current wallet. 
The returned value is /// in atomic units (see [`AtomicValue`] pub fn known_filtered_value<F>(&self, filter: F) -> AtomicValue where F: Fn(&Allocation) -> bool, { self.known_allocations .iter() .filter(|allocation| filter(*allocation)) .map(Allocation::value) .sum() } /// Returns sum of all known allocations, as a floating point value (see /// [`FractionalAmount`]) pub fn known_amount(&self) -> FractionalAmount { self.known_allocations .iter() .map(Allocation::value) .map(|atomic| { PreciseAmount::transmutate_into( atomic, self.decimal_precision(), ) }) .sum() } /// Returns sum of known allocation after applying `filter` function. Useful /// for filtering UTXOs owned by the current wallet. The returned amount is /// a floating point number (see [`FractionalAmount`]) pub fn known_filtered_amount<F>(&self, filter: F) -> FractionalAmount where F: Fn(&Allocation) -> bool, { self.known_allocations .iter() .filter(|allocation| filter(*allocation)) .map(Allocation::value) .map(|atomic| { PreciseAmount::transmutate_into( atomic, self.decimal_precision(), ) }) .sum() } /// Returns outpoints which when spent may indicate inflation happening /// up to specific amount. /// /// NB: Not of all inflation controlling points may be known pub fn known_inflation( &self, ) -> BTreeMap<OutPoint, (AtomicValue, Vec<u16>)> { let mut inflation_list = BTreeMap::new(); for issue in self.known_issues() { for (seal, data) in issue.inflation_assignments() { inflation_list.insert(*seal, data.clone()); } } inflation_list } #[inline] /// Lists all known allocations for the given bitcoin transaction /// [`OutPoint`] pub fn outpoint_allocations(&self, outpoint: OutPoint) -> Vec<Allocation> { self.known_allocations .iter() .filter(|a| *a.outpoint() == outpoint) .copied() .collect() } /// Adds new allocation to the list of known allocations pub fn add_allocation( &mut self, outpoint: OutPoint, node_id: NodeId, index: u16, value: value::Revealed, ) -> bool { let new_allocation = Allocation::with(node_id, index, outpoint, value); if!self.known_allocations.contains(&new_allocation) { self.known_allocations.push(new_allocation); true } else { false } } /// Adds issue to the list of known issues. This is an internal function /// which should not be used directly; instead construct the asset structure /// from the [`Consignment`] using [`Asset::try_from`] method. fn add_issue( &mut self, consignment: &Consignment, transition: &Transition, witness: Txid, ) -> Result<(), Error> { let closed_seals = consignment.seals_closed_with( transition.node_id(), OwnedRightType::Inflation, witness, )?; let issue = Issue::with(self.id, closed_seals, transition, witness)?; self.known_issues.push(issue); Ok(()) } /// Adds an epoch to the list of known epochs. This is an internal function /// which should not be used directly; instead construct the asset structure /// from the [`Consignment`] using [`Asset::try_from`] method. fn add_epoch( &mut self, consignment: &Consignment, transition: &Transition, no: usize, operations: Vec<BurnReplace>, witness: Txid, ) -> Result<(), Error> { let id = transition.node_id(); // 1. It must correctly extend known state, i.e. close UTXO for a seal // defined by a state transition already belonging to the asset let closed_seal = consignment .seals_closed_with(id, OwnedRightType::OpenEpoch, witness)? 
.into_iter() .next() .ok_or(Error::Inconsistency( rgb::ConsistencyError::NoSealsClosed( OwnedRightType::OpenEpoch.into(), id, ), ))?; let epoch = Epoch::with( self.id, no, closed_seal, transition, operations, witness, )?; self.epochs.insert(no as usize, epoch); Ok(()) } } impl TryFrom<Genesis> for Asset { type Error = Error; fn try_from(genesis: Genesis) -> Result<Self, Self::Error> { if genesis.schema_id()!= schema::schema().schema_id() { Err(Error::WrongSchemaId)?; } let genesis_meta = genesis.metadata(); let supply = *genesis_meta .u64(FieldType::IssuedSupply) .first() .ok_or(Error::UnsatisfiedSchemaRequirement)?; let mut issue_limit = 0; // Check if issue limit can be known for assignment in genesis.owned_rights_by_type(OwnedRightType::Inflation.into()) { for state in assignment.to_data_assignment_vec() { match state { Assignment::Revealed { assigned_state,.. } | Assignment::ConfidentialSeal { assigned_state,.. } => { if issue_limit < core::u64::MAX { issue_limit += assigned_state .u64() .ok_or(Error::UnsatisfiedSchemaRequirement)? }; } _ => issue_limit = core::u64::MAX, } } } let epoch_opening_seal = genesis .revealed_seals_by_type(OwnedRightType::OpenEpoch.into()) .map_err(|_| Error::EpochSealConfidential(genesis.node_id()))? .first() .copied() .map(|seal| seal.try_into()) .transpose()?; let issue = Issue::try_from(&genesis)?; let node_id = NodeId::from_inner(genesis.contract_id().into_inner()); let mut known_allocations = Vec::<Allocation>::new(); for assignment in genesis.owned_rights_by_type(OwnedRightType::Assets.into()) { assignment .to_value_assignment_vec() .into_iter() .enumerate() .for_each(|(index, assign)| { if let Assignment::Revealed { seal_definition: seal::Revealed::TxOutpoint(outpoint_reveal), assigned_state, } = assign { known_allocations.push(Allocation::with( node_id, index as u16, outpoint_reveal.into(), assigned_state, )) } }); } Ok(Asset { genesis: genesis.to_string(), id: genesis.contract_id(), chain: genesis.chain().clone(), genesis_nomination: Nomination::try_from(&genesis)?, supply: Supply::with(supply, None, issue_limit), date: DateTime::from_utc( NaiveDateTime::from_timestamp( *genesis_meta .i64(FieldType::Timestamp) .first() .ok_or(Error::UnsatisfiedSchemaRequirement)?, 0, ), Utc, ), known_renominations: empty!(), known_issues: vec![issue], // we assume that each genesis allocation with revealed amount // and known seal (they are always revealed together) belongs to us known_allocations, epochs: empty!(), epoch_opening_seal, }) } } impl TryFrom<Consignment> for Asset { type Error = Error; fn try_from(consignment: Consignment) -> Result<Self, Self::Error> { // 1. Parse genesis let mut asset: Asset = consignment.genesis.clone().try_into()?; // 2. Parse burn & replacement operations let mut epoch_operations: BTreeMap<NodeId, Vec<BurnReplace>> = empty!(); for transition in consignment.endpoint_transitions_by_types(&[ TransitionType::BurnAndReplace.into(), TransitionType::Burn.into(), ]) { let mut ops = consignment .chain_iter( transition.node_id(), OwnedRightType::BurnReplace.into(), ) .collect::<Vec<_>>(); ops.reverse(); if let Some((epoch, _)) = ops.pop() { let epoch_id = epoch.node_id(); let mut operations = vec![]; for (no, (transition, witness)) in ops.into_iter().enumerate() { let id = transition.node_id(); let closed_seal = consignment .seals_closed_with( id, OwnedRightType::BurnReplace, witness, )? 
.into_iter() .next() .ok_or(Error::Inconsistency( rgb::ConsistencyError::NoSealsClosed( OwnedRightType::BurnReplace.into(), id, ), ))?; operations.push(BurnReplace::with( asset.id, epoch_id, no, closed_seal, transition, witness, )?) } epoch_operations.insert(epoch_id, operations); } } // 3. Parse epochs let epoch_transition = consignment .endpoint_transitions_by_type(TransitionType::Epoch.into()) .into_iter() .next();
(&se
identifier_name
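This record's FIM target is the `name` accessor; note that `name()` and `ricardian_contract()` in the source both delegate to `ticker()`, which reads like a copy-paste slip. The underlying pattern is sound, though: resolve the active value as the newest known override, falling back to the genesis value via `last().unwrap_or(..)`. A minimal model of that accessor shape (names hypothetical):

```rust
// Minimal model of the "genesis value plus known overrides" pattern
// used by `Asset::active_nomination`.
struct History {
    genesis: String,
    renominations: Vec<String>,
}

impl History {
    /// Last known override, if any.
    fn last(&self) -> Option<&String> {
        self.renominations.last()
    }

    /// Active value: the newest override, falling back to genesis.
    /// Like the asset getters above, this only reflects *known* state.
    fn active(&self) -> &String {
        self.last().unwrap_or(&self.genesis)
    }
}

fn main() {
    let mut h = History { genesis: "TICK".into(), renominations: vec![] };
    assert_eq!(h.active(), "TICK");
    h.renominations.push("TCK2".into());
    assert_eq!(h.active(), "TCK2");
    println!("ok");
}
```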
asset.rs
} contains confidential data EpochSealConfidential(NodeId), /// nurn & replace seal definition for node {0} contains confidential data BurnSealConfidential(NodeId), /// inflation assignment (seal or state) for node {0} contains confidential /// data InflationAssignmentConfidential(NodeId), /// Internal data inconsistency, as returned by the [`rgb::GraphAPI`] /// methods #[display(inner)] #[from] Inconsistency(rgb::ConsistencyError), /// not of all epochs referenced in burn or burn & replace operation /// history are known from the consignment NotAllEpochsExposed, } /// Detailed RGB20 asset information /// /// Structure presents complete set of RGB20 asset-related data which can be /// extracted from the genesis or a consignment. It is not the source of the /// truth, and the presence of the data in the structure does not imply their /// validity, since the structure constructor does not validates blockchain or /// LN-based transaction commitments or satisfaction of schema requirements. /// /// The main reason of the structure is: /// 1) to persist *cached* copy of the asset data without the requirement to /// parse all stash transition each time in order to extract allocation /// information; /// 2) to present data from asset genesis or consignment for UI in convenient /// form. /// 3) to orchestrate generation of new state transitions taking into account /// known asset information. /// /// (1) is important for wallets, (2) is for more generic software, like /// client-side-validated data explorers, developer & debugging tools etc and /// (3) for asset-management software. /// /// In both (2) and (3) case there is no need to persist the structure; genesis /// /consignment should be persisted instead and the structure must be /// reconstructed each time from that data upon the launch #[cfg_attr( feature = "serde", derive(Serialize, Deserialize), serde(crate = "serde_crate", rename_all = "camelCase") )] #[derive( Clone, Getters, PartialEq, Debug, Display, StrictEncode, StrictDecode, )] #[display("{genesis_nomination} ({id})")] pub struct Asset { /// Bech32-representation of the asset genesis genesis: String, /// Asset ID, which is equal to Contract ID and genesis ID /// /// It can be used as a unique primary kep id: ContractId, /// Chain with which the asset is issued #[cfg_attr(feature = "serde", serde(with = "As::<DisplayFromStr>"))] chain: Chain, /// Asset creation data date: DateTime<Utc>, /// Names assigned to the asset at the issue time /// /// Nomination is a set of asset metadata assigned by the issuer, which /// define core asset properties: ticker, name, decimal precision, contract /// text. #[cfg_attr(feature = "serde", serde(flatten))] genesis_nomination: Nomination, /// List of all known renominations. /// /// This list does not include genesis nomination, which can be accessed /// via [`Asset::genesis_nomination`]. The last item in the list contains /// [`Asset::last_nomination`] data as a part of its renomination operation /// details. known_renominations: Vec<Renomination>, /// All issues known from the available data (stash and/or provided /// consignments) /// /// Primary issue is always the first one; the rest are provided in /// arbitrary order known_issues: Vec<Issue>, /// Single-use-seal controlling the beginning of the first epoch epoch_opening_seal: Option<OutPoint>, /// Burn & replacement epochs, organized according to the witness txid. /// /// Witness transaction must be mined for the epoch to be real. 
/// One of the inputs of this transaction MUST spend UTXO defined as a /// seal closed by this epoch ([`Epoch::closes`]) epochs: Vec<Epoch>, /// Detailed information about the asset supply (aggregated from the issue /// and burning information kept inside the epochs data) #[cfg_attr(feature = "serde", serde(flatten))] supply: Supply, /// Specifies outpoints controlling certain amounts of assets. /// /// NB: Information here does not imply that the outputs are owned by the /// current user or the owning transactions are mined/exist; this must be /// determined by the wallet and depends on specific medium (blockchain, /// LN) known_allocations: Vec<Allocation>, } impl Asset { /// Current asset ticker /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn ticker(&self) -> &str { &self.active_nomination().ticker() } /// Current asset name /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn name(&self) -> &str { &self.active_nomination().ticker() } /// Current version of the asset contract, represented in Ricardian form /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn ricardian_contract(&self) -> &str { &self.active_nomination().ticker() } /// Current decimal precision of the asset value /// /// Current value determined by the last known renomination operation – /// or, by the genesis nomination, if no renomination are known /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn decimal_precision(&self) -> u8 { *self.active_nomination().decimal_precision() } /// Returns information (in atomic value units) about specific measure of /// the asset supply, if known, or `None` otherwise pub fn precise_supply( &self, measure: SupplyMeasure, ) -> Option<AtomicValue> { Some(match measure { SupplyMeasure::KnownCirculating => *self.supply.known_circulating(), SupplyMeasure::TotalCirculating => { match self.supply.total_circulating() { None => return None, Some(supply) => supply, } } SupplyMeasure::IssueLimit => *self.supply.issue_limit(), }) } /// Returns information in form of a float number about specific measure of /// the asset supply, if known, or [`f64::NAN`] otherwise pub fn fractional_supply( &self, measure: SupplyMeasure, ) -> FractionalAmount { let value = match self.precise_supply(measure) { None => return FractionalAmount::NAN, Some(supply) => supply, }; PreciseAmount::transmutate_into(value, self.decimal_precision()) } /// Nomination resulting from the last known renomination /// /// NB: the returned result may not 
match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn last_nomination(&self) -> Option<&Nomination> { self.known_renominations.last().map(|o| o.nomination()) } /// Active nomination data. /// /// NB: the returned result may not match the current valid nomination, /// since if there were further not yet known nominations the value /// returned by this function will not match the valid data #[inline] pub fn active_nomination(&self) -> &Nomination {
/ Returns sum of all known allocations, in atomic value units #[inline] pub fn known_value(&self) -> AtomicValue { self.known_allocations.iter().map(Allocation::value).sum() } /// Returns sum of known allocation after applying `filter` function. Useful /// for filtering UTXOs owned by the current wallet. The returned value is /// in atomic units (see [`AtomicValue`] pub fn known_filtered_value<F>(&self, filter: F) -> AtomicValue where F: Fn(&Allocation) -> bool, { self.known_allocations .iter() .filter(|allocation| filter(*allocation)) .map(Allocation::value) .sum() } /// Returns sum of all known allocations, as a floating point value (see /// [`FractionalAmount`]) pub fn known_amount(&self) -> FractionalAmount { self.known_allocations .iter() .map(Allocation::value) .map(|atomic| { PreciseAmount::transmutate_into( atomic, self.decimal_precision(), ) }) .sum() } /// Returns sum of known allocation after applying `filter` function. Useful /// for filtering UTXOs owned by the current wallet. The returned amount is /// a floating point number (see [`FractionalAmount`]) pub fn known_filtered_amount<F>(&self, filter: F) -> FractionalAmount where F: Fn(&Allocation) -> bool, { self.known_allocations .iter() .filter(|allocation| filter(*allocation)) .map(Allocation::value) .map(|atomic| { PreciseAmount::transmutate_into( atomic, self.decimal_precision(), ) }) .sum() } /// Returns outpoints which when spent may indicate inflation happening /// up to specific amount. /// /// NB: Not of all inflation controlling points may be known pub fn known_inflation( &self, ) -> BTreeMap<OutPoint, (AtomicValue, Vec<u16>)> { let mut inflation_list = BTreeMap::new(); for issue in self.known_issues() { for (seal, data) in issue.inflation_assignments() { inflation_list.insert(*seal, data.clone()); } } inflation_list } #[inline] /// Lists all known allocations for the given bitcoin transaction /// [`OutPoint`] pub fn outpoint_allocations(&self, outpoint: OutPoint) -> Vec<Allocation> { self.known_allocations .iter() .filter(|a| *a.outpoint() == outpoint) .copied() .collect() } /// Adds new allocation to the list of known allocations pub fn add_allocation( &mut self, outpoint: OutPoint, node_id: NodeId, index: u16, value: value::Revealed, ) -> bool { let new_allocation = Allocation::with(node_id, index, outpoint, value); if!self.known_allocations.contains(&new_allocation) { self.known_allocations.push(new_allocation); true } else { false } } /// Adds issue to the list of known issues. This is an internal function /// which should not be used directly; instead construct the asset structure /// from the [`Consignment`] using [`Asset::try_from`] method. fn add_issue( &mut self, consignment: &Consignment, transition: &Transition, witness: Txid, ) -> Result<(), Error> { let closed_seals = consignment.seals_closed_with( transition.node_id(), OwnedRightType::Inflation, witness, )?; let issue = Issue::with(self.id, closed_seals, transition, witness)?; self.known_issues.push(issue); Ok(()) } /// Adds an epoch to the list of known epochs. This is an internal function /// which should not be used directly; instead construct the asset structure /// from the [`Consignment`] using [`Asset::try_from`] method. fn add_epoch( &mut self, consignment: &Consignment, transition: &Transition, no: usize, operations: Vec<BurnReplace>, witness: Txid, ) -> Result<(), Error> { let id = transition.node_id(); // 1. It must correctly extend known state, i.e. 
close UTXO for a seal // defined by a state transition already belonging to the asset let closed_seal = consignment .seals_closed_with(id, OwnedRightType::OpenEpoch, witness)? .into_iter() .next() .ok_or(Error::Inconsistency( rgb::ConsistencyError::NoSealsClosed( OwnedRightType::OpenEpoch.into(), id, ), ))?; let epoch = Epoch::with( self.id, no, closed_seal, transition, operations, witness, )?; self.epochs.insert(no as usize, epoch); Ok(()) } } impl TryFrom<Genesis> for Asset { type Error = Error; fn try_from(genesis: Genesis) -> Result<Self, Self::Error> { if genesis.schema_id()!= schema::schema().schema_id() { Err(Error::WrongSchemaId)?; } let genesis_meta = genesis.metadata(); let supply = *genesis_meta .u64(FieldType::IssuedSupply) .first() .ok_or(Error::UnsatisfiedSchemaRequirement)?; let mut issue_limit = 0; // Check if issue limit can be known for assignment in genesis.owned_rights_by_type(OwnedRightType::Inflation.into()) { for state in assignment.to_data_assignment_vec() { match state { Assignment::Revealed { assigned_state,.. } | Assignment::ConfidentialSeal { assigned_state,.. } => { if issue_limit < core::u64::MAX { issue_limit += assigned_state .u64() .ok_or(Error::UnsatisfiedSchemaRequirement)? }; } _ => issue_limit = core::u64::MAX, } } } let epoch_opening_seal = genesis .revealed_seals_by_type(OwnedRightType::OpenEpoch.into()) .map_err(|_| Error::EpochSealConfidential(genesis.node_id()))? .first() .copied() .map(|seal| seal.try_into()) .transpose()?; let issue = Issue::try_from(&genesis)?; let node_id = NodeId::from_inner(genesis.contract_id().into_inner()); let mut known_allocations = Vec::<Allocation>::new(); for assignment in genesis.owned_rights_by_type(OwnedRightType::Assets.into()) { assignment .to_value_assignment_vec() .into_iter() .enumerate() .for_each(|(index, assign)| { if let Assignment::Revealed { seal_definition: seal::Revealed::TxOutpoint(outpoint_reveal), assigned_state, } = assign { known_allocations.push(Allocation::with( node_id, index as u16, outpoint_reveal.into(), assigned_state, )) } }); } Ok(Asset { genesis: genesis.to_string(), id: genesis.contract_id(), chain: genesis.chain().clone(), genesis_nomination: Nomination::try_from(&genesis)?, supply: Supply::with(supply, None, issue_limit), date: DateTime::from_utc( NaiveDateTime::from_timestamp( *genesis_meta .i64(FieldType::Timestamp) .first() .ok_or(Error::UnsatisfiedSchemaRequirement)?, 0, ), Utc, ), known_renominations: empty!(), known_issues: vec![issue], // we assume that each genesis allocation with revealed amount // and known seal (they are always revealed together) belongs to us known_allocations, epochs: empty!(), epoch_opening_seal, }) } } impl TryFrom<Consignment> for Asset { type Error = Error; fn try_from(consignment: Consignment) -> Result<Self, Self::Error> { // 1. Parse genesis let mut asset: Asset = consignment.genesis.clone().try_into()?; // 2. 
Parse burn & replacement operations let mut epoch_operations: BTreeMap<NodeId, Vec<BurnReplace>> = empty!(); for transition in consignment.endpoint_transitions_by_types(&[ TransitionType::BurnAndReplace.into(), TransitionType::Burn.into(), ]) { let mut ops = consignment .chain_iter( transition.node_id(), OwnedRightType::BurnReplace.into(), ) .collect::<Vec<_>>(); ops.reverse(); if let Some((epoch, _)) = ops.pop() { let epoch_id = epoch.node_id(); let mut operations = vec![]; for (no, (transition, witness)) in ops.into_iter().enumerate() { let id = transition.node_id(); let closed_seal = consignment .seals_closed_with( id, OwnedRightType::BurnReplace, witness, )? .into_iter() .next() .ok_or(Error::Inconsistency( rgb::ConsistencyError::NoSealsClosed( OwnedRightType::BurnReplace.into(), id, ), ))?; operations.push(BurnReplace::with( asset.id, epoch_id, no, closed_seal, transition, witness, )?) } epoch_operations.insert(epoch_id, operations); } } // 3. Parse epochs let epoch_transition = consignment .endpoint_transitions_by_type(TransitionType::Epoch.into()) .into_iter() .next();
self.last_nomination().unwrap_or(&self.genesis_nomination) } //
identifier_body
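The allocation-summing methods in the record above all follow one filter-then-sum pattern. Below is a minimal, self-contained sketch of that pattern; `Allocation` here is a simplified stand-in (a `u32` tag plus a `u64` value) for the real RGB types, not the crate's actual API.

```rust
// Simplified stand-in for the RGB `Allocation` type: `outpoint` is just a
// u32 tag here rather than a real bitcoin OutPoint.
struct Allocation {
    outpoint: u32,
    value: u64, // atomic value units
}

// Filter-then-sum, mirroring the shape of `known_filtered_value` above.
fn known_filtered_value<F>(allocations: &[Allocation], filter: F) -> u64
where
    F: Fn(&Allocation) -> bool,
{
    allocations.iter().filter(|a| filter(*a)).map(|a| a.value).sum()
}

fn main() {
    let allocations = [
        Allocation { outpoint: 1, value: 100 },
        Allocation { outpoint: 2, value: 250 },
    ];
    // Keep only allocations on outpoint 2, e.g. "owned by this wallet".
    let owned = known_filtered_value(&allocations, |a| a.outpoint == 2);
    assert_eq!(owned, 250);
}
```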
csv_import_accts_txns.rs
// Copyright (c) 2017-2020, scoobybejesus // Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt use std::error::Error; use std::process; use std::fs::File; use std::cell::{RefCell}; use std::collections::{HashMap}; use std::path::PathBuf; use chrono::NaiveDate; use decimal::d128; use crate::transaction::{Transaction, ActionRecord}; use crate::account::{Account, RawAccount}; use crate::decimal_utils::{round_d128_1e8}; pub fn import_from_csv( import_file_path: PathBuf, iso_date_style: bool, separator: &String, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, action_records: &mut HashMap<u32, ActionRecord>, transactions_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let file = match File::open(import_file_path) { Ok(x) => { // println!("\nCSV ledger file opened successfully.\n"); x }, Err(e) => { println!("Invalid import_file_path"); eprintln!("System error: {}", e); std::process::exit(1); } }; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_reader(file); import_accounts(&mut rdr, raw_acct_map, acct_map)?; import_transactions( &mut rdr, iso_date_style, &separator, action_records, transactions_map, )?; Ok(()) } fn import_accounts( rdr: &mut csv::Reader<File>, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, ) -> Result<(), Box<dyn Error>> { let header1 = rdr.headers()?.clone(); // account_num let mut header2: csv::StringRecord = csv::StringRecord::new(); // name let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker let header4: csv::StringRecord; // is_margin // Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV for result in rdr.records() { // This initial iteration through records will break after the 4th row, after accounts have been created let record = result?; if header2.len() == 0 { header2 = record.clone(); continue // After header2 is set, continue to next record } else if header3.len() == 0 { header3 = record.clone(); continue // After header3 is set, continue to next record } else { header4 = record.clone(); // println!("Assigned last header, record: {:?}", record);
// A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect(); let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1. The next column's value should be 2, then 3, etc, until the final account)."; // Header row variables have been set. It's now time to set up the accounts. println!("\nCreating accounts..."); let length = &headerstrings.len(); for (idx, field) in headerstrings[3..*length].iter().enumerate() { // Parse account numbers. let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field)); // For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on. if account_num!= ((idx + 1) as u16) { println!("FATAL: CSV Import: {}", acct_num_warn); std::process::exit(1); } let ind = idx+3; // Add three because the idx skips the first three 'key' columns let name:String = header2[ind].trim().to_string(); let ticker:String = header3[ind].trim().to_string(); // no.to_uppercase() b/c margin... let margin_string = &header4.clone()[ind]; let is_margin:bool = match margin_string.to_lowercase().trim() { "no" | "non" | "false" => false, "yes" | "margin" | "true" => true, _ => { println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name); process::exit(1) } }; let just_account: RawAccount = RawAccount { account_num, name, ticker, is_margin, }; raw_acct_map.insert(account_num, just_account); let account: Account = Account { raw_key: account_num, list_of_lots: RefCell::new([].to_vec()) }; acct_map.insert(account_num, account); } break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put. } }; Ok(()) } fn import_transactions( rdr: &mut csv::Reader<File>, iso_date_style: bool, separator: &String, action_records: &mut HashMap<u32, ActionRecord>, txns_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let mut this_tx_number = 0; let mut this_ar_number = 0; let mut changed_action_records = 0; let mut changed_txn_num = Vec::new(); println!("Creating transactions..."); for result in rdr.records() { // rdr's cursor is at row 5, which is the first transaction row let record = result?; this_tx_number += 1; // First, initialize metadata fields. let mut this_tx_date: &str = ""; let mut this_proceeds: &str; let mut this_memo: &str = ""; let mut proceeds_parsed = 0f32; // Next, create action_records. let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2); let mut outgoing_ar: Option<ActionRecord> = None; let mut incoming_ar: Option<ActionRecord> = None; let mut outgoing_ar_num: Option<u32> = None; let mut incoming_ar_num: Option<u32> = None; for (idx, field) in record.iter().enumerate() { // Set metadata fields on first three fields. if idx == 0 { this_tx_date = field; } else if idx == 1 { let no_comma_string = field.replace(",", ""); proceeds_parsed = no_comma_string.parse::<f32>()?; } else if idx == 2 { this_memo = field; } // Check for empty strings. If not empty, it's a value for an action_record. 
else if field!= "" { this_ar_number += 1; let ind = idx; // starts at 3, which is the fourth field let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1 let account_key = acct_idx as u16; let amount_str = field.replace(",", ""); let mut amount = amount_str.parse::<d128>().unwrap(); // When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN, // and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected // format by removing parentheses from negatives and adding a minus sign in the front. It will also // attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD). if amount.is_nan() { let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap(); amount = b; }; if amount.is_nan() { let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap(); amount = c; }; if amount.is_nan() { println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record); std::process::exit(1); } let amount_rounded = round_d128_1e8(&amount); if amount!= amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); } let action_record = ActionRecord { account_key, amount: amount_rounded, tx_key: this_tx_number, self_ar_key: this_ar_number, movements: RefCell::new([].to_vec()), }; if amount > d128!(0.0) { incoming_ar = Some(action_record); incoming_ar_num = Some(this_ar_number); action_records_map_keys_vec.push(incoming_ar_num.unwrap()) } else { outgoing_ar = Some(action_record); outgoing_ar_num = Some(this_ar_number); action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap()) }; } } // Note: the rust Trait implementation of FromStr for f32 is capable of parsing: // '3.14' // '-3.14' // '2.5E10', or equivalently, '2.5e10' // '2.5E-10' // '5.' // '.5', or, equivalently, '0.5' // 'inf', '-inf', 'NaN' // Notable observations from the list: // (a) scientific notation is accepted // (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted // Additionally notable: // (a) the decimal separator must be a period // (b) there can be no commas // (c) there can be no currency info ($120 or 120USD, etc. will fail to parse) // In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign // // The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't // been reviewed (by me), but it is thought/hoped to follow similar parsing conventions, // though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128. fn sanitize_string_for_d128_parsing_basic(field: &str) -> String { // First, remove commas. let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formatting if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); return negative_with_minus } } almost_done } fn sanitize_string_for_d128_parsing_full(field: &str) -> String { let mut near_done = "".to_string(); // First, remove commas. 
let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formatting if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); near_done = negative_with_minus; } else { near_done = almost_done; } } else { near_done = almost_done; } // Strip non-numeric and non-period characters let all_done: String = near_done.chars() .filter(|x| x.is_numeric() | (x == &(".".as_bytes()[0] as char)) | (x == &("-".as_bytes()[0] as char))) .collect(); all_done } if let Some(incoming_ar) = incoming_ar { let x = incoming_ar_num.unwrap(); action_records.insert(x, incoming_ar); } if let Some(outgoing_ar) = outgoing_ar { let y = outgoing_ar_num.unwrap(); action_records.insert(y, outgoing_ar); } let format_yy: String; let format_yyyy: String; if iso_date_style { format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d"; format_yy = "%y".to_owned() + separator + "%m" + separator + "%d"; } else { format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y"; format_yy = "%m".to_owned() + separator + "%d" + separator + "%y"; } let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy) .unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy) .expect(" FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \ is expected to be a hyphen. The date format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \ date format options via command line flag, environment variable or .env file. Perhaps first run with `--help` or see `.env.example`.\n") ); let transaction = Transaction { tx_number: this_tx_number, date_as_string: this_tx_date.to_string(), date: tx_date, user_memo: this_memo.to_string(), proceeds: proceeds_parsed, action_record_idx_vec: action_records_map_keys_vec, }; txns_map.insert(this_tx_number, transaction); }; if changed_action_records > 0 { println!(" Changed ActionRecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num); } Ok(()) }
random_line_split
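The record above documents how amounts in accounting format (parentheses for negatives, embedded commas and spaces) are cleaned before parsing. A self-contained sketch of that cleanup, targeting `f64` here so the example runs without the `decimal` crate:

```rust
// Strip commas and spaces, then rewrite accounting negatives: "(x)" -> "-x".
// Mirrors the intent of `sanitize_string_for_d128_parsing_basic` above.
fn sanitize_basic(field: &str) -> String {
    let s: String = field.chars().filter(|c| *c != ',' && *c != ' ').collect();
    if s.starts_with('(') {
        // Accounting format: a parenthesized amount means a negative amount.
        return format!("-{}", s.trim_start_matches('(').trim_end_matches(')'));
    }
    s
}

fn main() {
    assert_eq!(sanitize_basic("(1,234.56)").parse::<f64>().unwrap(), -1234.56);
    assert_eq!(sanitize_basic("2 500").parse::<f64>().unwrap(), 2500.0);
}
```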
csv_import_accts_txns.rs
// Copyright (c) 2017-2020, scoobybejesus // Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt use std::error::Error; use std::process; use std::fs::File; use std::cell::{RefCell}; use std::collections::{HashMap}; use std::path::PathBuf; use chrono::NaiveDate; use decimal::d128; use crate::transaction::{Transaction, ActionRecord}; use crate::account::{Account, RawAccount}; use crate::decimal_utils::{round_d128_1e8}; pub fn import_from_csv( import_file_path: PathBuf, iso_date_style: bool, separator: &String, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, action_records: &mut HashMap<u32, ActionRecord>, transactions_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let file = match File::open(import_file_path) { Ok(x) => { // println!("\nCSV ledger file opened successfully.\n"); x }, Err(e) => { println!("Invalid import_file_path"); eprintln!("System error: {}", e); std::process::exit(1); } }; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_reader(file); import_accounts(&mut rdr, raw_acct_map, acct_map)?; import_transactions( &mut rdr, iso_date_style, &separator, action_records, transactions_map, )?; Ok(()) } fn import_accounts( rdr: &mut csv::Reader<File>, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, ) -> Result<(), Box<dyn Error>> { let header1 = rdr.headers()?.clone(); // account_num let mut header2: csv::StringRecord = csv::StringRecord::new(); // name let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker let header4: csv::StringRecord; // is_margin // Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV for result in rdr.records() { // This initial iteration through records will break after the 4th row, after accounts have been created let record = result?; if header2.len() == 0 { header2 = record.clone(); continue // After header2 is set, continue to next record } else if header3.len() == 0 { header3 = record.clone(); continue // After header3 is set, continue to next record } else { header4 = record.clone(); // println!("Assigned last header, record: {:?}", record); // A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect(); let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1. The next column's value should be 2, then 3, etc, until the final account)."; // Header row variables have been set. It's now time to set up the accounts. println!("\nCreating accounts..."); let length = &headerstrings.len(); for (idx, field) in headerstrings[3..*length].iter().enumerate() { // Parse account numbers. let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field)); // For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on. 
if account_num!= ((idx + 1) as u16) { println!("FATAL: CSV Import: {}", acct_num_warn); std::process::exit(1); } let ind = idx+3; // Add three because the idx skips the first three 'key' columns let name:String = header2[ind].trim().to_string(); let ticker:String = header3[ind].trim().to_string(); // no.to_uppercase() b/c margin... let margin_string = &header4.clone()[ind]; let is_margin:bool = match margin_string.to_lowercase().trim() { "no" | "non" | "false" => false, "yes" | "margin" | "true" => true, _ => { println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name); process::exit(1) } }; let just_account: RawAccount = RawAccount { account_num, name, ticker, is_margin, }; raw_acct_map.insert(account_num, just_account); let account: Account = Account { raw_key: account_num, list_of_lots: RefCell::new([].to_vec()) }; acct_map.insert(account_num, account); } break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put. } }; Ok(()) } fn import_transactions( rdr: &mut csv::Reader<File>, iso_date_style: bool, separator: &String, action_records: &mut HashMap<u32, ActionRecord>, txns_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let mut this_tx_number = 0; let mut this_ar_number = 0; let mut changed_action_records = 0; let mut changed_txn_num = Vec::new(); println!("Creating transactions..."); for result in rdr.records() { // rdr's cursor is at row 5, which is the first transaction row let record = result?; this_tx_number += 1; // First, initialize metadata fields. let mut this_tx_date: &str = ""; let mut this_proceeds: &str; let mut this_memo: &str = ""; let mut proceeds_parsed = 0f32; // Next, create action_records. let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2); let mut outgoing_ar: Option<ActionRecord> = None; let mut incoming_ar: Option<ActionRecord> = None; let mut outgoing_ar_num: Option<u32> = None; let mut incoming_ar_num: Option<u32> = None; for (idx, field) in record.iter().enumerate() { // Set metadata fields on first three fields. if idx == 0 { this_tx_date = field; } else if idx == 1 { let no_comma_string = field.replace(",", ""); proceeds_parsed = no_comma_string.parse::<f32>()?; } else if idx == 2 { this_memo = field; } // Check for empty strings. If not empty, it's a value for an action_record. else if field!= "" { this_ar_number += 1; let ind = idx; // starts at 3, which is the fourth field let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1 let account_key = acct_idx as u16; let amount_str = field.replace(",", ""); let mut amount = amount_str.parse::<d128>().unwrap(); // When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN, // and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected // format by removing parentheses from negatives and adding a minus sign in the front. It will also // attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD). 
if amount.is_nan() { let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap(); amount = b; }; if amount.is_nan() { let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap(); amount = c; }; if amount.is_nan() { println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record); std::process::exit(1); } let amount_rounded = round_d128_1e8(&amount); if amount!= amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); } let action_record = ActionRecord { account_key, amount: amount_rounded, tx_key: this_tx_number, self_ar_key: this_ar_number, movements: RefCell::new([].to_vec()), }; if amount > d128!(0.0) { incoming_ar = Some(action_record); incoming_ar_num = Some(this_ar_number); action_records_map_keys_vec.push(incoming_ar_num.unwrap()) } else { outgoing_ar = Some(action_record); outgoing_ar_num = Some(this_ar_number); action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap()) }; } } // Note: the rust Trait implementation of FromStr for f32 is capable of parsing: // '3.14' // '-3.14' // '2.5E10', or equivalently, '2.5e10' // '2.5E-10' // '5.' // '.5', or, equivalently, '0.5' // 'inf', '-inf', 'NaN' // Notable observations from the list: // (a) scientific notation is accepted // (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted // Additionally notable: // (a) the decimal separator must be a period // (b) there can be no commas // (c) there can be no currency info ($120 or 120USD, etc. will fail to parse) // In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign // // The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't // been reviewed (by me), but it is thought/hoped to follow similar parsing conventions, // though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128. fn
(field: &str) -> String { // First, remove commas. let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formatting if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); return negative_with_minus } } almost_done } fn sanitize_string_for_d128_parsing_full(field: &str) -> String { let mut near_done = "".to_string(); // First, remove commas. let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formating if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); near_done = negative_with_minus; } else { near_done = almost_done; } } else { near_done = almost_done; } // Strip non-numeric and non-period characters let all_done: String = near_done.chars() .filter(|x| x.is_numeric() | (x == &(".".as_bytes()[0] as char)) | (x == &("-".as_bytes()[0] as char))) .collect(); all_done } if let Some(incoming_ar) = incoming_ar { let x = incoming_ar_num.unwrap(); action_records.insert(x, incoming_ar); } if let Some(outgoing_ar) = outgoing_ar { let y = outgoing_ar_num.unwrap(); action_records.insert(y, outgoing_ar); } let format_yy: String; let format_yyyy: String; if iso_date_style { format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d"; format_yy = "%y".to_owned() + separator + "%m" + separator + "%d"; } else { format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y"; format_yy = "%m".to_owned() + separator + "%d" + separator + "%y"; } let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy) .unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy) .expect(" FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \ is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \ date format options via command line flag, environment variable or.env file. Perhaps first run with `--help` or see `.env.example.`\n") ); let transaction = Transaction { tx_number: this_tx_number, date_as_string: this_tx_date.to_string(), date: tx_date, user_memo: this_memo.to_string(), proceeds: proceeds_parsed, action_record_idx_vec: action_records_map_keys_vec, }; txns_map.insert(this_tx_number, transaction); }; if changed_action_records > 0 { println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num); } Ok(()) }
sanitize_string_for_d128_parsing_basic
identifier_name
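The transaction importer above tries a two-digit-year date format first and falls back to a four-digit-year format. A sketch of that fallback using the `chrono` crate the file already depends on; the separator is hard-coded to a hyphen here, whereas the real code builds the format strings from a configurable separator:

```rust
use chrono::NaiveDate;

// Try the two-digit-year pattern first, then fall back to four digits,
// matching the unwrap_or_else chain in `import_transactions` above.
fn parse_tx_date(s: &str, iso: bool) -> Option<NaiveDate> {
    let (yy, yyyy) = if iso {
        ("%y-%m-%d", "%Y-%m-%d")
    } else {
        ("%m-%d-%y", "%m-%d-%Y")
    };
    NaiveDate::parse_from_str(s, yy)
        .or_else(|_| NaiveDate::parse_from_str(s, yyyy))
        .ok()
}

fn main() {
    assert_eq!(parse_tx_date("3-15-20", false), NaiveDate::from_ymd_opt(2020, 3, 15));
    assert_eq!(parse_tx_date("2020-03-15", true), NaiveDate::from_ymd_opt(2020, 3, 15));
}
```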
csv_import_accts_txns.rs
// Copyright (c) 2017-2020, scoobybejesus // Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt use std::error::Error; use std::process; use std::fs::File; use std::cell::{RefCell}; use std::collections::{HashMap}; use std::path::PathBuf; use chrono::NaiveDate; use decimal::d128; use crate::transaction::{Transaction, ActionRecord}; use crate::account::{Account, RawAccount}; use crate::decimal_utils::{round_d128_1e8}; pub fn import_from_csv( import_file_path: PathBuf, iso_date_style: bool, separator: &String, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, action_records: &mut HashMap<u32, ActionRecord>, transactions_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let file = match File::open(import_file_path) { Ok(x) => { // println!("\nCSV ledger file opened successfully.\n"); x }, Err(e) => { println!("Invalid import_file_path"); eprintln!("System error: {}", e); std::process::exit(1); } }; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_reader(file); import_accounts(&mut rdr, raw_acct_map, acct_map)?; import_transactions( &mut rdr, iso_date_style, &separator, action_records, transactions_map, )?; Ok(()) } fn import_accounts( rdr: &mut csv::Reader<File>, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, ) -> Result<(), Box<dyn Error>> { let header1 = rdr.headers()?.clone(); // account_num let mut header2: csv::StringRecord = csv::StringRecord::new(); // name let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker let header4: csv::StringRecord; // is_margin // Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV for result in rdr.records() { // This initial iteration through records will break after the 4th row, after accounts have been created let record = result?; if header2.len() == 0 { header2 = record.clone(); continue // After header2 is set, continue to next record } else if header3.len() == 0 { header3 = record.clone(); continue // After header3 is set, continue to next record } else { header4 = record.clone(); // println!("Assigned last header, record: {:?}", record); // A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect(); let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1. The next column's value should be 2, then 3, etc, until the final account)."; // Header row variables have been set. It's now time to set up the accounts. println!("\nCreating accounts..."); let length = &headerstrings.len(); for (idx, field) in headerstrings[3..*length].iter().enumerate() { // Parse account numbers. let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field)); // For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on. 
if account_num!= ((idx + 1) as u16) { println!("FATAL: CSV Import: {}", acct_num_warn); std::process::exit(1); } let ind = idx+3; // Add three because the idx skips the first three 'key' columns let name:String = header2[ind].trim().to_string(); let ticker:String = header3[ind].trim().to_string(); // no.to_uppercase() b/c margin... let margin_string = &header4.clone()[ind]; let is_margin:bool = match margin_string.to_lowercase().trim() { "no" | "non" | "false" => false, "yes" | "margin" | "true" => true, _ => { println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name); process::exit(1) } }; let just_account: RawAccount = RawAccount { account_num, name, ticker, is_margin, }; raw_acct_map.insert(account_num, just_account); let account: Account = Account { raw_key: account_num, list_of_lots: RefCell::new([].to_vec()) }; acct_map.insert(account_num, account); } break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put. } }; Ok(()) } fn import_transactions( rdr: &mut csv::Reader<File>, iso_date_style: bool, separator: &String, action_records: &mut HashMap<u32, ActionRecord>, txns_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let mut this_tx_number = 0; let mut this_ar_number = 0; let mut changed_action_records = 0; let mut changed_txn_num = Vec::new(); println!("Creating transactions..."); for result in rdr.records() { // rdr's cursor is at row 5, which is the first transaction row let record = result?; this_tx_number += 1; // First, initialize metadata fields. let mut this_tx_date: &str = ""; let mut this_proceeds: &str; let mut this_memo: &str = ""; let mut proceeds_parsed = 0f32; // Next, create action_records. let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2); let mut outgoing_ar: Option<ActionRecord> = None; let mut incoming_ar: Option<ActionRecord> = None; let mut outgoing_ar_num: Option<u32> = None; let mut incoming_ar_num: Option<u32> = None; for (idx, field) in record.iter().enumerate() { // Set metadata fields on first three fields. if idx == 0 { this_tx_date = field; } else if idx == 1 { let no_comma_string = field.replace(",", ""); proceeds_parsed = no_comma_string.parse::<f32>()?; } else if idx == 2 { this_memo = field; } // Check for empty strings. If not empty, it's a value for an action_record. else if field!= "" { this_ar_number += 1; let ind = idx; // starts at 3, which is the fourth field let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1 let account_key = acct_idx as u16; let amount_str = field.replace(",", ""); let mut amount = amount_str.parse::<d128>().unwrap(); // When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN, // and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected // format by removing parentheses from negatives and adding a minus sign in the front. It will also // attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD). 
if amount.is_nan() { let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap(); amount = b; }; if amount.is_nan() { let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap(); amount = c; }; if amount.is_nan() { println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record); std::process::exit(1); } let amount_rounded = round_d128_1e8(&amount); if amount!= amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); } let action_record = ActionRecord { account_key, amount: amount_rounded, tx_key: this_tx_number, self_ar_key: this_ar_number, movements: RefCell::new([].to_vec()), }; if amount > d128!(0.0) { incoming_ar = Some(action_record); incoming_ar_num = Some(this_ar_number); action_records_map_keys_vec.push(incoming_ar_num.unwrap()) } else { outgoing_ar = Some(action_record); outgoing_ar_num = Some(this_ar_number); action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap()) }; } } // Note: the rust Trait implementation of FromStr for f32 is capable of parsing: // '3.14' // '-3.14' // '2.5E10', or equivalently, '2.5e10' // '2.5E-10' // '5.' // '.5', or, equivalently, '0.5' // 'inf', '-inf', 'NaN' // Notable observations from the list: // (a) scientific notation is accepted // (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted // Additionally notable: // (a) the decimal separator must be a period // (b) there can be no commas // (c) there can be no currency info ($120 or 120USD, etc. will fail to parse) // In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign // // The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't // been reviewed (by me), but it is thought/hoped to follow similar parsing conventions, // though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128. fn sanitize_string_for_d128_parsing_basic(field: &str) -> String { // First, remove commas. let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formatting if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); return negative_with_minus } } almost_done } fn sanitize_string_for_d128_parsing_full(field: &str) -> String
// Strip non-numeric and non-period characters let all_done: String = near_done.chars() .filter(|x| x.is_numeric() | (x == &(".".as_bytes()[0] as char)) | (x == &("-".as_bytes()[0] as char))) .collect(); all_done } if let Some(incoming_ar) = incoming_ar { let x = incoming_ar_num.unwrap(); action_records.insert(x, incoming_ar); } if let Some(outgoing_ar) = outgoing_ar { let y = outgoing_ar_num.unwrap(); action_records.insert(y, outgoing_ar); } let format_yy: String; let format_yyyy: String; if iso_date_style { format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d"; format_yy = "%y".to_owned() + separator + "%m" + separator + "%d"; } else { format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y"; format_yy = "%m".to_owned() + separator + "%d" + separator + "%y"; } let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy) .unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy) .expect(" FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \ is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \ date format options via command line flag, environment variable or.env file. Perhaps first run with `--help` or see `.env.example.`\n") ); let transaction = Transaction { tx_number: this_tx_number, date_as_string: this_tx_date.to_string(), date: tx_date, user_memo: this_memo.to_string(), proceeds: proceeds_parsed, action_record_idx_vec: action_records_map_keys_vec, }; txns_map.insert(this_tx_number, transaction); }; if changed_action_records > 0 { println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num); } Ok(()) }
{ let mut near_done = "".to_string(); // First, remove commas. let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formating if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); near_done = negative_with_minus; } else { near_done = almost_done; } } else { near_done = almost_done; }
identifier_body
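The amount parsing above is staged: parse the raw field, and only on failure retry with the basic and then the full sanitizer. A stand-alone sketch of the same chain; `f64` returns `Err` on bad input where `d128` returns NaN, so this version chains `or_else` instead of checking `is_nan`:

```rust
// Staged parse: raw field first, then basic cleanup, then full cleanup.
fn parse_amount(field: &str) -> Option<f64> {
    // Basic pass: drop commas and spaces.
    let basic = |s: &str| s.replace(',', "").replace(' ', "");
    // Full pass: also rewrite "(x)" as "-x" and strip currency symbols.
    let full = |s: &str| -> String {
        let s = basic(s);
        let s = if s.starts_with('(') {
            s.replace('(', "-").replace(')', "")
        } else {
            s
        };
        s.chars().filter(|c| c.is_numeric() || *c == '.' || *c == '-').collect()
    };
    field.parse::<f64>()
        .or_else(|_| basic(field).parse::<f64>())
        .or_else(|_| full(field).parse::<f64>())
        .ok()
}

fn main() {
    assert_eq!(parse_amount("1,000.5"), Some(1000.5));
    assert_eq!(parse_amount("($120)"), Some(-120.0));
    assert_eq!(parse_amount("abc"), None);
}
```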
csv_import_accts_txns.rs
// Copyright (c) 2017-2020, scoobybejesus // Redistributions must include the license: https://github.com/scoobybejesus/cryptools/blob/master/LEGAL.txt use std::error::Error; use std::process; use std::fs::File; use std::cell::{RefCell}; use std::collections::{HashMap}; use std::path::PathBuf; use chrono::NaiveDate; use decimal::d128; use crate::transaction::{Transaction, ActionRecord}; use crate::account::{Account, RawAccount}; use crate::decimal_utils::{round_d128_1e8}; pub fn import_from_csv( import_file_path: PathBuf, iso_date_style: bool, separator: &String, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, action_records: &mut HashMap<u32, ActionRecord>, transactions_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let file = match File::open(import_file_path) { Ok(x) => { // println!("\nCSV ledger file opened successfully.\n"); x }, Err(e) => { println!("Invalid import_file_path"); eprintln!("System error: {}", e); std::process::exit(1); } }; let mut rdr = csv::ReaderBuilder::new() .has_headers(true) .from_reader(file); import_accounts(&mut rdr, raw_acct_map, acct_map)?; import_transactions( &mut rdr, iso_date_style, &separator, action_records, transactions_map, )?; Ok(()) } fn import_accounts( rdr: &mut csv::Reader<File>, raw_acct_map: &mut HashMap<u16, RawAccount>, acct_map: &mut HashMap<u16, Account>, ) -> Result<(), Box<dyn Error>> { let header1 = rdr.headers()?.clone(); // account_num let mut header2: csv::StringRecord = csv::StringRecord::new(); // name let mut header3: csv::StringRecord = csv::StringRecord::new(); // ticker let header4: csv::StringRecord; // is_margin // Account Creation loop. With rdr.has_headers() set to true above, the first record here is the second row of the CSV for result in rdr.records() { // This initial iteration through records will break after the 4th row, after accounts have been created let record = result?; if header2.len() == 0 { header2 = record.clone(); continue // After header2 is set, continue to next record } else if header3.len() == 0 { header3 = record.clone(); continue // After header3 is set, continue to next record } else { header4 = record.clone(); // println!("Assigned last header, record: {:?}", record); // A StringRecord doesn't accept the same range indexing needed below, so a Vec of Strings will be used let headerstrings: Vec<String> = header1.into_iter().map(|field| field.to_string()).collect(); let acct_num_warn = "Transactions will not import correctly if account numbers in the CSV import file aren't ordered chronologically (i.e., beginning in column 4 - the 1st account column - the value should be 1. The next column's value should be 2, then 3, etc, until the final account)."; // Header row variables have been set. It's now time to set up the accounts. println!("\nCreating accounts..."); let length = &headerstrings.len(); for (idx, field) in headerstrings[3..*length].iter().enumerate() { // Parse account numbers. let account_num = field.trim().parse::<u16>().expect(&format!("Header row account number should parse into u16: {}", field)); // For now, their columns aren't remembered. Instead, they must have a particular index. 0th idx is the 1st account, and so on. 
if account_num!= ((idx + 1) as u16) { println!("FATAL: CSV Import: {}", acct_num_warn); std::process::exit(1); } let ind = idx+3; // Add three because the idx skips the first three 'key' columns let name:String = header2[ind].trim().to_string(); let ticker:String = header3[ind].trim().to_string(); // no.to_uppercase() b/c margin... let margin_string = &header4.clone()[ind]; let is_margin:bool = match margin_string.to_lowercase().trim() { "no" | "non" | "false" => false, "yes" | "margin" | "true" => true, _ => { println!("\n FATAL: CSV Import: Couldn't parse margin value for account {} {} \n",account_num, name); process::exit(1) } }; let just_account: RawAccount = RawAccount { account_num, name, ticker, is_margin, }; raw_acct_map.insert(account_num, just_account); let account: Account = Account { raw_key: account_num, list_of_lots: RefCell::new([].to_vec()) }; acct_map.insert(account_num, account); } break // This `break` exits this scope so `accounts` can be accessed in `import_transactions`. The rdr stays put. } }; Ok(()) } fn import_transactions( rdr: &mut csv::Reader<File>, iso_date_style: bool, separator: &String, action_records: &mut HashMap<u32, ActionRecord>, txns_map: &mut HashMap<u32, Transaction>, ) -> Result<(), Box<dyn Error>> { let mut this_tx_number = 0; let mut this_ar_number = 0; let mut changed_action_records = 0; let mut changed_txn_num = Vec::new(); println!("Creating transactions..."); for result in rdr.records() { // rdr's cursor is at row 5, which is the first transaction row let record = result?; this_tx_number += 1; // First, initialize metadata fields. let mut this_tx_date: &str = ""; let mut this_proceeds: &str; let mut this_memo: &str = ""; let mut proceeds_parsed = 0f32; // Next, create action_records. let mut action_records_map_keys_vec: Vec<u32> = Vec::with_capacity(2); let mut outgoing_ar: Option<ActionRecord> = None; let mut incoming_ar: Option<ActionRecord> = None; let mut outgoing_ar_num: Option<u32> = None; let mut incoming_ar_num: Option<u32> = None; for (idx, field) in record.iter().enumerate() { // Set metadata fields on first three fields. if idx == 0 { this_tx_date = field; } else if idx == 1 { let no_comma_string = field.replace(",", ""); proceeds_parsed = no_comma_string.parse::<f32>()?; } else if idx == 2 { this_memo = field; } // Check for empty strings. If not empty, it's a value for an action_record. else if field!= "" { this_ar_number += 1; let ind = idx; // starts at 3, which is the fourth field let acct_idx = ind - 2; // acct_num and acct_key would be idx + 1, so subtract 2 from ind to get 1 let account_key = acct_idx as u16; let amount_str = field.replace(",", ""); let mut amount = amount_str.parse::<d128>().unwrap(); // When parsing to a d128, it won't error; rather it'll return a NaN. It must now check for NaN, // and, if found, attempt to sanitize. These checks will convert accounting/comma format to the expected // format by removing parentheses from negatives and adding a minus sign in the front. It will also // attempt to remove empty spaces and currency symbols or designations (e.g. $ or USD). if amount.is_nan() { let b = sanitize_string_for_d128_parsing_basic(field).parse::<d128>().unwrap(); amount = b; }; if amount.is_nan()
; if amount.is_nan() { println!("FATAL: Couldn't convert amount to d128 for transaction:\n{:#?}", record); std::process::exit(1); } let amount_rounded = round_d128_1e8(&amount); if amount!= amount_rounded { changed_action_records += 1; changed_txn_num.push(this_tx_number); } let action_record = ActionRecord { account_key, amount: amount_rounded, tx_key: this_tx_number, self_ar_key: this_ar_number, movements: RefCell::new([].to_vec()), }; if amount > d128!(0.0) { incoming_ar = Some(action_record); incoming_ar_num = Some(this_ar_number); action_records_map_keys_vec.push(incoming_ar_num.unwrap()) } else { outgoing_ar = Some(action_record); outgoing_ar_num = Some(this_ar_number); action_records_map_keys_vec.insert(0, outgoing_ar_num.unwrap()) }; } } // Note: the rust Trait implementation of FromStr for f32 is capable of parsing: // '3.14' // '-3.14' // '2.5E10', or equivalently, '2.5e10' // '2.5E-10' // '5.' // '.5', or, equivalently, '0.5' // 'inf', '-inf', 'NaN' // Notable observations from the list: // (a) scientific notation is accepted // (b) accounting format (numbers in parens representing negative numbers) is not explicitly accepted // Additionally notable: // (a) the decimal separator must be a period // (b) there can be no commas // (c) there can be no currency info ($120 or 120USD, etc. will fail to parse) // In summary, it appears to only allow: (i) numeric chars, (ii) a period, and/or (iii) a minus sign // // The Decimal::d128 implementation of FromStr calls into a C library, and that lib hasn't // been reviewed (by me), but it is thought/hoped to follow similar parsing conventions, // though there's no guarantee. Nevertheless, the above notes *appear* to hold true for d128. fn sanitize_string_for_d128_parsing_basic(field: &str) -> String { // First, remove commas. let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formatting if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); return negative_with_minus } } almost_done } fn sanitize_string_for_d128_parsing_full(field: &str) -> String { let mut near_done = "".to_string(); // First, remove commas. 
let no_comma_string = field.replace(",", ""); let almost_done = no_comma_string.replace(" ", ""); // Next, if ASCII (better be), check for accounting formating if almost_done.is_ascii() { if almost_done.as_bytes()[0] == "(".as_bytes()[0] { let half_fixed = almost_done.replace("(", "-"); let negative_with_minus = half_fixed.replace(")", ""); near_done = negative_with_minus; } else { near_done = almost_done; } } else { near_done = almost_done; } // Strip non-numeric and non-period characters let all_done: String = near_done.chars() .filter(|x| x.is_numeric() | (x == &(".".as_bytes()[0] as char)) | (x == &("-".as_bytes()[0] as char))) .collect(); all_done } if let Some(incoming_ar) = incoming_ar { let x = incoming_ar_num.unwrap(); action_records.insert(x, incoming_ar); } if let Some(outgoing_ar) = outgoing_ar { let y = outgoing_ar_num.unwrap(); action_records.insert(y, outgoing_ar); } let format_yy: String; let format_yyyy: String; if iso_date_style { format_yyyy = "%Y".to_owned() + separator + "%m" + separator + "%d"; format_yy = "%y".to_owned() + separator + "%m" + separator + "%d"; } else { format_yyyy = "%m".to_owned() + separator + "%d" + separator + "%Y"; format_yy = "%m".to_owned() + separator + "%d" + separator + "%y"; } let tx_date = NaiveDate::parse_from_str(this_tx_date, &format_yy) .unwrap_or_else(|_| NaiveDate::parse_from_str(this_tx_date, &format_yyyy) .expect(" FATAL: Transaction date parsing failed. You must tell the program the format of the date in your CSV Input File. The date separator \ is expected to be a hyphen. The dating format is expected to be \"American\" (%m-%d-%y), not ISO 8601 (%y-%m-%d). You may set different \ date format options via command line flag, environment variable or.env file. Perhaps first run with `--help` or see `.env.example.`\n") ); let transaction = Transaction { tx_number: this_tx_number, date_as_string: this_tx_date.to_string(), date: tx_date, user_memo: this_memo.to_string(), proceeds: proceeds_parsed, action_record_idx_vec: action_records_map_keys_vec, }; txns_map.insert(this_tx_number, transaction); }; if changed_action_records > 0 { println!(" Changed actionrecord amounts due to rounding precision: {}. Changed txn numbers: {:?}.", changed_action_records, changed_txn_num); } Ok(()) }
{ let c = sanitize_string_for_d128_parsing_full(field).parse::<d128>().unwrap(); amount = c; }
conditional_block
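`import_accounts` above aborts unless the header's account numbers read exactly 1, 2, 3, ... across the account columns. A sketch of that ordering check over a plain slice of header fields; `validate_account_headers` is a hypothetical helper, not part of the source:

```rust
// Validate that account-number headers are 1..=N in column order,
// mirroring the `account_num != (idx + 1)` check in `import_accounts`.
fn validate_account_headers(fields: &[&str]) -> Result<Vec<u16>, String> {
    fields
        .iter()
        .enumerate()
        .map(|(idx, f)| {
            let n: u16 = f
                .trim()
                .parse()
                .map_err(|_| format!("account header not a u16: {}", f))?;
            if n != (idx + 1) as u16 {
                return Err(format!("account {} out of order at column {}", n, idx + 1));
            }
            Ok(n)
        })
        .collect()
}

fn main() {
    assert!(validate_account_headers(&["1", "2", "3"]).is_ok());
    assert!(validate_account_headers(&["1", "3"]).is_err());
}
```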
spline.rs
//! Spline curves and operations. #[cfg(feature = "serialization")] use serde_derive::{Deserialize, Serialize}; #[cfg(not(feature = "std"))] use alloc::vec::Vec; #[cfg(feature = "std")] use std::cmp::Ordering; #[cfg(feature = "std")] use std::ops::{Div, Mul}; #[cfg(not(feature = "std"))] use core::ops::{Div, Mul}; #[cfg(not(feature = "std"))] use core::cmp::Ordering; use crate::interpolate::{Interpolate, Additive, One, Trigo}; use crate::interpolation::Interpolation; use crate::key::Key; /// Spline curve used to provide interpolation between control points (keys). /// /// Splines are made out of control points ([`Key`]). When creating a [`Spline`] with /// [`Spline::from_vec`] or [`Spline::from_iter`], the keys don’t have to be sorted (they are sorted /// automatically by the sampling value). /// /// You can sample from a spline with several functions: /// /// - [`Spline::sample`]: allows you to sample from a spline. If not enough keys are available /// for the required interpolation mode, you get `None`. /// - [`Spline::clamped_sample`]: behaves like [`Spline::sample`] but will return either the first /// or last key if out of bounds; it will return `None` if there are not enough keys. #[derive(Debug, Clone)] #[cfg_attr(feature = "serialization", derive(Deserialize, Serialize))] pub struct Spline<T, V>(pub(crate) Vec<Key<T, V>>); impl<T, V> Spline<T, V> { /// Internal sort to ensure the invariant of sorted keys is valid. fn internal_sort(&mut self) where T: PartialOrd { self.0.sort_by(|k0, k1| k0.t.partial_cmp(&k1.t).unwrap_or(Ordering::Less)); } /// Create a new spline out of keys. The keys don’t have to be sorted even though it’s recommended /// to provide ascending sorted ones (for performance purposes). pub fn from_vec(keys: Vec<Key<T, V>>) -> Self where T: PartialOrd { let mut spline = Spline(keys); spline.internal_sort(); spline } /// Create a new spline by consuming an `Iterator<Item = Key<T>>`. The keys don’t have to be /// sorted. /// /// # Note on iterators /// /// It’s valid to use any iterator that implements `Iterator<Item = Key<T>>`. However, you should /// use [`Spline::from_vec`] if you are passing a [`Vec`]. pub fn from_iter<I>(iter: I) -> Self where I: Iterator<Item = Key<T, V>>, T: PartialOrd { Self::from_vec(iter.collect()) } /// Retrieve the keys of a spline. pub fn keys(&self) -> &[Key<T, V>] { &self.0 } /// Number of keys. #[inline(always)] pub fn len(&self) -> usize { self.0.len() } /// Check whether the spline has no key. #[inline(always)] pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Sample a spline at a given time, returning the interpolated value along with its associated /// key. /// /// The current implementation, based on immutability, cannot perform in constant time. This means /// that sampling’s processing complexity is currently *O(log n)*. It’s possible to achieve *O(1)* /// performance by using a slightly different spline type. If you are interested in this feature, /// an implementation for a dedicated type is foreseen but not yet started. /// /// # Return /// /// `None` if you try to sample a value at a time that has no key associated with it. That can also /// happen if you try to sample between two keys with a specific interpolation mode that makes the /// sampling impossible. For instance, [`Interpolation::CatmullRom`] requires *four* keys. If /// you’re near the beginning of the spline or its end, ensure you have enough keys around to make /// the sampling. 
pub fn sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)> where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd, V: Interpolate<T> { let keys = &self.0; let i = search_lower_cp(keys, t)?; let cp0 = &keys[i]; match cp0.interpolation { Interpolation::Step(threshold) => { let cp1 = &keys[i + 1]; let nt = normalize_time(t, cp0, cp1); let value = if nt < threshold { cp0.value } else { cp1.value }; Some((value, cp0, Some(cp1))) } Interpolation::Linear => { let cp1 = &keys[i + 1]; let nt = normalize_time(t, cp0, cp1); let value = Interpolate::lerp(cp0.value, cp1.value, nt); Some((value, cp0, Some(cp1))) } Interpolation::Cosine => { let two_t = T::one() + T::one(); let cp1 = &keys[i + 1]; let nt = normalize_time(t, cp0, cp1); let cos_nt = (T::one() - (nt * T::pi()).cos()) / two_t; let value = Interpolate::lerp(cp0.value, cp1.value, cos_nt); Some((value, cp0, Some(cp1))) } Interpolation::CatmullRom => { // We need at least four points for Catmull Rom; ensure we have them, otherwise, return // None. if i == 0 || i >= keys.len() - 2 { None } else { let cp1 = &keys[i + 1]; let cpm0 = &keys[i - 1]; let cpm1 = &keys[i + 2]; let nt = normalize_time(t, cp0, cp1); let value = Interpolate::cubic_hermite((cpm0.value, cpm0.t), (cp0.value, cp0.t), (cp1.value, cp1.t), (cpm1.value, cpm1.t), nt); Some((value, cp0, Some(cp1))) } } Interpolation::Bezier(u) => { // We need to check the next control point to see whether we want quadratic or cubic Bezier. let cp1 = &keys[i + 1]; let nt = normalize_time(t, cp0, cp1); let value = if let Interpolation::Bezier(v) = cp1.interpolation { Interpolate::cubic_bezier(cp0.value, u, v, cp1.value, nt) } else { Interpolate::quadratic_bezier(cp0.value, u, cp1.value, nt) }; Some((value, cp0, Some(cp1))) } Interpolation::StrokeBezier(input, output) => { let cp1 = &keys[i + 1]; let nt = normalize_time(t, cp0, cp1); let value = Interpolate::cubic_bezier(cp0.value, input, output, cp1.value, nt); Some((value, cp0, Some(cp1))) } Interpolation::__NonExhaustive => unreachable!(), } } /// Sample a spline at a given time. /// pub fn sample(&self, t: T) -> Option<V> where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd, V: Interpolate<T> { self.sample_with_key(t).map(|(v, _, _)| v) } /// Sample a spline at a given time with clamping, returning the interpolated value along with its /// associated key. /// /// # Return /// /// If you sample before the first key or after the last one, return the first key or the last /// one, respectively. Otherwise, behave the same way as [`Spline::sample`]. /// /// # Error /// /// This function returns [`None`] if you have no key. pub fn clamped_sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)> where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd, V: Interpolate<T> { if self.0.is_empty() { return None; } self.sample_with_key(t).or_else(move || { let first = self.0.first().unwrap(); if t <= first.t { let second = if self.0.len() >= 2 { Some(&self.0[1]) } else { None }; Some((first.value, &first, second)) } else { let last = self.0.last().unwrap(); if t >= last.t { Some((last.value, &last, None)) } else { None } } }) } /// Sample a spline at a given time with clamping. pub fn clamped_sample(&self, t: T) -> Option<V> where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd, V: Interpolate<T> { self.clamped_sample_with_key(t).map(|(v, _, _)| v) } /// Add a key into the spline. 
pub fn add(&mut self, key: Key<T, V>) where T: PartialOrd { self.0.push(key); self.internal_sort(); } /// Remove a key from the spline. pub fn remove(&mut self, index: usize) -> Option<Key<T, V>> { if index >
key and return the key already present. /// /// The key is updated, if present, with the provided function. /// /// # Notes /// /// That function makes sense only if you want to change the interpolator (i.e. [`Key::t`]) of /// your key. If you just want to change the interpolation mode or the carried value, consider /// using the [`Spline::get_mut`] method instead as it will be way faster. pub fn replace<F>( &mut self, index: usize, f: F ) -> Option<Key<T, V>> where F: FnOnce(&Key<T, V>) -> Key<T, V>, T: PartialOrd { let key = self.remove(index)?; self.add(f(&key)); Some(key) } /// Get a key at a given index. pub fn get(&self, index: usize) -> Option<&Key<T, V>> { self.0.get(index) } /// Mutably get a key at a given index. pub fn get_mut(&mut self, index: usize) -> Option<KeyMut<T, V>> { self.0.get_mut(index).map(|key| KeyMut { value: &mut key.value, interpolation: &mut key.interpolation }) } } /// A mutable [`Key`]. /// /// Mutable keys allow editing the carried values and the interpolation mode but not the actual /// interpolator value, as that would invalidate the internal structure of the [`Spline`]. If you /// want to achieve this, you’re advised to use [`Spline::replace`]. pub struct KeyMut<'a, T, V> { /// Carried value. pub value: &'a mut V, /// Interpolation mode to use for that key. pub interpolation: &'a mut Interpolation<T, V>, } // Normalize a time ([0;1]) given two control points. #[inline(always)] pub(crate) fn normalize_time<T, V>( t: T, cp: &Key<T, V>, cp1: &Key<T, V> ) -> T where T: Additive + Div<T, Output = T> + PartialEq { assert!(cp1.t != cp.t, "overlapping keys"); (t - cp.t) / (cp1.t - cp.t) } // Find the lower control point corresponding to a given time. fn search_lower_cp<T, V>(cps: &[Key<T, V>], t: T) -> Option<usize> where T: PartialOrd { let mut i = 0; let len = cps.len(); if len < 2 { return None; } loop { let cp = &cps[i]; let cp1 = &cps[i + 1]; if t >= cp1.t { if i >= len - 2 { return None; } i += 1; } else if t < cp.t { if i == 0 { return None; } i -= 1; } else { break; // found } } Some(i) }
= self.0.len() { None } else { Some(self.0.remove(index)) } } /// Update a
identifier_body
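`normalize_time` in the record above maps a sampling time onto [0;1] between two control points before interpolating. A self-contained sketch with plain `f32` keys in place of the generic `Key<T, V>`, followed by the linear interpolation it feeds:

```rust
// Simplified f32 control point standing in for the generic `Key<T, V>`.
struct Key {
    t: f32,
    value: f32,
}

// Map t from [cp0.t, cp1.t] onto [0, 1], as `normalize_time` does above.
fn normalize_time(t: f32, cp0: &Key, cp1: &Key) -> f32 {
    assert!(cp1.t != cp0.t, "overlapping keys");
    (t - cp0.t) / (cp1.t - cp0.t)
}

fn main() {
    let (a, b) = (Key { t: 2.0, value: 10.0 }, Key { t: 4.0, value: 20.0 });
    let nt = normalize_time(3.0, &a, &b);
    let sampled = a.value + (b.value - a.value) * nt; // lerp on the normalized time
    assert_eq!(sampled, 15.0);
}
```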
spline.rs
//! Spline curves and operations. #[cfg(feature = "serialization")] use serde_derive::{Deserialize, Serialize}; #[cfg(not(feature = "std"))] use alloc::vec::Vec; #[cfg(feature = "std")] use std::cmp::Ordering; #[cfg(feature = "std")] use std::ops::{Div, Mul}; #[cfg(not(feature = "std"))] use core::ops::{Div, Mul}; #[cfg(not(feature = "std"))] use core::cmp::Ordering; use crate::interpolate::{Interpolate, Additive, One, Trigo}; use crate::interpolation::Interpolation; use crate::key::Key; /// Spline curve used to provide interpolation between control points (keys). /// /// Splines are made out of control points ([`Key`]). When creating a [`Spline`] with /// [`Spline::from_vec`] or [`Spline::from_iter`], the keys don’t have to be sorted (they are sorted /// automatically by the sampling value). /// /// You can sample from a spline with several functions: /// /// - [`Spline::sample`]: allows you to sample from a spline. If not enough keys are available /// for the required interpolation mode, you get `None`. /// - [`Spline::clamped_sample`]: behaves like [`Spline::sample`] but will return either the first /// or last key if out of bounds; it will return `None` if there are not enough keys. #[derive(Debug, Clone)] #[cfg_attr(feature = "serialization", derive(Deserialize, Serialize))] pub struct Spline<T, V>(pub(crate) Vec<Key<T, V>>); impl<T, V> Spline<T, V> { /// Internal sort to ensure the invariant of sorted keys is valid. fn internal_sort(&mut self) where T: PartialOrd { self.0.sort_by(|k0, k1| k0.t.partial_cmp(&k1.t).unwrap_or(Ordering::Less)); } /// Create a new spline out of keys. The keys don’t have to be sorted even though it’s recommended /// to provide ascending sorted ones (for performance purposes). pub fn from_vec(keys: Vec<Key<T, V>>) -> Self where T: PartialOrd { let mut spline = Spline(keys); spline.internal_sort(); spline } /// Create a new spline by consuming an `Iterator<Item = Key<T>>`. The keys don’t have to be /// sorted. /// /// # Note on iterators /// /// It’s valid to use any iterator that implements `Iterator<Item = Key<T>>`. However, you should /// use [`Spline::from_vec`] if you are passing a [`Vec`]. pub fn from_iter<I>(iter: I) -> Self where I: Iterator<Item = Key<T, V>>, T: PartialOrd { Self::from_vec(iter.collect()) } /// Retrieve the keys of a spline. pub fn keys(&self) -> &[Key<T, V>] { &self.0 } /// Number of keys. #[inline(always)] pub fn len(&self) -> usize { self.0.len() } /// Check whether the spline has no key. #[inline(always)] pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Sample a spline at a given time, returning the interpolated value along with its associated /// key. /// /// The current implementation, based on immutability, cannot perform in constant time. This means /// that sampling’s processing complexity is currently *O(log n)*. It’s possible to achieve *O(1)* /// performance by using a slightly different spline type. If you are interested in this feature, /// an implementation for a dedicated type is foreseen but not yet started. /// /// # Return /// /// `None` if you try to sample a value at a time that has no key associated with it. That can also /// happen if you try to sample between two keys with a specific interpolation mode that makes the /// sampling impossible. For instance, [`Interpolation::CatmullRom`] requires *four* keys. If /// you’re near the beginning of the spline or its end, ensure you have enough keys around to make /// the sampling. 
  pub fn sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
  where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
        V: Interpolate<T> {
    let keys = &self.0;
    let i = search_lower_cp(keys, t)?;
    let cp0 = &keys[i];

    match cp0.interpolation {
      Interpolation::Step(threshold) => {
        let cp1 = &keys[i + 1];
        let nt = normalize_time(t, cp0, cp1);
        let value = if nt < threshold { cp0.value } else { cp1.value };
        Some((value, cp0, Some(cp1)))
      }

      Interpolation::Linear => {
        let cp1 = &keys[i + 1];
        let nt = normalize_time(t, cp0, cp1);
        let value = Interpolate::lerp(cp0.value, cp1.value, nt);
        Some((value, cp0, Some(cp1)))
      }

      Interpolation::Cosine => {
        let two_t = T::one() + T::one();
        let cp1 = &keys[i + 1];
        let nt = normalize_time(t, cp0, cp1);
        let cos_nt = (T::one() - (nt * T::pi()).cos()) / two_t;
        let value = Interpolate::lerp(cp0.value, cp1.value, cos_nt);
        Some((value, cp0, Some(cp1)))
      }

      Interpolation::CatmullRom => {
        // We need at least four points for Catmull-Rom; ensure we have them, otherwise return
        // None.
        if i == 0 || i >= keys.len() - 2 {
          None
        } else {
          let cp1 = &keys[i + 1];
          let cpm0 = &keys[i - 1];
          let cpm1 = &keys[i + 2];
          let nt = normalize_time(t, cp0, cp1);
          let value = Interpolate::cubic_hermite(
            (cpm0.value, cpm0.t),
            (cp0.value, cp0.t),
            (cp1.value, cp1.t),
            (cpm1.value, cpm1.t),
            nt,
          );
          Some((value, cp0, Some(cp1)))
        }
      }

      Interpolation::Bezier(u) => {
        // We need to check the next control point to see whether we want quadratic or cubic
        // Bezier.
        let cp1 = &keys[i + 1];
        let nt = normalize_time(t, cp0, cp1);
        let value = if let Interpolation::Bezier(v) = cp1.interpolation {
          Interpolate::cubic_bezier(cp0.value, u, v, cp1.value, nt)
        } else {
          Interpolate::quadratic_bezier(cp0.value, u, cp1.value, nt)
        };
        Some((value, cp0, Some(cp1)))
      }

      Interpolation::StrokeBezier(input, output) => {
        let cp1 = &keys[i + 1];
        let nt = normalize_time(t, cp0, cp1);
        let value = Interpolate::cubic_bezier(cp0.value, input, output, cp1.value, nt);
        Some((value, cp0, Some(cp1)))
      }

      Interpolation::__NonExhaustive => unreachable!(),
    }
  }

  /// Sample a spline at a given time.
  pub fn sample(&self, t: T) -> Option<V>
  where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
        V: Interpolate<T> {
    self.sample_with_key(t).map(|(v, _, _)| v)
  }

  /// Sample a spline at a given time with clamping, returning the interpolated value along with
  /// its associated key.
  ///
  /// # Return
  ///
  /// If you sample before the first key or after the last one, the first or the last key's
  /// value is returned, respectively. Otherwise, this behaves the same way as
  /// [`Spline::sample`].
  ///
  /// # Error
  ///
  /// This function returns [`None`] if you have no key.
  pub fn clamped_sample_with_key(&self, t: T) -> Option<(V, &Key<T, V>, Option<&Key<T, V>>)>
  where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
        V: Interpolate<T> {
    if self.0.is_empty() {
      return None;
    }

    self.sample_with_key(t).or_else(move || {
      let first = self.0.first().unwrap();

      if t <= first.t {
        let second = if self.0.len() >= 2 { Some(&self.0[1]) } else { None };
        Some((first.value, first, second))
      } else {
        let last = self.0.last().unwrap();

        if t >= last.t {
          Some((last.value, last, None))
        } else {
          None
        }
      }
    })
  }

  /// Sample a spline at a given time with clamping.
  pub fn clamped_sample(&self, t: T) -> Option<V>
  where T: Additive + One + Trigo + Mul<T, Output = T> + Div<T, Output = T> + PartialOrd,
        V: Interpolate<T> {
    self.clamped_sample_with_key(t).map(|(v, _, _)| v)
  }

  /// Add a key into the spline.
  pub fn add(&mut self, key: Key<T, V>) where T: PartialOrd {
    self.0.push(key);
    self.internal_sort();
  }

  /// Remove a key from the spline.
  pub fn remove(&mut self, index: usize) -> Option<Key<T, V>> {
    if index >= self.0.len() {
      None
    } else {
      Some(self.0.remove(index))
    }
  }

  /// Update a key and return the key that was previously present.
  ///
  /// The key is updated, if present, with the provided function.
  ///
  /// # Notes
  ///
  /// That function makes sense only if you want to change the interpolator (i.e. [`Key::t`]) of
  /// your key. If you just want to change the interpolation mode or the carried value, consider
  /// using the [`Spline::get_mut`] method instead, as it will be way faster.
  pub fn replace<F>(&mut self, index: usize, f: F) -> Option<Key<T, V>>
  where F: FnOnce(&Key<T, V>) -> Key<T, V>, T: PartialOrd {
    let key = self.remove(index)?;
    self.add(f(&key));
    Some(key)
  }

  /// Get a key at a given index.
  pub fn get(&self, index: usize) -> Option<&Key<T, V>> {
    self.0.get(index)
  }

  /// Mutably get a key at a given index.
  pub fn get_mut(&mut self, index: usize) -> Option<KeyMut<T, V>> {
    self.0.get_mut(index).map(|key| KeyMut {
      value: &mut key.value,
      interpolation: &mut key.interpolation,
    })
  }
}

/// A mutable [`Key`].
///
/// Mutable keys allow editing the carried value and the interpolation mode, but not the actual
/// interpolator value, as that would invalidate the internal structure of the [`Spline`]. If you
/// want to achieve this, you’re advised to use [`Spline::replace`].
pub struct KeyMut<'a, T, V> {
  /// Carried value.
  pub value: &'a mut V,
  /// Interpolation mode to use for that key.
  pub interpolation: &'a mut Interpolation<T, V>,
}

// Normalize a time ([0;1]) given two control points.
#[inline(always)]
pub(crate) fn normalize_time<T, V>(t: T, cp: &Key<T, V>, cp1: &Key<T, V>) -> T
where T: Additive + Div<T, Output = T> + PartialEq {
  assert!(cp1.t != cp.t, "overlapping keys");
  (t - cp.t) / (cp1.t - cp.t)
}

// Find the lower control point corresponding to a given time.
fn search_lower_cp<T, V>(cps: &[Key<T, V>], t: T) -> Option<usize> where T: PartialOrd {
  let mut i = 0;
  let len = cps.len();

  if len < 2 {
    return None;
  }

  loop {
    let cp = &cps[i];
    let cp1 = &cps[i + 1];

    if t >= cp1.t {
      if i >= len - 2 {
        return None;
      }

      i += 1;
    } else if t < cp.t {
      if i == 0 {
        return None;
      }

      i -= 1;
    } else {
      break; // found
    }
  }

  Some(i)
}
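The sampling rules above are easier to see in a tiny program. Below is a minimal usage sketch; it assumes this file is the spline module of the published `splines` crate and that `Key::new(t, value, interpolation)` is its usual constructor (both are assumptions, not shown in this file):

use splines::{Interpolation, Key, Spline};

fn main() {
  // Keys may be passed unsorted; `from_vec` sorts them by `t`.
  let spline = Spline::from_vec(vec![
    Key::new(1.0, 10.0, Interpolation::Linear),
    Key::new(0.0, 0.0, Interpolation::Linear),
  ]);

  // Inside the key range, `sample` interpolates linearly between the keys.
  assert_eq!(spline.sample(0.5), Some(5.0));

  // Past the last key there is no lower control point, so `sample` yields
  // `None`, while `clamped_sample` falls back to the last key's value.
  assert_eq!(spline.sample(2.0), None);
  assert_eq!(spline.clamped_sample(2.0), Some(10.0));
}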
identifier_name
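The editing API divides along the sort invariant: `get_mut` only exposes the fields that cannot break the key ordering, while moving a key on the time axis goes through `replace`, which re-sorts. A sketch under the same `splines`-crate assumptions as above:

use splines::{Interpolation, Key, Spline};

fn main() {
  let mut spline = Spline::from_vec(vec![
    Key::new(0.0, 0.0, Interpolation::Linear),
    Key::new(1.0, 10.0, Interpolation::Linear),
  ]);

  // `get_mut` yields a `KeyMut`: the carried value and the interpolation
  // mode are editable, but `t` is not, so the sorted invariant holds.
  if let Some(key) = spline.get_mut(1) {
    *key.value = 20.0;
    *key.interpolation = Interpolation::Cosine;
  }

  // Changing `t` requires `replace`, which removes the key, rebuilds it via
  // the closure, and re-adds it (re-sorting in the process).
  spline.replace(1, |key| Key::new(2.0, key.value, Interpolation::Linear));
  assert_eq!(spline.keys().last().map(|k| k.t), Some(2.0));
}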
ourairports.rs
use serde::de::{self, Unexpected};
use serde::{Deserialize, Deserializer, Serialize};

/// Contains a record of a single airport.
#[derive(Deserialize, Serialize)]
pub struct Airport {
    /// Internal OurAirports integer identifier for the airport.
    /// This will stay persistent, even if the airport code changes.
    id: String,
    /// The text identifier used in the OurAirports URL.
    /// This will be the ICAO code if available. Otherwise, it will be a local airport code (if
    /// no conflict), or if nothing else is available, an internally-generated code starting with
    /// the ISO2 country code, followed by a dash and a four-digit number.
    ident: String,
    /// The type of the airport.
    /// Allowed values are "closed_airport", "heliport", "large_airport", "medium_airport",
    /// "seaplane_base", and "small_airport".
    #[serde(rename = "type")]
    airport_type: String,
    /// The official airport name, including "Airport", "Airstrip", etc.
    name: String,
    /// The airport latitude in decimal degrees (positive for north).
    latitude_deg: f64,
    /// The airport longitude in decimal degrees (positive for east).
    longitude_deg: f64,
    /// The airport elevation MSL in feet (*not* metres). None if unavailable.
    elevation_ft: Option<i32>,
    /// The code for the continent where the airport is (primarily) located.
    /// Allowed values are "AF" (Africa), "AN" (Antarctica), "AS" (Asia), "EU" (Europe),
    /// "NA" (North America), "OC" (Oceania), or "SA" (South America).
    continent: String,
    /// The two-character ISO 3166:1-alpha2 code for the country where the airport is
    /// (primarily) located.
    /// A handful of unofficial, non-ISO codes are also in use, such as "XK" for Kosovo.
    iso_country: String,
    /// An alphanumeric code for the high-level administrative subdivision of a country where
    /// the airport is primarily located (e.g. province, governorate), prefixed by the ISO2
    /// country code and a hyphen.
    /// OurAirports uses ISO 3166:2 codes whenever possible, preferring higher administrative
    /// levels, but also includes some custom codes.
    iso_region: String,
    /// The primary municipality that the airport serves (when available).
    /// Note that this is *not* necessarily the municipality where the airport is physically
    /// located.
    municipality: String,
    /// `true` if the airport currently has scheduled airline service; `false` otherwise.
    #[serde(deserialize_with = "bool_from_str")]
    scheduled_service: bool,
    /// The code that an aviation GPS database (such as Jeppesen's or Garmin's) would normally
    /// use for the airport. This will always be the ICAO code if one exists.
    /// Note that, unlike the `ident` column, this is *not* guaranteed to be globally unique.
    gps_code: String,
    /// The three-letter IATA code for the airport (if it has one).
    iata_code: String,
    /// The local country code for the airport, if different from the `gps_code` and `iata_code`
    /// fields (used mainly for US airports).
    local_code: String,
    /// URL of the airport's official home page on the web, if one exists.
    home_link: String,
    /// URL of the airport's page on Wikipedia, if one exists.
    wikipedia_link: String,
    /// Extra keywords/phrases to assist with search, as a Vec.
    /// May include former names for the airport, alternate codes, names in other languages,
    /// nearby tourist destinations, etc.
    #[serde(deserialize_with = "vec_string_from_string")]
    keywords: Vec<String>,
}

/// Contains information about a single airport radio frequency
/// for voice communication (radio navigation aids appear in the `Navaid` struct).
#[derive(Deserialize, Serialize)]
pub struct AirportFrequency {
    /// Internal OurAirports integer identifier for the frequency.
    /// This will stay persistent, even if the radio frequency or description changes.
    id: String,
    /// Internal integer foreign key matching the `id` column for the associated airport in the
    /// `Airport` struct.
    /// (`airport_ident` is a better alternative.)
    airport_ref: String,
    /// Externally-visible string foreign key matching the `ident` column for the associated
    /// airport in `Airport`.
    airport_ident: String,
    /// A code for the frequency type.
    /// This isn't (currently) a controlled vocabulary, but probably will be soon.
    /// Some common values are "TWR" (tower), "ATF" or "CTAF" (common traffic frequency),
    /// "GND" (ground control), "RMP" (ramp control), "ATIS" (automated weather), "RCO" (remote
    /// radio outlet), "ARR" (arrivals), "DEP" (departures), "UNICOM" (monitored ground
    /// station), and "RDO" (a flight-service station).
    #[serde(rename = "type")]
    frequency_type: String,
    /// A description of the frequency, typically the way a pilot would open a call on it.
    description: String,
    /// Radio voice frequency in megahertz.
    /// Note that the same frequency may appear multiple times for an airport, serving different
    /// functions.
    frequency_mhz: String,
}

/// Contains information about a single landing surface.
#[derive(Deserialize, Serialize)]
pub struct Runway {
    /// Internal OurAirports integer identifier for the runway.
    /// This will stay persistent, even if the runway numbering changes.
    id: String,
    /// Internal integer foreign key matching the `id` column for the associated airport in
    /// airports.csv. (`airport_ident` is a better alternative.)
    airport_ref: String,
    /// Externally-visible string foreign key matching the `ident` column for the associated
    /// airport in airports.csv.
    airport_ident: String,
    /// Length of the full runway surface (including displaced thresholds, overrun areas, etc.)
    /// in feet.
    length_ft: Option<u32>,
    /// Width of the runway surface in feet.
    width_ft: Option<u32>,
    /// Code for the runway surface type.
    /// This is not yet a controlled vocabulary, but probably will be soon.
    /// Some common values include "ASP" (asphalt), "TURF" (turf), "CON" (concrete),
    /// "GRS" (grass), "GRE" (gravel), "WATER" (water), and "UNK" (unknown).
    surface: String,
    /// `true` if the surface is lighted at night, `false` otherwise.
    #[serde(deserialize_with = "bool_from_str")]
    lighted: bool,
    /// `true` if the runway surface is currently closed, `false` otherwise.
    #[serde(deserialize_with = "bool_from_str")]
    closed: bool,
    /// Identifier for the low-numbered end of the runway.
    le_ident: String,
    /// Latitude of the centre of the low-numbered end of the runway, in decimal degrees
    /// (positive is north), if available.
    le_latitude_deg: Option<f64>,
    /// Longitude of the centre of the low-numbered end of the runway, in decimal degrees
    /// (positive is east), if available.
    le_longitude_deg: Option<f64>,
    /// Elevation above MSL of the low-numbered end of the runway in feet.
    le_elevation_ft: Option<i32>,
    /// Heading of the low-numbered end of the runway in degrees true (*not* magnetic).
    #[serde(rename = "le_heading_degT")]
    le_heading_deg_true: Option<f64>,
    /// Length of the displaced threshold (if any) for the low-numbered end of the runway, in
    /// feet.
    le_displaced_threshold_ft: Option<i32>,
    /// Identifier for the high-numbered end of the runway.
    he_ident: String,
    /// Latitude of the centre of the high-numbered end of the runway, in decimal degrees
    /// (positive is north), if available.
    he_latitude_deg: Option<f64>,
    /// Longitude of the centre of the high-numbered end of the runway, in decimal degrees
    /// (positive is east), if available.
    he_longitude_deg: Option<f64>,
    /// Elevation above MSL of the high-numbered end of the runway in feet.
    he_elevation_ft: Option<i32>,
    /// Heading of the high-numbered end of the runway in degrees true (*not* magnetic).
    #[serde(rename = "he_heading_degT")]
    he_heading_deg_true: Option<f64>,
    /// Length of the displaced threshold (if any) for the high-numbered end of the runway, in
    /// feet.
    he_displaced_threshold_ft: Option<i32>,
}

/// Represents a single radio navigation aid (navaid).
#[derive(Deserialize, Serialize)]
pub struct Navaid {
    /// Internal OurAirports integer identifier for the navaid.
    /// This will stay persistent, even if the navaid identifier or frequency changes.
    id: String,
    /// A unique string identifier constructed from the navaid name and country, and used in the
    /// OurAirports URL.
    filename: String,
    /// The 1-3 character identifier that the navaid transmits.
    ident: String,
    /// The name of the navaid, excluding its type.
    name: String,
    /// The type of the navaid. Options are "DME", "NDB", "NDB-DME", "TACAN", "VOR", "VOR-DME",
    /// or "VORTAC".
    /// See the [map legend](https://ourairports.com/help/data-dictionary.html#navaids) for more
    /// information about each type.
    #[serde(rename = "type")]
    navaid_type: String,
    /// The frequency of the navaid in *kilohertz*.
    /// If the navaid operates on the VHF band (VOR, VOR-DME) or operates on the UHF band with a
    /// paired VHF frequency (DME, TACAN, VORTAC), divide this number by 1,000 to get the
    /// frequency in megahertz (for example, 115300 kHz is 115.3 MHz).
    /// For an NDB or NDB-DME, you can use this frequency directly.
    frequency_khz: String,
    /// The latitude of the navaid in decimal degrees (negative for south).
    latitude_deg: Option<f64>,
    /// The longitude of the navaid in decimal degrees (negative for west).
    longitude_deg: Option<f64>,
    /// The navaid's elevation MSL in feet (*not* metres).
    elevation_ft: Option<i32>,
    /// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes)
    /// for the country that operates the navaid.
    /// A handful of unofficial, non-ISO codes are also in use, such as "XK" for
    /// [Kosovo](https://ourairports.com/countries/XK/).
    iso_country: String,
    /// The paired VHF frequency for the DME (or TACAN) in kilohertz.
    /// Divide by 1,000 to get the paired VHF frequency in megahertz (e.g. 115.3 MHz).
    dme_frequency_khz: String,
    /// The DME channel (an alternative way of tuning distance-measuring equipment).
    dme_channel: String,
    /// The latitude of the associated DME in decimal degrees (negative for south). If missing,
    /// assume that the value is the same as `latitude_deg`.
    dme_latitude_deg: Option<f64>,
    /// The longitude of the associated DME in decimal degrees (negative for west). If missing,
    /// assume that the value is the same as `longitude_deg`.
    dme_longitude_deg: Option<f64>,
    /// The associated DME transmitter's elevation MSL in feet. If missing, assume that it's the
    /// same value as `elevation_ft`.
    dme_elevation_ft: Option<i32>,
    /// The magnetic variation adjustment built into a VOR's, VOR-DME's, or TACAN's radials.
    /// Positive means east (added to the true direction), and negative means west (subtracted
    /// from the true direction).
    /// This will not usually be the same as `magnetic_variation_deg` because the magnetic pole
    /// is constantly in motion.
    slaved_variation_deg: Option<f64>,
    /// The actual magnetic variation at the navaid's location.
    /// Positive means east (added to the true direction), and negative means west (subtracted
    /// from the true direction).
    magnetic_variation_deg: Option<f64>,
    /// The primary function of the navaid in the airspace system.
    /// Options include "HI" (high-altitude airways, at or above flight level 180), "LO"
    /// (low-altitude airways), "BOTH" (high- and low-altitude airways), "TERM" (terminal-area
    /// navigation only), and "RNAV" (non-GPS area navigation).
    #[serde(rename = "usageType")]
    usage_type: String,
    /// The power-output level of the navaid.
    /// Options include "HIGH", "MEDIUM", "LOW", and "UNKNOWN".
    power: String,
    /// The OurAirports text identifier (usually the ICAO code) for an airport associated with
    /// the navaid.
    /// Links to the `ident` column in airports.csv.
    associated_airport: String,
}

/// Represents a country or country-like entity (e.g. Hong Kong).
#[derive(Deserialize, Serialize)]
pub struct Country {
    /// Internal OurAirports integer identifier for the country.
    /// This will stay persistent, even if the country name or code changes.
    id: String,
    /// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes)
    /// for the country.
    /// A handful of unofficial, non-ISO codes are also in use, such as "XK" for
    /// [Kosovo](https://ourairports.com/countries/XK/).
    /// The `iso_country` field in airports.csv points into this field.
    code: String,
    /// The common **English**-language name for the country.
    /// Other variations of the name may appear in the `keywords` field to assist with search.
    name: String,
    /// The code for the continent where the country is (primarily) located.
    /// See the `continent` code in airports.csv for allowed values.
    continent: String,
    /// Link to the Wikipedia article about the country.
    wikipedia_link: String,
    /// An array of search keywords/phrases related to the country.
    #[serde(deserialize_with = "vec_string_from_string")]
    keywords: Vec<String>,
}

/// Represents a high-level administrative subdivision of a country.
#[derive(Deserialize, Serialize)]
pub struct Region {
    /// Internal OurAirports integer identifier for the region. This will stay persistent, even
    /// if the region code changes.
    id: String,
    /// `local_code` prefixed with the country code to make a globally-unique identifier.
    code: String,
    /// The local code for the administrative subdivision.
    /// Whenever possible, these are official [ISO 3166:2](https://en.wikipedia.org/wiki/ISO_3166-2)
    /// codes, at the highest level available, but in some cases OurAirports has to use
    /// unofficial codes.
    /// There is also a pseudo code "U-A" for each country, which means that the airport has not
    /// yet been assigned to a region (or perhaps can't be, as in the case of a deep-sea oil
    /// platform).
    local_code: String,
    /// The common **English**-language name for the administrative subdivision.
    /// In some cases, the name in local languages will appear in the `keywords` field to assist
    /// search.
    name: String,
    /// A code for the continent to which the region belongs.
    /// See the `continent` field in airports.csv for a list of codes.
    continent: String,
    /// The two-character [ISO 3166:1-alpha2 code](https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes)
    /// for the country containing the administrative subdivision.
    /// A handful of unofficial, non-ISO codes are also in use, such as "XK" for
    /// [Kosovo](https://ourairports.com/countries/XK/).
    iso_country: String,
    /// A link to the Wikipedia article describing the subdivision.
    wikipedia_link: String,
    /// An array of keywords to assist with search.
    /// May include former names for the region, and/or the region name in other languages.
    #[serde(deserialize_with = "vec_string_from_string")]
    keywords: Vec<String>,
}

/// Converts a string to a boolean based on "yes"/"1" and "no"/"0".
fn bool_from_str<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
    D: Deserializer<'de>,
{
    match String::deserialize(deserializer)?.to_lowercase().as_str() {
        "yes" | "1" => Ok(true),
        "no" | "0" => Ok(false),
        other => Err(de::Error::invalid_value(
            Unexpected::Str(other),
            &"value must be yes, no, 1, or 0",
        )),
    }
}

/// Transforms a comma-separated string into a vector of trimmed strings.
fn vec_string_from_string<'de, D>(deserializer: D) -> Result<Vec<String>, D::Error>
where
    D: Deserializer<'de>,
{
    let keywords = String::deserialize(deserializer)?;

    if keywords.is_empty() {
        Ok(vec![])
    } else {
        Ok(keywords.split(',').map(|s| s.trim().to_string()).collect())
    }
}
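Since both helpers are private to this module, the natural place to exercise them is an in-file test. The sketch below assumes `serde_json` is available as a dev-dependency (the real data ships as CSV, so the JSON here is purely a convenient self-describing carrier for the two deserializers):

#[cfg(test)]
mod tests {
    use super::*;

    #[derive(Deserialize)]
    struct Probe {
        #[serde(deserialize_with = "bool_from_str")]
        lighted: bool,
        #[serde(deserialize_with = "vec_string_from_string")]
        keywords: Vec<String>,
    }

    #[test]
    fn parses_yes_no_and_keyword_lists() {
        let probe: Probe =
            serde_json::from_str(r#"{ "lighted": "yes", "keywords": "JFK, Idlewild" }"#).unwrap();
        assert!(probe.lighted);
        assert_eq!(probe.keywords, vec!["JFK", "Idlewild"]);

        // An empty keyword string becomes an empty vector, not [""].
        let probe: Probe =
            serde_json::from_str(r#"{ "lighted": "0", "keywords": "" }"#).unwrap();
        assert!(!probe.lighted);
        assert!(probe.keywords.is_empty());
    }
}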
num_format.rs
// This file is part of the uutils coreutils package.
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.

// spell-checker:ignore (vars) charf cninetyninehexfloatf decf floatf intf scif strf Cninety

//! Handles creating printed output for numeric substitutions.

use std::env;
use std::vec::Vec;

use crate::display::Quotable;
use crate::{show_error, show_warning};

use super::format_field::{FieldType, FormatField};
use super::formatter::{Base, FormatPrimitive, Formatter, InitialPrefix};
use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf;
use super::formatters::decf::Decf;
use super::formatters::floatf::Floatf;
use super::formatters::intf::Intf;
use super::formatters::scif::Scif;

pub fn warn_expected_numeric(pf_arg: &str) {
    // important: keep println here, not print
    show_error!("{}: expected a numeric value", pf_arg.maybe_quote());
}

// When character constant arguments have excess characters,
// issue a warning when POSIXLY_CORRECT is not set.
fn warn_char_constant_ign(remaining_bytes: &[u8]) {
    match env::var("POSIXLY_CORRECT") {
        Ok(_) => {}
        Err(e) => {
            if let env::VarError::NotPresent = e {
                show_warning!(
                    "{:?}: character(s) following character \
                     constant have been ignored",
                    remaining_bytes
                );
            }
        }
    }
}

// This function looks at the first few characters of an argument and returns a value if we can
// learn a value from that (e.g. no argument? return 0; char constant? return its value).
fn get_provided(str_in_opt: Option<&String>) -> Option<u8> {
    const C_S_QUOTE: u8 = 39;
    const C_D_QUOTE: u8 = 34;
    match str_in_opt {
        Some(str_in) => {
            let mut byte_it = str_in.bytes();
            if let Some(ch) = byte_it.next() {
                match ch {
                    C_S_QUOTE | C_D_QUOTE => {
                        Some(match byte_it.next() {
                            Some(second_byte) => {
                                let mut ignored: Vec<u8> = Vec::new();
                                for cont in byte_it {
                                    ignored.push(cont);
                                }
                                if !ignored.is_empty() {
                                    warn_char_constant_ign(&ignored);
                                }
                                second_byte
                            }
                            // no byte after the quote
                            None => {
                                let so_far = (ch as char).to_string();
                                warn_expected_numeric(&so_far);
                                0_u8
                            }
                        })
                    }
                    // first byte is not a quote
                    _ => None,
                }
            } else {
                // no first byte
                Some(0_u8)
            }
        }
        None => Some(0),
    }
}

// Takes a string and returns
// - a sign,
// - a base,
// - and an offset for the index after all initial spacing, sign, base prefix, and leading
//   zeroes.
#[allow(clippy::cognitive_complexity)]
fn get_initial_prefix(str_in: &str, field_type: &FieldType) -> InitialPrefix {
    let mut str_it = str_in.chars();
    let mut ret = InitialPrefix {
        radix_in: Base::Ten,
        sign: 1,
        offset: 0,
    };
    let mut top_char = str_it.next();
    // skip spaces and ensure top_char is the first non-space char
    // (or None if none exists)
    while let Some(' ') = top_char {
        ret.offset += 1;
        top_char = str_it.next();
    }
    // parse sign
    match top_char {
        Some('+') => {
            ret.offset += 1;
            top_char = str_it.next();
        }
        Some('-') => {
            ret.sign = -1;
            ret.offset += 1;
            top_char = str_it.next();
        }
        _ => {}
    }
    // We want to exit with offset being the index of the first non-zero digit before the
    // decimal point or, if there is none, the zero before the decimal point or, if there is
    // none, the decimal point.
    // While we are determining the offset we will ensure, as a convention, that the offset is
    // always on the first character that we are yet unsure is the final offset. If the zero
    // could be before a decimal point, we don't move past the zero.
let mut is_hex = false; if Some('0') == top_char { if let Some(base) = str_it.next() { // lead zeroes can only exist in // octal and hex base let mut do_clean_lead_zeroes = false; match base { 'x' | 'X' => { is_hex = true; ret.offset += 2; ret.radix_in = Base::Hex; do_clean_lead_zeroes = true; } e @ '0'..='9' => { ret.offset += 1; if let FieldType::Intf = *field_type { ret.radix_in = Base::Octal; } if e == '0' { do_clean_lead_zeroes = true; } } _ => {} } if do_clean_lead_zeroes { let mut first = true; for ch_zero in str_it { // see notes on offset above: // this is why the offset for octal and decimal numbers // that reach this branch is 1 even though // they have already eaten the characters '00' // this is also why when hex encounters its // first zero it does not move its offset // forward because it does not know for sure // that it's current offset (of that zero) // is not the final offset, // whereas at that point octal knows its // current offset is not the final offset. match ch_zero { '0' => { if!(is_hex && first) { ret.offset += 1; } } // if decimal, keep last zero if one exists // (it's possible for last zero to // not exist at this branch if we're in hex input) '.' => break, // other digit, etc. _ => { if!(is_hex && first) { ret.offset += 1; } break; } } if first { first = false; } } } } } ret } // this is the function a Sub's print will delegate to // if it is a numeric field, passing the field details // and an iterator to the argument pub fn
(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> { let field_char = field.field_char; // num format mainly operates by further delegating to one of // several Formatter structs depending on the field // see formatter.rs for more details // to do switch to static dispatch let formatter: Box<dyn Formatter> = match *field.field_type { FieldType::Intf => Box::new(Intf::new()), FieldType::Floatf => Box::new(Floatf::new()), FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()), FieldType::Scif => Box::new(Scif::new()), FieldType::Decf => Box::new(Decf::new()), _ => { panic!("asked to do num format with non-num field type"); } }; let prim_opt= // if we can get an assumed value from looking at the first // few characters, use that value to create the FormatPrimitive if let Some(provided_num) = get_provided(in_str_opt) { let mut tmp = FormatPrimitive::default(); match field_char { 'u' | 'i' | 'd' => { tmp.pre_decimal = Some( format!("{provided_num}")); }, 'x' | 'X' => { tmp.pre_decimal = Some( format!("{provided_num:x}")); }, 'o' => { tmp.pre_decimal = Some( format!("{provided_num:o}")); }, 'e' | 'E' | 'g' | 'G' => { let as_str = format!("{provided_num}"); let initial_prefix = get_initial_prefix( &as_str, field.field_type ); tmp=formatter.get_primitive(field, &initial_prefix, &as_str) .expect("err during default provided num"); }, _ => { tmp.pre_decimal = Some( format!("{provided_num}")); tmp.post_decimal = Some(String::from("0")); } } Some(tmp) } else { // otherwise we'll interpret the argument as a number // using the appropriate Formatter let in_str = in_str_opt.expect( "please send the devs this message: \n get_provided is failing to ret as Some(0) on no str "); // first get information about the beginning of the // numeric argument that would be useful for // any formatter (int or float) let initial_prefix = get_initial_prefix( in_str, field.field_type ); // then get the FormatPrimitive from the Formatter formatter.get_primitive(field, &initial_prefix, in_str) }; // if we have a formatPrimitive, print its results // according to the field-char appropriate Formatter prim_opt.map(|prim| formatter.primitive_to_str(&prim, field.clone())) }
num_format
identifier_name
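The masked identifier in the record above is `num_format`. Its prefix also shows `get_provided`, which implements the POSIX printf rule that an argument beginning with a single or double quote evaluates to the byte after the quote. A minimal standalone sketch of that rule (a simplified reimplementation for illustration; it omits the warnings and the zero fallback for a lone quote that the real helper emits):

/// Simplified sketch of the quoted-character rule in `get_provided`:
/// `'A` or `"A` yields the byte value of `A` (the quote constants 39 and 34 above).
fn char_constant_value(arg: &str) -> Option<u8> {
    let mut bytes = arg.bytes();
    match bytes.next() {
        Some(b'\'') | Some(b'"') => bytes.next(), // byte after the quote
        _ => None, // not a character constant; parse as a number instead
    }
}

fn main() {
    assert_eq!(char_constant_value("'A"), Some(65));
    assert_eq!(char_constant_value("\"0"), Some(48));
    assert_eq!(char_constant_value("12"), None);
}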
num_format.rs
// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (vars) charf cninetyninehexfloatf decf floatf intf scif strf Cninety //! handles creating printed output for numeric substitutions // spell-checker:ignore (vars) charf decf floatf intf scif strf Cninety use std::env; use std::vec::Vec; use crate::display::Quotable; use crate::{show_error, show_warning}; use super::format_field::{FieldType, FormatField}; use super::formatter::{Base, FormatPrimitive, Formatter, InitialPrefix}; use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf; use super::formatters::decf::Decf; use super::formatters::floatf::Floatf; use super::formatters::intf::Intf; use super::formatters::scif::Scif; pub fn warn_expected_numeric(pf_arg: &str)
// when character constant arguments have excess characters // issue a warning when POSIXLY_CORRECT is not set fn warn_char_constant_ign(remaining_bytes: &[u8]) { match env::var("POSIXLY_CORRECT") { Ok(_) => {} Err(e) => { if let env::VarError::NotPresent = e { show_warning!( "{:?}: character(s) following character \ constant have been ignored", remaining_bytes ); } } } } // this function looks at the first few // characters of an argument and returns a value if we can learn // a value from that (e.g. no argument? return 0, char constant? ret value) fn get_provided(str_in_opt: Option<&String>) -> Option<u8> { const C_S_QUOTE: u8 = 39; const C_D_QUOTE: u8 = 34; match str_in_opt { Some(str_in) => { let mut byte_it = str_in.bytes(); if let Some(ch) = byte_it.next() { match ch { C_S_QUOTE | C_D_QUOTE => { Some(match byte_it.next() { Some(second_byte) => { let mut ignored: Vec<u8> = Vec::new(); for cont in byte_it { ignored.push(cont); } if!ignored.is_empty() { warn_char_constant_ign(&ignored); } second_byte } // no byte after quote None => { let so_far = (ch as char).to_string(); warn_expected_numeric(&so_far); 0_u8 } }) } // first byte is not quote _ => None, // no first byte } } else { Some(0_u8) } } None => Some(0), } } // takes a string and returns // a sign, // a base, // and an offset for index after all // initial spacing, sign, base prefix, and leading zeroes #[allow(clippy::cognitive_complexity)] fn get_initial_prefix(str_in: &str, field_type: &FieldType) -> InitialPrefix { let mut str_it = str_in.chars(); let mut ret = InitialPrefix { radix_in: Base::Ten, sign: 1, offset: 0, }; let mut top_char = str_it.next(); // skip spaces and ensure top_char is the first non-space char // (or None if none exists) while let Some(' ') = top_char { ret.offset += 1; top_char = str_it.next(); } // parse sign match top_char { Some('+') => { ret.offset += 1; top_char = str_it.next(); } Some('-') => { ret.sign = -1; ret.offset += 1; top_char = str_it.next(); } _ => {} } // we want to exit with offset being // the index of the first non-zero // digit before the decimal point or // if there is none, the zero before the // decimal point, or, if there is none, // the decimal point. // while we are determining the offset // we will ensure as a convention // the offset is always on the first character // that we are yet unsure if it is the // final offset. If the zero could be before // a decimal point we don't move past the zero. let mut is_hex = false; if Some('0') == top_char { if let Some(base) = str_it.next() { // lead zeroes can only exist in // octal and hex base let mut do_clean_lead_zeroes = false; match base { 'x' | 'X' => { is_hex = true; ret.offset += 2; ret.radix_in = Base::Hex; do_clean_lead_zeroes = true; } e @ '0'..='9' => { ret.offset += 1; if let FieldType::Intf = *field_type { ret.radix_in = Base::Octal; } if e == '0' { do_clean_lead_zeroes = true; } } _ => {} } if do_clean_lead_zeroes { let mut first = true; for ch_zero in str_it { // see notes on offset above: // this is why the offset for octal and decimal numbers // that reach this branch is 1 even though // they have already eaten the characters '00' // this is also why when hex encounters its // first zero it does not move its offset // forward because it does not know for sure // that it's current offset (of that zero) // is not the final offset, // whereas at that point octal knows its // current offset is not the final offset. 
match ch_zero { '0' => { if!(is_hex && first) { ret.offset += 1; } } // if decimal, keep last zero if one exists // (it's possible for last zero to // not exist at this branch if we're in hex input) '.' => break, // other digit, etc. _ => { if!(is_hex && first) { ret.offset += 1; } break; } } if first { first = false; } } } } } ret } // this is the function a Sub's print will delegate to // if it is a numeric field, passing the field details // and an iterator to the argument pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> { let field_char = field.field_char; // num format mainly operates by further delegating to one of // several Formatter structs depending on the field // see formatter.rs for more details // to do switch to static dispatch let formatter: Box<dyn Formatter> = match *field.field_type { FieldType::Intf => Box::new(Intf::new()), FieldType::Floatf => Box::new(Floatf::new()), FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()), FieldType::Scif => Box::new(Scif::new()), FieldType::Decf => Box::new(Decf::new()), _ => { panic!("asked to do num format with non-num field type"); } }; let prim_opt= // if we can get an assumed value from looking at the first // few characters, use that value to create the FormatPrimitive if let Some(provided_num) = get_provided(in_str_opt) { let mut tmp = FormatPrimitive::default(); match field_char { 'u' | 'i' | 'd' => { tmp.pre_decimal = Some( format!("{provided_num}")); }, 'x' | 'X' => { tmp.pre_decimal = Some( format!("{provided_num:x}")); }, 'o' => { tmp.pre_decimal = Some( format!("{provided_num:o}")); }, 'e' | 'E' | 'g' | 'G' => { let as_str = format!("{provided_num}"); let initial_prefix = get_initial_prefix( &as_str, field.field_type ); tmp=formatter.get_primitive(field, &initial_prefix, &as_str) .expect("err during default provided num"); }, _ => { tmp.pre_decimal = Some( format!("{provided_num}")); tmp.post_decimal = Some(String::from("0")); } } Some(tmp) } else { // otherwise we'll interpret the argument as a number // using the appropriate Formatter let in_str = in_str_opt.expect( "please send the devs this message: \n get_provided is failing to ret as Some(0) on no str "); // first get information about the beginning of the // numeric argument that would be useful for // any formatter (int or float) let initial_prefix = get_initial_prefix( in_str, field.field_type ); // then get the FormatPrimitive from the Formatter formatter.get_primitive(field, &initial_prefix, in_str) }; // if we have a formatPrimitive, print its results // according to the field-char appropriate Formatter prim_opt.map(|prim| formatter.primitive_to_str(&prim, field.clone())) }
{ // important: keep println here not print show_error!("{}: expected a numeric value", pf_arg.maybe_quote()); }
identifier_body
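The masked `identifier_body` here is the body of `warn_expected_numeric`. The longer `get_initial_prefix` routine around it encodes a subtle offset convention: stop on the first non-zero digit, but keep one zero when it sits directly before a decimal point. A deliberately simplified sketch of that convention for plain decimal input only (the real parser additionally classifies octal and hex radixes and tracks them in `InitialPrefix`):

/// Minimal sketch of the offset convention used by `get_initial_prefix`
/// (simplified: spaces, one sign, and leading zeros for decimal input only).
fn initial_offset(s: &str) -> (i8, usize) {
    let b = s.as_bytes();
    let mut sign = 1i8;
    let mut i = 0;
    while i < b.len() && b[i] == b' ' { i += 1; } // skip initial spacing
    if i < b.len() && (b[i] == b'+' || b[i] == b'-') {
        if b[i] == b'-' { sign = -1; }
        i += 1;
    }
    // stop on the first non-zero digit, but keep one zero if the next
    // character is a decimal point (e.g. "0.5" keeps the offset on '0')
    while i + 1 < b.len() && b[i] == b'0' && b[i + 1] != b'.' { i += 1; }
    (sign, i)
}

fn main() {
    assert_eq!(initial_offset("  -007"), (-1, 5)); // offset lands on '7'
    assert_eq!(initial_offset("0.5"), (1, 0));     // zero kept before '.'
}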
num_format.rs
// This file is part of the uutils coreutils package. // // For the full copyright and license information, please view the LICENSE // file that was distributed with this source code. // spell-checker:ignore (vars) charf cninetyninehexfloatf decf floatf intf scif strf Cninety //! handles creating printed output for numeric substitutions // spell-checker:ignore (vars) charf decf floatf intf scif strf Cninety use std::env; use std::vec::Vec; use crate::display::Quotable; use crate::{show_error, show_warning}; use super::format_field::{FieldType, FormatField}; use super::formatter::{Base, FormatPrimitive, Formatter, InitialPrefix}; use super::formatters::cninetyninehexfloatf::CninetyNineHexFloatf; use super::formatters::decf::Decf; use super::formatters::floatf::Floatf; use super::formatters::intf::Intf; use super::formatters::scif::Scif; pub fn warn_expected_numeric(pf_arg: &str) { // important: keep println here not print show_error!("{}: expected a numeric value", pf_arg.maybe_quote()); } // when character constant arguments have excess characters // issue a warning when POSIXLY_CORRECT is not set fn warn_char_constant_ign(remaining_bytes: &[u8]) { match env::var("POSIXLY_CORRECT") { Ok(_) => {} Err(e) => { if let env::VarError::NotPresent = e { show_warning!( "{:?}: character(s) following character \ constant have been ignored", remaining_bytes ); } } } } // this function looks at the first few // characters of an argument and returns a value if we can learn // a value from that (e.g. no argument? return 0, char constant? ret value) fn get_provided(str_in_opt: Option<&String>) -> Option<u8> { const C_S_QUOTE: u8 = 39; const C_D_QUOTE: u8 = 34; match str_in_opt { Some(str_in) => { let mut byte_it = str_in.bytes(); if let Some(ch) = byte_it.next() { match ch { C_S_QUOTE | C_D_QUOTE => { Some(match byte_it.next() { Some(second_byte) => { let mut ignored: Vec<u8> = Vec::new(); for cont in byte_it { ignored.push(cont); } if!ignored.is_empty() { warn_char_constant_ign(&ignored); } second_byte } // no byte after quote None => { let so_far = (ch as char).to_string(); warn_expected_numeric(&so_far); 0_u8 } }) } // first byte is not quote _ => None, // no first byte } } else { Some(0_u8) } } None => Some(0), } } // takes a string and returns // a sign, // a base, // and an offset for index after all // initial spacing, sign, base prefix, and leading zeroes #[allow(clippy::cognitive_complexity)] fn get_initial_prefix(str_in: &str, field_type: &FieldType) -> InitialPrefix { let mut str_it = str_in.chars(); let mut ret = InitialPrefix { radix_in: Base::Ten, sign: 1, offset: 0, }; let mut top_char = str_it.next(); // skip spaces and ensure top_char is the first non-space char // (or None if none exists) while let Some(' ') = top_char { ret.offset += 1; top_char = str_it.next(); } // parse sign match top_char { Some('+') => { ret.offset += 1; top_char = str_it.next(); } Some('-') => { ret.sign = -1;
ret.offset += 1; top_char = str_it.next(); } _ => {} } // we want to exit with offset being // the index of the first non-zero // digit before the decimal point or // if there is none, the zero before the // decimal point, or, if there is none, // the decimal point. // while we are determining the offset // we will ensure as a convention // the offset is always on the first character // that we are yet unsure if it is the // final offset. If the zero could be before // a decimal point we don't move past the zero. let mut is_hex = false; if Some('0') == top_char { if let Some(base) = str_it.next() { // lead zeroes can only exist in // octal and hex base let mut do_clean_lead_zeroes = false; match base { 'x' | 'X' => { is_hex = true; ret.offset += 2; ret.radix_in = Base::Hex; do_clean_lead_zeroes = true; } e @ '0'..='9' => { ret.offset += 1; if let FieldType::Intf = *field_type { ret.radix_in = Base::Octal; } if e == '0' { do_clean_lead_zeroes = true; } } _ => {} } if do_clean_lead_zeroes { let mut first = true; for ch_zero in str_it { // see notes on offset above: // this is why the offset for octal and decimal numbers // that reach this branch is 1 even though // they have already eaten the characters '00' // this is also why when hex encounters its // first zero it does not move its offset // forward because it does not know for sure // that it's current offset (of that zero) // is not the final offset, // whereas at that point octal knows its // current offset is not the final offset. match ch_zero { '0' => { if!(is_hex && first) { ret.offset += 1; } } // if decimal, keep last zero if one exists // (it's possible for last zero to // not exist at this branch if we're in hex input) '.' => break, // other digit, etc. _ => { if!(is_hex && first) { ret.offset += 1; } break; } } if first { first = false; } } } } } ret } // this is the function a Sub's print will delegate to // if it is a numeric field, passing the field details // and an iterator to the argument pub fn num_format(field: &FormatField, in_str_opt: Option<&String>) -> Option<String> { let field_char = field.field_char; // num format mainly operates by further delegating to one of // several Formatter structs depending on the field // see formatter.rs for more details // to do switch to static dispatch let formatter: Box<dyn Formatter> = match *field.field_type { FieldType::Intf => Box::new(Intf::new()), FieldType::Floatf => Box::new(Floatf::new()), FieldType::CninetyNineHexFloatf => Box::new(CninetyNineHexFloatf::new()), FieldType::Scif => Box::new(Scif::new()), FieldType::Decf => Box::new(Decf::new()), _ => { panic!("asked to do num format with non-num field type"); } }; let prim_opt= // if we can get an assumed value from looking at the first // few characters, use that value to create the FormatPrimitive if let Some(provided_num) = get_provided(in_str_opt) { let mut tmp = FormatPrimitive::default(); match field_char { 'u' | 'i' | 'd' => { tmp.pre_decimal = Some( format!("{provided_num}")); }, 'x' | 'X' => { tmp.pre_decimal = Some( format!("{provided_num:x}")); }, 'o' => { tmp.pre_decimal = Some( format!("{provided_num:o}")); }, 'e' | 'E' | 'g' | 'G' => { let as_str = format!("{provided_num}"); let initial_prefix = get_initial_prefix( &as_str, field.field_type ); tmp=formatter.get_primitive(field, &initial_prefix, &as_str) .expect("err during default provided num"); }, _ => { tmp.pre_decimal = Some( format!("{provided_num}")); tmp.post_decimal = Some(String::from("0")); } } Some(tmp) } else { // otherwise we'll interpret the 
argument as a number // using the appropriate Formatter let in_str = in_str_opt.expect( "please send the devs this message: \n get_provided is failing to ret as Some(0) on no str "); // first get information about the beginning of the // numeric argument that would be useful for // any formatter (int or float) let initial_prefix = get_initial_prefix( in_str, field.field_type ); // then get the FormatPrimitive from the Formatter formatter.get_primitive(field, &initial_prefix, in_str) }; // if we have a formatPrimitive, print its results // according to the field-char appropriate Formatter prim_opt.map(|prim| formatter.primitive_to_str(&prim, field.clone())) }
random_line_split
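The `num_format` entry point above picks a `Box<dyn Formatter>` per field type and carries a `to do switch to static dispatch` note. A minimal illustration of both dispatch styles with hypothetical stand-in types (this is not the uutils `Formatter` trait, whose real methods live in formatter.rs):

// Hypothetical trait standing in for super::formatter::Formatter.
trait Format {
    fn render(&self, input: &str) -> String;
}

struct IntLike;
struct FloatLike;

impl Format for IntLike {
    fn render(&self, input: &str) -> String { format!("int:{input}") }
}
impl Format for FloatLike {
    fn render(&self, input: &str) -> String { format!("float:{input}") }
}

// Dynamic dispatch, as in num_format: the formatter is chosen at runtime
// and every render goes through a vtable.
fn render_dyn(kind: u8, input: &str) -> String {
    let f: Box<dyn Format> = match kind {
        0 => Box::new(IntLike),
        _ => Box::new(FloatLike),
    };
    f.render(input)
}

// Static-dispatch alternative the comment alludes to: monomorphized per type.
fn render_static<F: Format>(f: &F, input: &str) -> String {
    f.render(input)
}

fn main() {
    assert_eq!(render_dyn(0, "42"), "int:42");
    assert_eq!(render_static(&FloatLike, "1.5"), "float:1.5");
}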
publish.rs
//! //! The Zargo package manager `publish` subcommand. //! use std::convert::TryFrom; use std::path::PathBuf; use std::str::FromStr; use colored::Colorize; use structopt::StructOpt; use zksync::web3::types::H256; use zksync_eth_signer::PrivateKeySigner; use zksync_types::tx::PackedEthSignature; use crate::error::Error; use crate::executable::compiler::Compiler; use crate::executable::virtual_machine::VirtualMachine; use crate::http::downloader::Downloader; use crate::http::Client as HttpClient; use crate::network::Network; use crate::project::data::input::Input as InputFile; use crate::project::data::private_key::PrivateKey as PrivateKeyFile; use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile; use crate::project::data::Directory as DataDirectory; use crate::project::src::Directory as SourceDirectory; use crate::project::target::bytecode::Bytecode as BytecodeFile; use crate::project::target::deps::Directory as TargetDependenciesDirectory; use crate::project::target::Directory as TargetDirectory; /// /// The Zargo package manager `publish` subcommand. /// #[derive(Debug, StructOpt)] #[structopt(about = "Uploads the smart contract to the specified network")] pub struct Command { /// Prints more logs, if passed several times. #[structopt(short = "v", long = "verbose", parse(from_occurrences))] pub verbosity: usize, /// Suppresses output, if set. #[structopt(short = "q", long = "quiet")] pub quiet: bool, /// The path to the Zinc project manifest file. #[structopt( long = "manifest-path", parse(from_os_str), default_value = "./Zargo.toml" )] pub manifest_path: PathBuf, /// Sets the contract instance name. #[structopt(long = "instance")] pub instance: String, /// Sets the network name, where the contract must be published to. #[structopt(long = "network", default_value = "localhost")] pub network: String, /// Sets the change-pubkey fee token. #[structopt(long = "change-pubkey-fee-token", default_value = "ETH")] pub change_pubkey_fee_token: String, } /// /// The publish data. Used for testing purposes. /// pub struct Data { /// The address of the published contract instance. pub address: zksync_types::Address, /// The account ID of the published contract instance. pub account_id: zksync_types::AccountId, } impl Data { /// /// A shortcut constructor. /// pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self { Self { address, account_id, } } } impl Command { /// /// A shortcut constructor. /// pub fn new( verbosity: usize, quiet: bool, manifest_path: PathBuf, instance: String, network: Option<String>, change_pubkey_fee_token: Option<String>, ) -> Self { Self { verbosity, quiet, manifest_path, instance, network: network .unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()), change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()), } } /// /// Executes the command. 
/// pub async fn execute(self) -> anyhow::Result<Data> { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?; match manifest.project.r#type { zinc_project::ProjectType::Contract => {} _ => anyhow::bail!(Error::NotAContract), } let mut manifest_path = self.manifest_path; if manifest_path.is_file() { manifest_path.pop(); } if let zinc_project::ProjectType::Contract = manifest.project.r#type { if!PrivateKeyFile::exists_at(&manifest_path) { PrivateKeyFile::default().write_to(&manifest_path)?; } } let source_directory_path = SourceDirectory::path(&manifest_path); let source = zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?; let project = zinc_project::Project::new(manifest.clone(), source); DataDirectory::create(&manifest_path)?; let data_directory_path = DataDirectory::path(&manifest_path); let mut input_path = data_directory_path.clone(); input_path.push(format!( "{}.{}", zinc_const::file_name::INPUT, zinc_const::extension::JSON, )); let mut proving_key_path = data_directory_path.clone(); proving_key_path.push(zinc_const::file_name::PROVING_KEY); let mut verifying_key_path = data_directory_path.clone(); verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned()); TargetDirectory::create(&manifest_path, true)?; let target_directory_path = TargetDirectory::path(&manifest_path, true); let mut binary_path = target_directory_path; binary_path.push(format!( "{}.{}", zinc_const::file_name::BINARY, zinc_const::extension::BINARY )); TargetDependenciesDirectory::create(&manifest_path)?; if let Some(dependencies) = manifest.dependencies { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let mut downloader = Downloader::new(&http_client, &manifest_path); downloader.download_dependency_list(dependencies).await?; } Compiler::build_release( self.verbosity, self.quiet, manifest.project.name.as_str(), &manifest.project.version, &manifest_path, false, )?; let bytecode = BytecodeFile::try_from_path(&binary_path, true)?; let input = InputFile::try_from_path(&input_path)?; let arguments = input .inner .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .get("arguments") .cloned() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER) .cloned() .ok_or_else(|| { Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned()) })?; if!verifying_key_path.exists()
let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?; if !self.quiet { eprintln!( " {} the instance `{}` of `{} v{}` to network `{}`", "Uploading".bright_green(), self.instance, manifest.project.name, manifest.project.version, network, ); } let response = http_client .publish( zinc_types::PublishRequestQuery::new( manifest.project.name, manifest.project.version, self.instance, self.change_pubkey_fee_token.clone(), ), zinc_types::PublishRequestBody::new( project, bytecode.inner, arguments, verifying_key.inner, ), ) .await?; if !self.quiet { eprintln!( " {} {}", "Address".bright_green(), serde_json::to_string(&response.address) .expect(zinc_const::panic::DATA_CONVERSION) .replace("\"", "") ); } let private_key = PrivateKeyFile::try_from(&manifest_path)?; let signer_private_key: H256 = private_key.inner.parse()?; let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?; let wallet_credentials = zksync::WalletCredentials::from_eth_signer( signer_address, PrivateKeySigner::new(signer_private_key), network.into(), ) .await .expect(zinc_const::panic::DATA_CONVERSION); let wallet = zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials) .await?; let initial_transfer = crate::transaction::new_initial( &wallet, response.address, self.change_pubkey_fee_token, response.change_pubkey_fee, ) .await?; let address = response.address; let response = http_client .initialize( zinc_types::InitializeRequestQuery::new(response.address), zinc_types::InitializeRequestBody::new(initial_transfer), ) .await?; if !self.quiet { eprintln!(" {} {}", "Account ID".bright_green(), response.account_id); } Ok(Data::new(address, response.account_id)) } }
{ VirtualMachine::setup_contract( self.verbosity, self.quiet, &binary_path, zinc_const::contract::CONSTRUCTOR_IDENTIFIER, &proving_key_path, &verifying_key_path, )?; }
conditional_block
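This `conditional_block` sample masks the `VirtualMachine::setup_contract` call that generates missing proving/verifying keys. For orientation, a sketch of driving the surrounding subcommand programmatically through its `Command::new` shortcut constructor (a fragment that would live inside the zargo crate; the instance name "main" is a made-up example, and each `None` falls back to the `localhost` network and `ETH` fee-token defaults visible in the constructor):

// Inside the zargo crate; argument values mirror the StructOpt defaults above.
let cmd = Command::new(
    0,                             // verbosity: no extra logging
    false,                         // quiet
    PathBuf::from("./Zargo.toml"), // manifest_path (the StructOpt default)
    "main".to_owned(),             // instance (illustrative name)
    None,                          // network -> "localhost"
    None,                          // change_pubkey_fee_token -> "ETH"
);
// cmd.execute().await? would then compile, publish and initialize the contract.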
publish.rs
//! //! The Zargo package manager `publish` subcommand. //! use std::convert::TryFrom; use std::path::PathBuf; use std::str::FromStr; use colored::Colorize; use structopt::StructOpt; use zksync::web3::types::H256; use zksync_eth_signer::PrivateKeySigner; use zksync_types::tx::PackedEthSignature; use crate::error::Error; use crate::executable::compiler::Compiler; use crate::executable::virtual_machine::VirtualMachine; use crate::http::downloader::Downloader; use crate::http::Client as HttpClient; use crate::network::Network; use crate::project::data::input::Input as InputFile; use crate::project::data::private_key::PrivateKey as PrivateKeyFile; use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile; use crate::project::data::Directory as DataDirectory; use crate::project::src::Directory as SourceDirectory; use crate::project::target::bytecode::Bytecode as BytecodeFile; use crate::project::target::deps::Directory as TargetDependenciesDirectory; use crate::project::target::Directory as TargetDirectory; /// /// The Zargo package manager `publish` subcommand. /// #[derive(Debug, StructOpt)] #[structopt(about = "Uploads the smart contract to the specified network")] pub struct Command { /// Prints more logs, if passed several times. #[structopt(short = "v", long = "verbose", parse(from_occurrences))] pub verbosity: usize, /// Suppresses output, if set. #[structopt(short = "q", long = "quiet")] pub quiet: bool, /// The path to the Zinc project manifest file. #[structopt( long = "manifest-path", parse(from_os_str), default_value = "./Zargo.toml" )] pub manifest_path: PathBuf, /// Sets the contract instance name. #[structopt(long = "instance")] pub instance: String, /// Sets the network name, where the contract must be published to. #[structopt(long = "network", default_value = "localhost")] pub network: String, /// Sets the change-pubkey fee token. #[structopt(long = "change-pubkey-fee-token", default_value = "ETH")] pub change_pubkey_fee_token: String, } /// /// The publish data. Used for testing purposes. /// pub struct Data { /// The address of the published contract instance. pub address: zksync_types::Address, /// The account ID of the published contract instance. pub account_id: zksync_types::AccountId, } impl Data { /// /// A shortcut constructor. /// pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self { Self { address, account_id, } } } impl Command { /// /// A shortcut constructor. /// pub fn new( verbosity: usize, quiet: bool, manifest_path: PathBuf, instance: String, network: Option<String>, change_pubkey_fee_token: Option<String>, ) -> Self { Self { verbosity, quiet, manifest_path, instance, network: network .unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()), change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()), } } /// /// Executes the command. 
/// pub async fn execute(self) -> anyhow::Result<Data> { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?; match manifest.project.r#type { zinc_project::ProjectType::Contract => {} _ => anyhow::bail!(Error::NotAContract), } let mut manifest_path = self.manifest_path; if manifest_path.is_file() { manifest_path.pop(); } if let zinc_project::ProjectType::Contract = manifest.project.r#type { if!PrivateKeyFile::exists_at(&manifest_path) { PrivateKeyFile::default().write_to(&manifest_path)?; } } let source_directory_path = SourceDirectory::path(&manifest_path); let source = zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?; let project = zinc_project::Project::new(manifest.clone(), source); DataDirectory::create(&manifest_path)?; let data_directory_path = DataDirectory::path(&manifest_path); let mut input_path = data_directory_path.clone(); input_path.push(format!( "{}.{}", zinc_const::file_name::INPUT, zinc_const::extension::JSON, )); let mut proving_key_path = data_directory_path.clone(); proving_key_path.push(zinc_const::file_name::PROVING_KEY); let mut verifying_key_path = data_directory_path.clone(); verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned()); TargetDirectory::create(&manifest_path, true)?; let target_directory_path = TargetDirectory::path(&manifest_path, true); let mut binary_path = target_directory_path; binary_path.push(format!( "{}.{}", zinc_const::file_name::BINARY, zinc_const::extension::BINARY )); TargetDependenciesDirectory::create(&manifest_path)?; if let Some(dependencies) = manifest.dependencies { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let mut downloader = Downloader::new(&http_client, &manifest_path); downloader.download_dependency_list(dependencies).await?; } Compiler::build_release( self.verbosity, self.quiet, manifest.project.name.as_str(), &manifest.project.version, &manifest_path, false, )?; let bytecode = BytecodeFile::try_from_path(&binary_path, true)?; let input = InputFile::try_from_path(&input_path)?; let arguments = input .inner .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .get("arguments") .cloned() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? 
.get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER) .cloned() .ok_or_else(|| { Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned()) })?; if!verifying_key_path.exists() { VirtualMachine::setup_contract( self.verbosity, self.quiet, &binary_path, zinc_const::contract::CONSTRUCTOR_IDENTIFIER, &proving_key_path, &verifying_key_path, )?; } let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?; if!self.quiet { eprintln!( " {} the instance `{}` of `{} v{}` to network `{}`", "Uploading".bright_green(), self.instance, manifest.project.name, manifest.project.version, network, ); } let response = http_client .publish( zinc_types::PublishRequestQuery::new( manifest.project.name, manifest.project.version, self.instance, self.change_pubkey_fee_token.clone(), ), zinc_types::PublishRequestBody::new( project, bytecode.inner, arguments, verifying_key.inner, ), ) .await?; if!self.quiet { eprintln!( " {} {}", "Address".bright_green(), serde_json::to_string(&response.address) .expect(zinc_const::panic::DATA_CONVERSION) .replace("\"", "") ); } let private_key = PrivateKeyFile::try_from(&manifest_path)?; let signer_private_key: H256 = private_key.inner.parse()?; let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?; let wallet_credentials = zksync::WalletCredentials::from_eth_signer( signer_address, PrivateKeySigner::new(signer_private_key), network.into(), ) .await .expect(zinc_const::panic::DATA_CONVERSION);
let initial_transfer = crate::transaction::new_initial( &wallet, response.address, self.change_pubkey_fee_token, response.change_pubkey_fee, ) .await?; let address = response.address; let response = http_client .initialize( zinc_types::InitializeRequestQuery::new(response.address), zinc_types::InitializeRequestBody::new(initial_transfer), ) .await?; if !self.quiet { eprintln!(" {} {}", "Account ID".bright_green(), response.account_id); } Ok(Data::new(address, response.account_id)) } }
let wallet = zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials) .await?;
random_line_split
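The `arguments` extraction chain in `execute` (a pair of nested `as_object`/`get` lookups) dictates the shape of the input file. A sketch of a matching document built with the serde_json crate, where "new" stands in for `zinc_const::contract::CONSTRUCTOR_IDENTIFIER` (its actual value is defined outside this file) and the constructor argument is hypothetical:

use serde_json::json;

fn main() {
    // Nested shape the lookup chain expects: a top-level "arguments" object
    // containing one entry per method, keyed by the constructor identifier.
    let input = json!({
        "arguments": {
            "new": { "initial_balance": "0" } // hypothetical constructor argument
        }
    });
    let ctor_args = input
        .as_object().unwrap()
        .get("arguments").unwrap()
        .as_object().unwrap()
        .get("new").cloned().unwrap();
    assert!(ctor_args.is_object());
}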
publish.rs
//! //! The Zargo package manager `publish` subcommand. //! use std::convert::TryFrom; use std::path::PathBuf; use std::str::FromStr; use colored::Colorize; use structopt::StructOpt; use zksync::web3::types::H256; use zksync_eth_signer::PrivateKeySigner; use zksync_types::tx::PackedEthSignature; use crate::error::Error; use crate::executable::compiler::Compiler; use crate::executable::virtual_machine::VirtualMachine; use crate::http::downloader::Downloader; use crate::http::Client as HttpClient; use crate::network::Network; use crate::project::data::input::Input as InputFile; use crate::project::data::private_key::PrivateKey as PrivateKeyFile; use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile; use crate::project::data::Directory as DataDirectory; use crate::project::src::Directory as SourceDirectory; use crate::project::target::bytecode::Bytecode as BytecodeFile; use crate::project::target::deps::Directory as TargetDependenciesDirectory; use crate::project::target::Directory as TargetDirectory; /// /// The Zargo package manager `publish` subcommand. /// #[derive(Debug, StructOpt)] #[structopt(about = "Uploads the smart contract to the specified network")] pub struct Command { /// Prints more logs, if passed several times. #[structopt(short = "v", long = "verbose", parse(from_occurrences))] pub verbosity: usize, /// Suppresses output, if set. #[structopt(short = "q", long = "quiet")] pub quiet: bool, /// The path to the Zinc project manifest file. #[structopt( long = "manifest-path", parse(from_os_str), default_value = "./Zargo.toml" )] pub manifest_path: PathBuf, /// Sets the contract instance name. #[structopt(long = "instance")] pub instance: String, /// Sets the network name, where the contract must be published to. #[structopt(long = "network", default_value = "localhost")] pub network: String, /// Sets the change-pubkey fee token. #[structopt(long = "change-pubkey-fee-token", default_value = "ETH")] pub change_pubkey_fee_token: String, } /// /// The publish data. Used for testing purposes. /// pub struct Data { /// The address of the published contract instance. pub address: zksync_types::Address, /// The account ID of the published contract instance. pub account_id: zksync_types::AccountId, } impl Data { /// /// A shortcut constructor. /// pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self
} impl Command { /// /// A shortcut constructor. /// pub fn new( verbosity: usize, quiet: bool, manifest_path: PathBuf, instance: String, network: Option<String>, change_pubkey_fee_token: Option<String>, ) -> Self { Self { verbosity, quiet, manifest_path, instance, network: network .unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()), change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()), } } /// /// Executes the command. /// pub async fn execute(self) -> anyhow::Result<Data> { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?; match manifest.project.r#type { zinc_project::ProjectType::Contract => {} _ => anyhow::bail!(Error::NotAContract), } let mut manifest_path = self.manifest_path; if manifest_path.is_file() { manifest_path.pop(); } if let zinc_project::ProjectType::Contract = manifest.project.r#type { if!PrivateKeyFile::exists_at(&manifest_path) { PrivateKeyFile::default().write_to(&manifest_path)?; } } let source_directory_path = SourceDirectory::path(&manifest_path); let source = zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?; let project = zinc_project::Project::new(manifest.clone(), source); DataDirectory::create(&manifest_path)?; let data_directory_path = DataDirectory::path(&manifest_path); let mut input_path = data_directory_path.clone(); input_path.push(format!( "{}.{}", zinc_const::file_name::INPUT, zinc_const::extension::JSON, )); let mut proving_key_path = data_directory_path.clone(); proving_key_path.push(zinc_const::file_name::PROVING_KEY); let mut verifying_key_path = data_directory_path.clone(); verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned()); TargetDirectory::create(&manifest_path, true)?; let target_directory_path = TargetDirectory::path(&manifest_path, true); let mut binary_path = target_directory_path; binary_path.push(format!( "{}.{}", zinc_const::file_name::BINARY, zinc_const::extension::BINARY )); TargetDependenciesDirectory::create(&manifest_path)?; if let Some(dependencies) = manifest.dependencies { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let mut downloader = Downloader::new(&http_client, &manifest_path); downloader.download_dependency_list(dependencies).await?; } Compiler::build_release( self.verbosity, self.quiet, manifest.project.name.as_str(), &manifest.project.version, &manifest_path, false, )?; let bytecode = BytecodeFile::try_from_path(&binary_path, true)?; let input = InputFile::try_from_path(&input_path)?; let arguments = input .inner .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .get("arguments") .cloned() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? 
.get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER) .cloned() .ok_or_else(|| { Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned()) })?; if!verifying_key_path.exists() { VirtualMachine::setup_contract( self.verbosity, self.quiet, &binary_path, zinc_const::contract::CONSTRUCTOR_IDENTIFIER, &proving_key_path, &verifying_key_path, )?; } let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?; if!self.quiet { eprintln!( " {} the instance `{}` of `{} v{}` to network `{}`", "Uploading".bright_green(), self.instance, manifest.project.name, manifest.project.version, network, ); } let response = http_client .publish( zinc_types::PublishRequestQuery::new( manifest.project.name, manifest.project.version, self.instance, self.change_pubkey_fee_token.clone(), ), zinc_types::PublishRequestBody::new( project, bytecode.inner, arguments, verifying_key.inner, ), ) .await?; if!self.quiet { eprintln!( " {} {}", "Address".bright_green(), serde_json::to_string(&response.address) .expect(zinc_const::panic::DATA_CONVERSION) .replace("\"", "") ); } let private_key = PrivateKeyFile::try_from(&manifest_path)?; let signer_private_key: H256 = private_key.inner.parse()?; let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?; let wallet_credentials = zksync::WalletCredentials::from_eth_signer( signer_address, PrivateKeySigner::new(signer_private_key), network.into(), ) .await .expect(zinc_const::panic::DATA_CONVERSION); let wallet = zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials) .await?; let initial_transfer = crate::transaction::new_initial( &wallet, response.address, self.change_pubkey_fee_token, response.change_pubkey_fee, ) .await?; let address = response.address; let response = http_client .initialize( zinc_types::InitializeRequestQuery::new(response.address), zinc_types::InitializeRequestBody::new(initial_transfer), ) .await?; if!self.quiet { eprintln!(" {} {}", "Account ID".bright_green(), response.account_id); } Ok(Data::new(address, response.account_id)) } }
{ Self { address, account_id, } }
identifier_body
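The masked body belongs to `Data::new`. A self-contained equivalent of the struct-plus-shortcut-constructor pattern, using plain stand-ins for the zksync types so it runs without the crate:

// [u8; 20] stands in for zksync_types::Address (an H160),
// u32 for zksync_types::AccountId; both are placeholders.
pub struct PublishData {
    pub address: [u8; 20],
    pub account_id: u32,
}

impl PublishData {
    /// Shortcut constructor, mirroring Data::new above.
    pub fn new(address: [u8; 20], account_id: u32) -> Self {
        Self { address, account_id }
    }
}

fn main() {
    let d = PublishData::new([0u8; 20], 7);
    assert_eq!(d.account_id, 7);
}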
publish.rs
//! //! The Zargo package manager `publish` subcommand. //! use std::convert::TryFrom; use std::path::PathBuf; use std::str::FromStr; use colored::Colorize; use structopt::StructOpt; use zksync::web3::types::H256; use zksync_eth_signer::PrivateKeySigner; use zksync_types::tx::PackedEthSignature; use crate::error::Error; use crate::executable::compiler::Compiler; use crate::executable::virtual_machine::VirtualMachine; use crate::http::downloader::Downloader; use crate::http::Client as HttpClient; use crate::network::Network; use crate::project::data::input::Input as InputFile; use crate::project::data::private_key::PrivateKey as PrivateKeyFile; use crate::project::data::verifying_key::VerifyingKey as VerifyingKeyFile; use crate::project::data::Directory as DataDirectory; use crate::project::src::Directory as SourceDirectory; use crate::project::target::bytecode::Bytecode as BytecodeFile; use crate::project::target::deps::Directory as TargetDependenciesDirectory; use crate::project::target::Directory as TargetDirectory; /// /// The Zargo package manager `publish` subcommand. /// #[derive(Debug, StructOpt)] #[structopt(about = "Uploads the smart contract to the specified network")] pub struct Command { /// Prints more logs, if passed several times. #[structopt(short = "v", long = "verbose", parse(from_occurrences))] pub verbosity: usize, /// Suppresses output, if set. #[structopt(short = "q", long = "quiet")] pub quiet: bool, /// The path to the Zinc project manifest file. #[structopt( long = "manifest-path", parse(from_os_str), default_value = "./Zargo.toml" )] pub manifest_path: PathBuf, /// Sets the contract instance name. #[structopt(long = "instance")] pub instance: String, /// Sets the network name, where the contract must be published to. #[structopt(long = "network", default_value = "localhost")] pub network: String, /// Sets the change-pubkey fee token. #[structopt(long = "change-pubkey-fee-token", default_value = "ETH")] pub change_pubkey_fee_token: String, } /// /// The publish data. Used for testing purposes. /// pub struct
{ /// The address of the published contract instance. pub address: zksync_types::Address, /// The account ID of the published contract instance. pub account_id: zksync_types::AccountId, } impl Data { /// /// A shortcut constructor. /// pub fn new(address: zksync_types::Address, account_id: zksync_types::AccountId) -> Self { Self { address, account_id, } } } impl Command { /// /// A shortcut constructor. /// pub fn new( verbosity: usize, quiet: bool, manifest_path: PathBuf, instance: String, network: Option<String>, change_pubkey_fee_token: Option<String>, ) -> Self { Self { verbosity, quiet, manifest_path, instance, network: network .unwrap_or_else(|| Network::from(zksync::Network::Localhost).to_string()), change_pubkey_fee_token: change_pubkey_fee_token.unwrap_or_else(|| "ETH".to_owned()), } } /// /// Executes the command. /// pub async fn execute(self) -> anyhow::Result<Data> { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let manifest = zinc_project::Manifest::try_from(&self.manifest_path)?; match manifest.project.r#type { zinc_project::ProjectType::Contract => {} _ => anyhow::bail!(Error::NotAContract), } let mut manifest_path = self.manifest_path; if manifest_path.is_file() { manifest_path.pop(); } if let zinc_project::ProjectType::Contract = manifest.project.r#type { if!PrivateKeyFile::exists_at(&manifest_path) { PrivateKeyFile::default().write_to(&manifest_path)?; } } let source_directory_path = SourceDirectory::path(&manifest_path); let source = zinc_project::Source::try_from_path(&source_directory_path, &manifest_path, true)?; let project = zinc_project::Project::new(manifest.clone(), source); DataDirectory::create(&manifest_path)?; let data_directory_path = DataDirectory::path(&manifest_path); let mut input_path = data_directory_path.clone(); input_path.push(format!( "{}.{}", zinc_const::file_name::INPUT, zinc_const::extension::JSON, )); let mut proving_key_path = data_directory_path.clone(); proving_key_path.push(zinc_const::file_name::PROVING_KEY); let mut verifying_key_path = data_directory_path.clone(); verifying_key_path.push(zinc_const::file_name::VERIFYING_KEY.to_owned()); TargetDirectory::create(&manifest_path, true)?; let target_directory_path = TargetDirectory::path(&manifest_path, true); let mut binary_path = target_directory_path; binary_path.push(format!( "{}.{}", zinc_const::file_name::BINARY, zinc_const::extension::BINARY )); TargetDependenciesDirectory::create(&manifest_path)?; if let Some(dependencies) = manifest.dependencies { let network = zksync::Network::from_str(self.network.as_str()) .map(Network::from) .map_err(Error::NetworkInvalid)?; let url = network .try_into_url() .map_err(Error::NetworkUnimplemented)?; let http_client = HttpClient::new(url); let mut downloader = Downloader::new(&http_client, &manifest_path); downloader.download_dependency_list(dependencies).await?; } Compiler::build_release( self.verbosity, self.quiet, manifest.project.name.as_str(), &manifest.project.version, &manifest_path, false, )?; let bytecode = BytecodeFile::try_from_path(&binary_path, true)?; let input = InputFile::try_from_path(&input_path)?; let arguments = input .inner .as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .get("arguments") .cloned() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? 
.as_object() .ok_or_else(|| Error::MissingInputSection("arguments".to_owned()))? .get(zinc_const::contract::CONSTRUCTOR_IDENTIFIER) .cloned() .ok_or_else(|| { Error::MissingInputSection(zinc_const::contract::CONSTRUCTOR_IDENTIFIER.to_owned()) })?; if!verifying_key_path.exists() { VirtualMachine::setup_contract( self.verbosity, self.quiet, &binary_path, zinc_const::contract::CONSTRUCTOR_IDENTIFIER, &proving_key_path, &verifying_key_path, )?; } let verifying_key = VerifyingKeyFile::try_from(&verifying_key_path)?; if!self.quiet { eprintln!( " {} the instance `{}` of `{} v{}` to network `{}`", "Uploading".bright_green(), self.instance, manifest.project.name, manifest.project.version, network, ); } let response = http_client .publish( zinc_types::PublishRequestQuery::new( manifest.project.name, manifest.project.version, self.instance, self.change_pubkey_fee_token.clone(), ), zinc_types::PublishRequestBody::new( project, bytecode.inner, arguments, verifying_key.inner, ), ) .await?; if!self.quiet { eprintln!( " {} {}", "Address".bright_green(), serde_json::to_string(&response.address) .expect(zinc_const::panic::DATA_CONVERSION) .replace("\"", "") ); } let private_key = PrivateKeyFile::try_from(&manifest_path)?; let signer_private_key: H256 = private_key.inner.parse()?; let signer_address = PackedEthSignature::address_from_private_key(&signer_private_key)?; let wallet_credentials = zksync::WalletCredentials::from_eth_signer( signer_address, PrivateKeySigner::new(signer_private_key), network.into(), ) .await .expect(zinc_const::panic::DATA_CONVERSION); let wallet = zksync::Wallet::new(zksync::RpcProvider::new(network.into()), wallet_credentials) .await?; let initial_transfer = crate::transaction::new_initial( &wallet, response.address, self.change_pubkey_fee_token, response.change_pubkey_fee, ) .await?; let address = response.address; let response = http_client .initialize( zinc_types::InitializeRequestQuery::new(response.address), zinc_types::InitializeRequestBody::new(initial_transfer), ) .await?; if!self.quiet { eprintln!(" {} {}", "Account ID".bright_green(), response.account_id); } Ok(Data::new(address, response.account_id)) } }
Data
identifier_name
manager.rs
use std::rc::Rc; use std::cell::RefCell; use std::path::{Path,PathBuf}; use std::fs; use std::collections::HashMap; use std::io::Write; use Realm; use Result; use Systemd; use RealmSymlinks; use NetworkConfig; use util::*; const REALMS_BASE_PATH: &str = "/realms"; pub struct RealmManager { /// Map from realm name -> realm realm_map: HashMap<String, Realm>, /// Sorted for 'list' realm_list: Vec<Realm>, /// track status of 'current' and 'default' symlinks symlinks: Rc<RefCell<RealmSymlinks>>, /// finds free ip addresses to use network: Rc<RefCell<NetworkConfig>>, /// interface to systemd systemd: Systemd, } impl RealmManager { fn new() -> Result<RealmManager>
fn create_network_config() -> Result<Rc<RefCell<NetworkConfig>>> { let mut network = NetworkConfig::new(); network.add_bridge("clear", "172.17.0.0/24")?; Ok(Rc::new(RefCell::new(network))) } pub fn load() -> Result<RealmManager> { let mut manager = RealmManager::new()?; manager.symlinks.borrow_mut().load_symlinks()?; if! PathBuf::from(REALMS_BASE_PATH).exists() { bail!("realms base directory {} does not exist", REALMS_BASE_PATH); } for dent in fs::read_dir(REALMS_BASE_PATH)? { let path = dent?.path(); manager.process_realm_path(&path) .map_err(|e| format_err!("error processing entry {} in realm base dir: {}", path.display(), e))?; } manager.realm_list.sort_unstable(); Ok(manager) } /// /// Process `path` as an entry from the base realms directory and /// if `path` is a directory, and directory name has prefix "realm-" /// extract chars after prefix as realm name and add a new `Realm` /// instance /// fn process_realm_path(&mut self, path: &Path) -> Result<()> { let meta = path.symlink_metadata()?; if!meta.is_dir() { return Ok(()) } let fname = path_filename(path); if!fname.starts_with("realm-") { return Ok(()) } let (_, realm_name) = fname.split_at(6); if!is_valid_realm_name(realm_name) { warn!("ignoring directory in realm storage which has invalid realm name: {}", realm_name); return Ok(()) } let rootfs = path.join("rootfs"); if!rootfs.exists() { warn!("realm directory {} does not have a rootfs, ignoring", path.display()); return Ok(()) } match Realm::new(realm_name, self.symlinks.clone(), self.network.clone()) { Ok(realm) => { self.add_realm_entry(realm);}, Err(e) => warn!("Ignoring '{}': {}", realm_name, e), }; Ok(()) } fn add_realm_entry(&mut self, realm: Realm) -> &Realm { self.realm_map.insert(realm.name().to_owned(), realm.clone()); self.realm_list.push(realm.clone()); self.realm_map.get(realm.name()).expect("cannot find realm we just added to map") } fn remove_realm_entry(&mut self, name: &str) -> Result<()> { self.realm_map.remove(name); let list = self.realm_list.clone(); let mut have_default = false; self.realm_list.clear(); for realm in list { if realm.name()!= name { if realm.is_default() { have_default = true; } self.realm_list.push(realm); } } if!have_default &&!self.realm_list.is_empty() { self.symlinks.borrow_mut().set_default_symlink(self.realm_list[0].name())?; } Ok(()) } pub fn current_realm_name(&self) -> Option<String> { self.symlinks.borrow().current() } pub fn default_realm_name(&self) -> Option<String> { self.symlinks.borrow().default() } /// /// Execute shell in a realm. If `realm_name` is `None` then exec /// shell in current realm, otherwise look up realm by name. /// /// If `root_shell` is true, open a root shell, otherwise open /// a user (uid = 1000) shell. 
/// pub fn launch_shell(&self, realm_name: Option<&str>, root_shell: bool) -> Result<()> { let run_shell = |realm: &Realm| { info!("opening shell in realm '{}'", realm.name()); realm.exec_shell(root_shell)?; info!("exiting shell in realm '{}'", realm.name()); Ok(()) }; if let Some(name) = realm_name { self.with_named_realm(name, true, run_shell) } else { self.with_current_realm(run_shell) } } pub fn launch_terminal(&self, name: Option<&str>) -> Result<()> { let run_terminal = |realm: &Realm| { info!("opening terminal in realm '{}'", realm.name()); let title_arg = format!("Realm: {}", realm.name()); realm.run(&["/usr/bin/gnome-terminal".to_owned(), "--title".to_owned(), title_arg], true) }; if let Some(name) = name { self.with_named_realm(name, true, run_terminal) } else { self.with_current_realm(run_terminal) } } pub fn run_in_realm(&self, realm_name: Option<&str>, args: &[String], use_launcher: bool) -> Result<()> { if let Some(name) = realm_name { self.with_named_realm(name, true, |realm| realm.run(args, use_launcher)) } else { self.with_current_realm(|realm| realm.run(args, use_launcher)) } } fn with_current_realm<F: Fn(&Realm)->Result<()>>(&self, f: F) -> Result<()> { match self.symlinks.borrow().current() { Some(ref name) => { self.with_named_realm(name, false, f)?; }, None => { warn!("No current realm instance to run command in"); } } Ok(()) } fn with_named_realm<F: Fn(&Realm)->Result<()>>(&self, name: &str, want_start: bool, f: F) -> Result<()> { match self.realm(name) { Some(realm) => { if want_start &&!realm.is_running()? { info!("realm '{}' is not running, starting it.", realm.name()); self.start_realm(realm)?; } f(realm) }, None => bail!("no realm with name '{}' exists", name), } } pub fn list(&self) -> Result<()> { let mut out = ColoredOutput::new(); self.print_realm_header(&mut out); for realm in &self.realm_list { self.print_realm(realm, &mut out)?; } Ok(()) } fn print_realm_header(&self, out: &mut ColoredOutput) { out.write(" REALMS ").bold("bold").write(": current, ").bright("colored") .write(": running, (default) starts on boot\n").write(" ------\n\n"); } fn print_realm(&self, realm: &Realm, out: &mut ColoredOutput) -> Result<()> { let name = format!("{:12}", realm.name()); if realm.is_current() { out.write(" > ").bold(&name); } else if realm.is_running()? { out.write(" ").bright(&name); } else { out.write(" ").dim(&name); } if realm.is_default() { out.write(" (default)"); } out.write("\n"); Ok(()) } pub fn start_default(&mut self) -> Result<()> { let default = self.symlinks.borrow().default(); if let Some(ref realm_name) = default { self.start_named_realm(realm_name)?; return Ok(()); } bail!("No default realm to start"); } pub fn start_named_realm(&mut self, realm_name: &str) -> Result<()> { info!("starting realm '{}'", realm_name); self.with_named_realm(realm_name, false, |realm| self.start_realm(realm)) } fn start_realm(&self, realm: &Realm) -> Result<()> { let mut symlinks = self.symlinks.borrow_mut(); let no_current_realm = symlinks.current().is_none(); // no realm is current, so make this realm the current one // service file for realm will also start desktopd, so this symlink // must be created before launching realm. 
if no_current_realm { symlinks.set_current_symlink(Some(realm.name()))?; } if let Err(e) = realm.start() { if no_current_realm { // oops realm failed to start, need to reset symlink we changed symlinks.set_current_symlink(None)?; } return Err(e); } Ok(()) } pub fn stop_realm(&mut self, name: &str) -> Result<()> { match self.realm_map.get(name) { Some(realm) => { realm.stop()?; self.set_current_if_none()?; }, None => { warn!("Cannot stop '{}'. Realm does not exist", name); return Ok(()) }, }; Ok(()) } fn set_current_if_none(&self) -> Result<()> { let mut symlinks = self.symlinks.borrow_mut(); if symlinks.current().is_some() { return Ok(()); } if let Some(ref name) = self.find_running_realm_name()? { symlinks.set_current_symlink(Some(name))?; self.systemd.restart_desktopd()?; } else { self.systemd.stop_desktopd()?; } Ok(()) } fn find_running_realm_name(&self) -> Result<Option<String>> { for realm in self.realm_map.values() { if realm.is_running()? { return Ok(Some(realm.name().to_string())); } } Ok(None) } pub fn set_current_by_name(&self, realm_name: &str) -> Result<()> { self.with_named_realm(realm_name, false, |realm| realm.set_current()) } pub fn set_default_by_name(&self, realm_name: &str) -> Result<()> { self.with_named_realm(realm_name, false, |realm| realm.set_default()) } pub fn realm_name_exists(&self, name: &str) -> bool { self.realm_map.contains_key(name) } pub fn realm(&self, name: &str) -> Option<&Realm> { self.realm_map.get(name) } pub fn new_realm(&mut self, name: &str) -> Result<&Realm> { if!is_valid_realm_name(name) { bail!("'{}' is not a valid realm name. Only letters, numbers and dash '-' symbol allowed in name. First character must be a letter", name); } else if self.realm_name_exists(name) { bail!("A realm with name '{}' already exists", name); } let realm = Realm::new(name, self.symlinks.clone(), self.network.clone())?; match realm.create_realm_directory() { Ok(()) => Ok(self.add_realm_entry(realm)), Err(e) => { fs::remove_dir_all(realm.base_path())?; Err(e) }, } } pub fn remove_realm(&mut self, realm_name: &str, confirm: bool, save_home: bool) -> Result<()> { self.with_named_realm(realm_name, false, |realm| { if realm.base_path().join(".realmlock").exists() { warn!("Realm '{}' has.realmlock file in base directory to protect it from deletion.", realm.name()); warn!("Remove this file from {} before running'realms remove {}' if you really want to delete it", realm.base_path().display(), realm.name()); return Ok(()); } let mut save_home = save_home; if confirm { if!RealmManager::confirm_delete(realm.name(), &mut save_home)? 
{ return Ok(()); } } realm.delete_realm(save_home)?; self.set_current_if_none() })?; self.remove_realm_entry(realm_name)?; Ok(()) } fn confirm_delete(realm_name: &str, save_home: &mut bool) -> Result<bool> { let you_sure = RealmManager::prompt_user(&format!("Are you sure you want to remove realm '{}'?", realm_name), false)?; if !you_sure { info!("Ok, not removing"); return Ok(false); } println!("\nThe home directory for this realm can be saved in /realms/removed/home-{}\n", realm_name); *save_home = RealmManager::prompt_user("Would you like to save the home directory?", true)?; Ok(true) } fn prompt_user(prompt: &str, default_y: bool) -> Result<bool> { let yn = if default_y { "(Y/n)" } else { "(y/N)" }; use std::io::{stdin, stdout}; print!("{} {} : ", prompt, yn); stdout().flush()?; let mut line = String::new(); stdin().read_line(&mut line)?; let yes = match line.trim().chars().next() { Some(c) => c == 'Y' || c == 'y', None => default_y, }; Ok(yes) } pub fn base_appimg_update(&self) -> Result<()> { info!("Entering root shell on base appimg"); self.systemd.base_image_update_shell() } }
{ let network = RealmManager::create_network_config()?; Ok(RealmManager { realm_map: HashMap::new(), realm_list: Vec::new(), symlinks: Rc::new(RefCell::new(RealmSymlinks::new())), network: network.clone(), systemd: Systemd::new(network), }) }
identifier_body
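The `start_realm` logic in the row above sets the 'current' symlink before launching the realm and resets it if the launch fails. Below is a minimal, std-only sketch of that set-then-roll-back pattern; the `State` type and its method names are illustrative stand-ins, not part of the code above.

```rust
use std::cell::RefCell;

// Toy stand-in for the symlink state tracked by RealmSymlinks.
struct State { current: RefCell<Option<String>> }

impl State {
    // Point "current" at `name`, run the fallible action, and undo the
    // change if the action fails, mirroring start_realm's error handling.
    fn start_with_rollback<F>(&self, name: &str, action: F) -> Result<(), String>
    where F: FnOnce() -> Result<(), String> {
        let was_unset = self.current.borrow().is_none();
        if was_unset {
            *self.current.borrow_mut() = Some(name.to_string());
        }
        if let Err(e) = action() {
            if was_unset {
                // Roll back: only undo what this call itself changed.
                *self.current.borrow_mut() = None;
            }
            return Err(e);
        }
        Ok(())
    }
}

fn main() {
    let state = State { current: RefCell::new(None) };
    // A failed start leaves the state exactly as it was before the call.
    let _ = state.start_with_rollback("work", || Err("boot failed".into()));
    assert!(state.current.borrow().is_none());
}
```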
manager.rs
use std::rc::Rc; use std::cell::RefCell; use std::path::{Path, PathBuf}; use std::fs; use std::collections::HashMap; use std::io::Write; use Realm; use Result; use Systemd; use RealmSymlinks; use NetworkConfig; use util::*; const REALMS_BASE_PATH: &str = "/realms"; pub struct RealmManager { /// Map from realm name -> realm realm_map: HashMap<String, Realm>, /// Sorted for 'list' realm_list: Vec<Realm>, /// track status of 'current' and 'default' symlinks symlinks: Rc<RefCell<RealmSymlinks>>, /// finds free ip addresses to use network: Rc<RefCell<NetworkConfig>>, /// interface to systemd systemd: Systemd, } impl RealmManager { fn new() -> Result<RealmManager> { let network = RealmManager::create_network_config()?; Ok(RealmManager { realm_map: HashMap::new(), realm_list: Vec::new(), symlinks: Rc::new(RefCell::new(RealmSymlinks::new())), network: network.clone(), systemd: Systemd::new(network), }) } fn create_network_config() -> Result<Rc<RefCell<NetworkConfig>>> { let mut network = NetworkConfig::new(); network.add_bridge("clear", "172.17.0.0/24")?; Ok(Rc::new(RefCell::new(network))) } pub fn load() -> Result<RealmManager> { let mut manager = RealmManager::new()?; manager.symlinks.borrow_mut().load_symlinks()?; if !PathBuf::from(REALMS_BASE_PATH).exists() { bail!("realms base directory {} does not exist", REALMS_BASE_PATH); } for dent in fs::read_dir(REALMS_BASE_PATH)? { let path = dent?.path(); manager.process_realm_path(&path) .map_err(|e| format_err!("error processing entry {} in realm base dir: {}", path.display(), e))?; } manager.realm_list.sort_unstable(); Ok(manager) } /// /// Process `path` as an entry from the base realms directory: if /// `path` is a directory whose name has the prefix "realm-", extract /// the characters after the prefix as the realm name and add a new /// `Realm` instance. /// fn process_realm_path(&mut self, path: &Path) -> Result<()> { let meta = path.symlink_metadata()?; if !meta.is_dir() { return Ok(()) } let fname = path_filename(path); if !fname.starts_with("realm-") { return Ok(()) } let (_, realm_name) = fname.split_at(6); if !is_valid_realm_name(realm_name) { warn!("ignoring directory in realm storage which has invalid realm name: {}", realm_name); return Ok(()) } let rootfs = path.join("rootfs"); if !rootfs.exists() { warn!("realm directory {} does not have a rootfs, ignoring", path.display()); return Ok(()) } match Realm::new(realm_name, self.symlinks.clone(), self.network.clone()) { Ok(realm) => { self.add_realm_entry(realm); }, Err(e) => warn!("Ignoring '{}': {}", realm_name, e), }; Ok(()) } fn add_realm_entry(&mut self, realm: Realm) -> &Realm { self.realm_map.insert(realm.name().to_owned(), realm.clone()); self.realm_list.push(realm.clone()); self.realm_map.get(realm.name()).expect("cannot find realm we just added to map") } fn remove_realm_entry(&mut self, name: &str) -> Result<()> { self.realm_map.remove(name); let list = self.realm_list.clone(); let mut have_default = false; self.realm_list.clear(); for realm in list { if realm.name() != name { if realm.is_default() { have_default = true; } self.realm_list.push(realm); } } if !have_default && !self.realm_list.is_empty() { self.symlinks.borrow_mut().set_default_symlink(self.realm_list[0].name())?; } Ok(()) } pub fn current_realm_name(&self) -> Option<String> { self.symlinks.borrow().current() } pub fn default_realm_name(&self) -> Option<String> { self.symlinks.borrow().default() } /// /// Execute shell in a realm. If `realm_name` is `None` then exec /// shell in current realm, otherwise look up realm by name.
/// /// If `root_shell` is true, open a root shell, otherwise open /// a user (uid = 1000) shell. /// pub fn launch_shell(&self, realm_name: Option<&str>, root_shell: bool) -> Result<()> { let run_shell = |realm: &Realm| { info!("opening shell in realm '{}'", realm.name()); realm.exec_shell(root_shell)?; info!("exiting shell in realm '{}'", realm.name()); Ok(()) }; if let Some(name) = realm_name { self.with_named_realm(name, true, run_shell) } else { self.with_current_realm(run_shell) } } pub fn launch_terminal(&self, name: Option<&str>) -> Result<()> { let run_terminal = |realm: &Realm| { info!("opening terminal in realm '{}'", realm.name()); let title_arg = format!("Realm: {}", realm.name()); realm.run(&["/usr/bin/gnome-terminal".to_owned(), "--title".to_owned(), title_arg], true) }; if let Some(name) = name { self.with_named_realm(name, true, run_terminal) } else { self.with_current_realm(run_terminal) } } pub fn run_in_realm(&self, realm_name: Option<&str>, args: &[String], use_launcher: bool) -> Result<()> { if let Some(name) = realm_name { self.with_named_realm(name, true, |realm| realm.run(args, use_launcher)) } else { self.with_current_realm(|realm| realm.run(args, use_launcher)) } } fn with_current_realm<F: Fn(&Realm) -> Result<()>>(&self, f: F) -> Result<()> { match self.symlinks.borrow().current() { Some(ref name) => { self.with_named_realm(name, false, f)?; }, None => { warn!("No current realm instance to run command in"); } } Ok(()) } fn with_named_realm<F: Fn(&Realm) -> Result<()>>(&self, name: &str, want_start: bool, f: F) -> Result<()> { match self.realm(name) { Some(realm) => { if want_start && !realm.is_running()? { info!("realm '{}' is not running, starting it.", realm.name()); self.start_realm(realm)?; } f(realm) }, None => bail!("no realm with name '{}' exists", name), } } pub fn list(&self) -> Result<()> { let mut out = ColoredOutput::new(); self.print_realm_header(&mut out); for realm in &self.realm_list { self.print_realm(realm, &mut out)?; } Ok(()) } fn print_realm_header(&self, out: &mut ColoredOutput) { out.write(" REALMS ").bold("bold").write(": current, ").bright("colored") .write(": running, (default) starts on boot\n").write(" ------\n\n"); } fn print_realm(&self, realm: &Realm, out: &mut ColoredOutput) -> Result<()> { let name = format!("{:12}", realm.name()); if realm.is_current() { out.write(" > ").bold(&name); } else if realm.is_running()? { out.write(" ").bright(&name); } else { out.write(" ").dim(&name); } if realm.is_default() { out.write(" (default)"); } out.write("\n"); Ok(()) } pub fn start_default(&mut self) -> Result<()> { let default = self.symlinks.borrow().default(); if let Some(ref realm_name) = default { self.start_named_realm(realm_name)?; return Ok(()); } bail!("No default realm to start"); } pub fn start_named_realm(&mut self, realm_name: &str) -> Result<()> { info!("starting realm '{}'", realm_name); self.with_named_realm(realm_name, false, |realm| self.start_realm(realm)) } fn start_realm(&self, realm: &Realm) -> Result<()> { let mut symlinks = self.symlinks.borrow_mut(); let no_current_realm = symlinks.current().is_none(); // no realm is current, so make this realm the current one // service file for realm will also start desktopd, so this symlink // must be created before launching realm.
if no_current_realm { symlinks.set_current_symlink(Some(realm.name()))?; } if let Err(e) = realm.start() { if no_current_realm { // oops realm failed to start, need to reset symlink we changed symlinks.set_current_symlink(None)?; } return Err(e); } Ok(()) } pub fn stop_realm(&mut self, name: &str) -> Result<()> { match self.realm_map.get(name) { Some(realm) => { realm.stop()?; self.set_current_if_none()?; }, None => { warn!("Cannot stop '{}'. Realm does not exist", name); return Ok(()) }, }; Ok(()) } fn set_current_if_none(&self) -> Result<()> { let mut symlinks = self.symlinks.borrow_mut(); if symlinks.current().is_some() { return Ok(()); } if let Some(ref name) = self.find_running_realm_name()? { symlinks.set_current_symlink(Some(name))?; self.systemd.restart_desktopd()?; } else { self.systemd.stop_desktopd()?; } Ok(()) } fn find_running_realm_name(&self) -> Result<Option<String>> { for realm in self.realm_map.values() { if realm.is_running()? { return Ok(Some(realm.name().to_string())); } } Ok(None) } pub fn set_current_by_name(&self, realm_name: &str) -> Result<()> { self.with_named_realm(realm_name, false, |realm| realm.set_current()) }
pub fn set_default_by_name(&self, realm_name: &str) -> Result<()> { self.with_named_realm(realm_name, false, |realm| realm.set_default()) } pub fn realm_name_exists(&self, name: &str) -> bool { self.realm_map.contains_key(name) } pub fn realm(&self, name: &str) -> Option<&Realm> { self.realm_map.get(name) } pub fn new_realm(&mut self, name: &str) -> Result<&Realm> { if !is_valid_realm_name(name) { bail!("'{}' is not a valid realm name. Only letters, numbers and dash '-' symbol allowed in name. First character must be a letter", name); } else if self.realm_name_exists(name) { bail!("A realm with name '{}' already exists", name); } let realm = Realm::new(name, self.symlinks.clone(), self.network.clone())?; match realm.create_realm_directory() { Ok(()) => Ok(self.add_realm_entry(realm)), Err(e) => { fs::remove_dir_all(realm.base_path())?; Err(e) }, } } pub fn remove_realm(&mut self, realm_name: &str, confirm: bool, save_home: bool) -> Result<()> { self.with_named_realm(realm_name, false, |realm| { if realm.base_path().join(".realmlock").exists() { warn!("Realm '{}' has .realmlock file in base directory to protect it from deletion.", realm.name()); warn!("Remove this file from {} before running 'realms remove {}' if you really want to delete it", realm.base_path().display(), realm.name()); return Ok(()); } let mut save_home = save_home; if confirm { if !RealmManager::confirm_delete(realm.name(), &mut save_home)? { return Ok(()); } } realm.delete_realm(save_home)?; self.set_current_if_none() })?; self.remove_realm_entry(realm_name)?; Ok(()) } fn confirm_delete(realm_name: &str, save_home: &mut bool) -> Result<bool> { let you_sure = RealmManager::prompt_user(&format!("Are you sure you want to remove realm '{}'?", realm_name), false)?; if !you_sure { info!("Ok, not removing"); return Ok(false); } println!("\nThe home directory for this realm can be saved in /realms/removed/home-{}\n", realm_name); *save_home = RealmManager::prompt_user("Would you like to save the home directory?", true)?; Ok(true) } fn prompt_user(prompt: &str, default_y: bool) -> Result<bool> { let yn = if default_y { "(Y/n)" } else { "(y/N)" }; use std::io::{stdin, stdout}; print!("{} {} : ", prompt, yn); stdout().flush()?; let mut line = String::new(); stdin().read_line(&mut line)?; let yes = match line.trim().chars().next() { Some(c) => c == 'Y' || c == 'y', None => default_y, }; Ok(yes) } pub fn base_appimg_update(&self) -> Result<()> { info!("Entering root shell on base appimg"); self.systemd.base_image_update_shell() } }
random_line_split
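`process_realm_path` in the row above recovers a realm name by stripping the "realm-" prefix with `split_at(6)` and validating the remainder. A self-contained sketch of the same parse-and-validate flow follows; the `is_valid_realm_name` body here is an assumption about what `util::is_valid_realm_name` checks, inferred from the error message in `new_realm`.

```rust
// Assumed validation rules, matching the error text in new_realm:
// letters, digits and '-' only, first character a letter.
fn is_valid_realm_name(name: &str) -> bool {
    let mut chars = name.chars();
    matches!(chars.next(), Some(c) if c.is_ascii_alphabetic())
        && chars.all(|c| c.is_ascii_alphanumeric() || c == '-')
}

// Extract the realm name from a directory name such as "realm-work".
fn realm_name_from_dir(dir_name: &str) -> Option<&str> {
    if !dir_name.starts_with("realm-") {
        return None;
    }
    let (_, name) = dir_name.split_at(6); // skip the "realm-" prefix
    if is_valid_realm_name(name) { Some(name) } else { None }
}

fn main() {
    assert_eq!(realm_name_from_dir("realm-work"), Some("work"));
    assert_eq!(realm_name_from_dir("realm-9bad"), None); // must start with a letter
    assert_eq!(realm_name_from_dir("rootfs"), None);     // no prefix
}
```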
manager.rs
use std::rc::Rc; use std::cell::RefCell; use std::path::{Path, PathBuf}; use std::fs; use std::collections::HashMap; use std::io::Write; use Realm; use Result; use Systemd; use RealmSymlinks; use NetworkConfig; use util::*; const REALMS_BASE_PATH: &str = "/realms"; pub struct RealmManager { /// Map from realm name -> realm realm_map: HashMap<String, Realm>, /// Sorted for 'list' realm_list: Vec<Realm>, /// track status of 'current' and 'default' symlinks symlinks: Rc<RefCell<RealmSymlinks>>, /// finds free ip addresses to use network: Rc<RefCell<NetworkConfig>>, /// interface to systemd systemd: Systemd, } impl RealmManager { fn new() -> Result<RealmManager> { let network = RealmManager::create_network_config()?; Ok(RealmManager { realm_map: HashMap::new(), realm_list: Vec::new(), symlinks: Rc::new(RefCell::new(RealmSymlinks::new())), network: network.clone(), systemd: Systemd::new(network), }) } fn create_network_config() -> Result<Rc<RefCell<NetworkConfig>>> { let mut network = NetworkConfig::new(); network.add_bridge("clear", "172.17.0.0/24")?; Ok(Rc::new(RefCell::new(network))) } pub fn load() -> Result<RealmManager> { let mut manager = RealmManager::new()?; manager.symlinks.borrow_mut().load_symlinks()?; if !PathBuf::from(REALMS_BASE_PATH).exists() { bail!("realms base directory {} does not exist", REALMS_BASE_PATH); } for dent in fs::read_dir(REALMS_BASE_PATH)? { let path = dent?.path(); manager.process_realm_path(&path) .map_err(|e| format_err!("error processing entry {} in realm base dir: {}", path.display(), e))?; } manager.realm_list.sort_unstable(); Ok(manager) } /// /// Process `path` as an entry from the base realms directory: if /// `path` is a directory whose name has the prefix "realm-", extract /// the characters after the prefix as the realm name and add a new /// `Realm` instance. /// fn process_realm_path(&mut self, path: &Path) -> Result<()> { let meta = path.symlink_metadata()?; if !meta.is_dir() { return Ok(()) } let fname = path_filename(path); if !fname.starts_with("realm-") { return Ok(()) } let (_, realm_name) = fname.split_at(6); if !is_valid_realm_name(realm_name) { warn!("ignoring directory in realm storage which has invalid realm name: {}", realm_name); return Ok(()) } let rootfs = path.join("rootfs"); if !rootfs.exists() { warn!("realm directory {} does not have a rootfs, ignoring", path.display()); return Ok(()) } match Realm::new(realm_name, self.symlinks.clone(), self.network.clone()) { Ok(realm) => { self.add_realm_entry(realm); }, Err(e) => warn!("Ignoring '{}': {}", realm_name, e), }; Ok(()) } fn
(&mut self, realm: Realm) -> &Realm { self.realm_map.insert(realm.name().to_owned(), realm.clone()); self.realm_list.push(realm.clone()); self.realm_map.get(realm.name()).expect("cannot find realm we just added to map") } fn remove_realm_entry(&mut self, name: &str) -> Result<()> { self.realm_map.remove(name); let list = self.realm_list.clone(); let mut have_default = false; self.realm_list.clear(); for realm in list { if realm.name() != name { if realm.is_default() { have_default = true; } self.realm_list.push(realm); } } if !have_default && !self.realm_list.is_empty() { self.symlinks.borrow_mut().set_default_symlink(self.realm_list[0].name())?; } Ok(()) } pub fn current_realm_name(&self) -> Option<String> { self.symlinks.borrow().current() } pub fn default_realm_name(&self) -> Option<String> { self.symlinks.borrow().default() } /// /// Execute shell in a realm. If `realm_name` is `None` then exec /// shell in current realm, otherwise look up realm by name. /// /// If `root_shell` is true, open a root shell, otherwise open /// a user (uid = 1000) shell. /// pub fn launch_shell(&self, realm_name: Option<&str>, root_shell: bool) -> Result<()> { let run_shell = |realm: &Realm| { info!("opening shell in realm '{}'", realm.name()); realm.exec_shell(root_shell)?; info!("exiting shell in realm '{}'", realm.name()); Ok(()) }; if let Some(name) = realm_name { self.with_named_realm(name, true, run_shell) } else { self.with_current_realm(run_shell) } } pub fn launch_terminal(&self, name: Option<&str>) -> Result<()> { let run_terminal = |realm: &Realm| { info!("opening terminal in realm '{}'", realm.name()); let title_arg = format!("Realm: {}", realm.name()); realm.run(&["/usr/bin/gnome-terminal".to_owned(), "--title".to_owned(), title_arg], true) }; if let Some(name) = name { self.with_named_realm(name, true, run_terminal) } else { self.with_current_realm(run_terminal) } } pub fn run_in_realm(&self, realm_name: Option<&str>, args: &[String], use_launcher: bool) -> Result<()> { if let Some(name) = realm_name { self.with_named_realm(name, true, |realm| realm.run(args, use_launcher)) } else { self.with_current_realm(|realm| realm.run(args, use_launcher)) } } fn with_current_realm<F: Fn(&Realm) -> Result<()>>(&self, f: F) -> Result<()> { match self.symlinks.borrow().current() { Some(ref name) => { self.with_named_realm(name, false, f)?; }, None => { warn!("No current realm instance to run command in"); } } Ok(()) } fn with_named_realm<F: Fn(&Realm) -> Result<()>>(&self, name: &str, want_start: bool, f: F) -> Result<()> { match self.realm(name) { Some(realm) => { if want_start && !realm.is_running()? { info!("realm '{}' is not running, starting it.", realm.name()); self.start_realm(realm)?; } f(realm) }, None => bail!("no realm with name '{}' exists", name), } } pub fn list(&self) -> Result<()> { let mut out = ColoredOutput::new(); self.print_realm_header(&mut out); for realm in &self.realm_list { self.print_realm(realm, &mut out)?; } Ok(()) } fn print_realm_header(&self, out: &mut ColoredOutput) { out.write(" REALMS ").bold("bold").write(": current, ").bright("colored") .write(": running, (default) starts on boot\n").write(" ------\n\n"); } fn print_realm(&self, realm: &Realm, out: &mut ColoredOutput) -> Result<()> { let name = format!("{:12}", realm.name()); if realm.is_current() { out.write(" > ").bold(&name); } else if realm.is_running()?
{ out.write(" ").bright(&name); } else { out.write(" ").dim(&name); } if realm.is_default() { out.write(" (default)"); } out.write("\n"); Ok(()) } pub fn start_default(&mut self) -> Result<()> { let default = self.symlinks.borrow().default(); if let Some(ref realm_name) = default { self.start_named_realm(realm_name)?; return Ok(()); } bail!("No default realm to start"); } pub fn start_named_realm(&mut self, realm_name: &str) -> Result<()> { info!("starting realm '{}'", realm_name); self.with_named_realm(realm_name, false, |realm| self.start_realm(realm)) } fn start_realm(&self, realm: &Realm) -> Result<()> { let mut symlinks = self.symlinks.borrow_mut(); let no_current_realm = symlinks.current().is_none(); // no realm is current, so make this realm the current one // service file for realm will also start desktopd, so this symlink // must be created before launching realm. if no_current_realm { symlinks.set_current_symlink(Some(realm.name()))?; } if let Err(e) = realm.start() { if no_current_realm { // oops realm failed to start, need to reset symlink we changed symlinks.set_current_symlink(None)?; } return Err(e); } Ok(()) } pub fn stop_realm(&mut self, name: &str) -> Result<()> { match self.realm_map.get(name) { Some(realm) => { realm.stop()?; self.set_current_if_none()?; }, None => { warn!("Cannot stop '{}'. Realm does not exist", name); return Ok(()) }, }; Ok(()) } fn set_current_if_none(&self) -> Result<()> { let mut symlinks = self.symlinks.borrow_mut(); if symlinks.current().is_some() { return Ok(()); } if let Some(ref name) = self.find_running_realm_name()? { symlinks.set_current_symlink(Some(name))?; self.systemd.restart_desktopd()?; } else { self.systemd.stop_desktopd()?; } Ok(()) } fn find_running_realm_name(&self) -> Result<Option<String>> { for realm in self.realm_map.values() { if realm.is_running()? { return Ok(Some(realm.name().to_string())); } } Ok(None) } pub fn set_current_by_name(&self, realm_name: &str) -> Result<()> { self.with_named_realm(realm_name, false, |realm| realm.set_current()) } pub fn set_default_by_name(&self, realm_name: &str) -> Result<()> { self.with_named_realm(realm_name, false, |realm| realm.set_default()) } pub fn realm_name_exists(&self, name: &str) -> bool { self.realm_map.contains_key(name) } pub fn realm(&self, name: &str) -> Option<&Realm> { self.realm_map.get(name) } pub fn new_realm(&mut self, name: &str) -> Result<&Realm> { if !is_valid_realm_name(name) { bail!("'{}' is not a valid realm name. Only letters, numbers and dash '-' symbol allowed in name. First character must be a letter", name); } else if self.realm_name_exists(name) { bail!("A realm with name '{}' already exists", name); } let realm = Realm::new(name, self.symlinks.clone(), self.network.clone())?; match realm.create_realm_directory() { Ok(()) => Ok(self.add_realm_entry(realm)), Err(e) => { fs::remove_dir_all(realm.base_path())?; Err(e) }, } } pub fn remove_realm(&mut self, realm_name: &str, confirm: bool, save_home: bool) -> Result<()> { self.with_named_realm(realm_name, false, |realm| { if realm.base_path().join(".realmlock").exists() { warn!("Realm '{}' has .realmlock file in base directory to protect it from deletion.", realm.name()); warn!("Remove this file from {} before running 'realms remove {}' if you really want to delete it", realm.base_path().display(), realm.name()); return Ok(()); } let mut save_home = save_home; if confirm { if !RealmManager::confirm_delete(realm.name(), &mut save_home)?
{ return Ok(()); } } realm.delete_realm(save_home)?; self.set_current_if_none() })?; self.remove_realm_entry(realm_name)?; Ok(()) } fn confirm_delete(realm_name: &str, save_home: &mut bool) -> Result<bool> { let you_sure = RealmManager::prompt_user(&format!("Are you sure you want to remove realm '{}'?", realm_name), false)?; if !you_sure { info!("Ok, not removing"); return Ok(false); } println!("\nThe home directory for this realm can be saved in /realms/removed/home-{}\n", realm_name); *save_home = RealmManager::prompt_user("Would you like to save the home directory?", true)?; Ok(true) } fn prompt_user(prompt: &str, default_y: bool) -> Result<bool> { let yn = if default_y { "(Y/n)" } else { "(y/N)" }; use std::io::{stdin, stdout}; print!("{} {} : ", prompt, yn); stdout().flush()?; let mut line = String::new(); stdin().read_line(&mut line)?; let yes = match line.trim().chars().next() { Some(c) => c == 'Y' || c == 'y', None => default_y, }; Ok(yes) } pub fn base_appimg_update(&self) -> Result<()> { info!("Entering root shell on base appimg"); self.systemd.base_image_update_shell() } }
add_realm_entry
identifier_name
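`with_named_realm` is the single point where a name is resolved to a `Realm` and handed to a per-command closure, so the "no such realm" handling lives in one place. Below is a cut-down, std-only sketch of that lookup-then-apply shape; `Item` and `Manager` are toy stand-ins for `Realm` and `RealmManager`.

```rust
use std::collections::HashMap;

struct Item { running: bool }

struct Manager { items: HashMap<String, Item> }

impl Manager {
    // Resolve `name`, then hand the item to the caller's closure; all
    // "no such name" handling lives here instead of at every call site.
    fn with_named<F: Fn(&Item) -> Result<(), String>>(
        &self,
        name: &str,
        f: F,
    ) -> Result<(), String> {
        match self.items.get(name) {
            Some(item) => f(item),
            None => Err(format!("no item with name '{}' exists", name)),
        }
    }
}

fn main() {
    let mut items = HashMap::new();
    items.insert("work".to_string(), Item { running: true });
    let manager = Manager { items };

    // Callers pass only the behaviour that differs per command.
    let result = manager.with_named("work", |item| {
        if item.running { Ok(()) } else { Err("not running".into()) }
    });
    assert!(result.is_ok());
    assert!(manager.with_named("missing", |_| Ok(())).is_err());
}
```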
http.rs
//! This example uses [hyper][] to create an http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]: https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us implement traits for types defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> { use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) } } define_vmtype!
{ StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of an http body that is in the process of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` require a `std::fmt::Debug` implementation so they can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will look up `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into a `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is
stream.poll().map(|async| async.map(IO::Value)) }))) } // An http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn write_response( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes` into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! { method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> Self::Future { let gluon_request = record_no_decl!
{ // Here we use the `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender so that the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => { let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) } } } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
// polled until completion. After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap();
random_line_split
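The `Wrap<T>` newtype in the row above exists because Rust's orphan rule forbids implementing a foreign trait for a foreign type (here, gluon's traits for hyper's types). Below is a self-contained sketch of the same workaround using only std types; `Wrap` here is a local re-creation for illustration.

```rust
use std::fmt;
use std::time::Duration;

// Neither `Display` nor `Duration` is defined in this crate, so
// `impl fmt::Display for Duration` would violate the orphan rule.
// Wrapping `Duration` in a local newtype makes the impl legal.
struct Wrap<T>(T);

impl fmt::Display for Wrap<Duration> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}.{:03}s", self.0.as_secs(), self.0.subsec_millis())
    }
}

fn main() {
    // Callers wrap the foreign value at the boundary, exactly as the
    // hyper example wraps `Method` and `StatusCode` before marshalling.
    println!("{}", Wrap(Duration::from_millis(1500))); // prints "1.500s"
}
```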
http.rs
//! This example uses [hyper][] to create an http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]: https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us implement traits for types defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()>
} define_vmtype! { StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of an http body that is in the process of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` require a `std::fmt::Debug` implementation so they can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will look up `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into a `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is // polled until completion.
After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap(); stream.poll().map(|async| async.map(IO::Value)) }))) } // An http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn write_response( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes` into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! { method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> Self::Future { let gluon_request = record_no_decl!
{ // Here we use the `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender so that the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => { let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) } } } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
{ use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) }
identifier_body
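The `push` body in the row above converts a `hyper::Method` into the integer tag of the matching gluon variant and rejects methods the gluon type does not model. Below is a std-only sketch of that enum-to-tag mapping with an explicit error branch; the tag numbers mirror the example's Get/Post/Delete ordering, and the `Method` enum here is a local stand-in.

```rust
#[derive(Debug)]
enum Method { Get, Post, Delete, Patch }

// Map a method onto the tag of the corresponding variant in some
// foreign three-variant type; anything else is an explicit error
// rather than a silently wrong tag.
fn to_tag(method: &Method) -> Result<u32, String> {
    Ok(match method {
        Method::Get => 0,
        Method::Post => 1,
        Method::Delete => 2,
        other => return Err(format!("Method `{:?}` does not exist in gluon", other)),
    })
}

fn main() {
    assert_eq!(to_tag(&Method::Post), Ok(1));
    assert!(to_tag(&Method::Patch).is_err());
}
```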
http.rs
//! This example uses [hyper][] to create an http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]: https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us implement traits for types defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> { use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) } } define_vmtype!
{ StatusCode } impl<'vm> Getable<'vm> for Wrap<StatusCode> { fn from_value(_: &'vm Thread, value: Variants) -> Self { use hyper::StatusCode::*; match value.as_ref() { ValueRef::Data(data) => Wrap(match data.tag() { 0 => Ok, 1 => NotFound, 2 => InternalServerError, _ => panic!("Unexpected tag"), }), _ => panic!(), } } } // Representation of an http body that is in the process of being read pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>); // By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon // threads impl Userdata for Body {} // Types implementing `Userdata` require a `std::fmt::Debug` implementation so they can be displayed impl fmt::Debug for Body { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Body") } } // `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the // value for garbage collected references. Normally objects do not contain any references so this // can be empty impl Traverseable for Body { fn traverse(&self, _: &mut Gc) {} } // `VmType` is the last trait required for a type to implement `Userdata` and defines the type used // in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough // as the default implementation of `make_type` will look up `VmType::Type` from the virtual machine // which should have been registered earlier with `Thread::register_type` impl VmType for Body { type Type = Self; } // Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation // into a `&Body` argument fn read_chunk( body: &Body, ) -> FutureResult< Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>, > { use futures::future::poll_fn; let body = body.0.clone(); // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is // polled until completion.
After `poll` returns `Ready` the value is then returned to the // gluon function which called `read_chunk` FutureResult(Box::new(poll_fn(move || { let mut stream = body.lock().unwrap(); stream.poll().map(|async| async.map(IO::Value)) }))) } // An http body that is being written pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>); impl fmt::Debug for ResponseBody { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "hyper::Response") } } impl Userdata for ResponseBody {} impl Traverseable for ResponseBody { fn traverse(&self, _: &mut Gc) {} } impl VmType for ResponseBody { type Type = Self; } fn write_response( response: &ResponseBody, bytes: &[u8], ) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> { use futures::future::poll_fn; use futures::AsyncSink; // Turn `bytes` into a `Chunk` which can be sent to the http body let mut unsent_chunk = Some(Ok(bytes.to_owned().into())); let response = response.0.clone(); FutureResult(Box::new(poll_fn(move || { info!("Starting response send"); let mut sender = response.lock().unwrap(); let sender = sender .as_mut() .expect("Sender has been dropped while still in use"); if let Some(chunk) = unsent_chunk.take() { match sender.start_send(chunk) { Ok(AsyncSink::NotReady(chunk)) => { unsent_chunk = Some(chunk); return Ok(Async::NotReady); } Ok(AsyncSink::Ready) => (), Err(_) => { info!("Could not send http response"); return Ok(Async::Ready(IO::Value(()))); } } } match sender.poll_complete() { Ok(async) => Ok(async.map(IO::Value)), Err(_) => { info!("Could not send http response"); Ok(Async::Ready(IO::Value(()))) } } }))) } // Next we define some record types which are marshalled to and from gluon. These have equivalent // definitions in http_types.glu field_decl! { method, uri, status, body, request, response } type Request = record_type!{ method => Wrap<Method>, uri => String, body => Body }; type Response = record_type!{ status => Wrap<StatusCode> }; type HttpState = record_type!{ request => Request, response => ResponseBody }; fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> { let WithVM { value: handler, vm: thread, } = value; use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse}; // Retrieve the `handle` function from the http module which we use to evaluate values of type // `Handler Response` type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>; let handle: Function<RootedThread, ListenFn> = thread .get_global("examples.http.handle") .unwrap_or_else(|err| panic!("{}", err)); struct Listen { handle: Function<RootedThread, ListenFn>, handler: OpaqueValue<RootedThread, Handler<Response>>, } impl Service for Listen { type Request = HyperRequest; type Response = HyperResponse; type Error = hyper::Error; type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>; fn call(&self, request: HyperRequest) -> Self::Future { let gluon_request = record_no_decl!
{ // Here we use the `Wrap` type to make `hyper::Request` into a type that can be // pushed to gluon method => Wrap(request.method().clone()), uri => request.uri().to_string(), // Since `Body` implements `Userdata` it can be directly pushed to gluon body => Body(Arc::new(Mutex::new(Box::new(request.body() .map_err(|err| VmError::Message(format!("{}", err))) // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is // marshalled to `Array Byte` in gluon .map(PushAsRef::<_, [u8]>::new))))) }; let (response_sender, response_body) = hyper::Body::pair(); let response_sender = Arc::new(Mutex::new(Some(response_sender))); let http_state = record_no_decl!{ request => gluon_request, response => ResponseBody(response_sender.clone()) }; Box::new( self.handle .clone() .call_async(self.handler.clone(), http_state) .then(move |result| match result { Ok(value) => { match value { IO::Value(record_p!{ status }) => { // Drop the sender so that the receiver stops waiting for // more chunks *response_sender.lock().unwrap() = None; Ok( HyperResponse::new() .with_status(status.0) .with_body(response_body), ) } IO::Exception(err) => {
} } Err(err) => { let _ = stderr().write(format!("{}", err).as_bytes()); Ok(HyperResponse::new().with_status(StatusCode::InternalServerError)) } }), ) } } let addr = format!("127.0.0.1:{}", port).parse().unwrap(); let result = Http::new() .bind(&addr, move || { Ok(Listen { handle: handle.clone(), handler: handler.clone(), }) }) .and_then(|server| server.run()); match result { Ok(()) => IO::Value(()), Err(err) => IO::Exception(format!("{}", err)), } } // To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a // separate function which is called before loading `http_types` pub fn load_types(vm: &Thread) -> VmResult<()> { vm.register_type::<Body>("Body", &[])?; vm.register_type::<ResponseBody>("ResponseBody", &[])?; Ok(()) } pub fn load(vm: &Thread) -> VmResult<ExternModule> { ExternModule::new( vm, record! { listen => primitive!(2 listen), read_chunk => primitive!(1 read_chunk), write_response => primitive!(2 write_response) }, ) } fn main() { if let Err(err) = main_() { panic!("{}", err) } } fn main_() -> Result<(), Box<StdError>> { let _ = env_logger::try_init(); let port = env::args() .nth(1) .map(|port| port.parse::<i32>().expect("port")) .unwrap_or(80); let thread = new_vm(); // First load all the http types so we can refer to them from gluon load_types(&thread)?; Compiler::new().run_expr::<()>( &thread, "", r#"let _ = import! "examples/http_types.glu" in () "#, )?; // Load the primitive functions we define in this module add_extern_module(&thread, "http.prim", load); // Last we run our `http_server.glu` module which returns a function which starts listening // on the port we passed from the command line let mut expr = String::new(); { let mut file = File::open("examples/http_server.glu")?; file.read_to_string(&mut expr)?; } let (mut listen, _) = Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?; listen.call(port)?; Ok(()) }
let _ = stderr().write(err.as_bytes()); Ok( HyperResponse::new() .with_status(StatusCode::InternalServerError), ) }
conditional_block
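`write_response` in the rows above keeps the chunk in an `Option` so that a sink which is not ready can hand the item back and the next poll retries the send. The sketch below models that buffer-and-retry loop with a toy sink shaped like futures 0.1's `AsyncSink`; it is only the shape of the pattern, not the real `futures::sync::mpsc` API.

```rust
// Toy model of futures 0.1's `start_send`: either the sink accepts the
// item or it hands the item back for a later retry.
enum AsyncSink<T> { Ready, NotReady(T) }

struct FlakySink { rejections_left: u32, sent: Vec<String> }

impl FlakySink {
    fn start_send(&mut self, item: String) -> AsyncSink<String> {
        if self.rejections_left > 0 {
            self.rejections_left -= 1;
            AsyncSink::NotReady(item) // sink full: give the item back
        } else {
            self.sent.push(item);
            AsyncSink::Ready
        }
    }
}

fn main() {
    let mut sink = FlakySink { rejections_left: 2, sent: Vec::new() };
    // Like `unsent_chunk` in write_response: hold the item in an Option
    // and retry on every "poll" until the sink finally accepts it.
    let mut unsent = Some("chunk".to_string());
    let mut polls = 0;
    while let Some(item) = unsent.take() {
        polls += 1;
        if let AsyncSink::NotReady(item) = sink.start_send(item) {
            unsent = Some(item); // not accepted yet; keep it for the next poll
        }
    }
    assert_eq!(polls, 3);
    assert_eq!(sink.sent, vec!["chunk".to_string()]);
}
```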
http.rs
//! This example uses [hyper][] to create an http server which handles requests asynchronously in //! gluon. To do this we define a few types and functions in Rust which we register in gluon //! so that we can communicate with `hyper`. The rest of the implementation is done in gluon, //! routing the requests and constructing the responses. //! //! [hyper]: https://hyper.rs extern crate gluon; extern crate gluon_base as base; #[macro_use] extern crate gluon_vm as vm; #[macro_use] extern crate collect_mac; extern crate env_logger; extern crate futures; extern crate hyper; #[macro_use] extern crate log; use std::env; use std::fmt; use std::error::Error as StdError; use std::fs::File; use std::io::{stderr, Read, Write}; use std::marker::PhantomData; use std::sync::{Arc, Mutex}; use hyper::{Chunk, Method, StatusCode}; use hyper::server::Service; use futures::Async; use futures::future::Future; use futures::sink::Sink; use futures::stream::Stream; use futures::sync::mpsc::Sender; use base::types::{ArcType, Type}; use vm::{Error as VmError, ExternModule, Result as VmResult}; use vm::thread::ThreadInternal; use vm::thread::{Context, RootedThread, Thread}; use vm::Variants; use vm::api::{Function, FunctionRef, FutureResult, Getable, OpaqueValue, PushAsRef, Pushable, Userdata, ValueRef, VmType, WithVM, IO}; use vm::gc::{Gc, Traverseable}; use gluon::import::add_extern_module; use vm::internal::Value; use gluon::{new_vm, Compiler}; // `Handler` is a type defined in http.glu but since we need to refer to it in the signature of // listen we define a phantom type which we can use with `OpaqueValue` to store a `Handler` in Rust struct Handler<T>(PhantomData<T>); impl<T: VmType + 'static> VmType for Handler<T> { type Type = Self; fn make_type(vm: &Thread) -> ArcType { let typ = (*vm.global_env() .get_env() .find_type_info("examples.http_types.Handler") .unwrap()) .clone() .into_type(); Type::app(typ, collect![T::make_type(vm)]) } } // Rust does not let us implement traits for types defined in a different crate such as `hyper`. We can // however work around this by defining a wrapper type which we are then able to define the traits // on. struct Wrap<T>(T); macro_rules! define_vmtype { ($name: ident) => { impl VmType for Wrap<$name> { type Type = $name; fn make_type(vm: &Thread) -> ArcType { let typ = concat!("examples.http_types.", stringify!($name)); (*vm.global_env().get_env().find_type_info(typ).unwrap()) .clone() .into_type() } } } } define_vmtype! { Method } impl<'vm> Pushable<'vm> for Wrap<Method> { fn push(self, _: &'vm Thread, context: &mut Context) -> VmResult<()> { use hyper::Method::*; context.stack.push(Value::tag(match self.0 { Get => 0, Post => 1, Delete => 2, _ => { return Err(VmError::Message(format!( "Method `{:?}` does not exist in gluon", self.0 )).into()) } })); Ok(()) } } define_vmtype!
{ StatusCode }

impl<'vm> Getable<'vm> for Wrap<StatusCode> {
    fn from_value(_: &'vm Thread, value: Variants) -> Self {
        use hyper::StatusCode::*;
        match value.as_ref() {
            ValueRef::Data(data) => Wrap(match data.tag() {
                0 => Ok,
                1 => NotFound,
                2 => InternalServerError,
                _ => panic!("Unexpected tag"),
            }),
            _ => panic!(),
        }
    }
}

// Representation of an http body that is in the process of being read
pub struct Body(Arc<Mutex<Box<Stream<Item = PushAsRef<Chunk, [u8]>, Error = VmError> + Send>>>);

// By implementing `Userdata` on `Body` it can be automatically pushed and retrieved from gluon
// threads
impl Userdata for Body {}

// Types implementing `Userdata` require a `std::fmt::Debug` implementation so they can be displayed
impl fmt::Debug for Body {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "hyper::Body")
    }
}

// `Traverseable` is required by `Userdata` so that the garbage collector knows how to scan the
// value for garbage collected references. Normally objects do not contain any references so this
// can be empty
impl Traverseable for Body {
    fn traverse(&self, _: &mut Gc) {}
}

// `VmType` is the last trait required for a type to implement `Userdata` and defines the type used
// in gluon for this Rust type. For opaque `Userdata` values this minimal implementation is enough
// as the default implementation of `make_type` will look up `VmType::Type` from the virtual machine
// which should have been registered earlier with `Thread::register_type`
impl VmType for Body {
    type Type = Self;
}

// Since `Body` implements `Userdata` gluon will automatically marshal the gluon representation
// into a `&Body` argument
fn read_chunk(
    body: &Body,
) -> FutureResult<
    Box<Future<Item = IO<Option<PushAsRef<Chunk, [u8]>>>, Error = VmError> + Send + 'static>,
> {
    use futures::future::poll_fn;

    let body = body.0.clone();
    // `FutureResult` is a wrapper type around `Future` which when returned to the interpreter is
    // polled until completion. After `poll` returns `Ready` the value is then returned to the
    // gluon function which called `read_chunk`
    FutureResult(Box::new(poll_fn(move || {
        let mut stream = body.lock().unwrap();
        stream.poll().map(|async| async.map(IO::Value))
    })))
}

// An http body that is being written
pub struct ResponseBody(Arc<Mutex<Option<Sender<Result<Chunk, hyper::Error>>>>>);

impl fmt::Debug for ResponseBody {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "hyper::Response")
    }
}

impl Userdata for ResponseBody {}

impl Traverseable for ResponseBody {
    fn traverse(&self, _: &mut Gc) {}
}

impl VmType for ResponseBody {
    type Type = Self;
}

fn
(
    response: &ResponseBody,
    bytes: &[u8],
) -> FutureResult<Box<Future<Item = IO<()>, Error = VmError> + Send + 'static>> {
    use futures::future::poll_fn;
    use futures::AsyncSink;

    // Turn `bytes` into a `Chunk` which can be sent to the http body
    let mut unsent_chunk = Some(Ok(bytes.to_owned().into()));
    let response = response.0.clone();
    FutureResult(Box::new(poll_fn(move || {
        info!("Starting response send");
        let mut sender = response.lock().unwrap();
        let sender = sender
            .as_mut()
            .expect("Sender has been dropped while still in use");
        if let Some(chunk) = unsent_chunk.take() {
            match sender.start_send(chunk) {
                Ok(AsyncSink::NotReady(chunk)) => {
                    unsent_chunk = Some(chunk);
                    return Ok(Async::NotReady);
                }
                Ok(AsyncSink::Ready) => (),
                Err(_) => {
                    info!("Could not send http response");
                    return Ok(Async::Ready(IO::Value(())));
                }
            }
        }
        match sender.poll_complete() {
            Ok(async) => Ok(async.map(IO::Value)),
            Err(_) => {
                info!("Could not send http response");
                Ok(Async::Ready(IO::Value(())))
            }
        }
    })))
}

// Next we define some record types which are marshalled to and from gluon. These have equivalent
// definitions in http_types.glu
field_decl! { method, uri, status, body, request, response }

type Request = record_type!{
    method => Wrap<Method>,
    uri => String,
    body => Body
};

type Response = record_type!{
    status => Wrap<StatusCode>
};

type HttpState = record_type!{
    request => Request,
    response => ResponseBody
};

fn listen(port: i32, value: WithVM<OpaqueValue<RootedThread, Handler<Response>>>) -> IO<()> {
    let WithVM {
        value: handler,
        vm: thread,
    } = value;

    use hyper::server::{Http, Request as HyperRequest, Response as HyperResponse};

    // Retrieve the `handle` function from the http module which we use to evaluate values of type
    // `Handler Response`
    type ListenFn = fn(OpaqueValue<RootedThread, Handler<Response>>, HttpState) -> IO<Response>;
    let handle: Function<RootedThread, ListenFn> = thread
        .get_global("examples.http.handle")
        .unwrap_or_else(|err| panic!("{}", err));

    struct Listen {
        handle: Function<RootedThread, ListenFn>,
        handler: OpaqueValue<RootedThread, Handler<Response>>,
    }

    impl Service for Listen {
        type Request = HyperRequest;
        type Response = HyperResponse;
        type Error = hyper::Error;
        type Future = Box<Future<Item = HyperResponse, Error = hyper::Error> + Send + 'static>;

        fn call(&self, request: HyperRequest) -> Self::Future {
            let gluon_request = record_no_decl!
{
                // Here we use the `Wrap` type to make `hyper::Request` into a type that can be
                // pushed to gluon
                method => Wrap(request.method().clone()),
                uri => request.uri().to_string(),
                // Since `Body` implements `Userdata` it can be directly pushed to gluon
                body => Body(Arc::new(Mutex::new(Box::new(request.body()
                    .map_err(|err| VmError::Message(format!("{}", err)))
                    // `PushAsRef` makes the `body` parameter act as a `&[u8]` which means it is
                    // marshalled to `Array Byte` in gluon
                    .map(PushAsRef::<_, [u8]>::new)))))
            };
            let (response_sender, response_body) = hyper::Body::pair();
            let response_sender = Arc::new(Mutex::new(Some(response_sender)));
            let http_state = record_no_decl!{
                request => gluon_request,
                response => ResponseBody(response_sender.clone())
            };

            Box::new(
                self.handle
                    .clone()
                    .call_async(self.handler.clone(), http_state)
                    .then(move |result| match result {
                        Ok(value) => {
                            match value {
                                IO::Value(record_p!{ status }) => {
                                    // Drop the sender so that the receiver stops waiting for
                                    // more chunks
                                    *response_sender.lock().unwrap() = None;
                                    Ok(
                                        HyperResponse::new()
                                            .with_status(status.0)
                                            .with_body(response_body),
                                    )
                                }
                                IO::Exception(err) => {
                                    let _ = stderr().write(err.as_bytes());
                                    Ok(
                                        HyperResponse::new()
                                            .with_status(StatusCode::InternalServerError),
                                    )
                                }
                            }
                        }
                        Err(err) => {
                            let _ = stderr().write(format!("{}", err).as_bytes());
                            Ok(HyperResponse::new().with_status(StatusCode::InternalServerError))
                        }
                    }),
            )
        }
    }

    let addr = format!("127.0.0.1:{}", port).parse().unwrap();

    let result = Http::new()
        .bind(&addr, move || {
            Ok(Listen {
                handle: handle.clone(),
                handler: handler.clone(),
            })
        })
        .and_then(|server| server.run());
    match result {
        Ok(()) => IO::Value(()),
        Err(err) => IO::Exception(format!("{}", err)),
    }
}

// To let the `http_types` module refer to `Body` and `ResponseBody` we register these types in a
// separate function which is called before loading `http_types`
pub fn load_types(vm: &Thread) -> VmResult<()> {
    vm.register_type::<Body>("Body", &[])?;
    vm.register_type::<ResponseBody>("ResponseBody", &[])?;
    Ok(())
}

pub fn load(vm: &Thread) -> VmResult<ExternModule> {
    ExternModule::new(
        vm,
        record! {
            listen => primitive!(2 listen),
            read_chunk => primitive!(1 read_chunk),
            write_response => primitive!(2 write_response)
        },
    )
}

fn main() {
    if let Err(err) = main_() {
        panic!("{}", err)
    }
}

fn main_() -> Result<(), Box<StdError>> {
    let _ = env_logger::try_init();
    let port = env::args()
        .nth(1)
        .map(|port| port.parse::<i32>().expect("port"))
        .unwrap_or(80);

    let thread = new_vm();

    // First load all the http types so we can refer to them from gluon
    load_types(&thread)?;
    Compiler::new().run_expr::<()>(
        &thread,
        "",
        r#"let _ = import! "examples/http_types.glu" in () "#,
    )?;

    // Load the primitive functions we define in this module
    add_extern_module(&thread, "http.prim", load);

    // Lastly we run our `http_server.glu` module which returns a function that starts listening
    // on the port we passed from the command line
    let mut expr = String::new();
    {
        let mut file = File::open("examples/http_server.glu")?;
        file.read_to_string(&mut expr)?;
    }
    let (mut listen, _) =
        Compiler::new().run_expr::<FunctionRef<fn(i32) -> IO<()>>>(&thread, "http_test", &expr)?;

    listen.call(port)?;
    Ok(())
}
write_response
identifier_name
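For an `identifier_name` record like this last one the elided span is a single identifier: the prefix ends at `fn` and the suffix resumes at the parameter list, so the `middle` `write_response` is all that was removed. A consumer could sanity-check such records along the following lines; this helper is an assumption for illustration, not part of the dataset.

// Hypothetical check for an `identifier_name` record: the middle should be a
// bare Rust identifier, and for this record it must bridge a `fn` keyword and
// a parameter list exactly.
fn looks_like_identifier_row(prefix: &str, middle: &str, suffix: &str) -> bool {
    let is_ident = !middle.is_empty()
        && !middle.starts_with(|c: char| c.is_ascii_digit())
        && middle.chars().all(|c| c.is_alphanumeric() || c == '_');
    is_ident && prefix.trim_end().ends_with("fn") && suffix.trim_start().starts_with('(')
}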