Column statistics for this split:

| Column | Type | Values |
|---|---|---|
| file_name | large_string | lengths 4 to 69 |
| prefix | large_string | lengths 0 to 26.7k |
| suffix | large_string | lengths 0 to 24.8k |
| middle | large_string | lengths 0 to 2.12k |
| fim_type | large_string | 4 classes |
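Each row holds one fill-in-the-middle (FIM) example: concatenating `prefix`, `middle`, and `suffix` in that order is meant to reproduce the original source file, and `fim_type` records how the split point was chosen (`random_line_split`, `identifier_name`, `identifier_body`, or `conditional_block` in the rows below). As a minimal sketch of how one row could be turned into a training string, assuming StarCoder-style sentinel tokens that this dump does not itself specify:

```rust
// Minimal sketch, not part of the dataset itself: how the columns of one row
// relate to each other. The <fim_*> sentinel tokens are an assumption
// (StarCoder-style); substitute whatever tokens your tokenizer actually uses.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

/// Concatenating the spans in prefix-middle-suffix order reproduces the file.
fn reconstruct_file(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

/// PSM-style training string: the model conditions on prefix and suffix and
/// learns to emit the held-out middle after the final sentinel.
fn to_psm_string(row: &FimRow) -> String {
    format!(
        "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
        row.prefix, row.suffix, row.middle
    )
}

fn main() {
    // Mirrors the eo_view.rs row below, where the held-out middle is `#[inline]`.
    let row = FimRow {
        file_name: "eo_view.rs".to_string(),
        prefix: "/// This is the row relative to the start of the view.\n".to_string(),
        suffix: "fn ero_scale_add(/* ... */) {}\n".to_string(),
        middle: "#[inline]\n".to_string(),
        fim_type: "random_line_split".to_string(),
    };
    assert!(reconstruct_file(&row).contains("#[inline]\nfn ero_scale_add"));
    println!("{} ({}): {}", row.file_name, row.fim_type, to_psm_string(&row));
}
```

In the actual rows below the `prefix` and `suffix` fields can run to tens of kilobytes (see the length statistics above), so only short excerpts are used in this illustration.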
logger.rs
extern crate env_logger; extern crate log_panics; extern crate log; #[cfg(target_os = "android")] extern crate android_logger; use self::env_logger::Builder as EnvLoggerBuilder; use self::log::{LevelFilter, Level}; use std::env; use std::io::Write; #[cfg(target_os = "android")] use self::android_logger::Filter; use log::{Record, Metadata}; use libc::{c_void, c_char}; use std::ffi::CString; use std::ptr; use indy_api_types::errors::prelude::*; use indy_utils::ctypes; use indy_api_types::errors::IndyErrorKind::InvalidStructure; pub static mut LOGGER_STATE: LoggerState = LoggerState::Default; pub enum LoggerState { Default, Custom } impl LoggerState { pub fn get(&self) -> (*const c_void, Option<EnabledCB>, Option<LogCB>, Option<FlushCB>) { match self { LoggerState::Default => (ptr::null(), Some(LibindyDefaultLogger::enabled), Some(LibindyDefaultLogger::log), Some(LibindyDefaultLogger::flush)), LoggerState::Custom => unsafe { (CONTEXT, ENABLED_CB, LOG_CB, FLUSH_CB) }, } } } pub type EnabledCB = extern fn(context: *const c_void, level: u32, target: *const c_char) -> bool; pub type LogCB = extern fn(context: *const c_void, level: u32, target: *const c_char, message: *const c_char, module_path: *const c_char, file: *const c_char, line: u32); pub type FlushCB = extern fn(context: *const c_void); static mut CONTEXT: *const c_void = ptr::null(); static mut ENABLED_CB: Option<EnabledCB> = None; static mut LOG_CB: Option<LogCB> = None; static mut FLUSH_CB: Option<FlushCB> = None; #[cfg(debug_assertions)] const DEFAULT_MAX_LEVEL: LevelFilter = LevelFilter::Trace; #[cfg(not(debug_assertions))] const DEFAULT_MAX_LEVEL: LevelFilter = LevelFilter::Info; pub struct LibindyLogger { context: *const c_void, enabled: Option<EnabledCB>, log: LogCB, flush: Option<FlushCB>, } impl LibindyLogger { fn new(context: *const c_void, enabled: Option<EnabledCB>, log: LogCB, flush: Option<FlushCB>) -> Self { LibindyLogger { context, enabled, log, flush } } } impl log::Log for LibindyLogger { fn enabled(&self, metadata: &Metadata) -> bool { if let Some(enabled_cb) = self.enabled { let level = metadata.level() as u32; let target = CString::new(metadata.target()).unwrap(); enabled_cb(self.context, level, target.as_ptr(), ) } else { true } } fn log(&self, record: &Record) { let log_cb = self.log; let level = record.level() as u32; let target = CString::new(record.target()).unwrap(); let message = CString::new(record.args().to_string()).unwrap(); let module_path = record.module_path().map(|a| CString::new(a).unwrap()); let file = record.file().map(|a| CString::new(a).unwrap()); let line = record.line().unwrap_or(0); log_cb(self.context, level, target.as_ptr(), message.as_ptr(), module_path.as_ref().map(|p| p.as_ptr()).unwrap_or(ptr::null()), file.as_ref().map(|p| p.as_ptr()).unwrap_or(ptr::null()), line, ) } fn flush(&self) { if let Some(flush_cb) = self.flush { flush_cb(self.context) } } } unsafe impl Sync for LibindyLogger {} unsafe impl Send for LibindyLogger {} impl LibindyLogger { pub fn init(context: *const c_void, enabled: Option<EnabledCB>, log: LogCB, flush: Option<FlushCB>, max_lvl: Option<u32>) -> Result<(), IndyError> { let logger = LibindyLogger::new(context, enabled, log, flush); log::set_boxed_logger(Box::new(logger))?; let max_lvl = match max_lvl { Some(max_lvl) => LibindyLogger::map_u32_lvl_to_filter(max_lvl)?, None => DEFAULT_MAX_LEVEL, }; log::set_max_level(max_lvl); unsafe { LOGGER_STATE = LoggerState::Custom; CONTEXT = context; ENABLED_CB = enabled; LOG_CB = Some(log); FLUSH_CB = flush }; Ok(()) } fn 
map_u32_lvl_to_filter(max_level: u32) -> IndyResult<LevelFilter> { let max_level = match max_level { 0 => LevelFilter::Off, 1 => LevelFilter::Error, 2 => LevelFilter::Warn, 3 => LevelFilter::Info, 4 => LevelFilter::Debug, 5 => LevelFilter::Trace, _ => return Err(IndyError::from(InvalidStructure)), }; Ok(max_level) } pub fn set_max_level(max_level: u32) -> IndyResult<LevelFilter> { let max_level_filter = LibindyLogger::map_u32_lvl_to_filter(max_level)?; log::set_max_level(max_level_filter); Ok(max_level_filter) } } pub struct LibindyDefaultLogger; impl LibindyDefaultLogger { pub fn init(pattern: Option<String>) -> Result<(), IndyError> { let pattern = pattern.or_else(|| env::var("RUST_LOG").ok()); log_panics::init(); //Logging of panics is essential for android. As android does not log to stdout for native code if cfg!(target_os = "android") { #[cfg(target_os = "android")] let log_filter = match pattern { Some(val) => match val.to_lowercase().as_ref() { "error" => Filter::default().with_min_level(log::Level::Error), "warn" => Filter::default().with_min_level(log::Level::Warn), "info" => Filter::default().with_min_level(log::Level::Info), "debug" => Filter::default().with_min_level(log::Level::Debug), "trace" => Filter::default().with_min_level(log::Level::Trace), _ => Filter::default().with_min_level(log::Level::Error), }, None => Filter::default().with_min_level(log::Level::Error) }; //Set logging to off when deploying production android app. #[cfg(target_os = "android")] android_logger::init_once(log_filter); info!("Logging for Android"); } else { EnvLoggerBuilder::new() .format(|buf, record| writeln!(buf, "{:>5}|{:<30}|{:>35}:{:<4}| {}", record.level(), record.target(), record.file().get_or_insert(""), record.line().get_or_insert(0), record.args())) .filter(None, LevelFilter::Off) .parse_filters(pattern.as_ref().map(String::as_str).unwrap_or("")) .try_init()?; } unsafe { LOGGER_STATE = LoggerState::Default }; Ok(()) } extern fn enabled(_context: *const c_void, level: u32, target: *const c_char) -> bool { let level = get_level(level); let target = ctypes::c_str_to_string(target).unwrap().unwrap(); let metadata: Metadata = Metadata::builder() .level(level) .target(&target) .build(); log::logger().enabled(&metadata) } extern fn log(_context: *const c_void, level: u32, target: *const c_char, args: *const c_char, module_path: *const c_char, file: *const c_char, line: u32) { let target = ctypes::c_str_to_string(target).unwrap().unwrap(); let args = ctypes::c_str_to_string(args).unwrap().unwrap(); let module_path = ctypes::c_str_to_string(module_path).unwrap(); let file = ctypes::c_str_to_string(file).unwrap(); let level = get_level(level); log::logger().log( &Record::builder() .args(format_args!("{}", args)) .level(level) .target(&target) .module_path(module_path) .file(file) .line(Some(line)) .build(), ); } extern fn flush(_context: *const c_void) { log::logger().flush() } } fn get_level(level: u32) -> Level { match level { 1 => Level::Error, 2 => Level::Warn, 3 => Level::Info, 4 => Level::Debug, 5 => Level::Trace, _ => unreachable!(), } }
#[macro_export] macro_rules! try_log { ($expr:expr) => (match $expr { Ok(val) => val, Err(err) => { error!("try_log! | {}", err); return Err(From::from(err)) } }) } macro_rules! _map_err { ($lvl:expr, $expr:expr) => ( |err| { log!($lvl, "{} - {}", $expr, err); err } ); ($lvl:expr) => ( |err| { log!($lvl, "{}", err); err } ) } #[macro_export] macro_rules! map_err_err { () => ( _map_err!(::log::Level::Error) ); ($($arg:tt)*) => ( _map_err!(::log::Level::Error, $($arg)*) ) } #[macro_export] macro_rules! map_err_trace { () => ( _map_err!(::log::Level::Trace) ); ($($arg:tt)*) => ( _map_err!(::log::Level::Trace, $($arg)*) ) } #[macro_export] macro_rules! map_err_info { () => ( _map_err!(::log::Level::Info) ); ($($arg:tt)*) => ( _map_err!(::log::Level::Info, $($arg)*) ) } #[cfg(debug_assertions)] #[macro_export] macro_rules! secret { ($val:expr) => {{ $val }}; } #[cfg(not(debug_assertions))] #[macro_export] macro_rules! secret { ($val:expr) => {{ "_" }}; }
random_line_split
eo_view.rs
// std imports use std::mem; // external imports use num::traits::Num; // local imports use sralgebra::MagmaBase; use super::eo_traits::{ERO, ECO}; use view::MatrixView; use traits::{Shape, MatrixBuffer, Strided}; /// Implementation of Elementary row operations. impl<'a, T:MagmaBase + Num> ERO<T> for MatrixView<'a, T> { /// Row scaling by a factor and adding to another row. /// r_i = r_i + k * r_j /// The j-th row can be outside the view also. /// This is the row relative to the start of the view.
fn ero_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_rows()); let m = self.matrix(); // Compute j-th row in m (by doing offset) let j = j + (self.start_row() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_rows()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sc = self.start_col(); // Compute initial offsets let mut offset_a = self.cell_to_offset(i, 0); let mut offset_b = m.cell_to_offset(j, sc); let stride_a = self.stride() as isize; let stride_b = m.stride() as isize; for _ in 0..self.num_cols(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += stride_a; offset_b += stride_b; } self } } /// Implementation of Elementary column operations. impl<'a, T:MagmaBase + Num> ECO<T> for MatrixView<'a, T> { /// Column scaling by a factor and adding to another column. /// c_i = c_i + k * c_j /// The j-th column can be outside the view also. /// This is the column relative to the start of the view. #[inline] fn eco_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_cols()); let m = self.matrix(); // Compute j-th column in m (by doing offset) let j = j + (self.start_col() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_cols()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sr = self.start_row(); // Compute initial offsets let mut offset_a = self.cell_to_offset(0, i); let mut offset_b = m.cell_to_offset(sr, j); for _ in 0..self.num_rows(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += 1; offset_b += 1; } self } } /****************************************************** * * Unit tests * *******************************************************/ #[cfg(test)] mod test{ //use super::*; } /****************************************************** * * Bench marks * *******************************************************/ #[cfg(test)] mod bench{ //extern crate test; //use self::test::Bencher; //use super::*; }
#[inline]
random_line_split
eo_view.rs
// std imports use std::mem; // external imports use num::traits::Num; // local imports use sralgebra::MagmaBase; use super::eo_traits::{ERO, ECO}; use view::MatrixView; use traits::{Shape, MatrixBuffer, Strided}; /// Implementation of Elementary row operations. impl<'a, T:MagmaBase + Num> ERO<T> for MatrixView<'a, T> { /// Row scaling by a factor and adding to another row. /// r_i = r_i + k * r_j /// The j-th row can be outside the view also. /// This is the row relative to the start of the view. #[inline] fn ero_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_rows()); let m = self.matrix(); // Compute j-th row in m (by doing offset) let j = j + (self.start_row() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_rows()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sc = self.start_col(); // Compute initial offsets let mut offset_a = self.cell_to_offset(i, 0); let mut offset_b = m.cell_to_offset(j, sc); let stride_a = self.stride() as isize; let stride_b = m.stride() as isize; for _ in 0..self.num_cols(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += stride_a; offset_b += stride_b; } self } } /// Implementation of Elementary column operations. impl<'a, T:MagmaBase + Num> ECO<T> for MatrixView<'a, T> { /// Column scaling by a factor and adding to another column. /// c_i = c_i + k * c_j /// The j-th column can be outside the view also. /// This is the column relative to the start of the view. #[inline] fn eco_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T>
} // Update offsets offset_a += 1; offset_b += 1; } self } } /****************************************************** * * Unit tests * *******************************************************/ #[cfg(test)] mod test{ //use super::*; } /****************************************************** * * Bench marks * *******************************************************/ #[cfg(test)] mod bench{ //extern crate test; //use self::test::Bencher; //use super::*; }
{ debug_assert! (i < self.num_cols()); let m = self.matrix(); // Compute j-th column in m (by doing offset) let j = j + (self.start_col() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_cols()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sr = self.start_row(); // Compute initial offsets let mut offset_a = self.cell_to_offset(0, i); let mut offset_b = m.cell_to_offset(sr, j); for _ in 0..self.num_rows(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb;
identifier_body
eo_view.rs
// std imports use std::mem; // external imports use num::traits::Num; // local imports use sralgebra::MagmaBase; use super::eo_traits::{ERO, ECO}; use view::MatrixView; use traits::{Shape, MatrixBuffer, Strided}; /// Implementation of Elementary row operations. impl<'a, T:MagmaBase + Num> ERO<T> for MatrixView<'a, T> { /// Row scaling by a factor and adding to another row. /// r_i = r_i + k * r_j /// The j-th row can be outside the view also. /// This is the row relative to the start of the view. #[inline] fn ero_scale_add(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_rows()); let m = self.matrix(); // Compute j-th row in m (by doing offset) let j = j + (self.start_row() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_rows()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sc = self.start_col(); // Compute initial offsets let mut offset_a = self.cell_to_offset(i, 0); let mut offset_b = m.cell_to_offset(j, sc); let stride_a = self.stride() as isize; let stride_b = m.stride() as isize; for _ in 0..self.num_cols(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += stride_a; offset_b += stride_b; } self } } /// Implementation of Elementary column operations. impl<'a, T:MagmaBase + Num> ECO<T> for MatrixView<'a, T> { /// Column scaling by a factor and adding to another column. /// c_i = c_i + k * c_j /// The j-th column can be outside the view also. /// This is the column relative to the start of the view. #[inline] fn
(&mut self, i : usize, j : isize, scale : T )-> &mut MatrixView<'a, T> { debug_assert! (i < self.num_cols()); let m = self.matrix(); // Compute j-th column in m (by doing offset) let j = j + (self.start_col() as isize); debug_assert! (j >= 0); let j = j as usize; debug_assert!(j < m.num_cols()); let ptr = m.as_ptr(); // I am allowing modification of the underlying buffer let ptr : *mut T = unsafe { mem::transmute(ptr) }; let sr = self.start_row(); // Compute initial offsets let mut offset_a = self.cell_to_offset(0, i); let mut offset_b = m.cell_to_offset(sr, j); for _ in 0..self.num_rows(){ unsafe { let va = *ptr.offset(offset_a); let vb = *ptr.offset(offset_b); *ptr.offset(offset_a) = va + scale * vb; } // Update offsets offset_a += 1; offset_b += 1; } self } } /****************************************************** * * Unit tests * *******************************************************/ #[cfg(test)] mod test{ //use super::*; } /****************************************************** * * Bench marks * *******************************************************/ #[cfg(test)] mod bench{ //extern crate test; //use self::test::Bencher; //use super::*; }
eco_scale_add
identifier_name
mount.rs
// This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. use base::prelude::*; use core::{mem}; use core::ops::{BitOr, Not, BitAnd}; use cty::{c_ulong, MS_RDONLY, MS_NOSUID, MS_NODEV, MS_NOEXEC, MS_SYNCHRONOUS, MS_REMOUNT, MS_MANDLOCK, MS_DIRSYNC, MS_NOATIME, MS_NODIRATIME, MS_BIND, MS_MOVE, MS_REC, MS_SILENT, MS_POSIXACL, MS_UNBINDABLE, MS_LAZYTIME, MS_PRIVATE, MS_SLAVE, MS_SHARED, MS_STRICTATIME, PATH_MAX}; use fmt::{Debug, Write}; use syscall::{self}; use rmo::{ToRmo}; use str_one::{CStr}; use str_two::{CString}; use {rmo_cstr, Pool}; /// Mounts a filesystem. /// /// [argument, src] /// The file that will be mounted. /// /// [argument, dst] /// The point at which it will be mounted. /// /// [argument, ty] /// The type of the filesystem. /// /// [argument, flags] /// The flags to be used to mount the filesystem. /// /// [argument, data] /// Filesystem dependent data. /// /// = Remarks /// /// :flags: link:lrs::fs::flags /// /// See {flags} for pre-defined mount flags. /// /// = Examples /// /// The following example bind-mounts a directory `a` read-only at the path `b`. Both /// paths must exist in the current working directory and the example must be executed as /// root. /// /// ---- /// mount("a", "b", "", MOUNT_READ_ONLY | MOUNT_BIND, "").unwrap(); /// ---- /// /// The example in link:lrs::fs::unmount[unmount] shows how to perform the unmount /// operation. /// /// = See also /// /// * link:man:mount(2) /// * {flags} pub fn mount<P, Q, R, S>(src: P, dst: Q, ty: R, flags: MountFlags, data: S) -> Result where P: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, Q: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, R: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, S: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, { let mut buf1: [d8; PATH_MAX] = unsafe { mem::uninit() }; let mut buf2: [d8; PATH_MAX] = unsafe { mem::uninit() }; let mut buf3: [d8; 256] = unsafe { mem::uninit() }; let mut buf4: [d8; 256] = unsafe { mem::uninit() }; let src = try!(rmo_cstr(&src, &mut buf1)); let dst = try!(rmo_cstr(&dst, &mut buf2)); let ty = try!(rmo_cstr(&ty, &mut buf3)); let data = try!(rmo_cstr(&data, &mut buf4)); rv!(syscall::mount(&src, &dst, &ty, flags.0, &data)) } /// Flags used when mounting a filesystem. /// /// = Remarks /// /// :flags: link:lrs::fs::flags /// /// See {flags} for pre-defined mount flags. /// /// = See also /// /// * flags pub struct MountFlags(c_ulong); impl BitOr for MountFlags { type Output = MountFlags; fn
(self, other: MountFlags) -> MountFlags { MountFlags(self.0 | other.0) } } impl BitAnd for MountFlags { type Output = MountFlags; fn bitand(self, other: MountFlags) -> MountFlags { MountFlags(self.0 & other.0) } } impl Not for MountFlags { type Output = MountFlags; fn not(self) -> MountFlags { MountFlags(!self.0) } } pub const MOUNT_NONE: MountFlags = MountFlags(0); macro_rules! create { ($($(#[$meta:meta])* flag $name:ident = $val:expr;)*) => { $($(#[$meta])* pub const $name: MountFlags = MountFlags($val);)* impl Debug for MountFlags { fn fmt<W: Write>(&self, w: &mut W) -> Result { let mut first = true; $( if self.0 & $val!= 0 { if!first { try!(w.write(b"|")); } first = false; try!(w.write_all(stringify!($name).as_bytes())); } )* let _ = first; Ok(()) } } } } create! { #[doc = "Mount the filesystem read-only.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_RDONLY therein"] flag MOUNT_READ_ONLY = MS_RDONLY; #[doc = "Don't respect set-user-id and set-group-id flags on the filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOSUID therein"] flag MOUNT_NO_SET_ID = MS_NOSUID; #[doc = "Don't allow access to devices on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NODEV therein"] flag MOUNT_NO_DEVICE_ACCESS = MS_NODEV; #[doc = "Don't allow execution of programs on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOEXEC therein"] flag MOUNT_NO_EXEC = MS_NOEXEC; #[doc = "Flush all data and meta-data changes to this filesystem to the disk \ immediately.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_SYNCHRONOUS therein"] flag MOUNT_SYNC = MS_SYNCHRONOUS; #[doc = "Perform a remount operation.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_REMOUNT therein"] flag MOUNT_REMOUNT = MS_REMOUNT; #[doc = "Allow mandatory locking on the monut point.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_MANBLOCK therein"] flag MOUNT_MANDATORY_LOCKING = MS_MANDLOCK; #[doc = "Make directory changes on this filesystem synchonous.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_DIRSYNC therein"] flag MOUNT_DIR_SYNC = MS_DIRSYNC; #[doc = "Don't update the access times of files on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOATIME therein"] flag MOUNT_NO_ACCESS_TIME = MS_NOATIME; #[doc = "Don't update the access times of directories on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NODIRATIME therein"] flag MOUNT_NO_DIR_ACCESS_TIME = MS_NODIRATIME; #[doc = "Perform a bind operation.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_BIND therein"] flag MOUNT_BIND = MS_BIND; #[doc = "Atomically move a mount to another mount point.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_MOVE therein"] flag MOUNT_MOVE = MS_MOVE; #[doc = "Not documented."] flag MOUNT_REC = MS_REC; #[doc = "Omit certain warning messages from the kernel log.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_SILENT therein"] flag MOUNT_SILENT = MS_SILENT; #[doc = "Not documented."] flag MOUNT_POSIX_ACL = MS_POSIXACL; #[doc = "Not documented."] flag MOUNT_UNBINDABLE = MS_UNBINDABLE; #[doc = "Not documented."] flag MOUNT_PRIVATE = MS_PRIVATE; #[doc = "Not documented."] flag MOUNT_SLAVE = MS_SLAVE; #[doc = "Not documented."] flag MOUNT_SHARED = MS_SHARED; #[doc = "Perform an access time update after every access.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_STRICTATIME therein"] 
flag MOUNT_STRICT_ACCESS_TIME = MS_STRICTATIME; #[doc = "Maintain changes to access/modification/status-change times in memory and \ only update the inodes under special circumstances.\n"] #[doc = "= Remarks"] #[doc = ":lazy: link:man:mount(2)"] #[doc = "See the {lazy}[manual page] and MS_LAZYTIME therein for the details.\n"] #[doc = "== Kernel versions"] #[doc = "The required kernel version is 4.0.\n"] #[doc = "= See also"] #[doc = "* {lazy} and MS_LAZYTIME therein"] flag MOUNT_LAZY_TIME = MS_LAZYTIME; } impl MountFlags { /// Sets a flag. /// /// [argument, flag] /// The flag to be set. pub fn set(&mut self, flag: MountFlags) { self.0 |= flag.0 } /// Clears a flag. /// /// [argument, flag] /// The flag to be cleared. pub fn unset(&mut self, flag: MountFlags) { self.0 &=!flag.0 } /// Returns whether a flag is set. /// /// [argument, flag] /// The flag to be checked. pub fn is_set(&self, flag: MountFlags) -> bool { self.0 & flag.0!= 0 } }
bitor
identifier_name
mount.rs
// This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. use base::prelude::*; use core::{mem}; use core::ops::{BitOr, Not, BitAnd}; use cty::{c_ulong, MS_RDONLY, MS_NOSUID, MS_NODEV, MS_NOEXEC, MS_SYNCHRONOUS, MS_REMOUNT, MS_MANDLOCK, MS_DIRSYNC, MS_NOATIME, MS_NODIRATIME, MS_BIND, MS_MOVE, MS_REC, MS_SILENT, MS_POSIXACL, MS_UNBINDABLE, MS_LAZYTIME, MS_PRIVATE, MS_SLAVE, MS_SHARED, MS_STRICTATIME, PATH_MAX}; use fmt::{Debug, Write}; use syscall::{self}; use rmo::{ToRmo}; use str_one::{CStr}; use str_two::{CString}; use {rmo_cstr, Pool}; /// Mounts a filesystem. /// /// [argument, src] /// The file that will be mounted. /// /// [argument, dst] /// The point at which it will be mounted. /// /// [argument, ty] /// The type of the filesystem. /// /// [argument, flags] /// The flags to be used to mount the filesystem. /// /// [argument, data] /// Filesystem dependent data. /// /// = Remarks /// /// :flags: link:lrs::fs::flags /// /// See {flags} for pre-defined mount flags. /// /// = Examples /// /// The following example bind-mounts a directory `a` read-only at the path `b`. Both /// paths must exist in the current working directory and the example must be executed as /// root. /// /// ---- /// mount("a", "b", "", MOUNT_READ_ONLY | MOUNT_BIND, "").unwrap(); /// ---- /// /// The example in link:lrs::fs::unmount[unmount] shows how to perform the unmount /// operation. /// /// = See also /// /// * link:man:mount(2) /// * {flags} pub fn mount<P, Q, R, S>(src: P, dst: Q, ty: R, flags: MountFlags, data: S) -> Result where P: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, Q: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, R: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, S: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, { let mut buf1: [d8; PATH_MAX] = unsafe { mem::uninit() }; let mut buf2: [d8; PATH_MAX] = unsafe { mem::uninit() }; let mut buf3: [d8; 256] = unsafe { mem::uninit() }; let mut buf4: [d8; 256] = unsafe { mem::uninit() }; let src = try!(rmo_cstr(&src, &mut buf1)); let dst = try!(rmo_cstr(&dst, &mut buf2)); let ty = try!(rmo_cstr(&ty, &mut buf3)); let data = try!(rmo_cstr(&data, &mut buf4)); rv!(syscall::mount(&src, &dst, &ty, flags.0, &data)) } /// Flags used when mounting a filesystem. /// /// = Remarks /// /// :flags: link:lrs::fs::flags /// /// See {flags} for pre-defined mount flags. /// /// = See also /// /// * flags pub struct MountFlags(c_ulong); impl BitOr for MountFlags { type Output = MountFlags; fn bitor(self, other: MountFlags) -> MountFlags
} impl BitAnd for MountFlags { type Output = MountFlags; fn bitand(self, other: MountFlags) -> MountFlags { MountFlags(self.0 & other.0) } } impl Not for MountFlags { type Output = MountFlags; fn not(self) -> MountFlags { MountFlags(!self.0) } } pub const MOUNT_NONE: MountFlags = MountFlags(0); macro_rules! create { ($($(#[$meta:meta])* flag $name:ident = $val:expr;)*) => { $($(#[$meta])* pub const $name: MountFlags = MountFlags($val);)* impl Debug for MountFlags { fn fmt<W: Write>(&self, w: &mut W) -> Result { let mut first = true; $( if self.0 & $val!= 0 { if!first { try!(w.write(b"|")); } first = false; try!(w.write_all(stringify!($name).as_bytes())); } )* let _ = first; Ok(()) } } } } create! { #[doc = "Mount the filesystem read-only.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_RDONLY therein"] flag MOUNT_READ_ONLY = MS_RDONLY; #[doc = "Don't respect set-user-id and set-group-id flags on the filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOSUID therein"] flag MOUNT_NO_SET_ID = MS_NOSUID; #[doc = "Don't allow access to devices on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NODEV therein"] flag MOUNT_NO_DEVICE_ACCESS = MS_NODEV; #[doc = "Don't allow execution of programs on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOEXEC therein"] flag MOUNT_NO_EXEC = MS_NOEXEC; #[doc = "Flush all data and meta-data changes to this filesystem to the disk \ immediately.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_SYNCHRONOUS therein"] flag MOUNT_SYNC = MS_SYNCHRONOUS; #[doc = "Perform a remount operation.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_REMOUNT therein"] flag MOUNT_REMOUNT = MS_REMOUNT; #[doc = "Allow mandatory locking on the monut point.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_MANBLOCK therein"] flag MOUNT_MANDATORY_LOCKING = MS_MANDLOCK; #[doc = "Make directory changes on this filesystem synchonous.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_DIRSYNC therein"] flag MOUNT_DIR_SYNC = MS_DIRSYNC; #[doc = "Don't update the access times of files on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOATIME therein"] flag MOUNT_NO_ACCESS_TIME = MS_NOATIME; #[doc = "Don't update the access times of directories on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NODIRATIME therein"] flag MOUNT_NO_DIR_ACCESS_TIME = MS_NODIRATIME; #[doc = "Perform a bind operation.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_BIND therein"] flag MOUNT_BIND = MS_BIND; #[doc = "Atomically move a mount to another mount point.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_MOVE therein"] flag MOUNT_MOVE = MS_MOVE; #[doc = "Not documented."] flag MOUNT_REC = MS_REC; #[doc = "Omit certain warning messages from the kernel log.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_SILENT therein"] flag MOUNT_SILENT = MS_SILENT; #[doc = "Not documented."] flag MOUNT_POSIX_ACL = MS_POSIXACL; #[doc = "Not documented."] flag MOUNT_UNBINDABLE = MS_UNBINDABLE; #[doc = "Not documented."] flag MOUNT_PRIVATE = MS_PRIVATE; #[doc = "Not documented."] flag MOUNT_SLAVE = MS_SLAVE; #[doc = "Not documented."] flag MOUNT_SHARED = MS_SHARED; #[doc = "Perform an access time update after every access.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_STRICTATIME therein"] flag MOUNT_STRICT_ACCESS_TIME = MS_STRICTATIME; #[doc = "Maintain changes 
to access/modification/status-change times in memory and \ only update the inodes under special circumstances.\n"] #[doc = "= Remarks"] #[doc = ":lazy: link:man:mount(2)"] #[doc = "See the {lazy}[manual page] and MS_LAZYTIME therein for the details.\n"] #[doc = "== Kernel versions"] #[doc = "The required kernel version is 4.0.\n"] #[doc = "= See also"] #[doc = "* {lazy} and MS_LAZYTIME therein"] flag MOUNT_LAZY_TIME = MS_LAZYTIME; } impl MountFlags { /// Sets a flag. /// /// [argument, flag] /// The flag to be set. pub fn set(&mut self, flag: MountFlags) { self.0 |= flag.0 } /// Clears a flag. /// /// [argument, flag] /// The flag to be cleared. pub fn unset(&mut self, flag: MountFlags) { self.0 &=!flag.0 } /// Returns whether a flag is set. /// /// [argument, flag] /// The flag to be checked. pub fn is_set(&self, flag: MountFlags) -> bool { self.0 & flag.0!= 0 } }
{ MountFlags(self.0 | other.0) }
identifier_body
mount.rs
// This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. use base::prelude::*; use core::{mem}; use core::ops::{BitOr, Not, BitAnd}; use cty::{c_ulong, MS_RDONLY, MS_NOSUID, MS_NODEV, MS_NOEXEC, MS_SYNCHRONOUS, MS_REMOUNT, MS_MANDLOCK, MS_DIRSYNC, MS_NOATIME, MS_NODIRATIME, MS_BIND, MS_MOVE, MS_REC, MS_SILENT, MS_POSIXACL, MS_UNBINDABLE, MS_LAZYTIME, MS_PRIVATE, MS_SLAVE, MS_SHARED, MS_STRICTATIME, PATH_MAX}; use fmt::{Debug, Write}; use syscall::{self}; use rmo::{ToRmo}; use str_one::{CStr}; use str_two::{CString}; use {rmo_cstr, Pool}; /// Mounts a filesystem. /// /// [argument, src] /// The file that will be mounted. /// /// [argument, dst] /// The point at which it will be mounted. /// /// [argument, ty] /// The type of the filesystem. /// /// [argument, flags] /// The flags to be used to mount the filesystem. /// /// [argument, data] /// Filesystem dependent data. /// /// = Remarks /// /// :flags: link:lrs::fs::flags /// /// See {flags} for pre-defined mount flags. /// /// = Examples /// /// The following example bind-mounts a directory `a` read-only at the path `b`. Both /// paths must exist in the current working directory and the example must be executed as /// root. /// /// ---- /// mount("a", "b", "", MOUNT_READ_ONLY | MOUNT_BIND, "").unwrap(); /// ---- /// /// The example in link:lrs::fs::unmount[unmount] shows how to perform the unmount /// operation. /// /// = See also /// /// * link:man:mount(2) /// * {flags} pub fn mount<P, Q, R, S>(src: P, dst: Q, ty: R, flags: MountFlags, data: S) -> Result where P: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, Q: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, R: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, S: for<'a> ToRmo<Pool<'a>, CStr, CString<Pool<'a>>>, { let mut buf1: [d8; PATH_MAX] = unsafe { mem::uninit() }; let mut buf2: [d8; PATH_MAX] = unsafe { mem::uninit() }; let mut buf3: [d8; 256] = unsafe { mem::uninit() }; let mut buf4: [d8; 256] = unsafe { mem::uninit() }; let src = try!(rmo_cstr(&src, &mut buf1)); let dst = try!(rmo_cstr(&dst, &mut buf2)); let ty = try!(rmo_cstr(&ty, &mut buf3)); let data = try!(rmo_cstr(&data, &mut buf4)); rv!(syscall::mount(&src, &dst, &ty, flags.0, &data)) } /// Flags used when mounting a filesystem. /// /// = Remarks /// /// :flags: link:lrs::fs::flags /// /// See {flags} for pre-defined mount flags. /// /// = See also /// /// * flags pub struct MountFlags(c_ulong); impl BitOr for MountFlags { type Output = MountFlags; fn bitor(self, other: MountFlags) -> MountFlags { MountFlags(self.0 | other.0) } } impl BitAnd for MountFlags { type Output = MountFlags; fn bitand(self, other: MountFlags) -> MountFlags { MountFlags(self.0 & other.0) } } impl Not for MountFlags { type Output = MountFlags; fn not(self) -> MountFlags { MountFlags(!self.0) } } pub const MOUNT_NONE: MountFlags = MountFlags(0); macro_rules! create { ($($(#[$meta:meta])* flag $name:ident = $val:expr;)*) => { $($(#[$meta])* pub const $name: MountFlags = MountFlags($val);)* impl Debug for MountFlags { fn fmt<W: Write>(&self, w: &mut W) -> Result { let mut first = true; $( if self.0 & $val!= 0 { if!first { try!(w.write(b"|")); } first = false; try!(w.write_all(stringify!($name).as_bytes())); } )* let _ = first; Ok(()) } } } } create! 
{ #[doc = "Mount the filesystem read-only.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_RDONLY therein"] flag MOUNT_READ_ONLY = MS_RDONLY; #[doc = "Don't respect set-user-id and set-group-id flags on the filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOSUID therein"] flag MOUNT_NO_SET_ID = MS_NOSUID; #[doc = "Don't allow access to devices on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NODEV therein"] flag MOUNT_NO_DEVICE_ACCESS = MS_NODEV; #[doc = "Don't allow execution of programs on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOEXEC therein"] flag MOUNT_NO_EXEC = MS_NOEXEC; #[doc = "Flush all data and meta-data changes to this filesystem to the disk \ immediately.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_SYNCHRONOUS therein"] flag MOUNT_SYNC = MS_SYNCHRONOUS; #[doc = "Perform a remount operation.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_REMOUNT therein"] flag MOUNT_REMOUNT = MS_REMOUNT; #[doc = "Allow mandatory locking on the monut point.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_MANBLOCK therein"] flag MOUNT_MANDATORY_LOCKING = MS_MANDLOCK; #[doc = "Make directory changes on this filesystem synchonous.\n"]
#[doc = "* link:man:mount(2) and MS_DIRSYNC therein"] flag MOUNT_DIR_SYNC = MS_DIRSYNC; #[doc = "Don't update the access times of files on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NOATIME therein"] flag MOUNT_NO_ACCESS_TIME = MS_NOATIME; #[doc = "Don't update the access times of directories on this filesystem.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_NODIRATIME therein"] flag MOUNT_NO_DIR_ACCESS_TIME = MS_NODIRATIME; #[doc = "Perform a bind operation.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_BIND therein"] flag MOUNT_BIND = MS_BIND; #[doc = "Atomically move a mount to another mount point.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_MOVE therein"] flag MOUNT_MOVE = MS_MOVE; #[doc = "Not documented."] flag MOUNT_REC = MS_REC; #[doc = "Omit certain warning messages from the kernel log.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_SILENT therein"] flag MOUNT_SILENT = MS_SILENT; #[doc = "Not documented."] flag MOUNT_POSIX_ACL = MS_POSIXACL; #[doc = "Not documented."] flag MOUNT_UNBINDABLE = MS_UNBINDABLE; #[doc = "Not documented."] flag MOUNT_PRIVATE = MS_PRIVATE; #[doc = "Not documented."] flag MOUNT_SLAVE = MS_SLAVE; #[doc = "Not documented."] flag MOUNT_SHARED = MS_SHARED; #[doc = "Perform an access time update after every access.\n"] #[doc = "= See also"] #[doc = "* link:man:mount(2) and MS_STRICTATIME therein"] flag MOUNT_STRICT_ACCESS_TIME = MS_STRICTATIME; #[doc = "Maintain changes to access/modification/status-change times in memory and \ only update the inodes under special circumstances.\n"] #[doc = "= Remarks"] #[doc = ":lazy: link:man:mount(2)"] #[doc = "See the {lazy}[manual page] and MS_LAZYTIME therein for the details.\n"] #[doc = "== Kernel versions"] #[doc = "The required kernel version is 4.0.\n"] #[doc = "= See also"] #[doc = "* {lazy} and MS_LAZYTIME therein"] flag MOUNT_LAZY_TIME = MS_LAZYTIME; } impl MountFlags { /// Sets a flag. /// /// [argument, flag] /// The flag to be set. pub fn set(&mut self, flag: MountFlags) { self.0 |= flag.0 } /// Clears a flag. /// /// [argument, flag] /// The flag to be cleared. pub fn unset(&mut self, flag: MountFlags) { self.0 &=!flag.0 } /// Returns whether a flag is set. /// /// [argument, flag] /// The flag to be checked. pub fn is_set(&self, flag: MountFlags) -> bool { self.0 & flag.0!= 0 } }
#[doc = "= See also"]
random_line_split
font_template.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::io; use std::io::File; /// Platform specific font representation for Linux. /// The identifier is an absolute path, and the bytes /// field is the loaded data that can be passed to /// freetype and azure directly. pub struct
{ pub bytes: Vec<u8>, pub identifier: String, } impl FontTemplateData { pub fn new(identifier: &str, font_data: Option<Vec<u8>>) -> FontTemplateData { let bytes = match font_data { Some(bytes) => { bytes }, None => { // TODO: Handle file load failure! let mut file = File::open_mode(&Path::new(identifier), io::Open, io::Read).unwrap(); file.read_to_end().unwrap() }, }; FontTemplateData { bytes: bytes, identifier: identifier.to_string(), } } }
FontTemplateData
identifier_name
font_template.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::io; use std::io::File; /// Platform specific font representation for Linux. /// The identifier is an absolute path, and the bytes /// field is the loaded data that can be passed to /// freetype and azure directly. pub struct FontTemplateData { pub bytes: Vec<u8>, pub identifier: String, } impl FontTemplateData { pub fn new(identifier: &str, font_data: Option<Vec<u8>>) -> FontTemplateData
}
{ let bytes = match font_data { Some(bytes) => { bytes }, None => { // TODO: Handle file load failure! let mut file = File::open_mode(&Path::new(identifier), io::Open, io::Read).unwrap(); file.read_to_end().unwrap() }, }; FontTemplateData { bytes: bytes, identifier: identifier.to_string(), } }
identifier_body
font_template.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::io; use std::io::File; /// Platform specific font representation for Linux. /// The identifier is an absolute path, and the bytes /// field is the loaded data that can be passed to /// freetype and azure directly. pub struct FontTemplateData { pub bytes: Vec<u8>, pub identifier: String, } impl FontTemplateData { pub fn new(identifier: &str, font_data: Option<Vec<u8>>) -> FontTemplateData { let bytes = match font_data { Some(bytes) => { bytes }, None =>
, }; FontTemplateData { bytes: bytes, identifier: identifier.to_string(), } } }
{ // TODO: Handle file load failure! let mut file = File::open_mode(&Path::new(identifier), io::Open, io::Read).unwrap(); file.read_to_end().unwrap() }
conditional_block
font_template.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use std::io; use std::io::File;
/// The identifier is an absolute path, and the bytes /// field is the loaded data that can be passed to /// freetype and azure directly. pub struct FontTemplateData { pub bytes: Vec<u8>, pub identifier: String, } impl FontTemplateData { pub fn new(identifier: &str, font_data: Option<Vec<u8>>) -> FontTemplateData { let bytes = match font_data { Some(bytes) => { bytes }, None => { // TODO: Handle file load failure! let mut file = File::open_mode(&Path::new(identifier), io::Open, io::Read).unwrap(); file.read_to_end().unwrap() }, }; FontTemplateData { bytes: bytes, identifier: identifier.to_string(), } } }
/// Platform specific font representation for Linux.
random_line_split
issue-21400.rs
// run-pass // Regression test for #21400 which itself was extracted from // stackoverflow.com/questions/28031155/is-my-borrow-checker-drunk/28031580 fn main() { let mut t = Test; assert_eq!(t.method1("one"), Ok(1)); assert_eq!(t.method2("two"), Ok(2)); assert_eq!(t.test(), Ok(2)); }
} fn method2(self: &mut Test, _arg: &str) -> Result<usize, &str> { Ok(2) } fn test(self: &mut Test) -> Result<usize, &str> { let s = format!("abcde"); // (Originally, this invocation of method2 was saying that `s` // does not live long enough.) let data = match self.method2(&*s) { Ok(r) => r, Err(e) => return Err(e) }; Ok(data) } } // Below is a closer match for the original test that was failing to compile pub struct GitConnect; impl GitConnect { fn command(self: &mut GitConnect, _s: &str) -> Result<Vec<Vec<u8>>, &str> { unimplemented!() } pub fn git_upload_pack(self: &mut GitConnect) -> Result<String, &str> { let c = format!("git-upload-pack"); let mut out = String::new(); let data = self.command(&c)?; for line in data.iter() { out.push_str(&format!("{:?}", line)); } Ok(out) } }
struct Test; impl Test { fn method1(&mut self, _arg: &str) -> Result<usize, &str> { Ok(1)
random_line_split
issue-21400.rs
// run-pass // Regression test for #21400 which itself was extracted from // stackoverflow.com/questions/28031155/is-my-borrow-checker-drunk/28031580 fn main() { let mut t = Test; assert_eq!(t.method1("one"), Ok(1)); assert_eq!(t.method2("two"), Ok(2)); assert_eq!(t.test(), Ok(2)); } struct Test; impl Test { fn method1(&mut self, _arg: &str) -> Result<usize, &str> { Ok(1) } fn method2(self: &mut Test, _arg: &str) -> Result<usize, &str> { Ok(2) } fn test(self: &mut Test) -> Result<usize, &str>
} // Below is a closer match for the original test that was failing to compile pub struct GitConnect; impl GitConnect { fn command(self: &mut GitConnect, _s: &str) -> Result<Vec<Vec<u8>>, &str> { unimplemented!() } pub fn git_upload_pack(self: &mut GitConnect) -> Result<String, &str> { let c = format!("git-upload-pack"); let mut out = String::new(); let data = self.command(&c)?; for line in data.iter() { out.push_str(&format!("{:?}", line)); } Ok(out) } }
{ let s = format!("abcde"); // (Originally, this invocation of method2 was saying that `s` // does not live long enough.) let data = match self.method2(&*s) { Ok(r) => r, Err(e) => return Err(e) }; Ok(data) }
identifier_body
issue-21400.rs
// run-pass // Regression test for #21400 which itself was extracted from // stackoverflow.com/questions/28031155/is-my-borrow-checker-drunk/28031580 fn main() { let mut t = Test; assert_eq!(t.method1("one"), Ok(1)); assert_eq!(t.method2("two"), Ok(2)); assert_eq!(t.test(), Ok(2)); } struct Test; impl Test { fn method1(&mut self, _arg: &str) -> Result<usize, &str> { Ok(1) } fn method2(self: &mut Test, _arg: &str) -> Result<usize, &str> { Ok(2) } fn test(self: &mut Test) -> Result<usize, &str> { let s = format!("abcde"); // (Originally, this invocation of method2 was saying that `s` // does not live long enough.) let data = match self.method2(&*s) { Ok(r) => r, Err(e) => return Err(e) }; Ok(data) } } // Below is a closer match for the original test that was failing to compile pub struct GitConnect; impl GitConnect { fn command(self: &mut GitConnect, _s: &str) -> Result<Vec<Vec<u8>>, &str> { unimplemented!() } pub fn
(self: &mut GitConnect) -> Result<String, &str> { let c = format!("git-upload-pack"); let mut out = String::new(); let data = self.command(&c)?; for line in data.iter() { out.push_str(&format!("{:?}", line)); } Ok(out) } }
git_upload_pack
identifier_name
order_code.rs
#[derive(Clone, PartialEq)] pub struct OrderCode { pub value: String, } impl OrderCode { pub fn new(year: i32, number: usize) -> OrderCode { let code = format!("{:04}{:08}", year, number); return OrderCode { value: code }; } } impl FromStr for OrderCode { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(OrderCode { value: s.to_string(), }) // TODO error handling } } #[cfg(test)] mod tests { use super::*; #[test] fn check_new_code() { let order_code = OrderCode::new(2021, 123); assert_eq!("202100000123", order_code.value); } #[test] fn check_code_from_str() { let order_code = OrderCode::from_str("202100000123").unwrap(); assert_eq!("202100000123", order_code.value); } }
// value object #[allow(dead_code)] use std::str::FromStr;
random_line_split
order_code.rs
// value object #[allow(dead_code)] use std::str::FromStr; #[derive(Clone, PartialEq)] pub struct
{ pub value: String, } impl OrderCode { pub fn new(year: i32, number: usize) -> OrderCode { let code = format!("{:04}{:08}", year, number); return OrderCode { value: code }; } } impl FromStr for OrderCode { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(OrderCode { value: s.to_string(), }) // TODO error handling } } #[cfg(test)] mod tests { use super::*; #[test] fn check_new_code() { let order_code = OrderCode::new(2021, 123); assert_eq!("202100000123", order_code.value); } #[test] fn check_code_from_str() { let order_code = OrderCode::from_str("202100000123").unwrap(); assert_eq!("202100000123", order_code.value); } }
OrderCode
identifier_name
vimawesome.rs
use std::io; use std::process::{Command, Stdio}; use std::rc::Rc; use std::thread; use serde_json; use glib; use gtk; use gtk::prelude::*; use super::store::PlugInfo; pub fn call<F>(query: Option<String>, cb: F) where F: FnOnce(io::Result<DescriptionList>) + Send +'static, { thread::spawn(move || { let mut result = Some(request(query.as_ref().map(|s| s.as_ref()))); let mut cb = Some(cb); glib::idle_add(move || {
fn request(query: Option<&str>) -> io::Result<DescriptionList> { let child = Command::new("curl") .arg("-s") .arg(format!( "https://vimawesome.com/api/plugins?query={}&page=1", query.unwrap_or("") )) .stdout(Stdio::piped()) .spawn()?; let out = child.wait_with_output()?; if out.status.success() { if out.stdout.is_empty() { Ok(DescriptionList::empty()) } else { let description_list: DescriptionList = serde_json::from_slice(&out.stdout) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; Ok(description_list) } } else { Err(io::Error::new( io::ErrorKind::Other, format!( "curl exit with error:\n{}", match out.status.code() { Some(code) => format!("Exited with status code: {}", code), None => "Process terminated by signal".to_owned(), } ), )) } } pub fn build_result_panel<F: Fn(PlugInfo) +'static>( list: &DescriptionList, add_cb: F, ) -> gtk::ScrolledWindow { let scroll = gtk::ScrolledWindow::new( Option::<&gtk::Adjustment>::None, Option::<&gtk::Adjustment>::None, ); scroll.get_style_context().add_class("view"); let panel = gtk::ListBox::new(); let cb_ref = Rc::new(add_cb); for plug in list.plugins.iter() { let row = create_plug_row(plug, cb_ref.clone()); panel.add(&row); } scroll.add(&panel); scroll.show_all(); scroll } fn create_plug_row<F: Fn(PlugInfo) +'static>( plug: &Description, add_cb: Rc<F>, ) -> gtk::ListBoxRow { let row = gtk::ListBoxRow::new(); let row_container = gtk::Box::new(gtk::Orientation::Vertical, 5); row_container.set_border_width(5); let hbox = gtk::Box::new(gtk::Orientation::Horizontal, 5); let label_box = create_plug_label(plug); let button_box = gtk::Box::new(gtk::Orientation::Horizontal, 0); button_box.set_halign(gtk::Align::End); let add_btn = gtk::Button::new_with_label("Install"); button_box.pack_start(&add_btn, false, true, 0); row_container.pack_start(&hbox, true, true, 0); hbox.pack_start(&label_box, true, true, 0); hbox.pack_start(&button_box, false, true, 0); row.add(&row_container); add_btn.connect_clicked(clone!(plug => move |btn| { if let Some(ref github_url) = plug.github_url { btn.set_sensitive(false); add_cb(PlugInfo::new(plug.name.clone(), github_url.clone())); } })); row } fn create_plug_label(plug: &Description) -> gtk::Box { let label_box = gtk::Box::new(gtk::Orientation::Vertical, 5); let name_lbl = gtk::Label::new(None); name_lbl.set_markup(&format!( "<b>{}</b> by {}", plug.name, plug.author .as_ref() .map(|s| s.as_ref()) .unwrap_or("unknown",) )); name_lbl.set_halign(gtk::Align::Start); let url_lbl = gtk::Label::new(None); if let Some(url) = plug.github_url.as_ref() { url_lbl.set_markup(&format!("<a href=\"{}\">{}</a>", url, url)); } url_lbl.set_halign(gtk::Align::Start); label_box.pack_start(&name_lbl, true, true, 0); label_box.pack_start(&url_lbl, true, true, 0); label_box } #[derive(Deserialize, Debug)] pub struct DescriptionList { pub plugins: Box<[Description]>, } impl DescriptionList { fn empty() -> DescriptionList { DescriptionList { plugins: Box::new([]), } } } #[derive(Deserialize, Debug, Clone)] pub struct Description { pub name: String, pub github_url: Option<String>, pub author: Option<String>, pub github_stars: Option<i64>, }
cb.take().unwrap()(result.take().unwrap()); Continue(false) }) }); }
random_line_split
vimawesome.rs
use std::io; use std::process::{Command, Stdio}; use std::rc::Rc; use std::thread; use serde_json; use glib; use gtk; use gtk::prelude::*; use super::store::PlugInfo; pub fn call<F>(query: Option<String>, cb: F) where F: FnOnce(io::Result<DescriptionList>) + Send +'static, { thread::spawn(move || { let mut result = Some(request(query.as_ref().map(|s| s.as_ref()))); let mut cb = Some(cb); glib::idle_add(move || { cb.take().unwrap()(result.take().unwrap()); Continue(false) }) }); } fn request(query: Option<&str>) -> io::Result<DescriptionList> { let child = Command::new("curl") .arg("-s") .arg(format!( "https://vimawesome.com/api/plugins?query={}&page=1", query.unwrap_or("") )) .stdout(Stdio::piped()) .spawn()?; let out = child.wait_with_output()?; if out.status.success() { if out.stdout.is_empty() { Ok(DescriptionList::empty()) } else { let description_list: DescriptionList = serde_json::from_slice(&out.stdout) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; Ok(description_list) } } else { Err(io::Error::new( io::ErrorKind::Other, format!( "curl exit with error:\n{}", match out.status.code() { Some(code) => format!("Exited with status code: {}", code), None => "Process terminated by signal".to_owned(), } ), )) } } pub fn build_result_panel<F: Fn(PlugInfo) +'static>( list: &DescriptionList, add_cb: F, ) -> gtk::ScrolledWindow { let scroll = gtk::ScrolledWindow::new( Option::<&gtk::Adjustment>::None, Option::<&gtk::Adjustment>::None, ); scroll.get_style_context().add_class("view"); let panel = gtk::ListBox::new(); let cb_ref = Rc::new(add_cb); for plug in list.plugins.iter() { let row = create_plug_row(plug, cb_ref.clone()); panel.add(&row); } scroll.add(&panel); scroll.show_all(); scroll } fn create_plug_row<F: Fn(PlugInfo) +'static>( plug: &Description, add_cb: Rc<F>, ) -> gtk::ListBoxRow { let row = gtk::ListBoxRow::new(); let row_container = gtk::Box::new(gtk::Orientation::Vertical, 5); row_container.set_border_width(5); let hbox = gtk::Box::new(gtk::Orientation::Horizontal, 5); let label_box = create_plug_label(plug); let button_box = gtk::Box::new(gtk::Orientation::Horizontal, 0); button_box.set_halign(gtk::Align::End); let add_btn = gtk::Button::new_with_label("Install"); button_box.pack_start(&add_btn, false, true, 0); row_container.pack_start(&hbox, true, true, 0); hbox.pack_start(&label_box, true, true, 0); hbox.pack_start(&button_box, false, true, 0); row.add(&row_container); add_btn.connect_clicked(clone!(plug => move |btn| { if let Some(ref github_url) = plug.github_url { btn.set_sensitive(false); add_cb(PlugInfo::new(plug.name.clone(), github_url.clone())); } })); row } fn create_plug_label(plug: &Description) -> gtk::Box { let label_box = gtk::Box::new(gtk::Orientation::Vertical, 5); let name_lbl = gtk::Label::new(None); name_lbl.set_markup(&format!( "<b>{}</b> by {}", plug.name, plug.author .as_ref() .map(|s| s.as_ref()) .unwrap_or("unknown",) )); name_lbl.set_halign(gtk::Align::Start); let url_lbl = gtk::Label::new(None); if let Some(url) = plug.github_url.as_ref() { url_lbl.set_markup(&format!("<a href=\"{}\">{}</a>", url, url)); } url_lbl.set_halign(gtk::Align::Start); label_box.pack_start(&name_lbl, true, true, 0); label_box.pack_start(&url_lbl, true, true, 0); label_box } #[derive(Deserialize, Debug)] pub struct
{ pub plugins: Box<[Description]>, } impl DescriptionList { fn empty() -> DescriptionList { DescriptionList { plugins: Box::new([]), } } } #[derive(Deserialize, Debug, Clone)] pub struct Description { pub name: String, pub github_url: Option<String>, pub author: Option<String>, pub github_stars: Option<i64>, }
DescriptionList
identifier_name
vimawesome.rs
use std::io; use std::process::{Command, Stdio}; use std::rc::Rc; use std::thread; use serde_json; use glib; use gtk; use gtk::prelude::*; use super::store::PlugInfo; pub fn call<F>(query: Option<String>, cb: F) where F: FnOnce(io::Result<DescriptionList>) + Send +'static, { thread::spawn(move || { let mut result = Some(request(query.as_ref().map(|s| s.as_ref()))); let mut cb = Some(cb); glib::idle_add(move || { cb.take().unwrap()(result.take().unwrap()); Continue(false) }) }); } fn request(query: Option<&str>) -> io::Result<DescriptionList> { let child = Command::new("curl") .arg("-s") .arg(format!( "https://vimawesome.com/api/plugins?query={}&page=1", query.unwrap_or("") )) .stdout(Stdio::piped()) .spawn()?; let out = child.wait_with_output()?; if out.status.success() { if out.stdout.is_empty() { Ok(DescriptionList::empty()) } else { let description_list: DescriptionList = serde_json::from_slice(&out.stdout) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; Ok(description_list) } } else
} pub fn build_result_panel<F: Fn(PlugInfo) +'static>( list: &DescriptionList, add_cb: F, ) -> gtk::ScrolledWindow { let scroll = gtk::ScrolledWindow::new( Option::<&gtk::Adjustment>::None, Option::<&gtk::Adjustment>::None, ); scroll.get_style_context().add_class("view"); let panel = gtk::ListBox::new(); let cb_ref = Rc::new(add_cb); for plug in list.plugins.iter() { let row = create_plug_row(plug, cb_ref.clone()); panel.add(&row); } scroll.add(&panel); scroll.show_all(); scroll } fn create_plug_row<F: Fn(PlugInfo) +'static>( plug: &Description, add_cb: Rc<F>, ) -> gtk::ListBoxRow { let row = gtk::ListBoxRow::new(); let row_container = gtk::Box::new(gtk::Orientation::Vertical, 5); row_container.set_border_width(5); let hbox = gtk::Box::new(gtk::Orientation::Horizontal, 5); let label_box = create_plug_label(plug); let button_box = gtk::Box::new(gtk::Orientation::Horizontal, 0); button_box.set_halign(gtk::Align::End); let add_btn = gtk::Button::new_with_label("Install"); button_box.pack_start(&add_btn, false, true, 0); row_container.pack_start(&hbox, true, true, 0); hbox.pack_start(&label_box, true, true, 0); hbox.pack_start(&button_box, false, true, 0); row.add(&row_container); add_btn.connect_clicked(clone!(plug => move |btn| { if let Some(ref github_url) = plug.github_url { btn.set_sensitive(false); add_cb(PlugInfo::new(plug.name.clone(), github_url.clone())); } })); row } fn create_plug_label(plug: &Description) -> gtk::Box { let label_box = gtk::Box::new(gtk::Orientation::Vertical, 5); let name_lbl = gtk::Label::new(None); name_lbl.set_markup(&format!( "<b>{}</b> by {}", plug.name, plug.author .as_ref() .map(|s| s.as_ref()) .unwrap_or("unknown",) )); name_lbl.set_halign(gtk::Align::Start); let url_lbl = gtk::Label::new(None); if let Some(url) = plug.github_url.as_ref() { url_lbl.set_markup(&format!("<a href=\"{}\">{}</a>", url, url)); } url_lbl.set_halign(gtk::Align::Start); label_box.pack_start(&name_lbl, true, true, 0); label_box.pack_start(&url_lbl, true, true, 0); label_box } #[derive(Deserialize, Debug)] pub struct DescriptionList { pub plugins: Box<[Description]>, } impl DescriptionList { fn empty() -> DescriptionList { DescriptionList { plugins: Box::new([]), } } } #[derive(Deserialize, Debug, Clone)] pub struct Description { pub name: String, pub github_url: Option<String>, pub author: Option<String>, pub github_stars: Option<i64>, }
{ Err(io::Error::new( io::ErrorKind::Other, format!( "curl exit with error:\n{}", match out.status.code() { Some(code) => format!("Exited with status code: {}", code), None => "Process terminated by signal".to_owned(), } ), )) }
conditional_block
osstringext.rs
use std::ffi::OsStr; #[cfg(not(target_os = "windows"))] use std::os::unix::ffi::OsStrExt; #[cfg(target_os = "windows")] use INVALID_UTF8; #[cfg(target_os = "windows")] trait OsStrExt3 { fn from_bytes(b: &[u8]) -> &Self; fn as_bytes(&self) -> &[u8]; } #[doc(hidden)] pub trait OsStrExt2 { fn starts_with(&self, s: &[u8]) -> bool; fn split_at_byte(&self, b: u8) -> (&OsStr, &OsStr); fn split_at(&self, i: usize) -> (&OsStr, &OsStr); fn trim_left_matches(&self, b: u8) -> &OsStr; fn len_(&self) -> usize; fn contains_byte(&self, b: u8) -> bool; fn is_empty_(&self) -> bool; fn split(&self, b: u8) -> OsSplit; } #[cfg(target_os = "windows")] impl OsStrExt3 for OsStr { fn from_bytes(b: &[u8]) -> &Self { use ::std::mem; unsafe { mem::transmute(b) } } fn as_bytes(&self) -> &[u8] { self.to_str().map(|s| s.as_bytes()).expect(INVALID_UTF8) } } impl OsStrExt2 for OsStr { fn starts_with(&self, s: &[u8]) -> bool { self.as_bytes().starts_with(s) } fn is_empty_(&self) -> bool { self.as_bytes().is_empty() } fn contains_byte(&self, byte: u8) -> bool { for b in self.as_bytes() { if b == &byte { return true; } } false } fn split_at_byte(&self, byte: u8) -> (&OsStr, &OsStr) { for (i, b) in self.as_bytes().iter().enumerate() { if b == &byte { return (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i+1..])); }
for (i, b) in self.as_bytes().iter().enumerate() { if b != &byte { return &OsStr::from_bytes(&self.as_bytes()[i..]); } } &*self } fn split_at(&self, i: usize) -> (&OsStr, &OsStr) { (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i..])) } fn len_(&self) -> usize { self.as_bytes().len() } fn split(&self, b: u8) -> OsSplit { OsSplit { sep: b, val: self.as_bytes(), pos: 0 } } } #[doc(hidden)] #[derive(Clone, Debug)] pub struct OsSplit<'a> { sep: u8, val: &'a [u8], pos: usize, } impl<'a> Iterator for OsSplit<'a> { type Item = &'a OsStr; fn next(&mut self) -> Option<&'a OsStr> { debugln!("fn=OsSplit::next;"); debugln!("OsSplit: {:?}", self); if self.pos == self.val.len() { return None; } let start = self.pos; for b in &self.val[start..] { self.pos += 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[start..self.pos - 1])); } } Some(&OsStr::from_bytes(&self.val[start..])) } fn size_hint(&self) -> (usize, Option<usize>) { let mut count = 0; for b in &self.val[self.pos..] { if *b == self.sep { count += 1; } } if count > 0 { return (count, Some(count)); } (0, None) } } impl<'a> DoubleEndedIterator for OsSplit<'a> { fn next_back(&mut self) -> Option<&'a OsStr> { if self.pos == 0 { return None; } let start = self.pos; for b in self.val[..self.pos].iter().rev() { self.pos -= 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[self.pos + 1..start])); } } Some(&OsStr::from_bytes(&self.val[..start])) } }
} (&*self, &OsStr::from_bytes(&self.as_bytes()[self.len_()..self.len_()])) } fn trim_left_matches(&self, byte: u8) -> &OsStr {
random_line_split
osstringext.rs
use std::ffi::OsStr; #[cfg(not(target_os = "windows"))] use std::os::unix::ffi::OsStrExt; #[cfg(target_os = "windows")] use INVALID_UTF8; #[cfg(target_os = "windows")] trait OsStrExt3 { fn from_bytes(b: &[u8]) -> &Self; fn as_bytes(&self) -> &[u8]; } #[doc(hidden)] pub trait OsStrExt2 { fn starts_with(&self, s: &[u8]) -> bool; fn split_at_byte(&self, b: u8) -> (&OsStr, &OsStr); fn split_at(&self, i: usize) -> (&OsStr, &OsStr); fn trim_left_matches(&self, b: u8) -> &OsStr; fn len_(&self) -> usize; fn contains_byte(&self, b: u8) -> bool; fn is_empty_(&self) -> bool; fn split(&self, b: u8) -> OsSplit; } #[cfg(target_os = "windows")] impl OsStrExt3 for OsStr { fn from_bytes(b: &[u8]) -> &Self { use ::std::mem; unsafe { mem::transmute(b) } } fn as_bytes(&self) -> &[u8] { self.to_str().map(|s| s.as_bytes()).expect(INVALID_UTF8) } } impl OsStrExt2 for OsStr { fn starts_with(&self, s: &[u8]) -> bool { self.as_bytes().starts_with(s) } fn is_empty_(&self) -> bool { self.as_bytes().is_empty() } fn
(&self, byte: u8) -> bool { for b in self.as_bytes() { if b == &byte { return true; } } false } fn split_at_byte(&self, byte: u8) -> (&OsStr, &OsStr) { for (i, b) in self.as_bytes().iter().enumerate() { if b == &byte { return (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i+1..])); } } (&*self, &OsStr::from_bytes(&self.as_bytes()[self.len_()..self.len_()])) } fn trim_left_matches(&self, byte: u8) -> &OsStr { for (i, b) in self.as_bytes().iter().enumerate() { if b != &byte { return &OsStr::from_bytes(&self.as_bytes()[i..]); } } &*self } fn split_at(&self, i: usize) -> (&OsStr, &OsStr) { (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i..])) } fn len_(&self) -> usize { self.as_bytes().len() } fn split(&self, b: u8) -> OsSplit { OsSplit { sep: b, val: self.as_bytes(), pos: 0 } } } #[doc(hidden)] #[derive(Clone, Debug)] pub struct OsSplit<'a> { sep: u8, val: &'a [u8], pos: usize, } impl<'a> Iterator for OsSplit<'a> { type Item = &'a OsStr; fn next(&mut self) -> Option<&'a OsStr> { debugln!("fn=OsSplit::next;"); debugln!("OsSplit: {:?}", self); if self.pos == self.val.len() { return None; } let start = self.pos; for b in &self.val[start..] { self.pos += 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[start..self.pos - 1])); } } Some(&OsStr::from_bytes(&self.val[start..])) } fn size_hint(&self) -> (usize, Option<usize>) { let mut count = 0; for b in &self.val[self.pos..] { if *b == self.sep { count += 1; } } if count > 0 { return (count, Some(count)); } (0, None) } } impl<'a> DoubleEndedIterator for OsSplit<'a> { fn next_back(&mut self) -> Option<&'a OsStr> { if self.pos == 0 { return None; } let start = self.pos; for b in self.val[..self.pos].iter().rev() { self.pos -= 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[self.pos + 1..start])); } } Some(&OsStr::from_bytes(&self.val[..start])) } }
contains_byte
identifier_name
osstringext.rs
use std::ffi::OsStr; #[cfg(not(target_os = "windows"))] use std::os::unix::ffi::OsStrExt; #[cfg(target_os = "windows")] use INVALID_UTF8; #[cfg(target_os = "windows")] trait OsStrExt3 { fn from_bytes(b: &[u8]) -> &Self; fn as_bytes(&self) -> &[u8]; } #[doc(hidden)] pub trait OsStrExt2 { fn starts_with(&self, s: &[u8]) -> bool; fn split_at_byte(&self, b: u8) -> (&OsStr, &OsStr); fn split_at(&self, i: usize) -> (&OsStr, &OsStr); fn trim_left_matches(&self, b: u8) -> &OsStr; fn len_(&self) -> usize; fn contains_byte(&self, b: u8) -> bool; fn is_empty_(&self) -> bool; fn split(&self, b: u8) -> OsSplit; } #[cfg(target_os = "windows")] impl OsStrExt3 for OsStr { fn from_bytes(b: &[u8]) -> &Self { use ::std::mem; unsafe { mem::transmute(b) } } fn as_bytes(&self) -> &[u8] { self.to_str().map(|s| s.as_bytes()).expect(INVALID_UTF8) } } impl OsStrExt2 for OsStr { fn starts_with(&self, s: &[u8]) -> bool { self.as_bytes().starts_with(s) } fn is_empty_(&self) -> bool { self.as_bytes().is_empty() } fn contains_byte(&self, byte: u8) -> bool { for b in self.as_bytes() { if b == &byte { return true; } } false } fn split_at_byte(&self, byte: u8) -> (&OsStr, &OsStr) { for (i, b) in self.as_bytes().iter().enumerate() { if b == &byte { return (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i+1..])); } } (&*self, &OsStr::from_bytes(&self.as_bytes()[self.len_()..self.len_()])) } fn trim_left_matches(&self, byte: u8) -> &OsStr { for (i, b) in self.as_bytes().iter().enumerate() { if b != &byte { return &OsStr::from_bytes(&self.as_bytes()[i..]); } } &*self } fn split_at(&self, i: usize) -> (&OsStr, &OsStr) { (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i..])) } fn len_(&self) -> usize { self.as_bytes().len() } fn split(&self, b: u8) -> OsSplit { OsSplit { sep: b, val: self.as_bytes(), pos: 0 } } } #[doc(hidden)] #[derive(Clone, Debug)] pub struct OsSplit<'a> { sep: u8, val: &'a [u8], pos: usize, } impl<'a> Iterator for OsSplit<'a> { type Item = &'a OsStr; fn next(&mut self) -> Option<&'a OsStr> { debugln!("fn=OsSplit::next;"); debugln!("OsSplit: {:?}", self); if self.pos == self.val.len() { return None; } let start = self.pos; for b in &self.val[start..] { self.pos += 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[start..self.pos - 1])); } } Some(&OsStr::from_bytes(&self.val[start..])) } fn size_hint(&self) -> (usize, Option<usize>) { let mut count = 0; for b in &self.val[self.pos..] { if *b == self.sep
} if count > 0 { return (count, Some(count)); } (0, None) } } impl<'a> DoubleEndedIterator for OsSplit<'a> { fn next_back(&mut self) -> Option<&'a OsStr> { if self.pos == 0 { return None; } let start = self.pos; for b in self.val[..self.pos].iter().rev() { self.pos -= 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[self.pos + 1..start])); } } Some(&OsStr::from_bytes(&self.val[..start])) } }
{ count += 1; }
conditional_block
osstringext.rs
use std::ffi::OsStr; #[cfg(not(target_os = "windows"))] use std::os::unix::ffi::OsStrExt; #[cfg(target_os = "windows")] use INVALID_UTF8; #[cfg(target_os = "windows")] trait OsStrExt3 { fn from_bytes(b: &[u8]) -> &Self; fn as_bytes(&self) -> &[u8]; } #[doc(hidden)] pub trait OsStrExt2 { fn starts_with(&self, s: &[u8]) -> bool; fn split_at_byte(&self, b: u8) -> (&OsStr, &OsStr); fn split_at(&self, i: usize) -> (&OsStr, &OsStr); fn trim_left_matches(&self, b: u8) -> &OsStr; fn len_(&self) -> usize; fn contains_byte(&self, b: u8) -> bool; fn is_empty_(&self) -> bool; fn split(&self, b: u8) -> OsSplit; } #[cfg(target_os = "windows")] impl OsStrExt3 for OsStr { fn from_bytes(b: &[u8]) -> &Self { use ::std::mem; unsafe { mem::transmute(b) } } fn as_bytes(&self) -> &[u8] { self.to_str().map(|s| s.as_bytes()).expect(INVALID_UTF8) } } impl OsStrExt2 for OsStr { fn starts_with(&self, s: &[u8]) -> bool { self.as_bytes().starts_with(s) } fn is_empty_(&self) -> bool { self.as_bytes().is_empty() } fn contains_byte(&self, byte: u8) -> bool { for b in self.as_bytes() { if b == &byte { return true; } } false } fn split_at_byte(&self, byte: u8) -> (&OsStr, &OsStr)
fn trim_left_matches(&self, byte: u8) -> &OsStr { for (i, b) in self.as_bytes().iter().enumerate() { if b != &byte { return &OsStr::from_bytes(&self.as_bytes()[i..]); } } &*self } fn split_at(&self, i: usize) -> (&OsStr, &OsStr) { (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i..])) } fn len_(&self) -> usize { self.as_bytes().len() } fn split(&self, b: u8) -> OsSplit { OsSplit { sep: b, val: self.as_bytes(), pos: 0 } } } #[doc(hidden)] #[derive(Clone, Debug)] pub struct OsSplit<'a> { sep: u8, val: &'a [u8], pos: usize, } impl<'a> Iterator for OsSplit<'a> { type Item = &'a OsStr; fn next(&mut self) -> Option<&'a OsStr> { debugln!("fn=OsSplit::next;"); debugln!("OsSplit: {:?}", self); if self.pos == self.val.len() { return None; } let start = self.pos; for b in &self.val[start..] { self.pos += 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[start..self.pos - 1])); } } Some(&OsStr::from_bytes(&self.val[start..])) } fn size_hint(&self) -> (usize, Option<usize>) { let mut count = 0; for b in &self.val[self.pos..] { if *b == self.sep { count += 1; } } if count > 0 { return (count, Some(count)); } (0, None) } } impl<'a> DoubleEndedIterator for OsSplit<'a> { fn next_back(&mut self) -> Option<&'a OsStr> { if self.pos == 0 { return None; } let start = self.pos; for b in self.val[..self.pos].iter().rev() { self.pos -= 1; if *b == self.sep { return Some(&OsStr::from_bytes(&self.val[self.pos + 1..start])); } } Some(&OsStr::from_bytes(&self.val[..start])) } }
{ for (i, b) in self.as_bytes().iter().enumerate() { if b == &byte { return (&OsStr::from_bytes(&self.as_bytes()[..i]), &OsStr::from_bytes(&self.as_bytes()[i+1..])); } } (&*self, &OsStr::from_bytes(&self.as_bytes()[self.len_()..self.len_()])) }
identifier_body
stack.rs
// This file is part of libfringe, a low-level green threading library. // Copyright (c) Nathan Zadoks <[email protected]> // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Traits for stacks. /// A trait for objects that hold ownership of a stack. /// /// To preserve memory safety, an implementation of this trait must fulfill
/// the following contract: /// /// * The base address of the stack must be aligned to /// a [`STACK_ALIGNMENT`][align]-byte boundary. /// * Every address between the base and the limit must be readable and writable. /// /// [align]: constant.STACK_ALIGNMENT.html pub trait Stack { /// Returns the base address of the stack. /// On all modern architectures, the stack grows downwards, /// so this is the highest address. fn base(&self) -> *mut u8; /// Returns the limit address of the stack. /// On all modern architectures, the stack grows downwards, /// so this is the lowest address. fn limit(&self) -> *mut u8; } /// A marker trait for `Stack` objects with a guard page. /// /// To preserve memory safety, an implementation of this trait must fulfill /// the following contract, in addition to the [contract](trait.Stack.html) of `Stack`: /// /// * Any access of data at addresses `limit()` to `limit().offset(4096)` must /// abnormally terminate, at least, the thread that performs the access. pub unsafe trait GuardedStack {}
random_line_split
bootstrapper.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ client::{CoordinatorMessage, StateSyncClient}, coordinator::StateSyncCoordinator, executor_proxy::{ExecutorProxy, ExecutorProxyTrait}, network::{StateSyncEvents, StateSyncSender}, }; use diem_config::{config::NodeConfig, network_id::NodeNetworkId}; use diem_types::waypoint::Waypoint; use executor_types::ChunkExecutor; use futures::channel::mpsc; use std::{boxed::Box, collections::HashMap, sync::Arc}; use storage_interface::DbReader; use subscription_service::ReconfigSubscription; use tokio::runtime::{Builder, Runtime}; /// Creates and bootstraps new state syncs and creates clients for /// communicating with those state syncs. pub struct StateSyncBootstrapper { _runtime: Runtime, coordinator_sender: mpsc::UnboundedSender<CoordinatorMessage>, } impl StateSyncBootstrapper { pub fn bootstrap( network: Vec<(NodeNetworkId, StateSyncSender, StateSyncEvents)>, state_sync_to_mempool_sender: mpsc::Sender<diem_mempool::CommitNotification>, storage: Arc<dyn DbReader>, executor: Box<dyn ChunkExecutor>, node_config: &NodeConfig, waypoint: Waypoint, reconfig_event_subscriptions: Vec<ReconfigSubscription>, ) -> Self { let runtime = Builder::new_multi_thread() .thread_name("state-sync") .enable_all() .build() .expect("[State Sync] Failed to create runtime!"); let executor_proxy = ExecutorProxy::new(storage, executor, reconfig_event_subscriptions); Self::bootstrap_with_executor_proxy( runtime, network, state_sync_to_mempool_sender, node_config, waypoint, executor_proxy, )
pub fn bootstrap_with_executor_proxy<E: ExecutorProxyTrait + 'static>( runtime: Runtime, network: Vec<(NodeNetworkId, StateSyncSender, StateSyncEvents)>, state_sync_to_mempool_sender: mpsc::Sender<diem_mempool::CommitNotification>, node_config: &NodeConfig, waypoint: Waypoint, executor_proxy: E, ) -> Self { let (coordinator_sender, coordinator_receiver) = mpsc::unbounded(); let initial_state = executor_proxy .get_local_storage_state() .expect("[State Sync] Starting failure: cannot sync with storage!"); let network_senders: HashMap<_, _> = network .iter() .map(|(network_id, sender, _events)| (network_id.clone(), sender.clone())) .collect(); let coordinator = StateSyncCoordinator::new( coordinator_receiver, state_sync_to_mempool_sender, network_senders, node_config, waypoint, executor_proxy, initial_state, ) .expect("[State Sync] Unable to create state sync coordinator!"); runtime.spawn(coordinator.start(network)); Self { _runtime: runtime, coordinator_sender, } } pub fn create_client(&self, commit_timeout_secs: u64) -> StateSyncClient { StateSyncClient::new(self.coordinator_sender.clone(), commit_timeout_secs) } }
}
random_line_split
bootstrapper.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ client::{CoordinatorMessage, StateSyncClient}, coordinator::StateSyncCoordinator, executor_proxy::{ExecutorProxy, ExecutorProxyTrait}, network::{StateSyncEvents, StateSyncSender}, }; use diem_config::{config::NodeConfig, network_id::NodeNetworkId}; use diem_types::waypoint::Waypoint; use executor_types::ChunkExecutor; use futures::channel::mpsc; use std::{boxed::Box, collections::HashMap, sync::Arc}; use storage_interface::DbReader; use subscription_service::ReconfigSubscription; use tokio::runtime::{Builder, Runtime}; /// Creates and bootstraps new state syncs and creates clients for /// communicating with those state syncs. pub struct
{ _runtime: Runtime, coordinator_sender: mpsc::UnboundedSender<CoordinatorMessage>, } impl StateSyncBootstrapper { pub fn bootstrap( network: Vec<(NodeNetworkId, StateSyncSender, StateSyncEvents)>, state_sync_to_mempool_sender: mpsc::Sender<diem_mempool::CommitNotification>, storage: Arc<dyn DbReader>, executor: Box<dyn ChunkExecutor>, node_config: &NodeConfig, waypoint: Waypoint, reconfig_event_subscriptions: Vec<ReconfigSubscription>, ) -> Self { let runtime = Builder::new_multi_thread() .thread_name("state-sync") .enable_all() .build() .expect("[State Sync] Failed to create runtime!"); let executor_proxy = ExecutorProxy::new(storage, executor, reconfig_event_subscriptions); Self::bootstrap_with_executor_proxy( runtime, network, state_sync_to_mempool_sender, node_config, waypoint, executor_proxy, ) } pub fn bootstrap_with_executor_proxy<E: ExecutorProxyTrait + 'static>( runtime: Runtime, network: Vec<(NodeNetworkId, StateSyncSender, StateSyncEvents)>, state_sync_to_mempool_sender: mpsc::Sender<diem_mempool::CommitNotification>, node_config: &NodeConfig, waypoint: Waypoint, executor_proxy: E, ) -> Self { let (coordinator_sender, coordinator_receiver) = mpsc::unbounded(); let initial_state = executor_proxy .get_local_storage_state() .expect("[State Sync] Starting failure: cannot sync with storage!"); let network_senders: HashMap<_, _> = network .iter() .map(|(network_id, sender, _events)| (network_id.clone(), sender.clone())) .collect(); let coordinator = StateSyncCoordinator::new( coordinator_receiver, state_sync_to_mempool_sender, network_senders, node_config, waypoint, executor_proxy, initial_state, ) .expect("[State Sync] Unable to create state sync coordinator!"); runtime.spawn(coordinator.start(network)); Self { _runtime: runtime, coordinator_sender, } } pub fn create_client(&self, commit_timeout_secs: u64) -> StateSyncClient { StateSyncClient::new(self.coordinator_sender.clone(), commit_timeout_secs) } }
StateSyncBootstrapper
identifier_name
bootstrapper.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ client::{CoordinatorMessage, StateSyncClient}, coordinator::StateSyncCoordinator, executor_proxy::{ExecutorProxy, ExecutorProxyTrait}, network::{StateSyncEvents, StateSyncSender}, }; use diem_config::{config::NodeConfig, network_id::NodeNetworkId}; use diem_types::waypoint::Waypoint; use executor_types::ChunkExecutor; use futures::channel::mpsc; use std::{boxed::Box, collections::HashMap, sync::Arc}; use storage_interface::DbReader; use subscription_service::ReconfigSubscription; use tokio::runtime::{Builder, Runtime}; /// Creates and bootstraps new state syncs and creates clients for /// communicating with those state syncs. pub struct StateSyncBootstrapper { _runtime: Runtime, coordinator_sender: mpsc::UnboundedSender<CoordinatorMessage>, } impl StateSyncBootstrapper { pub fn bootstrap( network: Vec<(NodeNetworkId, StateSyncSender, StateSyncEvents)>, state_sync_to_mempool_sender: mpsc::Sender<diem_mempool::CommitNotification>, storage: Arc<dyn DbReader>, executor: Box<dyn ChunkExecutor>, node_config: &NodeConfig, waypoint: Waypoint, reconfig_event_subscriptions: Vec<ReconfigSubscription>, ) -> Self
pub fn bootstrap_with_executor_proxy<E: ExecutorProxyTrait + 'static>( runtime: Runtime, network: Vec<(NodeNetworkId, StateSyncSender, StateSyncEvents)>, state_sync_to_mempool_sender: mpsc::Sender<diem_mempool::CommitNotification>, node_config: &NodeConfig, waypoint: Waypoint, executor_proxy: E, ) -> Self { let (coordinator_sender, coordinator_receiver) = mpsc::unbounded(); let initial_state = executor_proxy .get_local_storage_state() .expect("[State Sync] Starting failure: cannot sync with storage!"); let network_senders: HashMap<_, _> = network .iter() .map(|(network_id, sender, _events)| (network_id.clone(), sender.clone())) .collect(); let coordinator = StateSyncCoordinator::new( coordinator_receiver, state_sync_to_mempool_sender, network_senders, node_config, waypoint, executor_proxy, initial_state, ) .expect("[State Sync] Unable to create state sync coordinator!"); runtime.spawn(coordinator.start(network)); Self { _runtime: runtime, coordinator_sender, } } pub fn create_client(&self, commit_timeout_secs: u64) -> StateSyncClient { StateSyncClient::new(self.coordinator_sender.clone(), commit_timeout_secs) } }
{ let runtime = Builder::new_multi_thread() .thread_name("state-sync") .enable_all() .build() .expect("[State Sync] Failed to create runtime!"); let executor_proxy = ExecutorProxy::new(storage, executor, reconfig_event_subscriptions); Self::bootstrap_with_executor_proxy( runtime, network, state_sync_to_mempool_sender, node_config, waypoint, executor_proxy, ) }
identifier_body
rcvr-borrowed-to-slice.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. trait sum { fn sum_(self) -> int; } // Note: impl on a slice impl<'a> sum for &'a [int] { fn sum_(self) -> int { self.iter().fold(0, |a, &b| a + b) } } fn call_sum(x: &[int]) -> int { x.sum_() } pub fn main()
{ let x = vec!(1, 2, 3); let y = call_sum(x.as_slice()); println!("y=={}", y); assert_eq!(y, 6); let x = vec!(1, 2, 3); let y = x.as_slice().sum_(); println!("y=={}", y); assert_eq!(y, 6); let x = vec!(1, 2, 3); let y = x.as_slice().sum_(); println!("y=={}", y); assert_eq!(y, 6); }
identifier_body
rcvr-borrowed-to-slice.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. trait sum { fn sum_(self) -> int; } // Note: impl on a slice impl<'a> sum for &'a [int] { fn sum_(self) -> int { self.iter().fold(0, |a, &b| a + b) } } fn call_sum(x: &[int]) -> int { x.sum_() }
let x = vec!(1, 2, 3); let y = call_sum(x.as_slice()); println!("y=={}", y); assert_eq!(y, 6); let x = vec!(1, 2, 3); let y = x.as_slice().sum_(); println!("y=={}", y); assert_eq!(y, 6); let x = vec!(1, 2, 3); let y = x.as_slice().sum_(); println!("y=={}", y); assert_eq!(y, 6); }
pub fn main() {
random_line_split
rcvr-borrowed-to-slice.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. trait sum { fn sum_(self) -> int; } // Note: impl on a slice impl<'a> sum for &'a [int] { fn sum_(self) -> int { self.iter().fold(0, |a, &b| a + b) } } fn
(x: &[int]) -> int { x.sum_() } pub fn main() { let x = vec!(1, 2, 3); let y = call_sum(x.as_slice()); println!("y=={}", y); assert_eq!(y, 6); let x = vec!(1, 2, 3); let y = x.as_slice().sum_(); println!("y=={}", y); assert_eq!(y, 6); let x = vec!(1, 2, 3); let y = x.as_slice().sum_(); println!("y=={}", y); assert_eq!(y, 6); }
call_sum
identifier_name
attr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use attr; use ast; use codemap::{spanned, Spanned, mk_sp, Span}; use parse::common::*; //resolve bug? use parse::token; use parse::parser::Parser; use parse::token::INTERPOLATED; use std::gc::{Gc, GC}; /// A parser that can parse attributes. pub trait ParserAttr { fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>; fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute; fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute>); fn parse_meta_item(&mut self) -> Gc<ast::MetaItem>; fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>>; fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>>; } impl<'a> ParserAttr for Parser<'a> { /// Parse attributes that appear before an item fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> { let mut attrs: Vec<ast::Attribute> = Vec::new(); loop { debug!("parse_outer_attributes: self.token={}", self.token); match self.token { token::POUND => { attrs.push(self.parse_attribute(false)); } token::DOC_COMMENT(s) => { let attr = ::attr::mk_sugared_doc_attr( attr::mk_attr_id(), self.id_to_interned_str(s.ident()), self.span.lo, self.span.hi ); if attr.node.style!= ast::AttrOuter { self.fatal("expected outer comment"); } attrs.push(attr); self.bump(); } _ => break } } return attrs; } /// Matches `attribute = #! [ meta_item ]` /// /// If permit_inner is true, then a leading `!` indicates an inner /// attribute fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute { debug!("parse_attributes: permit_inner={:?} self.token={:?}", permit_inner, self.token); let (span, value, mut style) = match self.token { token::POUND => { let lo = self.span.lo; self.bump(); let style = if self.eat(&token::NOT) { if!permit_inner { let span = self.span; self.span_err(span, "an inner attribute is not permitted in \ this context"); } ast::AttrInner } else { ast::AttrOuter }; self.expect(&token::LBRACKET); let meta_item = self.parse_meta_item(); let hi = self.span.hi; self.expect(&token::RBRACKET); (mk_sp(lo, hi), meta_item, style) } _ => { let token_str = self.this_token_to_string(); self.fatal(format!("expected `#`, found `{}`", token_str).as_slice()); } }; if permit_inner && self.eat(&token::SEMI) { self.span_warn(span, "this inner attribute syntax is deprecated. \ The new syntax is `#![foo]`, with a bang and no semicolon."); style = ast::AttrInner; } return Spanned { span: span, node: ast::Attribute_ { id: attr::mk_attr_id(), style: style, value: value, is_sugared_doc: false } }; } /// Parse attributes that appear after the opening of an item. These should /// be preceded by an exclamation mark, but we accept and warn about one /// terminated by a semicolon. In addition to a vector of inner attributes, /// this function also returns a vector that may contain the first outer /// attribute of the next item (since we can't know whether the attribute /// is an inner attribute of the containing item or an outer attribute of /// the first contained item until we see the semi). /// matches inner_attrs* outer_attr? 
/// you can make the 'next' field an Option, but the result is going to be /// more useful as a vector. fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute> ) { let mut inner_attrs: Vec<ast::Attribute> = Vec::new(); let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new(); loop { let attr = match self.token { token::POUND => { self.parse_attribute(true) } token::DOC_COMMENT(s) => { // we need to get the position of this token before we bump. let Span { lo, hi,.. } = self.span; self.bump(); attr::mk_sugared_doc_attr(attr::mk_attr_id(), self.id_to_interned_str(s.ident()), lo, hi) } _ => { break; } }; if attr.node.style == ast::AttrInner { inner_attrs.push(attr); } else { next_outer_attrs.push(attr); break; } } (inner_attrs, next_outer_attrs) } /// matches meta_item = IDENT /// | IDENT = lit /// | IDENT meta_seq fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> { match self.token { token::INTERPOLATED(token::NtMeta(e)) => { self.bump(); return e } _ => {} } let lo = self.span.lo; let ident = self.parse_ident(); let name = self.id_to_interned_str(ident); match self.token { token::EQ => { self.bump(); let lit = self.parse_lit(); // FIXME #623 Non-string meta items are not serialized correctly; // just forbid them for now match lit.node { ast::LitStr(..) => {} _ => { self.span_err( lit.span, "non-string literals are not allowed in meta-items"); } } let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaNameValue(name, lit)) } token::LPAREN => { let inner_items = self.parse_meta_seq(); let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaList(name, inner_items)) } _ => { let hi = self.last_span.hi; box(GC) spanned(lo, hi, ast::MetaWord(name)) } } } /// matches meta_seq = ( COMMASEP(meta_item) ) fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> { self.parse_seq(&token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_meta_item()).node } fn
(&mut self) -> Vec<Gc<ast::MetaItem>> { match self.token { token::LPAREN => self.parse_meta_seq(), _ => Vec::new() } } }
parse_optional_meta
identifier_name
attr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use attr; use ast; use codemap::{spanned, Spanned, mk_sp, Span}; use parse::common::*; //resolve bug? use parse::token; use parse::parser::Parser; use parse::token::INTERPOLATED; use std::gc::{Gc, GC}; /// A parser that can parse attributes. pub trait ParserAttr { fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>; fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute; fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute>); fn parse_meta_item(&mut self) -> Gc<ast::MetaItem>; fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>>; fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>>; } impl<'a> ParserAttr for Parser<'a> { /// Parse attributes that appear before an item fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> { let mut attrs: Vec<ast::Attribute> = Vec::new(); loop { debug!("parse_outer_attributes: self.token={}", self.token); match self.token { token::POUND => { attrs.push(self.parse_attribute(false)); } token::DOC_COMMENT(s) => { let attr = ::attr::mk_sugared_doc_attr( attr::mk_attr_id(), self.id_to_interned_str(s.ident()), self.span.lo, self.span.hi ); if attr.node.style!= ast::AttrOuter { self.fatal("expected outer comment"); } attrs.push(attr); self.bump(); } _ => break } } return attrs; } /// Matches `attribute = #! [ meta_item ]` /// /// If permit_inner is true, then a leading `!` indicates an inner /// attribute fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute { debug!("parse_attributes: permit_inner={:?} self.token={:?}", permit_inner, self.token); let (span, value, mut style) = match self.token { token::POUND => { let lo = self.span.lo; self.bump(); let style = if self.eat(&token::NOT) { if!permit_inner { let span = self.span; self.span_err(span, "an inner attribute is not permitted in \ this context"); } ast::AttrInner } else { ast::AttrOuter }; self.expect(&token::LBRACKET); let meta_item = self.parse_meta_item(); let hi = self.span.hi; self.expect(&token::RBRACKET); (mk_sp(lo, hi), meta_item, style) } _ => { let token_str = self.this_token_to_string(); self.fatal(format!("expected `#`, found `{}`", token_str).as_slice()); } }; if permit_inner && self.eat(&token::SEMI) { self.span_warn(span, "this inner attribute syntax is deprecated. \ The new syntax is `#![foo]`, with a bang and no semicolon."); style = ast::AttrInner; } return Spanned { span: span, node: ast::Attribute_ { id: attr::mk_attr_id(), style: style, value: value, is_sugared_doc: false } }; } /// Parse attributes that appear after the opening of an item. These should /// be preceded by an exclamation mark, but we accept and warn about one /// terminated by a semicolon. In addition to a vector of inner attributes, /// this function also returns a vector that may contain the first outer /// attribute of the next item (since we can't know whether the attribute /// is an inner attribute of the containing item or an outer attribute of /// the first contained item until we see the semi). /// matches inner_attrs* outer_attr? 
/// you can make the 'next' field an Option, but the result is going to be /// more useful as a vector. fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute> ) { let mut inner_attrs: Vec<ast::Attribute> = Vec::new(); let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new(); loop { let attr = match self.token { token::POUND => { self.parse_attribute(true) } token::DOC_COMMENT(s) => { // we need to get the position of this token before we bump. let Span { lo, hi,.. } = self.span; self.bump(); attr::mk_sugared_doc_attr(attr::mk_attr_id(), self.id_to_interned_str(s.ident()), lo, hi) } _ => { break; } }; if attr.node.style == ast::AttrInner { inner_attrs.push(attr); } else { next_outer_attrs.push(attr); break; } } (inner_attrs, next_outer_attrs) } /// matches meta_item = IDENT /// | IDENT = lit /// | IDENT meta_seq fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> { match self.token { token::INTERPOLATED(token::NtMeta(e)) => { self.bump();
_ => {} } let lo = self.span.lo; let ident = self.parse_ident(); let name = self.id_to_interned_str(ident); match self.token { token::EQ => { self.bump(); let lit = self.parse_lit(); // FIXME #623 Non-string meta items are not serialized correctly; // just forbid them for now match lit.node { ast::LitStr(..) => {} _ => { self.span_err( lit.span, "non-string literals are not allowed in meta-items"); } } let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaNameValue(name, lit)) } token::LPAREN => { let inner_items = self.parse_meta_seq(); let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaList(name, inner_items)) } _ => { let hi = self.last_span.hi; box(GC) spanned(lo, hi, ast::MetaWord(name)) } } } /// matches meta_seq = ( COMMASEP(meta_item) ) fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> { self.parse_seq(&token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_meta_item()).node } fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>> { match self.token { token::LPAREN => self.parse_meta_seq(), _ => Vec::new() } } }
return e }
random_line_split
attr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use attr; use ast; use codemap::{spanned, Spanned, mk_sp, Span}; use parse::common::*; //resolve bug? use parse::token; use parse::parser::Parser; use parse::token::INTERPOLATED; use std::gc::{Gc, GC}; /// A parser that can parse attributes. pub trait ParserAttr { fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>; fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute; fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute>); fn parse_meta_item(&mut self) -> Gc<ast::MetaItem>; fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>>; fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>>; } impl<'a> ParserAttr for Parser<'a> { /// Parse attributes that appear before an item fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> { let mut attrs: Vec<ast::Attribute> = Vec::new(); loop { debug!("parse_outer_attributes: self.token={}", self.token); match self.token { token::POUND => { attrs.push(self.parse_attribute(false)); } token::DOC_COMMENT(s) => { let attr = ::attr::mk_sugared_doc_attr( attr::mk_attr_id(), self.id_to_interned_str(s.ident()), self.span.lo, self.span.hi ); if attr.node.style!= ast::AttrOuter { self.fatal("expected outer comment"); } attrs.push(attr); self.bump(); } _ => break } } return attrs; } /// Matches `attribute = #! [ meta_item ]` /// /// If permit_inner is true, then a leading `!` indicates an inner /// attribute fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute { debug!("parse_attributes: permit_inner={:?} self.token={:?}", permit_inner, self.token); let (span, value, mut style) = match self.token { token::POUND => { let lo = self.span.lo; self.bump(); let style = if self.eat(&token::NOT) { if!permit_inner { let span = self.span; self.span_err(span, "an inner attribute is not permitted in \ this context"); } ast::AttrInner } else { ast::AttrOuter }; self.expect(&token::LBRACKET); let meta_item = self.parse_meta_item(); let hi = self.span.hi; self.expect(&token::RBRACKET); (mk_sp(lo, hi), meta_item, style) } _ => { let token_str = self.this_token_to_string(); self.fatal(format!("expected `#`, found `{}`", token_str).as_slice()); } }; if permit_inner && self.eat(&token::SEMI) { self.span_warn(span, "this inner attribute syntax is deprecated. \ The new syntax is `#![foo]`, with a bang and no semicolon."); style = ast::AttrInner; } return Spanned { span: span, node: ast::Attribute_ { id: attr::mk_attr_id(), style: style, value: value, is_sugared_doc: false } }; } /// Parse attributes that appear after the opening of an item. These should /// be preceded by an exclamation mark, but we accept and warn about one /// terminated by a semicolon. In addition to a vector of inner attributes, /// this function also returns a vector that may contain the first outer /// attribute of the next item (since we can't know whether the attribute /// is an inner attribute of the containing item or an outer attribute of /// the first contained item until we see the semi). /// matches inner_attrs* outer_attr? 
/// you can make the 'next' field an Option, but the result is going to be /// more useful as a vector. fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute> ) { let mut inner_attrs: Vec<ast::Attribute> = Vec::new(); let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new(); loop { let attr = match self.token { token::POUND =>
token::DOC_COMMENT(s) => { // we need to get the position of this token before we bump. let Span { lo, hi,.. } = self.span; self.bump(); attr::mk_sugared_doc_attr(attr::mk_attr_id(), self.id_to_interned_str(s.ident()), lo, hi) } _ => { break; } }; if attr.node.style == ast::AttrInner { inner_attrs.push(attr); } else { next_outer_attrs.push(attr); break; } } (inner_attrs, next_outer_attrs) } /// matches meta_item = IDENT /// | IDENT = lit /// | IDENT meta_seq fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> { match self.token { token::INTERPOLATED(token::NtMeta(e)) => { self.bump(); return e } _ => {} } let lo = self.span.lo; let ident = self.parse_ident(); let name = self.id_to_interned_str(ident); match self.token { token::EQ => { self.bump(); let lit = self.parse_lit(); // FIXME #623 Non-string meta items are not serialized correctly; // just forbid them for now match lit.node { ast::LitStr(..) => {} _ => { self.span_err( lit.span, "non-string literals are not allowed in meta-items"); } } let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaNameValue(name, lit)) } token::LPAREN => { let inner_items = self.parse_meta_seq(); let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaList(name, inner_items)) } _ => { let hi = self.last_span.hi; box(GC) spanned(lo, hi, ast::MetaWord(name)) } } } /// matches meta_seq = ( COMMASEP(meta_item) ) fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> { self.parse_seq(&token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_meta_item()).node } fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>> { match self.token { token::LPAREN => self.parse_meta_seq(), _ => Vec::new() } } }
{ self.parse_attribute(true) }
conditional_block
attr.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use attr; use ast; use codemap::{spanned, Spanned, mk_sp, Span}; use parse::common::*; //resolve bug? use parse::token; use parse::parser::Parser; use parse::token::INTERPOLATED; use std::gc::{Gc, GC}; /// A parser that can parse attributes. pub trait ParserAttr { fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>; fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute; fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute>); fn parse_meta_item(&mut self) -> Gc<ast::MetaItem>; fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>>; fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>>; } impl<'a> ParserAttr for Parser<'a> { /// Parse attributes that appear before an item fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> { let mut attrs: Vec<ast::Attribute> = Vec::new(); loop { debug!("parse_outer_attributes: self.token={}", self.token); match self.token { token::POUND => { attrs.push(self.parse_attribute(false)); } token::DOC_COMMENT(s) => { let attr = ::attr::mk_sugared_doc_attr( attr::mk_attr_id(), self.id_to_interned_str(s.ident()), self.span.lo, self.span.hi ); if attr.node.style!= ast::AttrOuter { self.fatal("expected outer comment"); } attrs.push(attr); self.bump(); } _ => break } } return attrs; } /// Matches `attribute = #! [ meta_item ]` /// /// If permit_inner is true, then a leading `!` indicates an inner /// attribute fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute { debug!("parse_attributes: permit_inner={:?} self.token={:?}", permit_inner, self.token); let (span, value, mut style) = match self.token { token::POUND => { let lo = self.span.lo; self.bump(); let style = if self.eat(&token::NOT) { if!permit_inner { let span = self.span; self.span_err(span, "an inner attribute is not permitted in \ this context"); } ast::AttrInner } else { ast::AttrOuter }; self.expect(&token::LBRACKET); let meta_item = self.parse_meta_item(); let hi = self.span.hi; self.expect(&token::RBRACKET); (mk_sp(lo, hi), meta_item, style) } _ => { let token_str = self.this_token_to_string(); self.fatal(format!("expected `#`, found `{}`", token_str).as_slice()); } }; if permit_inner && self.eat(&token::SEMI) { self.span_warn(span, "this inner attribute syntax is deprecated. \ The new syntax is `#![foo]`, with a bang and no semicolon."); style = ast::AttrInner; } return Spanned { span: span, node: ast::Attribute_ { id: attr::mk_attr_id(), style: style, value: value, is_sugared_doc: false } }; } /// Parse attributes that appear after the opening of an item. These should /// be preceded by an exclamation mark, but we accept and warn about one /// terminated by a semicolon. In addition to a vector of inner attributes, /// this function also returns a vector that may contain the first outer /// attribute of the next item (since we can't know whether the attribute /// is an inner attribute of the containing item or an outer attribute of /// the first contained item until we see the semi). /// matches inner_attrs* outer_attr? 
/// you can make the 'next' field an Option, but the result is going to be /// more useful as a vector. fn parse_inner_attrs_and_next(&mut self) -> (Vec<ast::Attribute>, Vec<ast::Attribute> ) { let mut inner_attrs: Vec<ast::Attribute> = Vec::new(); let mut next_outer_attrs: Vec<ast::Attribute> = Vec::new(); loop { let attr = match self.token { token::POUND => { self.parse_attribute(true) } token::DOC_COMMENT(s) => { // we need to get the position of this token before we bump. let Span { lo, hi,.. } = self.span; self.bump(); attr::mk_sugared_doc_attr(attr::mk_attr_id(), self.id_to_interned_str(s.ident()), lo, hi) } _ => { break; } }; if attr.node.style == ast::AttrInner { inner_attrs.push(attr); } else { next_outer_attrs.push(attr); break; } } (inner_attrs, next_outer_attrs) } /// matches meta_item = IDENT /// | IDENT = lit /// | IDENT meta_seq fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> { match self.token { token::INTERPOLATED(token::NtMeta(e)) => { self.bump(); return e } _ => {} } let lo = self.span.lo; let ident = self.parse_ident(); let name = self.id_to_interned_str(ident); match self.token { token::EQ => { self.bump(); let lit = self.parse_lit(); // FIXME #623 Non-string meta items are not serialized correctly; // just forbid them for now match lit.node { ast::LitStr(..) => {} _ => { self.span_err( lit.span, "non-string literals are not allowed in meta-items"); } } let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaNameValue(name, lit)) } token::LPAREN => { let inner_items = self.parse_meta_seq(); let hi = self.span.hi; box(GC) spanned(lo, hi, ast::MetaList(name, inner_items)) } _ => { let hi = self.last_span.hi; box(GC) spanned(lo, hi, ast::MetaWord(name)) } } } /// matches meta_seq = ( COMMASEP(meta_item) ) fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>>
fn parse_optional_meta(&mut self) -> Vec<Gc<ast::MetaItem>> { match self.token { token::LPAREN => self.parse_meta_seq(), _ => Vec::new() } } }
{ self.parse_seq(&token::LPAREN, &token::RPAREN, seq_sep_trailing_disallowed(token::COMMA), |p| p.parse_meta_item()).node }
identifier_body
traits-conditional-dispatch.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that we are able to resolve conditional dispatch. Here, the // blanket impl for T:Copy coexists with an impl for Box<T>, because // Box does not impl Copy. #![allow(unknown_features)] #![feature(box_syntax)] trait Get { fn get(&self) -> Self; } impl<T:Copy> Get for T { fn get(&self) -> T { *self } } impl<T:Get> Get for Box<T> { fn get(&self) -> Box<T> { box get_it(&**self) } } fn
<T:Get>(t: &T) -> T { (*t).get() } fn main() { assert_eq!(get_it(&1_u32), 1_u32); assert_eq!(get_it(&1_u16), 1_u16); assert_eq!(get_it(&Some(1_u16)), Some(1_u16)); assert_eq!(get_it(&box 1i), box 1i); }
get_it
identifier_name
traits-conditional-dispatch.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that we are able to resolve conditional dispatch. Here, the // blanket impl for T:Copy coexists with an impl for Box<T>, because // Box does not impl Copy. #![allow(unknown_features)] #![feature(box_syntax)] trait Get { fn get(&self) -> Self; } impl<T:Copy> Get for T { fn get(&self) -> T { *self } } impl<T:Get> Get for Box<T> { fn get(&self) -> Box<T>
} fn get_it<T:Get>(t: &T) -> T { (*t).get() } fn main() { assert_eq!(get_it(&1_u32), 1_u32); assert_eq!(get_it(&1_u16), 1_u16); assert_eq!(get_it(&Some(1_u16)), Some(1_u16)); assert_eq!(get_it(&box 1i), box 1i); }
{ box get_it(&**self) }
identifier_body
traits-conditional-dispatch.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test that we are able to resolve conditional dispatch. Here, the // blanket impl for T:Copy coexists with an impl for Box<T>, because // Box does not impl Copy. #![allow(unknown_features)] #![feature(box_syntax)] trait Get { fn get(&self) -> Self; } impl<T:Copy> Get for T { fn get(&self) -> T { *self } } impl<T:Get> Get for Box<T> { fn get(&self) -> Box<T> { box get_it(&**self) } } fn get_it<T:Get>(t: &T) -> T {
assert_eq!(get_it(&1_u32), 1_u32); assert_eq!(get_it(&1_u16), 1_u16); assert_eq!(get_it(&Some(1_u16)), Some(1_u16)); assert_eq!(get_it(&box 1i), box 1i); }
(*t).get() } fn main() {
random_line_split
lib.rs
pub struct Actor { x: i32, y: i32 } impl Actor { fn follow(self, dir: &Direction) -> Actor
} #[derive(PartialEq, Debug)] pub enum Direction { North, East, South, West, } impl Direction { // (Left turn, Right turn) fn turns(&self) -> (Direction, Direction) { match *self { Direction::North => (Direction::West, Direction::East), Direction::East => (Direction::North, Direction::South), Direction::South => (Direction::East, Direction::West), Direction::West => (Direction::South, Direction::North) } } } pub struct Robot(Actor, Direction); impl Robot { pub fn new(x: isize, y: isize, d: Direction) -> Self { Robot( Actor { x : x as i32, y : y as i32 }, d ) } pub fn turn_right(self) -> Self { Robot(self.0, self.1.turns().1) } pub fn turn_left(self) -> Self { Robot(self.0, self.1.turns().0) } pub fn advance(self) -> Self { Robot(self.0.follow(&self.1), self.1) } fn obey(self, code: &str) -> Self { match code { "R" => self.turn_right(), "L" => self.turn_left(), "A" => self.advance(), _ => panic!("Invalid instruction code") } } pub fn instructions(self, instructions: &str) -> Self { match instructions.split_at(1) { (c, "") => self.obey(c), (c, rest) => self.obey(c).instructions(rest) } } pub fn position(&self) -> (isize, isize) { (self.0.x as isize, self.0.y as isize) } pub fn direction(&self) -> &Direction { &self.1 } }
{ match *dir { Direction::North => Actor { x: self.x, y: self.y + 1 }, Direction::East => Actor { x: self.x + 1, y: self.y }, Direction::South => Actor { x: self.x, y: self.y - 1 }, Direction::West => Actor { x: self.x - 1, y: self.y } } }
identifier_body
lib.rs
pub struct Actor { x: i32, y: i32 } impl Actor { fn follow(self, dir: &Direction) -> Actor { match *dir { Direction::North => Actor { x: self.x, y: self.y + 1 }, Direction::East => Actor { x: self.x + 1, y: self.y },
} #[derive(PartialEq, Debug)] pub enum Direction { North, East, South, West, } impl Direction { // (Left turn, Right turn) fn turns(&self) -> (Direction, Direction) { match *self { Direction::North => (Direction::West, Direction::East), Direction::East => (Direction::North, Direction::South), Direction::South => (Direction::East, Direction::West), Direction::West => (Direction::South, Direction::North) } } } pub struct Robot(Actor, Direction); impl Robot { pub fn new(x: isize, y: isize, d: Direction) -> Self { Robot( Actor { x : x as i32, y : y as i32 }, d ) } pub fn turn_right(self) -> Self { Robot(self.0, self.1.turns().1) } pub fn turn_left(self) -> Self { Robot(self.0, self.1.turns().0) } pub fn advance(self) -> Self { Robot(self.0.follow(&self.1), self.1) } fn obey(self, code: &str) -> Self { match code { "R" => self.turn_right(), "L" => self.turn_left(), "A" => self.advance(), _ => panic!("Invalid instruction code") } } pub fn instructions(self, instructions: &str) -> Self { match instructions.split_at(1) { (c, "") => self.obey(c), (c, rest) => self.obey(c).instructions(rest) } } pub fn position(&self) -> (isize, isize) { (self.0.x as isize, self.0.y as isize) } pub fn direction(&self) -> &Direction { &self.1 } }
Direction::South => Actor { x: self.x, y: self.y - 1 }, Direction::West => Actor { x: self.x - 1, y: self.y } } }
random_line_split
lib.rs
pub struct Actor { x: i32, y: i32 } impl Actor { fn follow(self, dir: &Direction) -> Actor { match *dir { Direction::North => Actor { x: self.x, y: self.y + 1 }, Direction::East => Actor { x: self.x + 1, y: self.y }, Direction::South => Actor { x: self.x, y: self.y - 1 }, Direction::West => Actor { x: self.x - 1, y: self.y } } } } #[derive(PartialEq, Debug)] pub enum Direction { North, East, South, West, } impl Direction { // (Left turn, Right turn) fn turns(&self) -> (Direction, Direction) { match *self { Direction::North => (Direction::West, Direction::East), Direction::East => (Direction::North, Direction::South), Direction::South => (Direction::East, Direction::West), Direction::West => (Direction::South, Direction::North) } } } pub struct Robot(Actor, Direction); impl Robot { pub fn new(x: isize, y: isize, d: Direction) -> Self { Robot( Actor { x : x as i32, y : y as i32 }, d ) } pub fn turn_right(self) -> Self { Robot(self.0, self.1.turns().1) } pub fn
(self) -> Self { Robot(self.0, self.1.turns().0) } pub fn advance(self) -> Self { Robot(self.0.follow(&self.1), self.1) } fn obey(self, code: &str) -> Self { match code { "R" => self.turn_right(), "L" => self.turn_left(), "A" => self.advance(), _ => panic!("Invalid instruction code") } } pub fn instructions(self, instructions: &str) -> Self { match instructions.split_at(1) { (c, "") => self.obey(c), (c, rest) => self.obey(c).instructions(rest) } } pub fn position(&self) -> (isize, isize) { (self.0.x as isize, self.0.y as isize) } pub fn direction(&self) -> &Direction { &self.1 } }
turn_left
identifier_name
gqtp_request.rs
use std::io; use std::io::Cursor; use std::io::prelude::*; use std::borrow::Cow; use std::net::TcpStream; use std::string::FromUtf8Error; use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; const RECV_BUF_SIZE: usize = 8192; const GQTP_HEADER_SIZE: usize = 24; #[derive(Debug)] pub enum GQTPError { InvalidProtocol, InvalidBodySize, StatusError(u16), IO(io::Error), EncodingError(FromUtf8Error), } impl From<io::Error> for GQTPError { fn from(err: io::Error) -> GQTPError
} impl From<FromUtf8Error> for GQTPError { fn from(err: FromUtf8Error) -> GQTPError { GQTPError::EncodingError(err) } } /// Request [GQTP protocol](http://groonga.org/docs/spec/gqtp.html) over TcpStream pub struct GQTPRequest<'a> { addr: Cow<'a, str>, } impl<'a> Default for GQTPRequest<'a> { fn default() -> GQTPRequest<'a> { GQTPRequest { addr: Cow::Borrowed("127.0.0.1:10043") } } } impl<'a> GQTPRequest<'a> { /// Create a GQTP client. pub fn new() -> GQTPRequest<'a> { GQTPRequest::default() } /// Set host address for GQTP server. /// /// # Examples /// /// ``` /// extern crate ruroonga_client as groonga; /// /// groonga::GQTPRequest::new().with_addr("127.0.0.1:20043"); /// ``` pub fn with_addr<T>(mut self, addr: T) -> GQTPRequest<'a> where T: Into<Cow<'a, str>> { self.addr = addr.into(); self } /// Send request and Receive response. pub fn call<C>(&self, command: C) -> Result<String, GQTPError> where C: AsRef<str> { // send let mut stream = try!(TcpStream::connect(self.addr.as_ref())); let mut send_buf = vec![]; try!(send_buf.write_u8(0xc7)); try!(send_buf.write_u8(0)); try!(send_buf.write_i16::<BigEndian>(0)); try!(send_buf.write_u8(0)); try!(send_buf.write_u8(0x02)); // flags try!(send_buf.write_u16::<BigEndian>(0)); try!(send_buf.write_u32::<BigEndian>(command.as_ref().len() as u32)); try!(send_buf.write_u32::<BigEndian>(0)); try!(send_buf.write_u64::<BigEndian>(0)); send_buf.extend_from_slice(command.as_ref().as_bytes()); let _ = stream.write_all(send_buf.as_slice()); // receive and check protocol header value let mut read_buf = vec![0; RECV_BUF_SIZE]; let _ = stream.read(&mut read_buf); let mut buf = Cursor::new(read_buf); let protocol = try!(buf.read_u8()); let query_type = try!(buf.read_u8()); if protocol!= 0xc7 || query_type > 5 { return Err(GQTPError::InvalidProtocol); } let _ = try!(buf.read_i16::<BigEndian>()); let _ = try!(buf.read_u8()); let flags = try!(buf.read_u8()); if!((flags & 0x01) == 0x01 || (flags & 0x02) == 0x02) { return Err(GQTPError::InvalidProtocol); } let status = try!(buf.read_u16::<BigEndian>()); if status!= 0 && status!= 1 { return Err(GQTPError::StatusError(status)); } let size = try!(buf.read_i32::<BigEndian>()) as usize; let _ = try!(buf.read_i32::<BigEndian>()); // opaque let _ = try!(buf.read_i64::<BigEndian>()); // cas // read body let mut msg_buf_len = if (size + GQTP_HEADER_SIZE) > RECV_BUF_SIZE { RECV_BUF_SIZE - GQTP_HEADER_SIZE } else { size }; let mut msg = vec![0; msg_buf_len]; let _ = try!(buf.read(&mut msg)); if (size + GQTP_HEADER_SIZE) > RECV_BUF_SIZE { loop { let mut read_buf = vec![0; RECV_BUF_SIZE]; let rsize = try!(stream.read(&mut read_buf)); msg.extend_from_slice(read_buf.as_ref()); msg_buf_len += rsize; if msg_buf_len >= size { break; } } } Ok(try!(String::from_utf8(msg))) } } #[cfg(test)] mod tests { use super::*; #[test] fn smoke_gqtp() { let req = GQTPRequest::new(); assert_eq!("127.0.0.1:10043", req.addr) } #[test] fn smoke_gqtp_with_addr() { let req = GQTPRequest::new().with_addr("127.0.0.1:20043"); assert_eq!("127.0.0.1:20043", req.addr) } #[test] fn smoke_gqtp_with_addr_string() { let req = GQTPRequest::new().with_addr("127.0.0.1:20043".to_string()); assert_eq!("127.0.0.1:20043", req.addr) } }
{ GQTPError::IO(err) }
identifier_body
gqtp_request.rs
use std::io; use std::io::Cursor; use std::io::prelude::*; use std::borrow::Cow; use std::net::TcpStream; use std::string::FromUtf8Error; use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; const RECV_BUF_SIZE: usize = 8192; const GQTP_HEADER_SIZE: usize = 24; #[derive(Debug)] pub enum GQTPError { InvalidProtocol, InvalidBodySize, StatusError(u16), IO(io::Error), EncodingError(FromUtf8Error), } impl From<io::Error> for GQTPError { fn from(err: io::Error) -> GQTPError { GQTPError::IO(err) } } impl From<FromUtf8Error> for GQTPError { fn from(err: FromUtf8Error) -> GQTPError { GQTPError::EncodingError(err) } } /// Request [GQTP protocol](http://groonga.org/docs/spec/gqtp.html) over TcpStream pub struct GQTPRequest<'a> { addr: Cow<'a, str>, } impl<'a> Default for GQTPRequest<'a> { fn default() -> GQTPRequest<'a> { GQTPRequest { addr: Cow::Borrowed("127.0.0.1:10043") } } } impl<'a> GQTPRequest<'a> { /// Create a GQTP client. pub fn new() -> GQTPRequest<'a> { GQTPRequest::default() } /// Set host address for GQTP server. /// /// # Examples /// /// ``` /// extern crate ruroonga_client as groonga; /// /// groonga::GQTPRequest::new().with_addr("127.0.0.1:20043"); /// ``` pub fn with_addr<T>(mut self, addr: T) -> GQTPRequest<'a> where T: Into<Cow<'a, str>> { self.addr = addr.into(); self } /// Send request and Receive response. pub fn call<C>(&self, command: C) -> Result<String, GQTPError> where C: AsRef<str> { // send let mut stream = try!(TcpStream::connect(self.addr.as_ref())); let mut send_buf = vec![]; try!(send_buf.write_u8(0xc7)); try!(send_buf.write_u8(0)); try!(send_buf.write_i16::<BigEndian>(0)); try!(send_buf.write_u8(0)); try!(send_buf.write_u8(0x02)); // flags try!(send_buf.write_u16::<BigEndian>(0)); try!(send_buf.write_u32::<BigEndian>(command.as_ref().len() as u32)); try!(send_buf.write_u32::<BigEndian>(0)); try!(send_buf.write_u64::<BigEndian>(0)); send_buf.extend_from_slice(command.as_ref().as_bytes()); let _ = stream.write_all(send_buf.as_slice()); // receive and check protocol header value let mut read_buf = vec![0; RECV_BUF_SIZE]; let _ = stream.read(&mut read_buf); let mut buf = Cursor::new(read_buf); let protocol = try!(buf.read_u8()); let query_type = try!(buf.read_u8()); if protocol!= 0xc7 || query_type > 5
let _ = try!(buf.read_i16::<BigEndian>()); let _ = try!(buf.read_u8()); let flags = try!(buf.read_u8()); if!((flags & 0x01) == 0x01 || (flags & 0x02) == 0x02) { return Err(GQTPError::InvalidProtocol); } let status = try!(buf.read_u16::<BigEndian>()); if status!= 0 && status!= 1 { return Err(GQTPError::StatusError(status)); } let size = try!(buf.read_i32::<BigEndian>()) as usize; let _ = try!(buf.read_i32::<BigEndian>()); // opaque let _ = try!(buf.read_i64::<BigEndian>()); // cas // read body let mut msg_buf_len = if (size + GQTP_HEADER_SIZE) > RECV_BUF_SIZE { RECV_BUF_SIZE - GQTP_HEADER_SIZE } else { size }; let mut msg = vec![0; msg_buf_len]; let _ = try!(buf.read(&mut msg)); if (size + GQTP_HEADER_SIZE) > RECV_BUF_SIZE { loop { let mut read_buf = vec![0; RECV_BUF_SIZE]; let rsize = try!(stream.read(&mut read_buf)); msg.extend_from_slice(read_buf.as_ref()); msg_buf_len += rsize; if msg_buf_len >= size { break; } } } Ok(try!(String::from_utf8(msg))) } } #[cfg(test)] mod tests { use super::*; #[test] fn smoke_gqtp() { let req = GQTPRequest::new(); assert_eq!("127.0.0.1:10043", req.addr) } #[test] fn smoke_gqtp_with_addr() { let req = GQTPRequest::new().with_addr("127.0.0.1:20043"); assert_eq!("127.0.0.1:20043", req.addr) } #[test] fn smoke_gqtp_with_addr_string() { let req = GQTPRequest::new().with_addr("127.0.0.1:20043".to_string()); assert_eq!("127.0.0.1:20043", req.addr) } }
{ return Err(GQTPError::InvalidProtocol); }
conditional_block
gqtp_request.rs
use std::io; use std::io::Cursor; use std::io::prelude::*; use std::borrow::Cow; use std::net::TcpStream; use std::string::FromUtf8Error; use byteorder::{BigEndian, WriteBytesExt, ReadBytesExt}; const RECV_BUF_SIZE: usize = 8192; const GQTP_HEADER_SIZE: usize = 24; #[derive(Debug)] pub enum
{ InvalidProtocol, InvalidBodySize, StatusError(u16), IO(io::Error), EncodingError(FromUtf8Error), } impl From<io::Error> for GQTPError { fn from(err: io::Error) -> GQTPError { GQTPError::IO(err) } } impl From<FromUtf8Error> for GQTPError { fn from(err: FromUtf8Error) -> GQTPError { GQTPError::EncodingError(err) } } /// Request [GQTP protocol](http://groonga.org/docs/spec/gqtp.html) over TcpStream pub struct GQTPRequest<'a> { addr: Cow<'a, str>, } impl<'a> Default for GQTPRequest<'a> { fn default() -> GQTPRequest<'a> { GQTPRequest { addr: Cow::Borrowed("127.0.0.1:10043") } } } impl<'a> GQTPRequest<'a> { /// Create a GQTP client. pub fn new() -> GQTPRequest<'a> { GQTPRequest::default() } /// Set host address for GQTP server. /// /// # Examples /// /// ``` /// extern crate ruroonga_client as groonga; /// /// groonga::GQTPRequest::new().with_addr("127.0.0.1:20043"); /// ``` pub fn with_addr<T>(mut self, addr: T) -> GQTPRequest<'a> where T: Into<Cow<'a, str>> { self.addr = addr.into(); self } /// Send request and Receive response. pub fn call<C>(&self, command: C) -> Result<String, GQTPError> where C: AsRef<str> { // send let mut stream = try!(TcpStream::connect(self.addr.as_ref())); let mut send_buf = vec![]; try!(send_buf.write_u8(0xc7)); try!(send_buf.write_u8(0)); try!(send_buf.write_i16::<BigEndian>(0)); try!(send_buf.write_u8(0)); try!(send_buf.write_u8(0x02)); // flags try!(send_buf.write_u16::<BigEndian>(0)); try!(send_buf.write_u32::<BigEndian>(command.as_ref().len() as u32)); try!(send_buf.write_u32::<BigEndian>(0)); try!(send_buf.write_u64::<BigEndian>(0)); send_buf.extend_from_slice(command.as_ref().as_bytes()); let _ = stream.write_all(send_buf.as_slice()); // receive and check protocol header value let mut read_buf = vec![0; RECV_BUF_SIZE]; let _ = stream.read(&mut read_buf); let mut buf = Cursor::new(read_buf); let protocol = try!(buf.read_u8()); let query_type = try!(buf.read_u8()); if protocol!= 0xc7 || query_type > 5 { return Err(GQTPError::InvalidProtocol); } let _ = try!(buf.read_i16::<BigEndian>()); let _ = try!(buf.read_u8()); let flags = try!(buf.read_u8()); if!((flags & 0x01) == 0x01 || (flags & 0x02) == 0x02) { return Err(GQTPError::InvalidProtocol); } let status = try!(buf.read_u16::<BigEndian>()); if status!= 0 && status!= 1 { return Err(GQTPError::StatusError(status)); } let size = try!(buf.read_i32::<BigEndian>()) as usize; let _ = try!(buf.read_i32::<BigEndian>()); // opaque let _ = try!(buf.read_i64::<BigEndian>()); // cas // read body let mut msg_buf_len = if (size + GQTP_HEADER_SIZE) > RECV_BUF_SIZE { RECV_BUF_SIZE - GQTP_HEADER_SIZE } else { size }; let mut msg = vec![0; msg_buf_len]; let _ = try!(buf.read(&mut msg)); if (size + GQTP_HEADER_SIZE) > RECV_BUF_SIZE { loop { let mut read_buf = vec![0; RECV_BUF_SIZE]; let rsize = try!(stream.read(&mut read_buf)); msg.extend_from_slice(read_buf.as_ref()); msg_buf_len += rsize; if msg_buf_len >= size { break; } } } Ok(try!(String::from_utf8(msg))) } } #[cfg(test)] mod tests { use super::*; #[test] fn smoke_gqtp() { let req = GQTPRequest::new(); assert_eq!("127.0.0.1:10043", req.addr) } #[test] fn smoke_gqtp_with_addr() { let req = GQTPRequest::new().with_addr("127.0.0.1:20043"); assert_eq!("127.0.0.1:20043", req.addr) } #[test] fn smoke_gqtp_with_addr_string() { let req = GQTPRequest::new().with_addr("127.0.0.1:20043".to_string()); assert_eq!("127.0.0.1:20043", req.addr) } }
GQTPError
identifier_name
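The three gqtp_request.rs rows above all carve up the same GQTP client, whose public surface is GQTPRequest::new, with_addr and call. A minimal usage sketch, assuming a Groonga server is actually listening on the default 127.0.0.1:10043 and that "status" is the command to send; the error handling is illustrative only:

extern crate ruroonga_client as groonga;

fn main() {
    // GQTPRequest::new() uses the default address from Default (127.0.0.1:10043).
    let client = groonga::GQTPRequest::new();

    // call() opens a TcpStream, writes the GQTP header plus the command,
    // and returns the response body as a String.
    match client.call("status") {
        Ok(body) => println!("groonga replied: {}", body),
        Err(err) => println!("GQTP request failed: {:?}", err),
    }
}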
issue_922.rs
use juniper::*; struct Query; #[juniper::graphql_object] impl Query { fn characters() -> Vec<CharacterValue> { vec![ Into::into(Human { id: 0, name: "human-32".to_owned(), }), Into::into(Droid { id: 1, name: "R2-D2".to_owned(), }), ] } } #[juniper::graphql_interface(for = [Human, Droid])] trait Character { fn id(&self) -> i32; fn name(&self) -> String; } #[derive(juniper::GraphQLObject)] #[graphql(impl = CharacterValue)] struct Human { pub id: i32, pub name: String, } #[juniper::graphql_interface] impl Character for Human { fn id(&self) -> i32 { self.id } fn name(&self) -> String { self.name.clone() } } #[derive(juniper::GraphQLObject)] #[graphql(impl = CharacterValue)] struct Droid { pub id: i32, pub name: String, } #[juniper::graphql_interface] impl Character for Droid { fn id(&self) -> i32 { self.id } fn name(&self) -> String
} type Schema = juniper::RootNode<'static, Query, EmptyMutation<()>, EmptySubscription<()>>; #[tokio::test] async fn test_fragment_on_interface() { let query = r#" query Query { characters { ...CharacterFragment } } fragment CharacterFragment on Character { __typename ... on Human { id name } ... on Droid { id name } } "#; let (res, errors) = juniper::execute( query, None, &Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()), &Variables::new(), &(), ) .await .unwrap(); assert_eq!(errors.len(), 0); assert_eq!( res, graphql_value!({ "characters": [ {"__typename": "Human", "id": 0, "name": "human-32"}, {"__typename": "Droid", "id": 1, "name": "R2-D2"}, ], }), ); let (res, errors) = juniper::execute_sync( query, None, &Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()), &Variables::new(), &(), ) .unwrap(); assert_eq!(errors.len(), 0); assert_eq!( res, graphql_value!({ "characters": [ {"__typename": "Human", "id": 0, "name": "human-32"}, {"__typename": "Droid", "id": 1, "name": "R2-D2"}, ], }), ); }
{ self.name.clone() }
identifier_body
issue_922.rs
use juniper::*; struct Query; #[juniper::graphql_object] impl Query { fn characters() -> Vec<CharacterValue> { vec![ Into::into(Human { id: 0, name: "human-32".to_owned(), }), Into::into(Droid { id: 1, name: "R2-D2".to_owned(), }), ] } } #[juniper::graphql_interface(for = [Human, Droid])] trait Character { fn id(&self) -> i32; fn name(&self) -> String; } #[derive(juniper::GraphQLObject)] #[graphql(impl = CharacterValue)] struct Human { pub id: i32, pub name: String, } #[juniper::graphql_interface] impl Character for Human { fn id(&self) -> i32 {
} } #[derive(juniper::GraphQLObject)] #[graphql(impl = CharacterValue)] struct Droid { pub id: i32, pub name: String, } #[juniper::graphql_interface] impl Character for Droid { fn id(&self) -> i32 { self.id } fn name(&self) -> String { self.name.clone() } } type Schema = juniper::RootNode<'static, Query, EmptyMutation<()>, EmptySubscription<()>>; #[tokio::test] async fn test_fragment_on_interface() { let query = r#" query Query { characters { ...CharacterFragment } } fragment CharacterFragment on Character { __typename ... on Human { id name } ... on Droid { id name } } "#; let (res, errors) = juniper::execute( query, None, &Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()), &Variables::new(), &(), ) .await .unwrap(); assert_eq!(errors.len(), 0); assert_eq!( res, graphql_value!({ "characters": [ {"__typename": "Human", "id": 0, "name": "human-32"}, {"__typename": "Droid", "id": 1, "name": "R2-D2"}, ], }), ); let (res, errors) = juniper::execute_sync( query, None, &Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()), &Variables::new(), &(), ) .unwrap(); assert_eq!(errors.len(), 0); assert_eq!( res, graphql_value!({ "characters": [ {"__typename": "Human", "id": 0, "name": "human-32"}, {"__typename": "Droid", "id": 1, "name": "R2-D2"}, ], }), ); }
self.id } fn name(&self) -> String { self.name.clone()
random_line_split
issue_922.rs
use juniper::*; struct Query; #[juniper::graphql_object] impl Query { fn characters() -> Vec<CharacterValue> { vec![ Into::into(Human { id: 0, name: "human-32".to_owned(), }), Into::into(Droid { id: 1, name: "R2-D2".to_owned(), }), ] } } #[juniper::graphql_interface(for = [Human, Droid])] trait Character { fn id(&self) -> i32; fn name(&self) -> String; } #[derive(juniper::GraphQLObject)] #[graphql(impl = CharacterValue)] struct Human { pub id: i32, pub name: String, } #[juniper::graphql_interface] impl Character for Human { fn id(&self) -> i32 { self.id } fn name(&self) -> String { self.name.clone() } } #[derive(juniper::GraphQLObject)] #[graphql(impl = CharacterValue)] struct
{ pub id: i32, pub name: String, } #[juniper::graphql_interface] impl Character for Droid { fn id(&self) -> i32 { self.id } fn name(&self) -> String { self.name.clone() } } type Schema = juniper::RootNode<'static, Query, EmptyMutation<()>, EmptySubscription<()>>; #[tokio::test] async fn test_fragment_on_interface() { let query = r#" query Query { characters { ...CharacterFragment } } fragment CharacterFragment on Character { __typename ... on Human { id name } ... on Droid { id name } } "#; let (res, errors) = juniper::execute( query, None, &Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()), &Variables::new(), &(), ) .await .unwrap(); assert_eq!(errors.len(), 0); assert_eq!( res, graphql_value!({ "characters": [ {"__typename": "Human", "id": 0, "name": "human-32"}, {"__typename": "Droid", "id": 1, "name": "R2-D2"}, ], }), ); let (res, errors) = juniper::execute_sync( query, None, &Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()), &Variables::new(), &(), ) .unwrap(); assert_eq!(errors.len(), 0); assert_eq!( res, graphql_value!({ "characters": [ {"__typename": "Human", "id": 0, "name": "human-32"}, {"__typename": "Droid", "id": 1, "name": "R2-D2"}, ], }), ); }
Droid
identifier_name
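The issue_922.rs rows exercise juniper's graphql_interface support, where `#[graphql(impl = CharacterValue)]` makes each concrete object convertible into the macro-generated CharacterValue enum. A small sketch of that conversion on its own, reusing the Human and Droid types defined above (the ids and names here are arbitrary):

// Assumes the Character/Human/Droid definitions from issue_922.rs are in scope.
fn sample_characters() -> Vec<CharacterValue> {
    // The attribute gives each object an Into<CharacterValue> impl, which is what
    // Query::characters relies on when it builds its result vector.
    vec![
        Human { id: 42, name: "Luke".to_owned() }.into(),
        Droid { id: 43, name: "C-3PO".to_owned() }.into(),
    ]
}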
mouse.rs
//! //! A module for describing Mouse state. //! //! The `Ui` will continuously maintain the latest Mouse state, necessary for widget logic. //! use position::Point; /// Represents the current state of a mouse button. #[derive(Clone, Copy, Debug)] pub enum ButtonState { /// The mouse is currently up. Up, /// The mouse is currently down (pressed). Down, } /// Represents the current state of the Mouse. #[derive(Copy, Clone, Debug)] pub struct Mouse { /// Position of the mouse cursor. pub xy: Point, /// Left mouse button state. pub left: ButtonState, /// Middle mouse button state. pub middle: ButtonState, /// Right mouse button state. pub right: ButtonState, /// Unknown button state. pub unknown: ButtonState, } impl Mouse { /// Constructor for a Mouse struct. pub fn
(xy: Point, left: ButtonState, middle: ButtonState, right: ButtonState) -> Mouse { Mouse { xy: xy, left: left, middle: middle, right: right, unknown: ButtonState::Up } } /// Return the mouse state with its position relative to the given position. pub fn relative_to(self, xy: Point) -> Mouse { Mouse { xy: ::vecmath::vec2_sub(self.xy, xy), ..self } } }
new
identifier_name
mouse.rs
//! //! A module for describing Mouse state. //! //! The `Ui` will continuously maintain the latest Mouse state, necessary for widget logic. //! use position::Point; /// Represents the current state of a mouse button. #[derive(Clone, Copy, Debug)] pub enum ButtonState { /// The mouse is currently up. Up, /// The mouse is currently down (pressed). Down, } /// Represents the current state of the Mouse. #[derive(Copy, Clone, Debug)]
/// Left mouse button state. pub left: ButtonState, /// Middle mouse button state. pub middle: ButtonState, /// Right mouse button state. pub right: ButtonState, /// Unknown button state. pub unknown: ButtonState, } impl Mouse { /// Constructor for a Mouse struct. pub fn new(xy: Point, left: ButtonState, middle: ButtonState, right: ButtonState) -> Mouse { Mouse { xy: xy, left: left, middle: middle, right: right, unknown: ButtonState::Up } } /// Return the mouse state with its position relative to the given position. pub fn relative_to(self, xy: Point) -> Mouse { Mouse { xy: ::vecmath::vec2_sub(self.xy, xy), ..self } } }
pub struct Mouse { /// Position of the mouse cursor. pub xy: Point,
random_line_split
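The mouse.rs rows define a plain data type from conrod-style UI code. A short sketch of constructing a Mouse and re-basing its position, under the assumption that Point is the two-element f64 vector used by vecmath (the coordinates are arbitrary):

// Assumes the Mouse and ButtonState definitions above, with Point = [f64; 2].
fn example() {
    let mouse = Mouse::new([120.0, 80.0], ButtonState::Down, ButtonState::Up, ButtonState::Up);

    // Express the cursor position relative to a widget whose origin is at (100.0, 50.0).
    let local = mouse.relative_to([100.0, 50.0]);
    assert_eq!(local.xy, [20.0, 30.0]);
}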
test_resource.rs
use rustler::{Env, ResourceArc}; use std::sync::RwLock; pub struct TestResource { test_field: RwLock<i32>, } /// This one is designed to look more like pointer data, to increase the /// chance of segfaults if the implementation is wrong. pub struct ImmutableResource { a: u32, b: u32, } pub fn
(env: Env) -> bool { rustler::resource!(TestResource, env); rustler::resource!(ImmutableResource, env); true } #[rustler::nif] pub fn resource_make() -> ResourceArc<TestResource> { ResourceArc::new(TestResource { test_field: RwLock::new(0), }) } #[rustler::nif] pub fn resource_set_integer_field(resource: ResourceArc<TestResource>, n: i32) -> &'static str { let mut test_field = resource.test_field.write().unwrap(); *test_field = n; "ok" } #[rustler::nif] pub fn resource_get_integer_field(resource: ResourceArc<TestResource>) -> i32 { *resource.test_field.read().unwrap() } use std::sync::atomic::{AtomicUsize, Ordering}; lazy_static::lazy_static! { static ref COUNT: AtomicUsize = AtomicUsize::new(0); } impl ImmutableResource { fn new(u: u32) -> ImmutableResource { COUNT.fetch_add(1, Ordering::SeqCst); ImmutableResource { a: u, b:!u } } } impl Drop for ImmutableResource { fn drop(&mut self) { assert_eq!(self.a,!self.b); self.b = self.a; COUNT.fetch_sub(1, Ordering::SeqCst); } } #[rustler::nif] pub fn resource_make_immutable(u: u32) -> ResourceArc<ImmutableResource> { ResourceArc::new(ImmutableResource::new(u)) } // Count how many instances of `ImmutableResource` are currently alive globally. #[rustler::nif] pub fn resource_immutable_count() -> u32 { COUNT.load(Ordering::SeqCst) as u32 }
on_load
identifier_name
test_resource.rs
use rustler::{Env, ResourceArc}; use std::sync::RwLock; pub struct TestResource { test_field: RwLock<i32>, } /// This one is designed to look more like pointer data, to increase the /// chance of segfaults if the implementation is wrong. pub struct ImmutableResource { a: u32, b: u32, } pub fn on_load(env: Env) -> bool { rustler::resource!(TestResource, env); rustler::resource!(ImmutableResource, env); true } #[rustler::nif] pub fn resource_make() -> ResourceArc<TestResource> { ResourceArc::new(TestResource { test_field: RwLock::new(0), }) } #[rustler::nif] pub fn resource_set_integer_field(resource: ResourceArc<TestResource>, n: i32) -> &'static str { let mut test_field = resource.test_field.write().unwrap(); *test_field = n; "ok" } #[rustler::nif] pub fn resource_get_integer_field(resource: ResourceArc<TestResource>) -> i32 { *resource.test_field.read().unwrap() } use std::sync::atomic::{AtomicUsize, Ordering}; lazy_static::lazy_static! { static ref COUNT: AtomicUsize = AtomicUsize::new(0); } impl ImmutableResource { fn new(u: u32) -> ImmutableResource { COUNT.fetch_add(1, Ordering::SeqCst); ImmutableResource { a: u, b:!u } } } impl Drop for ImmutableResource { fn drop(&mut self) { assert_eq!(self.a,!self.b); self.b = self.a; COUNT.fetch_sub(1, Ordering::SeqCst); } } #[rustler::nif] pub fn resource_make_immutable(u: u32) -> ResourceArc<ImmutableResource>
// Count how many instances of `ImmutableResource` are currently alive globally. #[rustler::nif] pub fn resource_immutable_count() -> u32 { COUNT.load(Ordering::SeqCst) as u32 }
{ ResourceArc::new(ImmutableResource::new(u)) }
identifier_body
test_resource.rs
use rustler::{Env, ResourceArc}; use std::sync::RwLock; pub struct TestResource { test_field: RwLock<i32>, } /// This one is designed to look more like pointer data, to increase the /// chance of segfaults if the implementation is wrong. pub struct ImmutableResource { a: u32, b: u32, } pub fn on_load(env: Env) -> bool { rustler::resource!(TestResource, env); rustler::resource!(ImmutableResource, env); true } #[rustler::nif] pub fn resource_make() -> ResourceArc<TestResource> { ResourceArc::new(TestResource { test_field: RwLock::new(0), }) } #[rustler::nif] pub fn resource_set_integer_field(resource: ResourceArc<TestResource>, n: i32) -> &'static str { let mut test_field = resource.test_field.write().unwrap();
"ok" } #[rustler::nif] pub fn resource_get_integer_field(resource: ResourceArc<TestResource>) -> i32 { *resource.test_field.read().unwrap() } use std::sync::atomic::{AtomicUsize, Ordering}; lazy_static::lazy_static! { static ref COUNT: AtomicUsize = AtomicUsize::new(0); } impl ImmutableResource { fn new(u: u32) -> ImmutableResource { COUNT.fetch_add(1, Ordering::SeqCst); ImmutableResource { a: u, b:!u } } } impl Drop for ImmutableResource { fn drop(&mut self) { assert_eq!(self.a,!self.b); self.b = self.a; COUNT.fetch_sub(1, Ordering::SeqCst); } } #[rustler::nif] pub fn resource_make_immutable(u: u32) -> ResourceArc<ImmutableResource> { ResourceArc::new(ImmutableResource::new(u)) } // Count how many instances of `ImmutableResource` are currently alive globally. #[rustler::nif] pub fn resource_immutable_count() -> u32 { COUNT.load(Ordering::SeqCst) as u32 }
*test_field = n;
random_line_split
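The test_resource.rs rows define the NIFs and an on_load hook but not the registration itself. A hedged sketch of how a crate root might wire them up, assuming the rustler::init! form that pairs with the #[rustler::nif] attribute used above; the Elixir module name is hypothetical:

// Hypothetical lib.rs for the NIF library; module layout and names are assumptions.
use rustler::{Env, Term};

mod test_resource;
use test_resource::*;

// rustler's load callback receives the environment plus the load argument from Elixir.
fn load(env: Env, _info: Term) -> bool {
    test_resource::on_load(env)
}

rustler::init!(
    "Elixir.RustlerTest", // assumed Elixir module name
    [
        resource_make,
        resource_set_integer_field,
        resource_get_integer_field,
        resource_make_immutable,
        resource_immutable_count,
    ],
    load = load
);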
pin.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Lionel Flandrin <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(missing_docs)] //! Pin configuration //! Allows GPIO configuration //! Pin muxing not implemented yet. use hal::pin::{Gpio, GpioDirection, In, Out, GpioLevel, High, Low}; use hal::tiva_c::sysctl; use util::support::get_reg_ref; /// The pins are accessed through ports. Each port has 8 pins and are identified /// by a letter (PortA, PortB, etc...). #[allow(missing_docs)] #[derive(Clone, Copy)] pub enum PortId { PortA, PortB, PortC, PortD, PortE, PortF, } /// Structure describing a single HW pin #[derive(Clone, Copy)] pub struct Pin { /// Timer register interface regs: &'static reg::Port, /// Pin index in the port index: usize, } impl Pin { /// Create and configure a Pin pub fn new(pid: PortId, pin_index: u8, dir: GpioDirection, function: u8) -> Pin {
PortId::PortC => (sysctl::periph::gpio::PORT_C, reg::PORT_C), PortId::PortD => (sysctl::periph::gpio::PORT_D, reg::PORT_D), PortId::PortE => (sysctl::periph::gpio::PORT_E, reg::PORT_E), PortId::PortF => (sysctl::periph::gpio::PORT_F, reg::PORT_F), }; periph.ensure_enabled(); let pin = Pin { regs: get_reg_ref(regs), index: pin_index as usize }; pin.configure(dir, function); pin } /// Configure GPIO pin fn configure(&self, dir: GpioDirection, function: u8) { // Disable the GPIO during reconfig self.regs.den.set_den(self.index, false); self.set_direction(dir); // Configure the "alternate function". AFSEL 0 means GPIO, 1 means the port // is driven by another peripheral. When AFSEL is 1 the actual function // config goes into the CTL register. match function { 0 => { self.regs.afsel.set_afsel(self.index, reg::Port_afsel_afsel::GPIO); }, f => { self.regs.afsel.set_afsel(self.index, reg::Port_afsel_afsel::PERIPHERAL); self.regs.pctl.set_pctl(self.index, f as u32); } } // We can chose to drive each GPIO at either 2, 4 or 8mA. Default to 2mA for // now. // TODO(simias): make that configurable self.regs.dr2r.set_dr2r(self.index, true); self.regs.dr4r.set_dr4r(self.index, false); self.regs.dr8r.set_dr8r(self.index, false); // TODO(simias): configure open drain/pull up/pull down/slew rate if necessary self.regs.odr.set_odr(self.index, false); self.regs.pur.set_pur(self.index, false); self.regs.pdr.set_pdr(self.index, false); // Enable GPIO self.regs.den.set_den(self.index, true); } fn set_level(&self, level: bool) { self.regs.data.set_data(self.index, level); } } impl Gpio for Pin { /// Sets output GPIO value to high. fn set_high(&self) { self.set_level(true); } /// Sets output GPIO value to low. fn set_low(&self) { self.set_level(false); } /// Returns input GPIO level. fn level(&self) -> GpioLevel { match self.regs.data.data(self.index) { true => High, false => Low, } } /// Sets output GPIO direction. fn set_direction(&self, dir: GpioDirection) { self.regs.dir.set_dir(self.index, match dir { In => reg::Port_dir_dir::INPUT, Out => reg::Port_dir_dir::OUTPUT, }); } } pub mod reg { //! Pin registers definition use volatile_cell::VolatileCell; use core::ops::Drop; ioregs!(Port = { 0x3FC => reg32 data { //! Pin value 0..7 => data[8] } 0x400 => reg32 dir { //! Pin direction 0..7 => dir[8] { 0 => INPUT, 1 => OUTPUT, } } 0x420 => reg32 afsel { //! Pin alternate function 0..7 => afsel[8] { 0 => GPIO, 1 => PERIPHERAL, } } 0x500 => reg32 dr2r { //! Select 2mA drive strength 0..7 => dr2r[8] } 0x504 => reg32 dr4r { //! Select 4mA drive strength 0..7 => dr4r[8] } 0x508 => reg32 dr8r { //! Select 8mA drive strength 0..7 => dr8r[8] } 0x50C => reg32 odr { //! Configure pin as open drain 0..7 => odr[8] } 0x510 => reg32 pur { //! Enable pin pull-up 0..7 => pur[8] } 0x514 => reg32 pdr { //! Enable pin pull-down 0..7 => pdr[8] } 0x518 => reg32 slr { //! Slew rate control enable (only available for 8mA drive strength) 0..7 => slr[8] } 0x51C => reg32 den { //! Enable pin 0..7 => den[8] } 0x52C => reg32 pctl { //! Pin function selection when afsel is set for the pin. 0..31 => pctl[8] } }); pub const PORT_A: *const Port = 0x40004000 as *const Port; pub const PORT_B: *const Port = 0x40005000 as *const Port; pub const PORT_C: *const Port = 0x40006000 as *const Port; pub const PORT_D: *const Port = 0x40007000 as *const Port; pub const PORT_E: *const Port = 0x40024000 as *const Port; pub const PORT_F: *const Port = 0x40025000 as *const Port; }
// Retrieve GPIO port peripheral to enable it let (periph, regs) = match pid { PortId::PortA => (sysctl::periph::gpio::PORT_A, reg::PORT_A), PortId::PortB => (sysctl::periph::gpio::PORT_B, reg::PORT_B),
random_line_split
pin.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Lionel Flandrin <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(missing_docs)] //! Pin configuration //! Allows GPIO configuration //! Pin muxing not implemented yet. use hal::pin::{Gpio, GpioDirection, In, Out, GpioLevel, High, Low}; use hal::tiva_c::sysctl; use util::support::get_reg_ref; /// The pins are accessed through ports. Each port has 8 pins and are identified /// by a letter (PortA, PortB, etc...). #[allow(missing_docs)] #[derive(Clone, Copy)] pub enum PortId { PortA, PortB, PortC, PortD, PortE, PortF, } /// Structure describing a single HW pin #[derive(Clone, Copy)] pub struct Pin { /// Timer register interface regs: &'static reg::Port, /// Pin index in the port index: usize, } impl Pin { /// Create and configure a Pin pub fn new(pid: PortId, pin_index: u8, dir: GpioDirection, function: u8) -> Pin { // Retrieve GPIO port peripheral to enable it let (periph, regs) = match pid { PortId::PortA => (sysctl::periph::gpio::PORT_A, reg::PORT_A), PortId::PortB => (sysctl::periph::gpio::PORT_B, reg::PORT_B), PortId::PortC => (sysctl::periph::gpio::PORT_C, reg::PORT_C), PortId::PortD => (sysctl::periph::gpio::PORT_D, reg::PORT_D), PortId::PortE => (sysctl::periph::gpio::PORT_E, reg::PORT_E), PortId::PortF => (sysctl::periph::gpio::PORT_F, reg::PORT_F), }; periph.ensure_enabled(); let pin = Pin { regs: get_reg_ref(regs), index: pin_index as usize }; pin.configure(dir, function); pin } /// Configure GPIO pin fn configure(&self, dir: GpioDirection, function: u8) { // Disable the GPIO during reconfig self.regs.den.set_den(self.index, false); self.set_direction(dir); // Configure the "alternate function". AFSEL 0 means GPIO, 1 means the port // is driven by another peripheral. When AFSEL is 1 the actual function // config goes into the CTL register. match function { 0 => { self.regs.afsel.set_afsel(self.index, reg::Port_afsel_afsel::GPIO); }, f => { self.regs.afsel.set_afsel(self.index, reg::Port_afsel_afsel::PERIPHERAL); self.regs.pctl.set_pctl(self.index, f as u32); } } // We can chose to drive each GPIO at either 2, 4 or 8mA. Default to 2mA for // now. // TODO(simias): make that configurable self.regs.dr2r.set_dr2r(self.index, true); self.regs.dr4r.set_dr4r(self.index, false); self.regs.dr8r.set_dr8r(self.index, false); // TODO(simias): configure open drain/pull up/pull down/slew rate if necessary self.regs.odr.set_odr(self.index, false); self.regs.pur.set_pur(self.index, false); self.regs.pdr.set_pdr(self.index, false); // Enable GPIO self.regs.den.set_den(self.index, true); } fn set_level(&self, level: bool) { self.regs.data.set_data(self.index, level); } } impl Gpio for Pin { /// Sets output GPIO value to high. fn set_high(&self) { self.set_level(true); } /// Sets output GPIO value to low. fn set_low(&self) { self.set_level(false); } /// Returns input GPIO level. fn
(&self) -> GpioLevel { match self.regs.data.data(self.index) { true => High, false => Low, } } /// Sets output GPIO direction. fn set_direction(&self, dir: GpioDirection) { self.regs.dir.set_dir(self.index, match dir { In => reg::Port_dir_dir::INPUT, Out => reg::Port_dir_dir::OUTPUT, }); } } pub mod reg { //! Pin registers definition use volatile_cell::VolatileCell; use core::ops::Drop; ioregs!(Port = { 0x3FC => reg32 data { //! Pin value 0..7 => data[8] } 0x400 => reg32 dir { //! Pin direction 0..7 => dir[8] { 0 => INPUT, 1 => OUTPUT, } } 0x420 => reg32 afsel { //! Pin alternate function 0..7 => afsel[8] { 0 => GPIO, 1 => PERIPHERAL, } } 0x500 => reg32 dr2r { //! Select 2mA drive strength 0..7 => dr2r[8] } 0x504 => reg32 dr4r { //! Select 4mA drive strength 0..7 => dr4r[8] } 0x508 => reg32 dr8r { //! Select 8mA drive strength 0..7 => dr8r[8] } 0x50C => reg32 odr { //! Configure pin as open drain 0..7 => odr[8] } 0x510 => reg32 pur { //! Enable pin pull-up 0..7 => pur[8] } 0x514 => reg32 pdr { //! Enable pin pull-down 0..7 => pdr[8] } 0x518 => reg32 slr { //! Slew rate control enable (only available for 8mA drive strength) 0..7 => slr[8] } 0x51C => reg32 den { //! Enable pin 0..7 => den[8] } 0x52C => reg32 pctl { //! Pin function selection when afsel is set for the pin. 0..31 => pctl[8] } }); pub const PORT_A: *const Port = 0x40004000 as *const Port; pub const PORT_B: *const Port = 0x40005000 as *const Port; pub const PORT_C: *const Port = 0x40006000 as *const Port; pub const PORT_D: *const Port = 0x40007000 as *const Port; pub const PORT_E: *const Port = 0x40024000 as *const Port; pub const PORT_F: *const Port = 0x40025000 as *const Port; }
level
identifier_name
pin.rs
// Zinc, the bare metal stack for rust. // Copyright 2014 Lionel Flandrin <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #![allow(missing_docs)] //! Pin configuration //! Allows GPIO configuration //! Pin muxing not implemented yet. use hal::pin::{Gpio, GpioDirection, In, Out, GpioLevel, High, Low}; use hal::tiva_c::sysctl; use util::support::get_reg_ref; /// The pins are accessed through ports. Each port has 8 pins and are identified /// by a letter (PortA, PortB, etc...). #[allow(missing_docs)] #[derive(Clone, Copy)] pub enum PortId { PortA, PortB, PortC, PortD, PortE, PortF, } /// Structure describing a single HW pin #[derive(Clone, Copy)] pub struct Pin { /// Timer register interface regs: &'static reg::Port, /// Pin index in the port index: usize, } impl Pin { /// Create and configure a Pin pub fn new(pid: PortId, pin_index: u8, dir: GpioDirection, function: u8) -> Pin { // Retrieve GPIO port peripheral to enable it let (periph, regs) = match pid { PortId::PortA => (sysctl::periph::gpio::PORT_A, reg::PORT_A), PortId::PortB => (sysctl::periph::gpio::PORT_B, reg::PORT_B), PortId::PortC => (sysctl::periph::gpio::PORT_C, reg::PORT_C), PortId::PortD => (sysctl::periph::gpio::PORT_D, reg::PORT_D), PortId::PortE => (sysctl::periph::gpio::PORT_E, reg::PORT_E), PortId::PortF => (sysctl::periph::gpio::PORT_F, reg::PORT_F), }; periph.ensure_enabled(); let pin = Pin { regs: get_reg_ref(regs), index: pin_index as usize }; pin.configure(dir, function); pin } /// Configure GPIO pin fn configure(&self, dir: GpioDirection, function: u8) { // Disable the GPIO during reconfig self.regs.den.set_den(self.index, false); self.set_direction(dir); // Configure the "alternate function". AFSEL 0 means GPIO, 1 means the port // is driven by another peripheral. When AFSEL is 1 the actual function // config goes into the CTL register. match function { 0 => { self.regs.afsel.set_afsel(self.index, reg::Port_afsel_afsel::GPIO); }, f => { self.regs.afsel.set_afsel(self.index, reg::Port_afsel_afsel::PERIPHERAL); self.regs.pctl.set_pctl(self.index, f as u32); } } // We can chose to drive each GPIO at either 2, 4 or 8mA. Default to 2mA for // now. // TODO(simias): make that configurable self.regs.dr2r.set_dr2r(self.index, true); self.regs.dr4r.set_dr4r(self.index, false); self.regs.dr8r.set_dr8r(self.index, false); // TODO(simias): configure open drain/pull up/pull down/slew rate if necessary self.regs.odr.set_odr(self.index, false); self.regs.pur.set_pur(self.index, false); self.regs.pdr.set_pdr(self.index, false); // Enable GPIO self.regs.den.set_den(self.index, true); } fn set_level(&self, level: bool) { self.regs.data.set_data(self.index, level); } } impl Gpio for Pin { /// Sets output GPIO value to high. fn set_high(&self) { self.set_level(true); } /// Sets output GPIO value to low. fn set_low(&self)
/// Returns input GPIO level. fn level(&self) -> GpioLevel { match self.regs.data.data(self.index) { true => High, false => Low, } } /// Sets output GPIO direction. fn set_direction(&self, dir: GpioDirection) { self.regs.dir.set_dir(self.index, match dir { In => reg::Port_dir_dir::INPUT, Out => reg::Port_dir_dir::OUTPUT, }); } } pub mod reg { //! Pin registers definition use volatile_cell::VolatileCell; use core::ops::Drop; ioregs!(Port = { 0x3FC => reg32 data { //! Pin value 0..7 => data[8] } 0x400 => reg32 dir { //! Pin direction 0..7 => dir[8] { 0 => INPUT, 1 => OUTPUT, } } 0x420 => reg32 afsel { //! Pin alternate function 0..7 => afsel[8] { 0 => GPIO, 1 => PERIPHERAL, } } 0x500 => reg32 dr2r { //! Select 2mA drive strength 0..7 => dr2r[8] } 0x504 => reg32 dr4r { //! Select 4mA drive strength 0..7 => dr4r[8] } 0x508 => reg32 dr8r { //! Select 8mA drive strength 0..7 => dr8r[8] } 0x50C => reg32 odr { //! Configure pin as open drain 0..7 => odr[8] } 0x510 => reg32 pur { //! Enable pin pull-up 0..7 => pur[8] } 0x514 => reg32 pdr { //! Enable pin pull-down 0..7 => pdr[8] } 0x518 => reg32 slr { //! Slew rate control enable (only available for 8mA drive strength) 0..7 => slr[8] } 0x51C => reg32 den { //! Enable pin 0..7 => den[8] } 0x52C => reg32 pctl { //! Pin function selection when afsel is set for the pin. 0..31 => pctl[8] } }); pub const PORT_A: *const Port = 0x40004000 as *const Port; pub const PORT_B: *const Port = 0x40005000 as *const Port; pub const PORT_C: *const Port = 0x40006000 as *const Port; pub const PORT_D: *const Port = 0x40007000 as *const Port; pub const PORT_E: *const Port = 0x40024000 as *const Port; pub const PORT_F: *const Port = 0x40025000 as *const Port; }
{ self.set_level(false); }
identifier_body
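The pin.rs rows wrap the Tiva C GPIO registers behind Pin::new plus the hal Gpio trait. A minimal sketch of driving a single pin as an output; the use paths mirror the file's own imports and are assumptions, and the choice of PortF pin 1 is arbitrary:

// Paths are guesses based on the imports in pin.rs (zinc's hal tree).
use hal::pin::{Gpio, Out};
use hal::tiva_c::pin::{Pin, PortId};

pub fn drive_led() {
    // Function 0 keeps AFSEL clear, so the pin is a plain GPIO output at the default 2mA drive.
    let led = Pin::new(PortId::PortF, 1, Out, 0);
    led.set_high(); // drive the pin high...
    led.set_low();  // ...and back low
}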
nodelist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::NodeListBinding; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::node::{AbstractNode, ScriptView}; use dom::window::Window; use js::jsapi::{JSObject, JSContext}; enum NodeListType { Simple(~[AbstractNode<ScriptView>]), Children(AbstractNode<ScriptView>) } pub struct NodeList { list_type: NodeListType, reflector_: Reflector, window: @mut Window, } impl NodeList { pub fn new_inherited(window: @mut Window, list_type: NodeListType) -> NodeList { NodeList { list_type: list_type, reflector_: Reflector::new(), window: window, } } pub fn new(window: @mut Window, list_type: NodeListType) -> @mut NodeList { reflect_dom_object(@mut NodeList::new_inherited(window, list_type), window, NodeListBinding::Wrap) } pub fn new_simple_list(window: @mut Window, elements: ~[AbstractNode<ScriptView>]) -> @mut NodeList { NodeList::new(window, Simple(elements)) } pub fn new_child_list(window: @mut Window, node: AbstractNode<ScriptView>) -> @mut NodeList
pub fn Length(&self) -> u32 { match self.list_type { Simple(ref elems) => elems.len() as u32, Children(ref node) => node.children().len() as u32 } } pub fn Item(&self, index: u32) -> Option<AbstractNode<ScriptView>> { match self.list_type { _ if index >= self.Length() => None, Simple(ref elems) => Some(elems[index]), Children(ref node) => node.children().nth(index as uint) } } pub fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<AbstractNode<ScriptView>> { let item = self.Item(index); *found = item.is_some(); item } } impl Reflectable for NodeList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector { &mut self.reflector_ } fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject { unreachable!(); } fn GetParentObject(&self, _cx: *JSContext) -> Option<@mut Reflectable> { Some(self.window as @mut Reflectable) } }
{ NodeList::new(window, Children(node)) }
identifier_body
nodelist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::NodeListBinding; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::node::{AbstractNode, ScriptView}; use dom::window::Window; use js::jsapi::{JSObject, JSContext}; enum NodeListType { Simple(~[AbstractNode<ScriptView>]), Children(AbstractNode<ScriptView>) } pub struct NodeList { list_type: NodeListType, reflector_: Reflector, window: @mut Window, } impl NodeList { pub fn new_inherited(window: @mut Window, list_type: NodeListType) -> NodeList { NodeList { list_type: list_type, reflector_: Reflector::new(), window: window, } } pub fn new(window: @mut Window, list_type: NodeListType) -> @mut NodeList { reflect_dom_object(@mut NodeList::new_inherited(window, list_type), window, NodeListBinding::Wrap) } pub fn
(window: @mut Window, elements: ~[AbstractNode<ScriptView>]) -> @mut NodeList { NodeList::new(window, Simple(elements)) } pub fn new_child_list(window: @mut Window, node: AbstractNode<ScriptView>) -> @mut NodeList { NodeList::new(window, Children(node)) } pub fn Length(&self) -> u32 { match self.list_type { Simple(ref elems) => elems.len() as u32, Children(ref node) => node.children().len() as u32 } } pub fn Item(&self, index: u32) -> Option<AbstractNode<ScriptView>> { match self.list_type { _ if index >= self.Length() => None, Simple(ref elems) => Some(elems[index]), Children(ref node) => node.children().nth(index as uint) } } pub fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<AbstractNode<ScriptView>> { let item = self.Item(index); *found = item.is_some(); item } } impl Reflectable for NodeList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector { &mut self.reflector_ } fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject { unreachable!(); } fn GetParentObject(&self, _cx: *JSContext) -> Option<@mut Reflectable> { Some(self.window as @mut Reflectable) } }
new_simple_list
identifier_name
nodelist.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::NodeListBinding; use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object}; use dom::node::{AbstractNode, ScriptView}; use dom::window::Window; use js::jsapi::{JSObject, JSContext}; enum NodeListType { Simple(~[AbstractNode<ScriptView>]), Children(AbstractNode<ScriptView>) } pub struct NodeList { list_type: NodeListType, reflector_: Reflector, window: @mut Window, } impl NodeList { pub fn new_inherited(window: @mut Window, list_type: NodeListType) -> NodeList { NodeList { list_type: list_type, reflector_: Reflector::new(), window: window, } } pub fn new(window: @mut Window, list_type: NodeListType) -> @mut NodeList { reflect_dom_object(@mut NodeList::new_inherited(window, list_type), window, NodeListBinding::Wrap) } pub fn new_simple_list(window: @mut Window, elements: ~[AbstractNode<ScriptView>]) -> @mut NodeList { NodeList::new(window, Simple(elements)) } pub fn new_child_list(window: @mut Window, node: AbstractNode<ScriptView>) -> @mut NodeList { NodeList::new(window, Children(node)) } pub fn Length(&self) -> u32 { match self.list_type { Simple(ref elems) => elems.len() as u32, Children(ref node) => node.children().len() as u32 } } pub fn Item(&self, index: u32) -> Option<AbstractNode<ScriptView>> { match self.list_type { _ if index >= self.Length() => None, Simple(ref elems) => Some(elems[index]), Children(ref node) => node.children().nth(index as uint) } } pub fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<AbstractNode<ScriptView>> { let item = self.Item(index); *found = item.is_some(); item } } impl Reflectable for NodeList { fn reflector<'a>(&'a self) -> &'a Reflector { &self.reflector_ } fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector { &mut self.reflector_ } fn wrap_object_shared(@mut self, _cx: *JSContext, _scope: *JSObject) -> *JSObject { unreachable!(); }
fn GetParentObject(&self, _cx: *JSContext) -> Option<@mut Reflectable> { Some(self.window as @mut Reflectable) } }
random_line_split
htmllinkelement.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::attr::{Attr, AttrValue}; use dom::attr::AttrHelpers; use dom::bindings::codegen::Bindings::HTMLLinkElementBinding; use dom::bindings::codegen::Bindings::HTMLLinkElementBinding::HTMLLinkElementMethods; use dom::bindings::codegen::InheritTypes::HTMLLinkElementDerived; use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast}; use dom::bindings::js::{MutNullableJS, JSRef, Temporary, OptionalRootable}; use dom::bindings::utils::{Reflectable, Reflector}; use dom::document::Document; use dom::domtokenlist::DOMTokenList; use dom::element::{AttributeHandlers, Element, HTMLLinkElementTypeId}; use dom::eventtarget::{EventTarget, NodeTargetTypeId}; use dom::htmlelement::HTMLElement; use dom::node::{Node, NodeHelpers, ElementNodeTypeId, window_from_node}; use dom::virtualmethods::VirtualMethods; use layout_interface::{LayoutChan, LoadStylesheetMsg}; use servo_util::str::{DOMString, HTML_SPACE_CHARACTERS}; use std::ascii::AsciiExt; use std::default::Default; use url::UrlParser; use string_cache::Atom; #[dom_struct] pub struct HTMLLinkElement { htmlelement: HTMLElement, rel_list: MutNullableJS<DOMTokenList>, } impl HTMLLinkElementDerived for EventTarget { fn is_htmllinkelement(&self) -> bool { *self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLLinkElementTypeId)) } } impl HTMLLinkElement { fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLLinkElement { HTMLLinkElement { htmlelement: HTMLElement::new_inherited(HTMLLinkElementTypeId, localName, prefix, document), rel_list: Default::default(), } } #[allow(unrooted_must_root)] pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLLinkElement> { let element = HTMLLinkElement::new_inherited(localName, prefix, document); Node::reflect_node(box element, document, HTMLLinkElementBinding::Wrap) } } fn
(element: JSRef<Element>, name: &Atom) -> Option<String> { let elem = element.get_attribute(ns!(""), name).root(); elem.map(|e| e.value().as_slice().to_string()) } fn is_stylesheet(value: &Option<String>) -> bool { match *value { Some(ref value) => { value.as_slice().split(HTML_SPACE_CHARACTERS.as_slice()) .any(|s| s.as_slice().eq_ignore_ascii_case("stylesheet")) }, None => false, } } impl<'a> VirtualMethods for JSRef<'a, HTMLLinkElement> { fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> { let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self); Some(htmlelement as &VirtualMethods) } fn after_set_attr(&self, attr: JSRef<Attr>) { match self.super_type() { Some(ref s) => s.after_set_attr(attr), _ => () } let element: JSRef<Element> = ElementCast::from_ref(*self); let rel = get_attr(element, &atom!("rel")); match (rel, attr.local_name()) { (ref rel, &atom!("href")) => { if is_stylesheet(rel) { self.handle_stylesheet_url(attr.value().as_slice()); } } (_, _) => () } } fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue { match name { &atom!("rel") => AttrValue::from_tokenlist(value), _ => self.super_type().unwrap().parse_plain_attribute(name, value), } } fn bind_to_tree(&self, tree_in_doc: bool) { match self.super_type() { Some(ref s) => s.bind_to_tree(tree_in_doc), _ => () } if tree_in_doc { let element: JSRef<Element> = ElementCast::from_ref(*self); let rel = get_attr(element, &atom!("rel")); let href = get_attr(element, &atom!("href")); match (rel, href) { (ref rel, Some(ref href)) if is_stylesheet(rel) => { self.handle_stylesheet_url(href.as_slice()); } _ => {} } } } } trait PrivateHTMLLinkElementHelpers { fn handle_stylesheet_url(self, href: &str); } impl<'a> PrivateHTMLLinkElementHelpers for JSRef<'a, HTMLLinkElement> { fn handle_stylesheet_url(self, href: &str) { let window = window_from_node(self).root(); match UrlParser::new().base_url(&window.page().get_url()).parse(href) { Ok(url) => { let LayoutChan(ref layout_chan) = window.page().layout_chan; layout_chan.send(LoadStylesheetMsg(url)); } Err(e) => debug!("Parsing url {:s} failed: {}", href, e) } } } impl Reflectable for HTMLLinkElement { fn reflector<'a>(&'a self) -> &'a Reflector { self.htmlelement.reflector() } } impl<'a> HTMLLinkElementMethods for JSRef<'a, HTMLLinkElement> { fn RelList(self) -> Temporary<DOMTokenList> { if self.rel_list.get().is_none() { let element: JSRef<Element> = ElementCast::from_ref(self); let rel_list = DOMTokenList::new(element, &atom!("rel")); self.rel_list.assign(Some(rel_list)); } self.rel_list.get().unwrap() } }
get_attr
identifier_name
htmllinkelement.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::attr::{Attr, AttrValue}; use dom::attr::AttrHelpers; use dom::bindings::codegen::Bindings::HTMLLinkElementBinding; use dom::bindings::codegen::Bindings::HTMLLinkElementBinding::HTMLLinkElementMethods; use dom::bindings::codegen::InheritTypes::HTMLLinkElementDerived; use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast}; use dom::bindings::js::{MutNullableJS, JSRef, Temporary, OptionalRootable}; use dom::bindings::utils::{Reflectable, Reflector}; use dom::document::Document; use dom::domtokenlist::DOMTokenList; use dom::element::{AttributeHandlers, Element, HTMLLinkElementTypeId}; use dom::eventtarget::{EventTarget, NodeTargetTypeId}; use dom::htmlelement::HTMLElement; use dom::node::{Node, NodeHelpers, ElementNodeTypeId, window_from_node}; use dom::virtualmethods::VirtualMethods; use layout_interface::{LayoutChan, LoadStylesheetMsg}; use servo_util::str::{DOMString, HTML_SPACE_CHARACTERS}; use std::ascii::AsciiExt; use std::default::Default; use url::UrlParser; use string_cache::Atom; #[dom_struct] pub struct HTMLLinkElement { htmlelement: HTMLElement, rel_list: MutNullableJS<DOMTokenList>, } impl HTMLLinkElementDerived for EventTarget { fn is_htmllinkelement(&self) -> bool { *self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLLinkElementTypeId)) } } impl HTMLLinkElement { fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLLinkElement { HTMLLinkElement { htmlelement: HTMLElement::new_inherited(HTMLLinkElementTypeId, localName, prefix, document), rel_list: Default::default(), } } #[allow(unrooted_must_root)] pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLLinkElement>
} fn get_attr(element: JSRef<Element>, name: &Atom) -> Option<String> { let elem = element.get_attribute(ns!(""), name).root(); elem.map(|e| e.value().as_slice().to_string()) } fn is_stylesheet(value: &Option<String>) -> bool { match *value { Some(ref value) => { value.as_slice().split(HTML_SPACE_CHARACTERS.as_slice()) .any(|s| s.as_slice().eq_ignore_ascii_case("stylesheet")) }, None => false, } } impl<'a> VirtualMethods for JSRef<'a, HTMLLinkElement> { fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> { let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self); Some(htmlelement as &VirtualMethods) } fn after_set_attr(&self, attr: JSRef<Attr>) { match self.super_type() { Some(ref s) => s.after_set_attr(attr), _ => () } let element: JSRef<Element> = ElementCast::from_ref(*self); let rel = get_attr(element, &atom!("rel")); match (rel, attr.local_name()) { (ref rel, &atom!("href")) => { if is_stylesheet(rel) { self.handle_stylesheet_url(attr.value().as_slice()); } } (_, _) => () } } fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue { match name { &atom!("rel") => AttrValue::from_tokenlist(value), _ => self.super_type().unwrap().parse_plain_attribute(name, value), } } fn bind_to_tree(&self, tree_in_doc: bool) { match self.super_type() { Some(ref s) => s.bind_to_tree(tree_in_doc), _ => () } if tree_in_doc { let element: JSRef<Element> = ElementCast::from_ref(*self); let rel = get_attr(element, &atom!("rel")); let href = get_attr(element, &atom!("href")); match (rel, href) { (ref rel, Some(ref href)) if is_stylesheet(rel) => { self.handle_stylesheet_url(href.as_slice()); } _ => {} } } } } trait PrivateHTMLLinkElementHelpers { fn handle_stylesheet_url(self, href: &str); } impl<'a> PrivateHTMLLinkElementHelpers for JSRef<'a, HTMLLinkElement> { fn handle_stylesheet_url(self, href: &str) { let window = window_from_node(self).root(); match UrlParser::new().base_url(&window.page().get_url()).parse(href) { Ok(url) => { let LayoutChan(ref layout_chan) = window.page().layout_chan; layout_chan.send(LoadStylesheetMsg(url)); } Err(e) => debug!("Parsing url {:s} failed: {}", href, e) } } } impl Reflectable for HTMLLinkElement { fn reflector<'a>(&'a self) -> &'a Reflector { self.htmlelement.reflector() } } impl<'a> HTMLLinkElementMethods for JSRef<'a, HTMLLinkElement> { fn RelList(self) -> Temporary<DOMTokenList> { if self.rel_list.get().is_none() { let element: JSRef<Element> = ElementCast::from_ref(self); let rel_list = DOMTokenList::new(element, &atom!("rel")); self.rel_list.assign(Some(rel_list)); } self.rel_list.get().unwrap() } }
{ let element = HTMLLinkElement::new_inherited(localName, prefix, document); Node::reflect_node(box element, document, HTMLLinkElementBinding::Wrap) }
identifier_body
htmllinkelement.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::attr::{Attr, AttrValue}; use dom::attr::AttrHelpers; use dom::bindings::codegen::Bindings::HTMLLinkElementBinding; use dom::bindings::codegen::Bindings::HTMLLinkElementBinding::HTMLLinkElementMethods; use dom::bindings::codegen::InheritTypes::HTMLLinkElementDerived; use dom::bindings::codegen::InheritTypes::{ElementCast, HTMLElementCast}; use dom::bindings::js::{MutNullableJS, JSRef, Temporary, OptionalRootable}; use dom::bindings::utils::{Reflectable, Reflector}; use dom::document::Document; use dom::domtokenlist::DOMTokenList; use dom::element::{AttributeHandlers, Element, HTMLLinkElementTypeId}; use dom::eventtarget::{EventTarget, NodeTargetTypeId}; use dom::htmlelement::HTMLElement; use dom::node::{Node, NodeHelpers, ElementNodeTypeId, window_from_node}; use dom::virtualmethods::VirtualMethods; use layout_interface::{LayoutChan, LoadStylesheetMsg}; use servo_util::str::{DOMString, HTML_SPACE_CHARACTERS}; use std::ascii::AsciiExt; use std::default::Default; use url::UrlParser; use string_cache::Atom; #[dom_struct] pub struct HTMLLinkElement { htmlelement: HTMLElement, rel_list: MutNullableJS<DOMTokenList>, } impl HTMLLinkElementDerived for EventTarget { fn is_htmllinkelement(&self) -> bool { *self.type_id() == NodeTargetTypeId(ElementNodeTypeId(HTMLLinkElementTypeId)) } } impl HTMLLinkElement { fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> HTMLLinkElement { HTMLLinkElement { htmlelement: HTMLElement::new_inherited(HTMLLinkElementTypeId, localName, prefix, document), rel_list: Default::default(), }
#[allow(unrooted_must_root)] pub fn new(localName: DOMString, prefix: Option<DOMString>, document: JSRef<Document>) -> Temporary<HTMLLinkElement> { let element = HTMLLinkElement::new_inherited(localName, prefix, document); Node::reflect_node(box element, document, HTMLLinkElementBinding::Wrap) } } fn get_attr(element: JSRef<Element>, name: &Atom) -> Option<String> { let elem = element.get_attribute(ns!(""), name).root(); elem.map(|e| e.value().as_slice().to_string()) } fn is_stylesheet(value: &Option<String>) -> bool { match *value { Some(ref value) => { value.as_slice().split(HTML_SPACE_CHARACTERS.as_slice()) .any(|s| s.as_slice().eq_ignore_ascii_case("stylesheet")) }, None => false, } } impl<'a> VirtualMethods for JSRef<'a, HTMLLinkElement> { fn super_type<'a>(&'a self) -> Option<&'a VirtualMethods> { let htmlelement: &JSRef<HTMLElement> = HTMLElementCast::from_borrowed_ref(self); Some(htmlelement as &VirtualMethods) } fn after_set_attr(&self, attr: JSRef<Attr>) { match self.super_type() { Some(ref s) => s.after_set_attr(attr), _ => () } let element: JSRef<Element> = ElementCast::from_ref(*self); let rel = get_attr(element, &atom!("rel")); match (rel, attr.local_name()) { (ref rel, &atom!("href")) => { if is_stylesheet(rel) { self.handle_stylesheet_url(attr.value().as_slice()); } } (_, _) => () } } fn parse_plain_attribute(&self, name: &Atom, value: DOMString) -> AttrValue { match name { &atom!("rel") => AttrValue::from_tokenlist(value), _ => self.super_type().unwrap().parse_plain_attribute(name, value), } } fn bind_to_tree(&self, tree_in_doc: bool) { match self.super_type() { Some(ref s) => s.bind_to_tree(tree_in_doc), _ => () } if tree_in_doc { let element: JSRef<Element> = ElementCast::from_ref(*self); let rel = get_attr(element, &atom!("rel")); let href = get_attr(element, &atom!("href")); match (rel, href) { (ref rel, Some(ref href)) if is_stylesheet(rel) => { self.handle_stylesheet_url(href.as_slice()); } _ => {} } } } } trait PrivateHTMLLinkElementHelpers { fn handle_stylesheet_url(self, href: &str); } impl<'a> PrivateHTMLLinkElementHelpers for JSRef<'a, HTMLLinkElement> { fn handle_stylesheet_url(self, href: &str) { let window = window_from_node(self).root(); match UrlParser::new().base_url(&window.page().get_url()).parse(href) { Ok(url) => { let LayoutChan(ref layout_chan) = window.page().layout_chan; layout_chan.send(LoadStylesheetMsg(url)); } Err(e) => debug!("Parsing url {:s} failed: {}", href, e) } } } impl Reflectable for HTMLLinkElement { fn reflector<'a>(&'a self) -> &'a Reflector { self.htmlelement.reflector() } } impl<'a> HTMLLinkElementMethods for JSRef<'a, HTMLLinkElement> { fn RelList(self) -> Temporary<DOMTokenList> { if self.rel_list.get().is_none() { let element: JSRef<Element> = ElementCast::from_ref(self); let rel_list = DOMTokenList::new(element, &atom!("rel")); self.rel_list.assign(Some(rel_list)); } self.rel_list.get().unwrap() } }
}
random_line_split
xtc.rs
//! This module serves as the entry point into Xt's main binary. // This file is part of Xt. // This is the Xt text editor; it edits text. // Copyright (C) 2016-2018 The Xt Developers // This program is free software: you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // This program is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see // <http://www.gnu.org/licenses/>. extern crate clap; extern crate xt_core as xt; #[macro_use] extern crate slog; extern crate slog_term; use clap::{App, Arg, ArgMatches, SubCommand}; use xt::logging::init_logger; fn retrieve_arguments() -> ArgMatches<'static>
fn main() { let _args = retrieve_arguments(); let log = init_logger(); info!(log, "Xt (core) loading.."); warn!( log, "Xt (core) has no configuration file. Reverting to defaults." ); error!(log, "Xt Core is not ready for deployment. Halt."); unimplemented!(); }
{ App::new("xt-core") .version("0.1.0") .author("Dom Rodriguez <[email protected]>") .about("Core backend for Xt.") .arg( Arg::with_name("verbose") .short("v") .multiple(true) .required(false) .help("Set the level of logging verbosity"), ) .subcommand( SubCommand::with_name("spawn").help("Spawn a instance of Xt"), ) .get_matches() }
identifier_body
xtc.rs
//! This module serves as the entry point into Xt's main binary. // This file is part of Xt. // This is the Xt text editor; it edits text. // Copyright (C) 2016-2018 The Xt Developers // This program is free software: you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // This program is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see // <http://www.gnu.org/licenses/>. extern crate clap; extern crate xt_core as xt; #[macro_use] extern crate slog; extern crate slog_term; use clap::{App, Arg, ArgMatches, SubCommand}; use xt::logging::init_logger; fn
() -> ArgMatches<'static> { App::new("xt-core") .version("0.1.0") .author("Dom Rodriguez <[email protected]>") .about("Core backend for Xt.") .arg( Arg::with_name("verbose") .short("v") .multiple(true) .required(false) .help("Set the level of logging verbosity"), ) .subcommand( SubCommand::with_name("spawn").help("Spawn a instance of Xt"), ) .get_matches() } fn main() { let _args = retrieve_arguments(); let log = init_logger(); info!(log, "Xt (core) loading.."); warn!( log, "Xt (core) has no configuration file. Reverting to defaults." ); error!(log, "Xt Core is not ready for deployment. Halt."); unimplemented!(); }
retrieve_arguments
identifier_name
xtc.rs
//! This module serves as the entry point into Xt's main binary.
// This is the Xt text editor; it edits text. // Copyright (C) 2016-2018 The Xt Developers // This program is free software: you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // This program is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see // <http://www.gnu.org/licenses/>. extern crate clap; extern crate xt_core as xt; #[macro_use] extern crate slog; extern crate slog_term; use clap::{App, Arg, ArgMatches, SubCommand}; use xt::logging::init_logger; fn retrieve_arguments() -> ArgMatches<'static> { App::new("xt-core") .version("0.1.0") .author("Dom Rodriguez <[email protected]>") .about("Core backend for Xt.") .arg( Arg::with_name("verbose") .short("v") .multiple(true) .required(false) .help("Set the level of logging verbosity"), ) .subcommand( SubCommand::with_name("spawn").help("Spawn a instance of Xt"), ) .get_matches() } fn main() { let _args = retrieve_arguments(); let log = init_logger(); info!(log, "Xt (core) loading.."); warn!( log, "Xt (core) has no configuration file. Reverting to defaults." ); error!(log, "Xt Core is not ready for deployment. Halt."); unimplemented!(); }
// This file is part of Xt.
random_line_split
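The xtc.rs records above declare -v with .multiple(true) so the flag can be repeated, but never read the count back. A minimal sketch, assuming clap 2.x as in the snippet, of how the occurrence count could be mapped to a verbosity level; the demo app name and the level mapping are illustrative only and not part of Xt.

// Illustrative only: map how many times `-v` was passed to a coarse level,
// using clap 2.x APIs that also appear in the snippet above.
extern crate clap;

use clap::{App, Arg};

fn main() {
    let matches = App::new("verbosity-demo")
        .arg(Arg::with_name("verbose").short("v").multiple(true))
        .get_matches();

    // occurrences_of counts repeated flags, e.g. `-vvv` gives 3.
    let level = match matches.occurrences_of("verbose") {
        0 => "warn",
        1 => "info",
        2 => "debug",
        _ => "trace",
    };
    println!("requested log level: {}", level);
}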
for_each.rs
use {Future, Task, Poll}; use stream::Stream; /// A stream combinator which executes a unit closure over each item on a /// stream. /// /// This structure is returned by the `Stream::for_each` method. pub struct ForEach<S, F> { stream: S,
f: F, } pub fn new<S, F>(s: S, f: F) -> ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { ForEach { stream: s, f: f, } } impl<S, F> Future for ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { type Item = (); type Error = S::Error; fn poll(&mut self, task: &mut Task) -> Poll<(), S::Error> { loop { match try_poll!(self.stream.poll(task)) { Ok(Some(e)) => { match (self.f)(e) { Ok(()) => {} Err(e) => return Poll::Err(e), } } Ok(None) => return Poll::Ok(()), Err(e) => return Poll::Err(e), } } } fn schedule(&mut self, task: &mut Task) { self.stream.schedule(task) } }
random_line_split
for_each.rs
use {Future, Task, Poll}; use stream::Stream; /// A stream combinator which executes a unit closure over each item on a /// stream. /// /// This structure is returned by the `Stream::for_each` method. pub struct ForEach<S, F> { stream: S, f: F, } pub fn
<S, F>(s: S, f: F) -> ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { ForEach { stream: s, f: f, } } impl<S, F> Future for ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { type Item = (); type Error = S::Error; fn poll(&mut self, task: &mut Task) -> Poll<(), S::Error> { loop { match try_poll!(self.stream.poll(task)) { Ok(Some(e)) => { match (self.f)(e) { Ok(()) => {} Err(e) => return Poll::Err(e), } } Ok(None) => return Poll::Ok(()), Err(e) => return Poll::Err(e), } } } fn schedule(&mut self, task: &mut Task) { self.stream.schedule(task) } }
new
identifier_name
for_each.rs
use {Future, Task, Poll}; use stream::Stream; /// A stream combinator which executes a unit closure over each item on a /// stream. /// /// This structure is returned by the `Stream::for_each` method. pub struct ForEach<S, F> { stream: S, f: F, } pub fn new<S, F>(s: S, f: F) -> ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { ForEach { stream: s, f: f, } } impl<S, F> Future for ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { type Item = (); type Error = S::Error; fn poll(&mut self, task: &mut Task) -> Poll<(), S::Error> { loop { match try_poll!(self.stream.poll(task)) { Ok(Some(e)) => { match (self.f)(e) { Ok(()) =>
Err(e) => return Poll::Err(e), } } Ok(None) => return Poll::Ok(()), Err(e) => return Poll::Err(e), } } } fn schedule(&mut self, task: &mut Task) { self.stream.schedule(task) } }
{}
conditional_block
for_each.rs
use {Future, Task, Poll}; use stream::Stream; /// A stream combinator which executes a unit closure over each item on a /// stream. /// /// This structure is returned by the `Stream::for_each` method. pub struct ForEach<S, F> { stream: S, f: F, } pub fn new<S, F>(s: S, f: F) -> ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { ForEach { stream: s, f: f, } } impl<S, F> Future for ForEach<S, F> where S: Stream, F: FnMut(S::Item) -> Result<(), S::Error> + Send + 'static { type Item = (); type Error = S::Error; fn poll(&mut self, task: &mut Task) -> Poll<(), S::Error> { loop { match try_poll!(self.stream.poll(task)) { Ok(Some(e)) => { match (self.f)(e) { Ok(()) => {} Err(e) => return Poll::Err(e), } } Ok(None) => return Poll::Ok(()), Err(e) => return Poll::Err(e), } } } fn schedule(&mut self, task: &mut Task)
}
{ self.stream.schedule(task) }
identifier_body
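ForEach::poll in the for_each.rs records above keeps polling the stream, runs the fallible closure on every item, resolves with Ok(()) once the stream is exhausted, and short-circuits on the first error. A rough standalone analogy of that control flow over an ordinary iterator of Results follows; this is not the futures API itself, and the name for_each_like is ours.

// Standalone analogy of ForEach::poll's control flow, using a plain iterator
// of Results instead of a Stream.
fn for_each_like<I, F, E>(items: I, mut f: F) -> Result<(), E>
where
    I: IntoIterator<Item = Result<i32, E>>,
    F: FnMut(i32) -> Result<(), E>,
{
    for item in items {
        // An item error or a closure error ends the loop early, mirroring the
        // `return Poll::Err(e)` arms in the poll loop above.
        f(item?)?;
    }
    // The "stream" is exhausted: resolve with Ok(()), like the Ok(None) arm.
    Ok(())
}

fn main() {
    let ok: Vec<Result<i32, String>> = vec![Ok(1), Ok(2), Ok(3)];
    assert_eq!(for_each_like(ok, |n| { println!("{}", n); Ok(()) }), Ok(()));

    let bad: Vec<Result<i32, String>> = vec![Ok(1), Err("boom".to_string())];
    assert_eq!(for_each_like(bad, |_| Ok(())), Err("boom".to_string()));
}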
lib.rs
// Need this for rusty_peg #![recursion_limit = "256"] // I hate this lint. #![allow(unused_parens)] // The builtin tests don't cover the CLI and so forth, and it's just // too darn annoying to try and make them do so. #![cfg_attr(test, allow(dead_code))] extern crate ascii_canvas; extern crate atty; extern crate bit_set; extern crate diff; extern crate ena; extern crate itertools; extern crate lalrpop_util; extern crate petgraph; extern crate regex; extern crate regex_syntax; extern crate string_cache;
extern crate unicode_xid; #[cfg(test)] extern crate rand; // hoist the modules that define macros up earlier #[macro_use] mod rust; #[macro_use] mod log; mod api; mod build; mod collections; mod file_text; mod grammar; mod lexer; mod lr1; mod message; mod normalize; mod parser; mod kernel_set; mod session; mod tls; mod tok; mod util; #[cfg(test)] mod generate; #[cfg(test)] mod test_util; pub use api::Configuration; pub use api::process_root; pub use api::process_root_unconditionally; use ascii_canvas::style;
extern crate term;
random_line_split
credentials.rs
use crate::errors::*; use dirs; use failure::Error; use std::env; use std::fs::File; use std::io::Read; use std::path::PathBuf; use toml::Value; use url::Url; /// Representation of a Chef configuration. #[derive(Debug, Clone, Default, PartialEq, Deserialize)] pub struct Config { /// The URL to the Chef Server organization pub chef_server_url: String, node_name: Option<String>, client_name: Option<String>, #[serde(skip)] profile: String, client_key: String, /// The path or contents of the validator key pub validator_key: Option<String>, /// The authentication scheme to use; defaults to 1.3. #[serde(default = "default_auth_string")] pub sign_ver: String, } impl Config { /// Creates a new Config from a `TOML` string. pub fn from_str(toml: &str, profile: &str) -> Result<Self, Error> { let credentials = toml.parse::<Value>().unwrap(); let credentials = credentials[profile].clone(); let mut creds: Config = credentials .try_into() .map_err(ChefError::TomlDeserializeError)?; creds.profile = profile.into(); Ok(creds) } /// Loads a config from `~/.chef/credentials`, using the following heuristic to determine a /// profile name: /// /// - If `profile` is not `None`, use the value provided. /// - Using the `CHEF_PROFILE` environment variable /// - Using the contents of `~/.chef/context` /// - Otherwise use the default profile. pub fn from_credentials(profile: Option<&str>) -> Result<Self, Error> { let credentials = get_chef_path("credentials")?; let profile = select_profile_name(profile); debug!( "Opening credentials file: {:?} with profile: {:?}", credentials, profile ); match File::open(&credentials) { Ok(mut fh) => { let mut config = String::new(); fh.read_to_string(&mut config)?; Self::from_str(&config, &profile) } Err(_) => Err(ChefError::UnparseableConfigError(String::from( "Unable to read credentials file", )) .into()), } } /// Returns the configured name to authenticate with. A profile may use either `client_name` or /// `node_name` interchangeably; `client_name` is preferred, and a `DuplicateClientNameError` will /// be returned if both `client_name` and `node_name` are used. pub fn client_name(&self) -> Result<&str, Error> { let profile = self.profile.clone(); if self.client_name.is_some() && self.node_name.is_some() { Err(ChefError::DuplicateClientNameError(profile).into()) } else if self.node_name.is_some() { self.node_name .as_ref() .ok_or_else(|| { ChefError::UnparseableConfigError(format!( "failed to read node name for profile: {}", profile )) .into() }) .and_then(|n| Ok(n.as_ref())) } else if self.client_name.is_some() { self.client_name .as_ref() .ok_or_else(|| { ChefError::UnparseableConfigError(format!( "failed to read client name for profile: {}", profile )) .into() }) .and_then(|n| Ok(n.as_ref())) } else { Err(ChefError::UnparseableConfigError(format!( "No node_name or client_name found for profile: {}", profile )) .into()) } } /// Returns the contents of the client key used for signing requests. pub fn key(&self) -> Result<Vec<u8>, Error> { if self .client_key .starts_with("-----BEGIN RSA PRIVATE KEY-----") { Ok(self.client_key.as_bytes().into()) } else { let key_path = get_chef_path(&self.client_key)?; debug!("Reading key from {:?}", key_path); let mut key: Vec<u8> = vec![]; File::open(&key_path).and_then(|mut fh| fh.read_to_end(&mut key))?; debug!("Read private key: {:?}", key); Ok(key) } } fn endpoint(&self) -> Result<Url, Error> { Url::parse(self.chef_server_url.as_ref()).map_err(|e| e.into()) } /// Returns the organization path of the configured `chef_server_url`. 
pub fn organization_path(&self) -> Result<String, Error> { let endpoint = self.endpoint()?; Ok(endpoint.path().into()) } /// Returns the scheme, host and port of the configured `chef_server_url`. pub fn url_base(&self) -> Result<String, Error> { let endpoint = self.endpoint()?; let host = &endpoint.host_str().unwrap(); let port = &endpoint.port_or_known_default().unwrap(); let scheme = &endpoint.scheme(); Ok(format!("{}://{}:{}", scheme, host, port)) } } fn select_profile_name(name: Option<&str>) -> String { if name.is_some() { debug!("Using explicit profile name"); return name.unwrap().into(); }; if let Ok(env) = env::var("CHEF_PROFILE") { debug!("Using environment variable for profile name"); return env; }; let path = match get_chef_path("context") { Ok(p) => p, Err(_) => { debug!("Unable to figure out path to context file, using default profile"); return String::from("default"); } }; if let Ok(mut fh) = File::open(path) { debug!("Reading profile context from file"); let mut contents = String::new(); return match fh.read_to_string(&mut contents) { Ok(_) => contents, Err(_) => { info!("Failed to read profile from file, using default"); String::from("default") } }; } debug!("Using default profile name"); String::from("default") } fn get_chef_path(val: &str) -> Result<String, Error> { let home_dir = match dirs::home_dir() { Some(path) => path, None => { return Err(ChefError::PrivateKeyError(String::from( "Could not identify user's home directory", )) .into()); } }; let mut p = PathBuf::from(val); if!p.is_absolute() { p = home_dir; p.push(".chef"); p.push(val) } match p.to_str() { Some(path) => Ok(path.to_owned()), None => Err(ChefError::PrivateKeyError(String::from( "Could not construct a path to the user's.chef directory", )) .into()), } } fn default_auth_string() -> String { "1.3".into() } #[cfg(test)] mod tests { use super::*; use std; const CREDENTIALS: &str = r#" [default] node_name = 'barney' client_key = 'barney_rubble.pem' chef_server_url = 'https://api.chef.io/organizations/bedrock' "#; #[test] fn test_from_str() { let cfg = Config::from_str(CREDENTIALS, "default").unwrap(); assert_eq!(cfg.node_name.unwrap(), "barney") } #[test] fn
() { let cfg = Config::from_str(CREDENTIALS, "default").unwrap(); assert_eq!(cfg.organization_path().unwrap(), "/organizations/bedrock") } #[test] fn test_default_profile_name() { std::env::remove_var("CHEF_PROFILE"); assert_eq!(select_profile_name(None), String::from("default")) } #[test] fn test_environment_profile_name() { std::env::set_var("CHEF_PROFILE", "environment"); assert_eq!(select_profile_name(None), String::from("environment")); std::env::remove_var("CHEF_PROFILE"); } #[test] fn test_explicit_profile_name() { assert_eq!( select_profile_name(Some("explicit")), String::from("explicit") ) } #[test] fn test_get_chef_path() { let home = dirs::home_dir().unwrap(); std::env::set_var("HOME", "/home/barney"); let path = get_chef_path("credentials").unwrap(); assert_eq!(path, "/home/barney/.chef/credentials"); std::env::set_var("HOME", home); } #[test] fn test_absolute_get_chef_path() { let home = dirs::home_dir().unwrap(); std::env::set_var("HOME", "/home/barney"); let path = get_chef_path("/home/fred/.chef/fred.pem").unwrap(); assert_eq!(path, "/home/fred/.chef/fred.pem"); std::env::set_var("HOME", home); } }
test_organization_path
identifier_name
credentials.rs
use crate::errors::*; use dirs; use failure::Error; use std::env; use std::fs::File; use std::io::Read; use std::path::PathBuf; use toml::Value; use url::Url; /// Representation of a Chef configuration. #[derive(Debug, Clone, Default, PartialEq, Deserialize)] pub struct Config { /// The URL to the Chef Server organization pub chef_server_url: String, node_name: Option<String>, client_name: Option<String>, #[serde(skip)] profile: String, client_key: String, /// The path or contents of the validator key pub validator_key: Option<String>, /// The authentication scheme to use; defaults to 1.3. #[serde(default = "default_auth_string")] pub sign_ver: String, } impl Config { /// Creates a new Config from a `TOML` string. pub fn from_str(toml: &str, profile: &str) -> Result<Self, Error> { let credentials = toml.parse::<Value>().unwrap(); let credentials = credentials[profile].clone(); let mut creds: Config = credentials .try_into() .map_err(ChefError::TomlDeserializeError)?; creds.profile = profile.into(); Ok(creds) } /// Loads a config from `~/.chef/credentials`, using the following heuristic to determine a /// profile name: /// /// - If `profile` is not `None`, use the value provided. /// - Using the `CHEF_PROFILE` environment variable /// - Using the contents of `~/.chef/context` /// - Otherwise use the default profile. pub fn from_credentials(profile: Option<&str>) -> Result<Self, Error> { let credentials = get_chef_path("credentials")?; let profile = select_profile_name(profile); debug!( "Opening credentials file: {:?} with profile: {:?}", credentials, profile ); match File::open(&credentials) { Ok(mut fh) => { let mut config = String::new(); fh.read_to_string(&mut config)?; Self::from_str(&config, &profile) } Err(_) => Err(ChefError::UnparseableConfigError(String::from( "Unable to read credentials file", )) .into()), } } /// Returns the configured name to authenticate with. A profile may use either `client_name` or
Err(ChefError::DuplicateClientNameError(profile).into()) } else if self.node_name.is_some() { self.node_name .as_ref() .ok_or_else(|| { ChefError::UnparseableConfigError(format!( "failed to read node name for profile: {}", profile )) .into() }) .and_then(|n| Ok(n.as_ref())) } else if self.client_name.is_some() { self.client_name .as_ref() .ok_or_else(|| { ChefError::UnparseableConfigError(format!( "failed to read client name for profile: {}", profile )) .into() }) .and_then(|n| Ok(n.as_ref())) } else { Err(ChefError::UnparseableConfigError(format!( "No node_name or client_name found for profile: {}", profile )) .into()) } } /// Returns the contents of the client key used for signing requests. pub fn key(&self) -> Result<Vec<u8>, Error> { if self .client_key .starts_with("-----BEGIN RSA PRIVATE KEY-----") { Ok(self.client_key.as_bytes().into()) } else { let key_path = get_chef_path(&self.client_key)?; debug!("Reading key from {:?}", key_path); let mut key: Vec<u8> = vec![]; File::open(&key_path).and_then(|mut fh| fh.read_to_end(&mut key))?; debug!("Read private key: {:?}", key); Ok(key) } } fn endpoint(&self) -> Result<Url, Error> { Url::parse(self.chef_server_url.as_ref()).map_err(|e| e.into()) } /// Returns the organization path of the configured `chef_server_url`. pub fn organization_path(&self) -> Result<String, Error> { let endpoint = self.endpoint()?; Ok(endpoint.path().into()) } /// Returns the scheme, host and port of the configured `chef_server_url`. pub fn url_base(&self) -> Result<String, Error> { let endpoint = self.endpoint()?; let host = &endpoint.host_str().unwrap(); let port = &endpoint.port_or_known_default().unwrap(); let scheme = &endpoint.scheme(); Ok(format!("{}://{}:{}", scheme, host, port)) } } fn select_profile_name(name: Option<&str>) -> String { if name.is_some() { debug!("Using explicit profile name"); return name.unwrap().into(); }; if let Ok(env) = env::var("CHEF_PROFILE") { debug!("Using environment variable for profile name"); return env; }; let path = match get_chef_path("context") { Ok(p) => p, Err(_) => { debug!("Unable to figure out path to context file, using default profile"); return String::from("default"); } }; if let Ok(mut fh) = File::open(path) { debug!("Reading profile context from file"); let mut contents = String::new(); return match fh.read_to_string(&mut contents) { Ok(_) => contents, Err(_) => { info!("Failed to read profile from file, using default"); String::from("default") } }; } debug!("Using default profile name"); String::from("default") } fn get_chef_path(val: &str) -> Result<String, Error> { let home_dir = match dirs::home_dir() { Some(path) => path, None => { return Err(ChefError::PrivateKeyError(String::from( "Could not identify user's home directory", )) .into()); } }; let mut p = PathBuf::from(val); if!p.is_absolute() { p = home_dir; p.push(".chef"); p.push(val) } match p.to_str() { Some(path) => Ok(path.to_owned()), None => Err(ChefError::PrivateKeyError(String::from( "Could not construct a path to the user's.chef directory", )) .into()), } } fn default_auth_string() -> String { "1.3".into() } #[cfg(test)] mod tests { use super::*; use std; const CREDENTIALS: &str = r#" [default] node_name = 'barney' client_key = 'barney_rubble.pem' chef_server_url = 'https://api.chef.io/organizations/bedrock' "#; #[test] fn test_from_str() { let cfg = Config::from_str(CREDENTIALS, "default").unwrap(); assert_eq!(cfg.node_name.unwrap(), "barney") } #[test] fn test_organization_path() { let cfg = Config::from_str(CREDENTIALS, "default").unwrap(); 
assert_eq!(cfg.organization_path().unwrap(), "/organizations/bedrock") } #[test] fn test_default_profile_name() { std::env::remove_var("CHEF_PROFILE"); assert_eq!(select_profile_name(None), String::from("default")) } #[test] fn test_environment_profile_name() { std::env::set_var("CHEF_PROFILE", "environment"); assert_eq!(select_profile_name(None), String::from("environment")); std::env::remove_var("CHEF_PROFILE"); } #[test] fn test_explicit_profile_name() { assert_eq!( select_profile_name(Some("explicit")), String::from("explicit") ) } #[test] fn test_get_chef_path() { let home = dirs::home_dir().unwrap(); std::env::set_var("HOME", "/home/barney"); let path = get_chef_path("credentials").unwrap(); assert_eq!(path, "/home/barney/.chef/credentials"); std::env::set_var("HOME", home); } #[test] fn test_absolute_get_chef_path() { let home = dirs::home_dir().unwrap(); std::env::set_var("HOME", "/home/barney"); let path = get_chef_path("/home/fred/.chef/fred.pem").unwrap(); assert_eq!(path, "/home/fred/.chef/fred.pem"); std::env::set_var("HOME", home); } }
/// `node_name` interchangeably; `client_name` is preferred, and a `DuplicateClientNameError` will /// be returned if both `client_name` and `node_name` are used. pub fn client_name(&self) -> Result<&str, Error> { let profile = self.profile.clone(); if self.client_name.is_some() && self.node_name.is_some() {
random_line_split
credentials.rs
use crate::errors::*; use dirs; use failure::Error; use std::env; use std::fs::File; use std::io::Read; use std::path::PathBuf; use toml::Value; use url::Url; /// Representation of a Chef configuration. #[derive(Debug, Clone, Default, PartialEq, Deserialize)] pub struct Config { /// The URL to the Chef Server organization pub chef_server_url: String, node_name: Option<String>, client_name: Option<String>, #[serde(skip)] profile: String, client_key: String, /// The path or contents of the validator key pub validator_key: Option<String>, /// The authentication scheme to use; defaults to 1.3. #[serde(default = "default_auth_string")] pub sign_ver: String, } impl Config { /// Creates a new Config from a `TOML` string. pub fn from_str(toml: &str, profile: &str) -> Result<Self, Error> { let credentials = toml.parse::<Value>().unwrap(); let credentials = credentials[profile].clone(); let mut creds: Config = credentials .try_into() .map_err(ChefError::TomlDeserializeError)?; creds.profile = profile.into(); Ok(creds) } /// Loads a config from `~/.chef/credentials`, using the following heuristic to determine a /// profile name: /// /// - If `profile` is not `None`, use the value provided. /// - Using the `CHEF_PROFILE` environment variable /// - Using the contents of `~/.chef/context` /// - Otherwise use the default profile. pub fn from_credentials(profile: Option<&str>) -> Result<Self, Error> { let credentials = get_chef_path("credentials")?; let profile = select_profile_name(profile); debug!( "Opening credentials file: {:?} with profile: {:?}", credentials, profile ); match File::open(&credentials) { Ok(mut fh) => { let mut config = String::new(); fh.read_to_string(&mut config)?; Self::from_str(&config, &profile) } Err(_) => Err(ChefError::UnparseableConfigError(String::from( "Unable to read credentials file", )) .into()), } } /// Returns the configured name to authenticate with. A profile may use either `client_name` or /// `node_name` interchangeably; `client_name` is preferred, and a `DuplicateClientNameError` will /// be returned if both `client_name` and `node_name` are used. pub fn client_name(&self) -> Result<&str, Error>
"failed to read client name for profile: {}", profile )) .into() }) .and_then(|n| Ok(n.as_ref())) } else { Err(ChefError::UnparseableConfigError(format!( "No node_name or client_name found for profile: {}", profile )) .into()) } } /// Returns the contents of the client key used for signing requests. pub fn key(&self) -> Result<Vec<u8>, Error> { if self .client_key .starts_with("-----BEGIN RSA PRIVATE KEY-----") { Ok(self.client_key.as_bytes().into()) } else { let key_path = get_chef_path(&self.client_key)?; debug!("Reading key from {:?}", key_path); let mut key: Vec<u8> = vec![]; File::open(&key_path).and_then(|mut fh| fh.read_to_end(&mut key))?; debug!("Read private key: {:?}", key); Ok(key) } } fn endpoint(&self) -> Result<Url, Error> { Url::parse(self.chef_server_url.as_ref()).map_err(|e| e.into()) } /// Returns the organization path of the configured `chef_server_url`. pub fn organization_path(&self) -> Result<String, Error> { let endpoint = self.endpoint()?; Ok(endpoint.path().into()) } /// Returns the scheme, host and port of the configured `chef_server_url`. pub fn url_base(&self) -> Result<String, Error> { let endpoint = self.endpoint()?; let host = &endpoint.host_str().unwrap(); let port = &endpoint.port_or_known_default().unwrap(); let scheme = &endpoint.scheme(); Ok(format!("{}://{}:{}", scheme, host, port)) } } fn select_profile_name(name: Option<&str>) -> String { if name.is_some() { debug!("Using explicit profile name"); return name.unwrap().into(); }; if let Ok(env) = env::var("CHEF_PROFILE") { debug!("Using environment variable for profile name"); return env; }; let path = match get_chef_path("context") { Ok(p) => p, Err(_) => { debug!("Unable to figure out path to context file, using default profile"); return String::from("default"); } }; if let Ok(mut fh) = File::open(path) { debug!("Reading profile context from file"); let mut contents = String::new(); return match fh.read_to_string(&mut contents) { Ok(_) => contents, Err(_) => { info!("Failed to read profile from file, using default"); String::from("default") } }; } debug!("Using default profile name"); String::from("default") } fn get_chef_path(val: &str) -> Result<String, Error> { let home_dir = match dirs::home_dir() { Some(path) => path, None => { return Err(ChefError::PrivateKeyError(String::from( "Could not identify user's home directory", )) .into()); } }; let mut p = PathBuf::from(val); if!p.is_absolute() { p = home_dir; p.push(".chef"); p.push(val) } match p.to_str() { Some(path) => Ok(path.to_owned()), None => Err(ChefError::PrivateKeyError(String::from( "Could not construct a path to the user's.chef directory", )) .into()), } } fn default_auth_string() -> String { "1.3".into() } #[cfg(test)] mod tests { use super::*; use std; const CREDENTIALS: &str = r#" [default] node_name = 'barney' client_key = 'barney_rubble.pem' chef_server_url = 'https://api.chef.io/organizations/bedrock' "#; #[test] fn test_from_str() { let cfg = Config::from_str(CREDENTIALS, "default").unwrap(); assert_eq!(cfg.node_name.unwrap(), "barney") } #[test] fn test_organization_path() { let cfg = Config::from_str(CREDENTIALS, "default").unwrap(); assert_eq!(cfg.organization_path().unwrap(), "/organizations/bedrock") } #[test] fn test_default_profile_name() { std::env::remove_var("CHEF_PROFILE"); assert_eq!(select_profile_name(None), String::from("default")) } #[test] fn test_environment_profile_name() { std::env::set_var("CHEF_PROFILE", "environment"); assert_eq!(select_profile_name(None), String::from("environment")); 
std::env::remove_var("CHEF_PROFILE"); } #[test] fn test_explicit_profile_name() { assert_eq!( select_profile_name(Some("explicit")), String::from("explicit") ) } #[test] fn test_get_chef_path() { let home = dirs::home_dir().unwrap(); std::env::set_var("HOME", "/home/barney"); let path = get_chef_path("credentials").unwrap(); assert_eq!(path, "/home/barney/.chef/credentials"); std::env::set_var("HOME", home); } #[test] fn test_absolute_get_chef_path() { let home = dirs::home_dir().unwrap(); std::env::set_var("HOME", "/home/barney"); let path = get_chef_path("/home/fred/.chef/fred.pem").unwrap(); assert_eq!(path, "/home/fred/.chef/fred.pem"); std::env::set_var("HOME", home); } }
{ let profile = self.profile.clone(); if self.client_name.is_some() && self.node_name.is_some() { Err(ChefError::DuplicateClientNameError(profile).into()) } else if self.node_name.is_some() { self.node_name .as_ref() .ok_or_else(|| { ChefError::UnparseableConfigError(format!( "failed to read node name for profile: {}", profile )) .into() }) .and_then(|n| Ok(n.as_ref())) } else if self.client_name.is_some() { self.client_name .as_ref() .ok_or_else(|| { ChefError::UnparseableConfigError(format!(
identifier_body
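The credentials.rs tests above show the expected TOML shape with a single [default] table; Config::from_str simply indexes the parsed TOML by profile name, so a multi-profile file is just additional tables. Below is a hypothetical extra test in the same module, relying only on APIs shown above (Config::from_str, client_name, organization_path); the "prod" profile and its values are invented for illustration.

// Hypothetical extra test for the module above; the "prod" profile and its
// values are made up for this example.
#[test]
fn test_second_profile_selection() {
    let creds = r#"
[default]
node_name = 'barney'
client_key = 'barney_rubble.pem'
chef_server_url = 'https://api.chef.io/organizations/bedrock'

[prod]
client_name = 'fred'
client_key = '/home/fred/.chef/fred.pem'
chef_server_url = 'https://api.chef.io/organizations/quarry'
"#;
    let cfg = Config::from_str(creds, "prod").unwrap();
    assert_eq!(cfg.client_name().unwrap(), "fred");
    assert_eq!(cfg.organization_path().unwrap(), "/organizations/quarry");
}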
main.rs
// 0.096s on A4-5000 (1.5 GHz) extern crate permutohedron; use std::collections::HashMap; fn main() { let input = include_str!("input.txt"); // Obtain locations and route possibilities let mut locations: Vec<&str> = Vec::new(); let mut routes: HashMap<[&str;2], usize> = HashMap::new(); for line in input.lines() { let words = line.split_whitespace().collect::<Vec<&str>>(); let (first, second) = ([words[0], words[2]], [words[2], words[0]]); let distance = words[4].parse::<usize>().unwrap(); routes.insert(first, distance); routes.insert(second, distance); if !locations.contains(&words[0]) { locations.push(words[0]); } if !locations.contains(&words[2])
} // Calculate the shortest and longest distances/routes let mut shortest_distance: usize = 65535; let mut shortest_route: Vec<&str> = Vec::new(); let mut longest_distance: usize = 0; let mut longest_route: Vec<&str> = Vec::new(); for permutation in permutohedron::Heap::new(&mut locations) { let distance = calculate_distance(&permutation, &routes); if distance < shortest_distance { shortest_route = permutation.clone(); shortest_distance = distance; } if distance > longest_distance { longest_route = permutation.clone(); longest_distance = distance; } } // Print the results print!("The shortest route is {} miles: ", shortest_distance); print_route(&shortest_route); print!("\nThe longest route is {} miles: ", longest_distance); print_route(&longest_route); } /// Calculates the distance that must be traveled for a given input route. fn calculate_distance(input: &Vec<&str>, map: &HashMap<[&str;2],usize>) -> usize { let mut distance: usize = 0; let mut iterator = input.iter(); let mut previous = iterator.next().unwrap(); for next in iterator { distance += *map.get(&[previous.clone(),next.clone()]).unwrap(); previous = next; } return distance; } /// Prints the given route to stdout. fn print_route(locations: &Vec<&str>) { let mut iterator = locations.iter(); print!("{}", iterator.next().unwrap()); for x in iterator { print!(" -> {}", x); } print!("\n"); }
{ locations.push(words[2]); }
conditional_block
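The parsing loop in the main.rs records assumes each input line looks like "<A> to <B> = <distance>" and picks words 0, 2 and 4, inserting the leg in both directions. A small standalone check of that indexing follows; the sample line is illustrative and not taken from the real input.txt.

use std::collections::HashMap;

fn main() {
    // Illustrative line in the "<A> to <B> = <distance>" shape the loop expects.
    let line = "London to Dublin = 464";
    let words: Vec<&str> = line.split_whitespace().collect();
    let (first, second) = ([words[0], words[2]], [words[2], words[0]]);
    let distance = words[4].parse::<usize>().unwrap();

    let mut routes: HashMap<[&str; 2], usize> = HashMap::new();
    routes.insert(first, distance);
    routes.insert(second, distance);

    // Both directions are recorded with the same distance.
    assert_eq!(routes[&["London", "Dublin"]], 464);
    assert_eq!(routes[&["Dublin", "London"]], 464);
}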
main.rs
// 0.096s on A4-5000 (1.5 GHz) extern crate permutohedron; use std::collections::HashMap; fn main() { let input = include_str!("input.txt"); // Obtain locations and route possibilities let mut locations: Vec<&str> = Vec::new(); let mut routes: HashMap<[&str;2], usize> = HashMap::new(); for line in input.lines() { let words = line.split_whitespace().collect::<Vec<&str>>(); let (first, second) = ([words[0], words[2]], [words[2], words[0]]); let distance = words[4].parse::<usize>().unwrap(); routes.insert(first, distance); routes.insert(second, distance); if !locations.contains(&words[0]) { locations.push(words[0]); } if !locations.contains(&words[2]) { locations.push(words[2]); } } // Calculate the shortest and longest distances/routes let mut shortest_distance: usize = 65535; let mut shortest_route: Vec<&str> = Vec::new(); let mut longest_distance: usize = 0; let mut longest_route: Vec<&str> = Vec::new(); for permutation in permutohedron::Heap::new(&mut locations) { let distance = calculate_distance(&permutation, &routes); if distance < shortest_distance { shortest_route = permutation.clone(); shortest_distance = distance; } if distance > longest_distance { longest_route = permutation.clone(); longest_distance = distance; } } // Print the results print!("The shortest route is {} miles: ", shortest_distance); print_route(&shortest_route); print!("\nThe longest route is {} miles: ", longest_distance); print_route(&longest_route); } /// Calculates the distance that must be traveled for a given input route. fn
(input: &Vec<&str>, map: &HashMap<[&str;2],usize>) -> usize { let mut distance: usize = 0; let mut iterator = input.iter(); let mut previous = iterator.next().unwrap(); for next in iterator { distance += *map.get(&[previous.clone(),next.clone()]).unwrap(); previous = next; } return distance; } /// Prints the given route to stdout. fn print_route(locations: &Vec<&str>) { let mut iterator = locations.iter(); print!("{}", iterator.next().unwrap()); for x in iterator { print!(" -> {}", x); } print!("\n"); }
calculate_distance
identifier_name
main.rs
// 0.096s on A4-5000 (1.5 GHz) extern crate permutohedron; use std::collections::HashMap; fn main()
let mut longest_route: Vec<&str> = Vec::new(); for permutation in permutohedron::Heap::new(&mut locations) { let distance = calculate_distance(&permutation, &routes); if distance < shortest_distance { shortest_route = permutation.clone(); shortest_distance = distance; } if distance > longest_distance { longest_route = permutation.clone(); longest_distance = distance; } } // Print the results print!("The shortest route is {} miles: ", shortest_distance); print_route(&shortest_route); print!("\nThe longest route is {} miles: ", longest_distance); print_route(&longest_route); } /// Calculates the distance that must be traveled for a given input route. fn calculate_distance(input: &Vec<&str>, map: &HashMap<[&str;2],usize>) -> usize { let mut distance: usize = 0; let mut iterator = input.iter(); let mut previous = iterator.next().unwrap(); for next in iterator { distance += *map.get(&[previous.clone(),next.clone()]).unwrap(); previous = next; } return distance; } /// Prints the given route to stdout. fn print_route(locations: &Vec<&str>) { let mut iterator = locations.iter(); print!("{}", iterator.next().unwrap()); for x in iterator { print!(" -> {}", x); } print!("\n"); }
{ let input = include_str!("input.txt"); // Obtain locations and route possibilities let mut locations: Vec<&str> = Vec::new(); let mut routes: HashMap<[&str;2], usize> = HashMap::new(); for line in input.lines() { let words = line.split_whitespace().collect::<Vec<&str>>(); let (first, second) = ([words[0], words[2]], [words[2], words[0]]); let distance = words[4].parse::<usize>().unwrap(); routes.insert(first, distance); routes.insert(second, distance); if !locations.contains(&words[0]) { locations.push(words[0]); } if !locations.contains(&words[2]) { locations.push(words[2]); } } // Calculate the shortest and longest distances/routes let mut shortest_distance: usize = 65535; let mut shortest_route: Vec<&str> = Vec::new(); let mut longest_distance: usize = 0;
identifier_body
main.rs
// 0.096s on A4-5000 (1.5 GHz) extern crate permutohedron; use std::collections::HashMap; fn main() { let input = include_str!("input.txt"); // Obtain locations and route possibilities let mut locations: Vec<&str> = Vec::new(); let mut routes: HashMap<[&str;2], usize> = HashMap::new(); for line in input.lines() { let words = line.split_whitespace().collect::<Vec<&str>>(); let (first, second) = ([words[0], words[2]], [words[2], words[0]]); let distance = words[4].parse::<usize>().unwrap(); routes.insert(first, distance); routes.insert(second, distance); if !locations.contains(&words[0]) { locations.push(words[0]); } if !locations.contains(&words[2]) { locations.push(words[2]); } } // Calculate the shortest and longest distances/routes let mut shortest_distance: usize = 65535; let mut shortest_route: Vec<&str> = Vec::new(); let mut longest_distance: usize = 0; let mut longest_route: Vec<&str> = Vec::new(); for permutation in permutohedron::Heap::new(&mut locations) { let distance = calculate_distance(&permutation, &routes); if distance < shortest_distance { shortest_route = permutation.clone(); shortest_distance = distance; } if distance > longest_distance { longest_route = permutation.clone(); longest_distance = distance; } } // Print the results print!("The shortest route is {} miles: ", shortest_distance); print_route(&shortest_route); print!("\nThe longest route is {} miles: ", longest_distance); print_route(&longest_route); } /// Calculates the distance that must be traveled for a given input route. fn calculate_distance(input: &Vec<&str>, map: &HashMap<[&str;2],usize>) -> usize { let mut distance: usize = 0; let mut iterator = input.iter(); let mut previous = iterator.next().unwrap(); for next in iterator { distance += *map.get(&[previous.clone(),next.clone()]).unwrap(); previous = next; } return distance; } /// Prints the given route to stdout. fn print_route(locations: &Vec<&str>) { let mut iterator = locations.iter(); print!("{}", iterator.next().unwrap());
}
for x in iterator { print!(" -> {}", x); } print!("\n");
random_line_split
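calculate_distance above walks consecutive pairs of the permutation and sums the looked-up leg distances, so a three-stop route costs exactly two legs. A self-contained check of that behaviour follows; the function body is copied from the snippet, while the place names and distances are example data only.

use std::collections::HashMap;

// Copied from the snippet above: sum the looked-up distance of each
// consecutive pair of stops in the route.
fn calculate_distance(input: &Vec<&str>, map: &HashMap<[&str;2],usize>) -> usize {
    let mut distance: usize = 0;
    let mut iterator = input.iter();
    let mut previous = iterator.next().unwrap();
    for next in iterator {
        distance += *map.get(&[previous.clone(),next.clone()]).unwrap();
        previous = next;
    }
    return distance;
}

fn main() {
    // Example data only; both directions of each leg are inserted, as in main().
    let mut routes: HashMap<[&str; 2], usize> = HashMap::new();
    for &(a, b, d) in &[("London", "Dublin", 464), ("Dublin", "Belfast", 141)] {
        routes.insert([a, b], d);
        routes.insert([b, a], d);
    }
    let route = vec!["London", "Dublin", "Belfast"];
    // Two legs: 464 + 141 = 605.
    assert_eq!(calculate_distance(&route, &routes), 605);
}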
argument-passing.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast struct X { x: int } fn f1(a: &mut X, b: &mut int, c: int) -> int { let r = a.x + *b + c; a.x = 0; *b = 10; return r; } fn f2(a: int, f: &fn(int)) -> int { f(1); return a; } pub fn
() { let mut a = X {x: 1}; let mut b = 2; let mut c = 3; assert_eq!(f1(&mut a, &mut b, c), 6); assert_eq!(a.x, 0); assert_eq!(b, 10); assert_eq!(f2(a.x, |x| a.x = 50), 0); assert_eq!(a.x, 50); }
main
identifier_name
argument-passing.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast struct X { x: int } fn f1(a: &mut X, b: &mut int, c: int) -> int { let r = a.x + *b + c; a.x = 0; *b = 10; return r; } fn f2(a: int, f: &fn(int)) -> int { f(1); return a; } pub fn main() { let mut a = X {x: 1}; let mut b = 2; let mut c = 3; assert_eq!(f1(&mut a, &mut b, c), 6); assert_eq!(a.x, 0); assert_eq!(b, 10); assert_eq!(f2(a.x, |x| a.x = 50), 0); assert_eq!(a.x, 50); }
// http://rust-lang.org/COPYRIGHT. //
random_line_split
argument-passing.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast struct X { x: int } fn f1(a: &mut X, b: &mut int, c: int) -> int { let r = a.x + *b + c; a.x = 0; *b = 10; return r; } fn f2(a: int, f: &fn(int)) -> int
pub fn main() { let mut a = X {x: 1}; let mut b = 2; let mut c = 3; assert_eq!(f1(&mut a, &mut b, c), 6); assert_eq!(a.x, 0); assert_eq!(b, 10); assert_eq!(f2(a.x, |x| a.x = 50), 0); assert_eq!(a.x, 50); }
{ f(1); return a; }
identifier_body
test-commands-transfer.rs
mod helpers; use crate::helpers::prelude::*; #[test] fn test_command_transfer()
assert_eq!( 0, main_with_args( &["rooster", "transfer", "youtube", "[email protected]"], &mut CursorInputOutput::new("", "xxxx\n"), &rooster_file ) ); let mut io = CursorInputOutput::new("", "xxxx\n"); assert_eq!( 0, main_with_args(&["rooster", "list"], &mut io, &rooster_file) ); let output_as_vecu8 = io.stdout_cursor.into_inner(); let output_as_string = String::from_utf8_lossy(output_as_vecu8.as_slice()); assert!(output_as_string.contains("Youtube")); assert!(!output_as_string.contains("[email protected]")); assert!(output_as_string.contains("[email protected]")); }
{ let rooster_file = tempfile(); assert_eq!( 0, main_with_args( &["rooster", "init", "--force-for-tests"], &mut CursorInputOutput::new("", "\nxxxx\n"), &rooster_file ) ); assert_eq!( 0, main_with_args( &["rooster", "generate", "-s", "Youtube", "[email protected]"], &mut CursorInputOutput::new("", "xxxx\n"), &rooster_file ) );
identifier_body
test-commands-transfer.rs
mod helpers; use crate::helpers::prelude::*; #[test] fn
() { let rooster_file = tempfile(); assert_eq!( 0, main_with_args( &["rooster", "init", "--force-for-tests"], &mut CursorInputOutput::new("", "\nxxxx\n"), &rooster_file ) ); assert_eq!( 0, main_with_args( &["rooster", "generate", "-s", "Youtube", "[email protected]"], &mut CursorInputOutput::new("", "xxxx\n"), &rooster_file ) ); assert_eq!( 0, main_with_args( &["rooster", "transfer", "youtube", "[email protected]"], &mut CursorInputOutput::new("", "xxxx\n"), &rooster_file ) ); let mut io = CursorInputOutput::new("", "xxxx\n"); assert_eq!( 0, main_with_args(&["rooster", "list"], &mut io, &rooster_file) ); let output_as_vecu8 = io.stdout_cursor.into_inner(); let output_as_string = String::from_utf8_lossy(output_as_vecu8.as_slice()); assert!(output_as_string.contains("Youtube")); assert!(!output_as_string.contains("[email protected]")); assert!(output_as_string.contains("[email protected]")); }
test_command_transfer
identifier_name
test-commands-transfer.rs
mod helpers; use crate::helpers::prelude::*; #[test] fn test_command_transfer() { let rooster_file = tempfile(); assert_eq!( 0, main_with_args( &["rooster", "init", "--force-for-tests"], &mut CursorInputOutput::new("", "\nxxxx\n"), &rooster_file ) ); assert_eq!( 0, main_with_args( &["rooster", "generate", "-s", "Youtube", "[email protected]"], &mut CursorInputOutput::new("", "xxxx\n"), &rooster_file ) );
&["rooster", "transfer", "youtube", "[email protected]"], &mut CursorInputOutput::new("", "xxxx\n"), &rooster_file ) ); let mut io = CursorInputOutput::new("", "xxxx\n"); assert_eq!( 0, main_with_args(&["rooster", "list"], &mut io, &rooster_file) ); let output_as_vecu8 = io.stdout_cursor.into_inner(); let output_as_string = String::from_utf8_lossy(output_as_vecu8.as_slice()); assert!(output_as_string.contains("Youtube")); assert!(!output_as_string.contains("[email protected]")); assert!(output_as_string.contains("[email protected]")); }
assert_eq!( 0, main_with_args(
random_line_split
lib.rs
extern crate sudoku; use sudoku::Sudoku; fn read_sudokus(sudokus_str: &str) -> Vec<Sudoku> { let sudokus_str = sudokus_str.replace("\r\n", "\n"); let mut sudokus = vec![]; for i in 0.. { // 9 lines with 9 cells each + 1 linefeed character per line // + 1 LF char between each sudoku // 9*(9+1) + 1 let rg = 0+i*91..90+i*91; if rg.end > sudokus_str.len() { break } sudokus.push( Sudoku::from_str( &sudokus_str[rg] ).expect("Benchmark sudokus file contains sudoku in incorrect format") ) } sudokus } #[test] fn solve_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8 47___1___"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] fn solve_2() { let sudoku_str = "\ 7__|4__|__2 21_|3_5|46_ __9|_28|__1 ___|542|3__ ___|___|___ __5|817|___ 5__|73_|9__ _63|2_4|_17 8__|__9|__3"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] #[should_panic] fn wrong_format_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8"; Sudoku::from_str(sudoku_str).unwrap(); } #[test] fn correct_solution_easy_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/easy_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_easy_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_medium_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/medium_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_medium_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else
} } #[test] fn correct_solution_hard_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/hard_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_hard_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } }
{ panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); }
conditional_block
lib.rs
extern crate sudoku; use sudoku::Sudoku; fn
(sudokus_str: &str) -> Vec<Sudoku> { let sudokus_str = sudokus_str.replace("\r\n", "\n"); let mut sudokus = vec![]; for i in 0.. { // 9 lines with 9 cells each + 1 linefeed character per line // + 1 LF char between each sudoku // 9*(9+1) + 1 let rg = 0+i*91..90+i*91; if rg.end > sudokus_str.len() { break } sudokus.push( Sudoku::from_str( &sudokus_str[rg] ).expect("Benchmark sudokus file contains sudoku in incorrect format") ) } sudokus } #[test] fn solve_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8 47___1___"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] fn solve_2() { let sudoku_str = "\ 7__|4__|__2 21_|3_5|46_ __9|_28|__1 ___|542|3__ ___|___|___ __5|817|___ 5__|73_|9__ _63|2_4|_17 8__|__9|__3"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] #[should_panic] fn wrong_format_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8"; Sudoku::from_str(sudoku_str).unwrap(); } #[test] fn correct_solution_easy_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/easy_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_easy_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_medium_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/medium_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_medium_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_hard_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/hard_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_hard_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } }
read_sudokus
identifier_name
lib.rs
extern crate sudoku; use sudoku::Sudoku; fn read_sudokus(sudokus_str: &str) -> Vec<Sudoku> { let sudokus_str = sudokus_str.replace("\r\n", "\n"); let mut sudokus = vec![]; for i in 0.. { // 9 lines with 9 cells each + 1 linefeed character per line // + 1 LF char between each sudoku // 9*(9+1) + 1 let rg = 0+i*91..90+i*91; if rg.end > sudokus_str.len() { break } sudokus.push( Sudoku::from_str( &sudokus_str[rg] ).expect("Benchmark sudokus file contains sudoku in incorrect format") ) }
#[test] fn solve_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8 47___1___"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] fn solve_2() { let sudoku_str = "\ 7__|4__|__2 21_|3_5|46_ __9|_28|__1 ___|542|3__ ___|___|___ __5|817|___ 5__|73_|9__ _63|2_4|_17 8__|__9|__3"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] #[should_panic] fn wrong_format_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8"; Sudoku::from_str(sudoku_str).unwrap(); } #[test] fn correct_solution_easy_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/easy_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_easy_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_medium_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/medium_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_medium_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_hard_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/hard_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_hard_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } }
sudokus }
random_line_split
lib.rs
extern crate sudoku; use sudoku::Sudoku; fn read_sudokus(sudokus_str: &str) -> Vec<Sudoku> { let sudokus_str = sudokus_str.replace("\r\n", "\n"); let mut sudokus = vec![]; for i in 0.. { // 9 lines with 9 cells each + 1 linefeed character per line // + 1 LF char between each sudoku // 9*(9+1) + 1 let rg = 0+i*91..90+i*91; if rg.end > sudokus_str.len() { break } sudokus.push( Sudoku::from_str( &sudokus_str[rg] ).expect("Benchmark sudokus file contains sudoku in incorrect format") ) } sudokus } #[test] fn solve_1() { let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8 47___1___"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] fn solve_2() { let sudoku_str = "\ 7__|4__|__2 21_|3_5|46_ __9|_28|__1 ___|542|3__ ___|___|___ __5|817|___ 5__|73_|9__ _63|2_4|_17 8__|__9|__3"; let mut sudoku = Sudoku::from_str(sudoku_str).unwrap(); sudoku.solve(); println!("{}", sudoku); } #[test] #[should_panic] fn wrong_format_1()
#[test] fn correct_solution_easy_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/easy_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_easy_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_medium_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/medium_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_medium_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } } #[test] fn correct_solution_hard_sudokus() { let sudokus = read_sudokus( include_str!("../sudokus/hard_sudokus.txt") ); let solved_sudokus = read_sudokus( include_str!("../sudokus/solved_hard_sudokus.txt") ); for (i, (sudoku, solved_sudoku)) in sudokus.into_iter().zip(solved_sudokus).enumerate() { if let Some(solution) = sudoku.clone().solve_unique() { assert_eq!( solved_sudoku, solution); } else { panic!("Found multiple solutions to sudoku with unique solution or none at all for {}th sudoku:\n{}", i, sudoku); } } }
{ let sudoku_str = "___2___63 3____54_1 __1__398_ _______9_ ___538___ _3_______ _263__5__ 5_37____8"; Sudoku::from_str(sudoku_str).unwrap(); }
identifier_body
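read_sudokus in the lib.rs records above relies on a fixed layout: 9 lines of 9 cells plus a newline per line (90 bytes) and one blank line between sudokus, hence the 9*(9+1) + 1 = 91 byte stride and the 0+i*91..90+i*91 slice. A small standalone check of that arithmetic using fake grids follows (string handling only, no sudoku crate involved).

// Stride check for the 91-byte layout assumed by read_sudokus.
fn main() {
    // Build a fake 9x9 grid: 9 lines of 9 identical digits, newline-terminated.
    let grid = |c: char| {
        let line: String = std::iter::repeat(c).take(9).collect();
        (line + "\n").repeat(9)
    };
    // Two grids separated by one blank line, as in the benchmark files.
    let sudokus_str = format!("{}\n{}", grid('1'), grid('2'));
    assert_eq!(sudokus_str.len(), 181); // 90 + 1 + 90

    for i in 0..2usize {
        // Same slice the snippet takes: 0+i*91 .. 90+i*91.
        let block = &sudokus_str[i * 91..90 + i * 91];
        assert_eq!(block.len(), 90);
        assert_eq!(block.lines().count(), 9);
        assert!(block.lines().all(|line| line.len() == 9));
    }
}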
lib.rs
// Copyright 2015-2017 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version.
// You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. #[cfg(feature = "with-syntex")] include!(concat!(env!("OUT_DIR"), "/lib.rs")); #[cfg(not(feature = "with-syntex"))] include!("lib.rs.in");
// Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details.
random_line_split
ops.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ //! DAG and Id operations (mostly traits) use crate::clone::CloneData; use crate::default_impl; use crate::errors::NotFoundError; use crate::id::Group; use crate::id::Id; use crate::id::VertexName; use crate::locked::Locked; use crate::namedag::MemNameDag; use crate::nameset::id_lazy::IdLazySet; use crate::nameset::id_static::IdStaticSet; use crate::nameset::NameSet; use crate::nameset::SyncNameSetQuery; use crate::IdSet; use crate::Result; use crate::VerLink; use std::sync::Arc; /// DAG related read-only algorithms. #[async_trait::async_trait] pub trait DagAlgorithm: Send + Sync { /// Sort a `NameSet` topologically. async fn sort(&self, set: &NameSet) -> Result<NameSet>; /// Re-create the graph so it looks better when rendered. async fn beautify(&self, main_branch: Option<NameSet>) -> Result<MemNameDag> { default_impl::beautify(self, main_branch).await } /// Get ordered parent vertexes. async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; /// Returns a set that covers all vertexes tracked by this DAG. async fn all(&self) -> Result<NameSet>; /// Calculates all ancestors reachable from any name from the given set. async fn ancestors(&self, set: NameSet) -> Result<NameSet>; /// Calculates parents of the given set. /// /// Note: Parent order is not preserved. Use [`NameDag::parent_names`] /// to preserve order. async fn parents(&self, set: NameSet) -> Result<NameSet> { default_impl::parents(self, set).await } /// Calculates the n-th first ancestor. async fn first_ancestor_nth(&self, name: VertexName, n: u64) -> Result<Option<VertexName>> { default_impl::first_ancestor_nth(self, name, n).await } /// Calculates ancestors but only follows the first parent. async fn first_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::first_ancestors(self, set).await } /// Calculates heads of the given set. async fn heads(&self, set: NameSet) -> Result<NameSet> { default_impl::heads(self, set).await } /// Calculates children of the given set. async fn children(&self, set: NameSet) -> Result<NameSet>; /// Calculates roots of the given set. async fn roots(&self, set: NameSet) -> Result<NameSet> { default_impl::roots(self, set).await } /// Calculates merges of the selected set (vertexes with >=2 parents). async fn merges(&self, set: NameSet) -> Result<NameSet> { default_impl::merges(self, set).await } /// Calculates one "greatest common ancestor" of the given set. /// /// If there are no common ancestors, return None. /// If there are multiple greatest common ancestors, pick one arbitrarily. /// Use `gca_all` to get all of them. async fn gca_one(&self, set: NameSet) -> Result<Option<VertexName>> { default_impl::gca_one(self, set).await } /// Calculates all "greatest common ancestor"s of the given set. /// `gca_one` is faster if an arbitrary answer is ok. async fn gca_all(&self, set: NameSet) -> Result<NameSet> { default_impl::gca_all(self, set).await } /// Calculates all common ancestors of the given set. async fn common_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::common_ancestors(self, set).await } /// Tests if `ancestor` is an ancestor of `descendant`. async fn is_ancestor(&self, ancestor: VertexName, descendant: VertexName) -> Result<bool> { default_impl::is_ancestor(self, ancestor, descendant).await } /// Calculates "heads" of the ancestors of the given set. 
That is, /// Find Y, which is the smallest subset of set X, where `ancestors(Y)` is /// `ancestors(X)`. /// /// This is faster than calculating `heads(ancestors(set))` in certain /// implementations like segmented changelog. /// /// This is different from `heads`. In case set contains X and Y, and Y is /// an ancestor of X, but not the immediate ancestor, `heads` will include /// Y while this function won't. async fn heads_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::heads_ancestors(self, set).await } /// Calculates the "dag range" - vertexes reachable from both sides. async fn range(&self, roots: NameSet, heads: NameSet) -> Result<NameSet>; /// Calculates `ancestors(reachable) - ancestors(unreachable)`. async fn only(&self, reachable: NameSet, unreachable: NameSet) -> Result<NameSet> { default_impl::only(self, reachable, unreachable).await } /// Calculates `ancestors(reachable) - ancestors(unreachable)`, and /// `ancestors(unreachable)`. /// This might be faster in some implementations than calculating `only` and /// `ancestors` separately. async fn only_both( &self, reachable: NameSet, unreachable: NameSet, ) -> Result<(NameSet, NameSet)> { default_impl::only_both(self, reachable, unreachable).await } /// Calculates the descendants of the given set. async fn descendants(&self, set: NameSet) -> Result<NameSet>; /// Calculates `roots` that are reachable from `heads` without going /// through other `roots`. For example, given the following graph: /// /// ```plain,ignore /// F /// |\ /// C E /// | | /// B D /// |/ /// A /// ``` /// /// `reachable_roots(roots=[A, B, C], heads=[F])` returns `[A, C]`. /// `B` is not included because it cannot be reached without going /// through another root `C` from `F`. `A` is included because it /// can be reached via `F -> E -> D -> A` that does not go through /// other roots. /// /// The can be calculated as
/// Actual implementation might have faster paths. /// /// The `roots & ancestors(heads)` portion filters out bogus roots for /// compatibility, if the callsite does not provide bogus roots, it /// could be simplified to just `roots`. async fn reachable_roots(&self, roots: NameSet, heads: NameSet) -> Result<NameSet> { default_impl::reachable_roots(self, roots, heads).await } /// Get a snapshot of the current graph that can operate separately. /// /// This makes it easier to fight with borrowck. fn dag_snapshot(&self) -> Result<Arc<dyn DagAlgorithm + Send + Sync>>; /// Identity of the dag. fn dag_id(&self) -> &str; /// Version of the dag. Useful to figure out compatibility between two dags. fn dag_version(&self) -> &VerLink; } #[async_trait::async_trait] pub trait Parents: Send + Sync { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; } #[async_trait::async_trait] impl Parents for Arc<dyn DagAlgorithm + Send + Sync> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(self, name).await } } #[async_trait::async_trait] impl Parents for &(dyn DagAlgorithm + Send + Sync) { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(*self, name).await } } #[async_trait::async_trait] impl<'a> Parents for Box<dyn Fn(VertexName) -> Result<Vec<VertexName>> + Send + Sync + 'a> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { (self)(name) } } #[async_trait::async_trait] impl Parents for std::collections::HashMap<VertexName, Vec<VertexName>> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { match self.get(&name) { Some(v) => Ok(v.clone()), None => name.not_found(), } } } /// Add vertexes recursively to the DAG. #[async_trait::async_trait] pub trait DagAddHeads { /// Add vertexes and their ancestors to the DAG. This does not persistent /// changes to disk. async fn add_heads(&mut self, parents: &dyn Parents, heads: &[VertexName]) -> Result<()>; } /// Import a generated `CloneData` object into an empty DAG. #[async_trait::async_trait] pub trait DagImportCloneData { /// Updates the DAG using a `CloneData` object. async fn import_clone_data(&mut self, clone_data: CloneData<VertexName>) -> Result<()>; } #[async_trait::async_trait] pub trait DagExportCloneData { /// Export `CloneData` for vertexes in the master group. async fn export_clone_data(&self) -> Result<CloneData<VertexName>>; } /// Persistent the DAG on disk. #[async_trait::async_trait] pub trait DagPersistent { /// Write in-memory DAG to disk. This might also pick up changes to /// the DAG by other processes. async fn flush(&mut self, master_heads: &[VertexName]) -> Result<()>; /// Write in-memory IdMap that caches Id <-> Vertex translation from /// remote service to disk. async fn flush_cached_idmap(&self) -> Result<()>; /// A faster path for add_heads, followed by flush. async fn add_heads_and_flush( &mut self, parent_names_func: &dyn Parents, master_names: &[VertexName], non_master_names: &[VertexName], ) -> Result<()>; /// Import from another (potentially large) DAG. Write to disk immediately. 
async fn import_and_flush( &mut self, dag: &dyn DagAlgorithm, master_heads: NameSet, ) -> Result<()> { let heads = dag.heads(dag.all().await?).await?; let non_master_heads = heads - master_heads.clone(); let master_heads: Vec<VertexName> = master_heads.iter()?.collect::<Result<Vec<_>>>()?; let non_master_heads: Vec<VertexName> = non_master_heads.iter()?.collect::<Result<Vec<_>>>()?; self.add_heads_and_flush(&dag.dag_snapshot()?, &master_heads, &non_master_heads) .await } } /// Import ASCII graph to DAG. pub trait ImportAscii { /// Import vertexes described in an ASCII graph. /// `heads` optionally specifies the order of heads to insert. /// Useful for testing. Panic if the input is invalid. fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()>; /// Import vertexes described in an ASCII graph. fn import_ascii(&mut self, text: &str) -> Result<()> { self.import_ascii_with_heads(text, <Option<&[&str]>>::None) } } /// Lookup vertexes by prefixes. #[async_trait::async_trait] pub trait PrefixLookup { /// Lookup vertexes by hex prefix. async fn vertexes_by_hex_prefix( &self, hex_prefix: &[u8], limit: usize, ) -> Result<Vec<VertexName>>; } /// Convert between `Vertex` and `Id`. #[async_trait::async_trait] pub trait IdConvert: PrefixLookup + Sync { async fn vertex_id(&self, name: VertexName) -> Result<Id>; async fn vertex_id_with_max_group( &self, name: &VertexName, max_group: Group, ) -> Result<Option<Id>>; async fn vertex_name(&self, id: Id) -> Result<VertexName>; async fn contains_vertex_name(&self, name: &VertexName) -> Result<bool>; /// Test if an `id` is present locally. Do not trigger remote fetching. async fn contains_vertex_id_locally(&self, id: &[Id]) -> Result<Vec<bool>>; /// Test if an `name` is present locally. Do not trigger remote fetching. async fn contains_vertex_name_locally(&self, name: &[VertexName]) -> Result<Vec<bool>>; async fn vertex_id_optional(&self, name: &VertexName) -> Result<Option<Id>> { self.vertex_id_with_max_group(name, Group::NON_MASTER).await } /// Convert [`Id`]s to [`VertexName`]s in batch. async fn vertex_name_batch(&self, ids: &[Id]) -> Result<Vec<Result<VertexName>>> { // This is not an efficient implementation in an async context. let mut names = Vec::with_capacity(ids.len()); for &id in ids { names.push(self.vertex_name(id).await); } Ok(names) } /// Convert [`VertexName`]s to [`Id`]s in batch. async fn vertex_id_batch(&self, names: &[VertexName]) -> Result<Vec<Result<Id>>> { // This is not an efficient implementation in an async context. let mut ids = Vec::with_capacity(names.len()); for name in names { ids.push(self.vertex_id(name.clone()).await); } Ok(ids) } /// Identity of the map. fn map_id(&self) -> &str; /// Version of the map. Useful to figure out compatibility between two maps. 
fn map_version(&self) -> &VerLink; } impl<T> ImportAscii for T where T: DagAddHeads, { fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()> { let parents = drawdag::parse(&text); let heads: Vec<_> = match heads { Some(heads) => heads .iter() .map(|s| VertexName::copy_from(s.as_ref().as_bytes())) .collect(), None => { let mut heads: Vec<_> = parents .keys() .map(|s| VertexName::copy_from(s.as_bytes())) .collect(); heads.sort(); heads } }; let v = |s: String| VertexName::copy_from(s.as_bytes()); let parents: std::collections::HashMap<VertexName, Vec<VertexName>> = parents .into_iter() .map(|(k, vs)| (v(k), vs.into_iter().map(v).collect())) .collect(); nonblocking::non_blocking_result(self.add_heads(&parents, &heads[..]))?; Ok(()) } } #[async_trait::async_trait] pub trait ToIdSet { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet>; } pub trait ToSet { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet>; } pub trait IdMapSnapshot { /// Get a snapshot of IdMap. fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>>; } /// Describes how to persist state to disk. pub trait Persist { /// Return type of `lock()`. type Lock: Send + Sync; /// Obtain an exclusive lock for writing. /// This should prevent other writers. fn lock(&mut self) -> Result<Self::Lock>; /// Reload from the source of truth. Drop pending changes. /// /// This requires a lock and is usually called before `persist()`. fn reload(&mut self, _lock: &Self::Lock) -> Result<()>; /// Write pending changes to the source of truth. /// /// This requires a lock. fn persist(&mut self, _lock: &Self::Lock) -> Result<()>; /// Return a [`Locked`] instance that provides race-free filesytem read and /// write access by taking an exclusive lock. fn prepare_filesystem_sync(&mut self) -> Result<Locked<Self>> where Self: Sized, { let lock = self.lock()?; self.reload(&lock)?; Ok(Locked { inner: self, lock }) } } /// Address that can be used to open things. /// /// The address type decides the return type of `open`. pub trait Open: Clone { type OpenTarget; fn open(&self) -> Result<Self::OpenTarget>; } /// Fallible clone. pub trait TryClone { fn try_clone(&self) -> Result<Self> where Self: Sized; } impl<T: Clone> TryClone for T { fn try_clone(&self) -> Result<Self> { Ok(self.clone()) } } #[async_trait::async_trait] impl<T: IdConvert + IdMapSnapshot> ToIdSet for T { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet> { let version = set.hints().id_map_version(); // Fast path: extract IdSet from IdStaticSet. if let Some(set) = set.as_any().downcast_ref::<IdStaticSet>() { if None < version && version <= Some(self.map_version()) { return Ok(set.spans.clone()); } } // Convert IdLazySet to IdStaticSet. Bypass hash lookups. if let Some(set) = set.as_any().downcast_ref::<IdLazySet>() { if None < version && version <= Some(self.map_version()) { let set: IdStaticSet = set.to_static()?; return Ok(set.spans); } } // Slow path: iterate through the set and convert it to a non-lazy // IdSet. Does not bypass hash lookups. let mut spans = IdSet::empty(); for name in set.iter()? 
{ let name = name?; let id = self.vertex_id(name).await?; spans.push(id); } Ok(spans) } } impl IdMapSnapshot for Arc<dyn IdConvert + Send + Sync> { fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>> { Ok(self.clone()) } } impl<T: IdMapSnapshot + DagAlgorithm> ToSet for T { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet> { NameSet::from_spans_dag(set.clone(), self) } }
/// `roots & (heads | parents(only(heads, roots & ancestors(heads))))`.
random_line_split
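The excised span in the record above is the single doc-comment line stating the set-algebra formula for `reachable_roots`. Purely as a reading aid, the sketch below spells that formula out using the `DagAlgorithm` methods from the record; it assumes `NameSet` supports the `&` and `|` operators the doc notation implies, and it is not the crate's actual `default_impl::reachable_roots`.

```rust
// Illustrative expansion of the documented formula
//   roots & (heads | parents(only(heads, roots & ancestors(heads))))
// written against the `DagAlgorithm` trait above. Assumes `NameSet` has
// `BitAnd`/`BitOr` impls matching the doc notation.
async fn reachable_roots_formula(
    dag: &(dyn DagAlgorithm + Send + Sync),
    roots: NameSet,
    heads: NameSet,
) -> Result<NameSet> {
    // `roots & ancestors(heads)` filters out roots that no head can reach.
    let filtered_roots = roots.clone() & dag.ancestors(heads.clone()).await?;
    // Region reachable from `heads` without passing through those roots.
    let only = dag.only(heads.clone(), filtered_roots).await?;
    // A root qualifies if it is a head itself or a parent of that region.
    Ok(roots & (heads | dag.parents(only).await?))
}
```

On the ASCII example in the record, this yields `[A, C]` for `roots=[A, B, C]` and `heads=[F]`, matching the doc comment.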
ops.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ //! DAG and Id operations (mostly traits) use crate::clone::CloneData; use crate::default_impl; use crate::errors::NotFoundError; use crate::id::Group; use crate::id::Id; use crate::id::VertexName; use crate::locked::Locked; use crate::namedag::MemNameDag; use crate::nameset::id_lazy::IdLazySet; use crate::nameset::id_static::IdStaticSet; use crate::nameset::NameSet; use crate::nameset::SyncNameSetQuery; use crate::IdSet; use crate::Result; use crate::VerLink; use std::sync::Arc; /// DAG related read-only algorithms. #[async_trait::async_trait] pub trait DagAlgorithm: Send + Sync { /// Sort a `NameSet` topologically. async fn sort(&self, set: &NameSet) -> Result<NameSet>; /// Re-create the graph so it looks better when rendered. async fn beautify(&self, main_branch: Option<NameSet>) -> Result<MemNameDag> { default_impl::beautify(self, main_branch).await } /// Get ordered parent vertexes. async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; /// Returns a set that covers all vertexes tracked by this DAG. async fn all(&self) -> Result<NameSet>; /// Calculates all ancestors reachable from any name from the given set. async fn ancestors(&self, set: NameSet) -> Result<NameSet>; /// Calculates parents of the given set. /// /// Note: Parent order is not preserved. Use [`NameDag::parent_names`] /// to preserve order. async fn parents(&self, set: NameSet) -> Result<NameSet> { default_impl::parents(self, set).await } /// Calculates the n-th first ancestor. async fn first_ancestor_nth(&self, name: VertexName, n: u64) -> Result<Option<VertexName>> { default_impl::first_ancestor_nth(self, name, n).await } /// Calculates ancestors but only follows the first parent. async fn first_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::first_ancestors(self, set).await } /// Calculates heads of the given set. async fn heads(&self, set: NameSet) -> Result<NameSet> { default_impl::heads(self, set).await } /// Calculates children of the given set. async fn children(&self, set: NameSet) -> Result<NameSet>; /// Calculates roots of the given set. async fn roots(&self, set: NameSet) -> Result<NameSet> { default_impl::roots(self, set).await } /// Calculates merges of the selected set (vertexes with >=2 parents). async fn merges(&self, set: NameSet) -> Result<NameSet> { default_impl::merges(self, set).await } /// Calculates one "greatest common ancestor" of the given set. /// /// If there are no common ancestors, return None. /// If there are multiple greatest common ancestors, pick one arbitrarily. /// Use `gca_all` to get all of them. async fn gca_one(&self, set: NameSet) -> Result<Option<VertexName>> { default_impl::gca_one(self, set).await } /// Calculates all "greatest common ancestor"s of the given set. /// `gca_one` is faster if an arbitrary answer is ok. async fn gca_all(&self, set: NameSet) -> Result<NameSet> { default_impl::gca_all(self, set).await } /// Calculates all common ancestors of the given set. async fn common_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::common_ancestors(self, set).await } /// Tests if `ancestor` is an ancestor of `descendant`. async fn is_ancestor(&self, ancestor: VertexName, descendant: VertexName) -> Result<bool> { default_impl::is_ancestor(self, ancestor, descendant).await } /// Calculates "heads" of the ancestors of the given set. 
That is, /// Find Y, which is the smallest subset of set X, where `ancestors(Y)` is /// `ancestors(X)`. /// /// This is faster than calculating `heads(ancestors(set))` in certain /// implementations like segmented changelog. /// /// This is different from `heads`. In case set contains X and Y, and Y is /// an ancestor of X, but not the immediate ancestor, `heads` will include /// Y while this function won't. async fn heads_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::heads_ancestors(self, set).await } /// Calculates the "dag range" - vertexes reachable from both sides. async fn range(&self, roots: NameSet, heads: NameSet) -> Result<NameSet>; /// Calculates `ancestors(reachable) - ancestors(unreachable)`. async fn only(&self, reachable: NameSet, unreachable: NameSet) -> Result<NameSet> { default_impl::only(self, reachable, unreachable).await } /// Calculates `ancestors(reachable) - ancestors(unreachable)`, and /// `ancestors(unreachable)`. /// This might be faster in some implementations than calculating `only` and /// `ancestors` separately. async fn only_both( &self, reachable: NameSet, unreachable: NameSet, ) -> Result<(NameSet, NameSet)> { default_impl::only_both(self, reachable, unreachable).await } /// Calculates the descendants of the given set. async fn descendants(&self, set: NameSet) -> Result<NameSet>; /// Calculates `roots` that are reachable from `heads` without going /// through other `roots`. For example, given the following graph: /// /// ```plain,ignore /// F /// |\ /// C E /// | | /// B D /// |/ /// A /// ``` /// /// `reachable_roots(roots=[A, B, C], heads=[F])` returns `[A, C]`. /// `B` is not included because it cannot be reached without going /// through another root `C` from `F`. `A` is included because it /// can be reached via `F -> E -> D -> A` that does not go through /// other roots. /// /// The can be calculated as /// `roots & (heads | parents(only(heads, roots & ancestors(heads))))`. /// Actual implementation might have faster paths. /// /// The `roots & ancestors(heads)` portion filters out bogus roots for /// compatibility, if the callsite does not provide bogus roots, it /// could be simplified to just `roots`. async fn reachable_roots(&self, roots: NameSet, heads: NameSet) -> Result<NameSet> { default_impl::reachable_roots(self, roots, heads).await } /// Get a snapshot of the current graph that can operate separately. /// /// This makes it easier to fight with borrowck. fn dag_snapshot(&self) -> Result<Arc<dyn DagAlgorithm + Send + Sync>>; /// Identity of the dag. fn dag_id(&self) -> &str; /// Version of the dag. Useful to figure out compatibility between two dags. 
fn dag_version(&self) -> &VerLink; } #[async_trait::async_trait] pub trait Parents: Send + Sync { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; } #[async_trait::async_trait] impl Parents for Arc<dyn DagAlgorithm + Send + Sync> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(self, name).await } } #[async_trait::async_trait] impl Parents for &(dyn DagAlgorithm + Send + Sync) { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(*self, name).await } } #[async_trait::async_trait] impl<'a> Parents for Box<dyn Fn(VertexName) -> Result<Vec<VertexName>> + Send + Sync + 'a> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { (self)(name) } } #[async_trait::async_trait] impl Parents for std::collections::HashMap<VertexName, Vec<VertexName>> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { match self.get(&name) { Some(v) => Ok(v.clone()), None => name.not_found(), } } } /// Add vertexes recursively to the DAG. #[async_trait::async_trait] pub trait DagAddHeads { /// Add vertexes and their ancestors to the DAG. This does not persistent /// changes to disk. async fn add_heads(&mut self, parents: &dyn Parents, heads: &[VertexName]) -> Result<()>; } /// Import a generated `CloneData` object into an empty DAG. #[async_trait::async_trait] pub trait DagImportCloneData { /// Updates the DAG using a `CloneData` object. async fn import_clone_data(&mut self, clone_data: CloneData<VertexName>) -> Result<()>; } #[async_trait::async_trait] pub trait DagExportCloneData { /// Export `CloneData` for vertexes in the master group. async fn export_clone_data(&self) -> Result<CloneData<VertexName>>; } /// Persistent the DAG on disk. #[async_trait::async_trait] pub trait DagPersistent { /// Write in-memory DAG to disk. This might also pick up changes to /// the DAG by other processes. async fn flush(&mut self, master_heads: &[VertexName]) -> Result<()>; /// Write in-memory IdMap that caches Id <-> Vertex translation from /// remote service to disk. async fn flush_cached_idmap(&self) -> Result<()>; /// A faster path for add_heads, followed by flush. async fn add_heads_and_flush( &mut self, parent_names_func: &dyn Parents, master_names: &[VertexName], non_master_names: &[VertexName], ) -> Result<()>; /// Import from another (potentially large) DAG. Write to disk immediately. async fn import_and_flush( &mut self, dag: &dyn DagAlgorithm, master_heads: NameSet, ) -> Result<()> { let heads = dag.heads(dag.all().await?).await?; let non_master_heads = heads - master_heads.clone(); let master_heads: Vec<VertexName> = master_heads.iter()?.collect::<Result<Vec<_>>>()?; let non_master_heads: Vec<VertexName> = non_master_heads.iter()?.collect::<Result<Vec<_>>>()?; self.add_heads_and_flush(&dag.dag_snapshot()?, &master_heads, &non_master_heads) .await } } /// Import ASCII graph to DAG. pub trait ImportAscii { /// Import vertexes described in an ASCII graph. /// `heads` optionally specifies the order of heads to insert. /// Useful for testing. Panic if the input is invalid. fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()>; /// Import vertexes described in an ASCII graph. fn import_ascii(&mut self, text: &str) -> Result<()> { self.import_ascii_with_heads(text, <Option<&[&str]>>::None) } } /// Lookup vertexes by prefixes. 
#[async_trait::async_trait] pub trait PrefixLookup { /// Lookup vertexes by hex prefix. async fn vertexes_by_hex_prefix( &self, hex_prefix: &[u8], limit: usize, ) -> Result<Vec<VertexName>>; } /// Convert between `Vertex` and `Id`. #[async_trait::async_trait] pub trait IdConvert: PrefixLookup + Sync { async fn vertex_id(&self, name: VertexName) -> Result<Id>; async fn vertex_id_with_max_group( &self, name: &VertexName, max_group: Group, ) -> Result<Option<Id>>; async fn vertex_name(&self, id: Id) -> Result<VertexName>; async fn contains_vertex_name(&self, name: &VertexName) -> Result<bool>; /// Test if an `id` is present locally. Do not trigger remote fetching. async fn contains_vertex_id_locally(&self, id: &[Id]) -> Result<Vec<bool>>; /// Test if an `name` is present locally. Do not trigger remote fetching. async fn contains_vertex_name_locally(&self, name: &[VertexName]) -> Result<Vec<bool>>; async fn vertex_id_optional(&self, name: &VertexName) -> Result<Option<Id>> { self.vertex_id_with_max_group(name, Group::NON_MASTER).await } /// Convert [`Id`]s to [`VertexName`]s in batch. async fn vertex_name_batch(&self, ids: &[Id]) -> Result<Vec<Result<VertexName>>> { // This is not an efficient implementation in an async context. let mut names = Vec::with_capacity(ids.len()); for &id in ids { names.push(self.vertex_name(id).await); } Ok(names) } /// Convert [`VertexName`]s to [`Id`]s in batch. async fn vertex_id_batch(&self, names: &[VertexName]) -> Result<Vec<Result<Id>>> { // This is not an efficient implementation in an async context. let mut ids = Vec::with_capacity(names.len()); for name in names { ids.push(self.vertex_id(name.clone()).await); } Ok(ids) } /// Identity of the map. fn map_id(&self) -> &str; /// Version of the map. Useful to figure out compatibility between two maps. fn map_version(&self) -> &VerLink; } impl<T> ImportAscii for T where T: DagAddHeads, { fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()> { let parents = drawdag::parse(&text); let heads: Vec<_> = match heads { Some(heads) => heads .iter() .map(|s| VertexName::copy_from(s.as_ref().as_bytes())) .collect(), None => { let mut heads: Vec<_> = parents .keys() .map(|s| VertexName::copy_from(s.as_bytes())) .collect(); heads.sort(); heads } }; let v = |s: String| VertexName::copy_from(s.as_bytes()); let parents: std::collections::HashMap<VertexName, Vec<VertexName>> = parents .into_iter() .map(|(k, vs)| (v(k), vs.into_iter().map(v).collect())) .collect(); nonblocking::non_blocking_result(self.add_heads(&parents, &heads[..]))?; Ok(()) } } #[async_trait::async_trait] pub trait ToIdSet { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet>; } pub trait ToSet { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet>; } pub trait IdMapSnapshot { /// Get a snapshot of IdMap. fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>>; } /// Describes how to persist state to disk. pub trait Persist { /// Return type of `lock()`. type Lock: Send + Sync; /// Obtain an exclusive lock for writing. /// This should prevent other writers. fn lock(&mut self) -> Result<Self::Lock>; /// Reload from the source of truth. Drop pending changes. /// /// This requires a lock and is usually called before `persist()`. fn reload(&mut self, _lock: &Self::Lock) -> Result<()>; /// Write pending changes to the source of truth. /// /// This requires a lock. 
fn persist(&mut self, _lock: &Self::Lock) -> Result<()>; /// Return a [`Locked`] instance that provides race-free filesytem read and /// write access by taking an exclusive lock. fn prepare_filesystem_sync(&mut self) -> Result<Locked<Self>> where Self: Sized, { let lock = self.lock()?; self.reload(&lock)?; Ok(Locked { inner: self, lock }) } } /// Address that can be used to open things. /// /// The address type decides the return type of `open`. pub trait Open: Clone { type OpenTarget; fn open(&self) -> Result<Self::OpenTarget>; } /// Fallible clone. pub trait TryClone { fn try_clone(&self) -> Result<Self> where Self: Sized; } impl<T: Clone> TryClone for T { fn try_clone(&self) -> Result<Self> { Ok(self.clone()) } } #[async_trait::async_trait] impl<T: IdConvert + IdMapSnapshot> ToIdSet for T { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet> { let version = set.hints().id_map_version(); // Fast path: extract IdSet from IdStaticSet. if let Some(set) = set.as_any().downcast_ref::<IdStaticSet>()
// Convert IdLazySet to IdStaticSet. Bypass hash lookups. if let Some(set) = set.as_any().downcast_ref::<IdLazySet>() { if None < version && version <= Some(self.map_version()) { let set: IdStaticSet = set.to_static()?; return Ok(set.spans); } } // Slow path: iterate through the set and convert it to a non-lazy // IdSet. Does not bypass hash lookups. let mut spans = IdSet::empty(); for name in set.iter()? { let name = name?; let id = self.vertex_id(name).await?; spans.push(id); } Ok(spans) } } impl IdMapSnapshot for Arc<dyn IdConvert + Send + Sync> { fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>> { Ok(self.clone()) } } impl<T: IdMapSnapshot + DagAlgorithm> ToSet for T { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet> { NameSet::from_spans_dag(set.clone(), self) } }
{ if None < version && version <= Some(self.map_version()) { return Ok(set.spans.clone()); } }
conditional_block
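Here the excised span is the brace-delimited `if` block that completes the `IdStaticSet` fast path in `to_id_set`. Its guard, `None < version && version <= Some(self.map_version())`, leans on `Option`'s ordering, where `None` sorts before any `Some`. A tiny standalone illustration, using `u32` as a stand-in for the real version type:

```rust
// `Option` ordering as used by the fast-path guard in `to_id_set`:
// a missing version hint (`None`) must be rejected, and a present hint must
// not be newer than the current map version. `u32` stands in for the real
// version type here.
fn main() {
    let missing: Option<u32> = None;
    let hint = Some(3u32);
    let current = Some(5u32);

    // No hint: `None < None` is false, so the fast path is skipped.
    assert!(!(None < missing));
    // Hint present and not newer than the current version: fast path taken.
    assert!(None < hint && hint <= current);
}
```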
ops.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ //! DAG and Id operations (mostly traits) use crate::clone::CloneData; use crate::default_impl; use crate::errors::NotFoundError; use crate::id::Group; use crate::id::Id; use crate::id::VertexName; use crate::locked::Locked; use crate::namedag::MemNameDag; use crate::nameset::id_lazy::IdLazySet; use crate::nameset::id_static::IdStaticSet; use crate::nameset::NameSet; use crate::nameset::SyncNameSetQuery; use crate::IdSet; use crate::Result; use crate::VerLink; use std::sync::Arc; /// DAG related read-only algorithms. #[async_trait::async_trait] pub trait DagAlgorithm: Send + Sync { /// Sort a `NameSet` topologically. async fn sort(&self, set: &NameSet) -> Result<NameSet>; /// Re-create the graph so it looks better when rendered. async fn beautify(&self, main_branch: Option<NameSet>) -> Result<MemNameDag> { default_impl::beautify(self, main_branch).await } /// Get ordered parent vertexes. async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; /// Returns a set that covers all vertexes tracked by this DAG. async fn all(&self) -> Result<NameSet>; /// Calculates all ancestors reachable from any name from the given set. async fn ancestors(&self, set: NameSet) -> Result<NameSet>; /// Calculates parents of the given set. /// /// Note: Parent order is not preserved. Use [`NameDag::parent_names`] /// to preserve order. async fn parents(&self, set: NameSet) -> Result<NameSet> { default_impl::parents(self, set).await } /// Calculates the n-th first ancestor. async fn first_ancestor_nth(&self, name: VertexName, n: u64) -> Result<Option<VertexName>> { default_impl::first_ancestor_nth(self, name, n).await } /// Calculates ancestors but only follows the first parent. async fn first_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::first_ancestors(self, set).await } /// Calculates heads of the given set. async fn heads(&self, set: NameSet) -> Result<NameSet> { default_impl::heads(self, set).await } /// Calculates children of the given set. async fn children(&self, set: NameSet) -> Result<NameSet>; /// Calculates roots of the given set. async fn roots(&self, set: NameSet) -> Result<NameSet> { default_impl::roots(self, set).await } /// Calculates merges of the selected set (vertexes with >=2 parents). async fn
(&self, set: NameSet) -> Result<NameSet> { default_impl::merges(self, set).await } /// Calculates one "greatest common ancestor" of the given set. /// /// If there are no common ancestors, return None. /// If there are multiple greatest common ancestors, pick one arbitrarily. /// Use `gca_all` to get all of them. async fn gca_one(&self, set: NameSet) -> Result<Option<VertexName>> { default_impl::gca_one(self, set).await } /// Calculates all "greatest common ancestor"s of the given set. /// `gca_one` is faster if an arbitrary answer is ok. async fn gca_all(&self, set: NameSet) -> Result<NameSet> { default_impl::gca_all(self, set).await } /// Calculates all common ancestors of the given set. async fn common_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::common_ancestors(self, set).await } /// Tests if `ancestor` is an ancestor of `descendant`. async fn is_ancestor(&self, ancestor: VertexName, descendant: VertexName) -> Result<bool> { default_impl::is_ancestor(self, ancestor, descendant).await } /// Calculates "heads" of the ancestors of the given set. That is, /// Find Y, which is the smallest subset of set X, where `ancestors(Y)` is /// `ancestors(X)`. /// /// This is faster than calculating `heads(ancestors(set))` in certain /// implementations like segmented changelog. /// /// This is different from `heads`. In case set contains X and Y, and Y is /// an ancestor of X, but not the immediate ancestor, `heads` will include /// Y while this function won't. async fn heads_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::heads_ancestors(self, set).await } /// Calculates the "dag range" - vertexes reachable from both sides. async fn range(&self, roots: NameSet, heads: NameSet) -> Result<NameSet>; /// Calculates `ancestors(reachable) - ancestors(unreachable)`. async fn only(&self, reachable: NameSet, unreachable: NameSet) -> Result<NameSet> { default_impl::only(self, reachable, unreachable).await } /// Calculates `ancestors(reachable) - ancestors(unreachable)`, and /// `ancestors(unreachable)`. /// This might be faster in some implementations than calculating `only` and /// `ancestors` separately. async fn only_both( &self, reachable: NameSet, unreachable: NameSet, ) -> Result<(NameSet, NameSet)> { default_impl::only_both(self, reachable, unreachable).await } /// Calculates the descendants of the given set. async fn descendants(&self, set: NameSet) -> Result<NameSet>; /// Calculates `roots` that are reachable from `heads` without going /// through other `roots`. For example, given the following graph: /// /// ```plain,ignore /// F /// |\ /// C E /// | | /// B D /// |/ /// A /// ``` /// /// `reachable_roots(roots=[A, B, C], heads=[F])` returns `[A, C]`. /// `B` is not included because it cannot be reached without going /// through another root `C` from `F`. `A` is included because it /// can be reached via `F -> E -> D -> A` that does not go through /// other roots. /// /// The can be calculated as /// `roots & (heads | parents(only(heads, roots & ancestors(heads))))`. /// Actual implementation might have faster paths. /// /// The `roots & ancestors(heads)` portion filters out bogus roots for /// compatibility, if the callsite does not provide bogus roots, it /// could be simplified to just `roots`. async fn reachable_roots(&self, roots: NameSet, heads: NameSet) -> Result<NameSet> { default_impl::reachable_roots(self, roots, heads).await } /// Get a snapshot of the current graph that can operate separately. /// /// This makes it easier to fight with borrowck. 
fn dag_snapshot(&self) -> Result<Arc<dyn DagAlgorithm + Send + Sync>>; /// Identity of the dag. fn dag_id(&self) -> &str; /// Version of the dag. Useful to figure out compatibility between two dags. fn dag_version(&self) -> &VerLink; } #[async_trait::async_trait] pub trait Parents: Send + Sync { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; } #[async_trait::async_trait] impl Parents for Arc<dyn DagAlgorithm + Send + Sync> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(self, name).await } } #[async_trait::async_trait] impl Parents for &(dyn DagAlgorithm + Send + Sync) { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(*self, name).await } } #[async_trait::async_trait] impl<'a> Parents for Box<dyn Fn(VertexName) -> Result<Vec<VertexName>> + Send + Sync + 'a> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { (self)(name) } } #[async_trait::async_trait] impl Parents for std::collections::HashMap<VertexName, Vec<VertexName>> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { match self.get(&name) { Some(v) => Ok(v.clone()), None => name.not_found(), } } } /// Add vertexes recursively to the DAG. #[async_trait::async_trait] pub trait DagAddHeads { /// Add vertexes and their ancestors to the DAG. This does not persistent /// changes to disk. async fn add_heads(&mut self, parents: &dyn Parents, heads: &[VertexName]) -> Result<()>; } /// Import a generated `CloneData` object into an empty DAG. #[async_trait::async_trait] pub trait DagImportCloneData { /// Updates the DAG using a `CloneData` object. async fn import_clone_data(&mut self, clone_data: CloneData<VertexName>) -> Result<()>; } #[async_trait::async_trait] pub trait DagExportCloneData { /// Export `CloneData` for vertexes in the master group. async fn export_clone_data(&self) -> Result<CloneData<VertexName>>; } /// Persistent the DAG on disk. #[async_trait::async_trait] pub trait DagPersistent { /// Write in-memory DAG to disk. This might also pick up changes to /// the DAG by other processes. async fn flush(&mut self, master_heads: &[VertexName]) -> Result<()>; /// Write in-memory IdMap that caches Id <-> Vertex translation from /// remote service to disk. async fn flush_cached_idmap(&self) -> Result<()>; /// A faster path for add_heads, followed by flush. async fn add_heads_and_flush( &mut self, parent_names_func: &dyn Parents, master_names: &[VertexName], non_master_names: &[VertexName], ) -> Result<()>; /// Import from another (potentially large) DAG. Write to disk immediately. async fn import_and_flush( &mut self, dag: &dyn DagAlgorithm, master_heads: NameSet, ) -> Result<()> { let heads = dag.heads(dag.all().await?).await?; let non_master_heads = heads - master_heads.clone(); let master_heads: Vec<VertexName> = master_heads.iter()?.collect::<Result<Vec<_>>>()?; let non_master_heads: Vec<VertexName> = non_master_heads.iter()?.collect::<Result<Vec<_>>>()?; self.add_heads_and_flush(&dag.dag_snapshot()?, &master_heads, &non_master_heads) .await } } /// Import ASCII graph to DAG. pub trait ImportAscii { /// Import vertexes described in an ASCII graph. /// `heads` optionally specifies the order of heads to insert. /// Useful for testing. Panic if the input is invalid. fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()>; /// Import vertexes described in an ASCII graph. 
fn import_ascii(&mut self, text: &str) -> Result<()> { self.import_ascii_with_heads(text, <Option<&[&str]>>::None) } } /// Lookup vertexes by prefixes. #[async_trait::async_trait] pub trait PrefixLookup { /// Lookup vertexes by hex prefix. async fn vertexes_by_hex_prefix( &self, hex_prefix: &[u8], limit: usize, ) -> Result<Vec<VertexName>>; } /// Convert between `Vertex` and `Id`. #[async_trait::async_trait] pub trait IdConvert: PrefixLookup + Sync { async fn vertex_id(&self, name: VertexName) -> Result<Id>; async fn vertex_id_with_max_group( &self, name: &VertexName, max_group: Group, ) -> Result<Option<Id>>; async fn vertex_name(&self, id: Id) -> Result<VertexName>; async fn contains_vertex_name(&self, name: &VertexName) -> Result<bool>; /// Test if an `id` is present locally. Do not trigger remote fetching. async fn contains_vertex_id_locally(&self, id: &[Id]) -> Result<Vec<bool>>; /// Test if an `name` is present locally. Do not trigger remote fetching. async fn contains_vertex_name_locally(&self, name: &[VertexName]) -> Result<Vec<bool>>; async fn vertex_id_optional(&self, name: &VertexName) -> Result<Option<Id>> { self.vertex_id_with_max_group(name, Group::NON_MASTER).await } /// Convert [`Id`]s to [`VertexName`]s in batch. async fn vertex_name_batch(&self, ids: &[Id]) -> Result<Vec<Result<VertexName>>> { // This is not an efficient implementation in an async context. let mut names = Vec::with_capacity(ids.len()); for &id in ids { names.push(self.vertex_name(id).await); } Ok(names) } /// Convert [`VertexName`]s to [`Id`]s in batch. async fn vertex_id_batch(&self, names: &[VertexName]) -> Result<Vec<Result<Id>>> { // This is not an efficient implementation in an async context. let mut ids = Vec::with_capacity(names.len()); for name in names { ids.push(self.vertex_id(name.clone()).await); } Ok(ids) } /// Identity of the map. fn map_id(&self) -> &str; /// Version of the map. Useful to figure out compatibility between two maps. fn map_version(&self) -> &VerLink; } impl<T> ImportAscii for T where T: DagAddHeads, { fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()> { let parents = drawdag::parse(&text); let heads: Vec<_> = match heads { Some(heads) => heads .iter() .map(|s| VertexName::copy_from(s.as_ref().as_bytes())) .collect(), None => { let mut heads: Vec<_> = parents .keys() .map(|s| VertexName::copy_from(s.as_bytes())) .collect(); heads.sort(); heads } }; let v = |s: String| VertexName::copy_from(s.as_bytes()); let parents: std::collections::HashMap<VertexName, Vec<VertexName>> = parents .into_iter() .map(|(k, vs)| (v(k), vs.into_iter().map(v).collect())) .collect(); nonblocking::non_blocking_result(self.add_heads(&parents, &heads[..]))?; Ok(()) } } #[async_trait::async_trait] pub trait ToIdSet { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet>; } pub trait ToSet { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet>; } pub trait IdMapSnapshot { /// Get a snapshot of IdMap. fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>>; } /// Describes how to persist state to disk. pub trait Persist { /// Return type of `lock()`. type Lock: Send + Sync; /// Obtain an exclusive lock for writing. /// This should prevent other writers. fn lock(&mut self) -> Result<Self::Lock>; /// Reload from the source of truth. Drop pending changes. /// /// This requires a lock and is usually called before `persist()`. 
fn reload(&mut self, _lock: &Self::Lock) -> Result<()>; /// Write pending changes to the source of truth. /// /// This requires a lock. fn persist(&mut self, _lock: &Self::Lock) -> Result<()>; /// Return a [`Locked`] instance that provides race-free filesytem read and /// write access by taking an exclusive lock. fn prepare_filesystem_sync(&mut self) -> Result<Locked<Self>> where Self: Sized, { let lock = self.lock()?; self.reload(&lock)?; Ok(Locked { inner: self, lock }) } } /// Address that can be used to open things. /// /// The address type decides the return type of `open`. pub trait Open: Clone { type OpenTarget; fn open(&self) -> Result<Self::OpenTarget>; } /// Fallible clone. pub trait TryClone { fn try_clone(&self) -> Result<Self> where Self: Sized; } impl<T: Clone> TryClone for T { fn try_clone(&self) -> Result<Self> { Ok(self.clone()) } } #[async_trait::async_trait] impl<T: IdConvert + IdMapSnapshot> ToIdSet for T { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet> { let version = set.hints().id_map_version(); // Fast path: extract IdSet from IdStaticSet. if let Some(set) = set.as_any().downcast_ref::<IdStaticSet>() { if None < version && version <= Some(self.map_version()) { return Ok(set.spans.clone()); } } // Convert IdLazySet to IdStaticSet. Bypass hash lookups. if let Some(set) = set.as_any().downcast_ref::<IdLazySet>() { if None < version && version <= Some(self.map_version()) { let set: IdStaticSet = set.to_static()?; return Ok(set.spans); } } // Slow path: iterate through the set and convert it to a non-lazy // IdSet. Does not bypass hash lookups. let mut spans = IdSet::empty(); for name in set.iter()? { let name = name?; let id = self.vertex_id(name).await?; spans.push(id); } Ok(spans) } } impl IdMapSnapshot for Arc<dyn IdConvert + Send + Sync> { fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>> { Ok(self.clone()) } } impl<T: IdMapSnapshot + DagAlgorithm> ToSet for T { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet> { NameSet::from_spans_dag(set.clone(), self) } }
merges
identifier_name
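In this record the excised span is just the identifier `merges`. Since the record's suffix also carries the full `Persist` trait, here is a minimal sketch of an implementor, assuming a purely in-memory store; the `MemStore` type and its no-op lock are illustrative and not part of the crate.

```rust
// Sketch of a `Persist` implementor following the trait shown above.
// `MemStore` is illustrative; a real store would take a filesystem lock and
// reload/write durable state.
struct MemStore {
    pending: Vec<u8>,
    committed: Vec<u8>,
}

impl Persist for MemStore {
    type Lock = (); // a real implementation would hold an exclusive file lock

    fn lock(&mut self) -> Result<Self::Lock> {
        Ok(())
    }

    fn reload(&mut self, _lock: &Self::Lock) -> Result<()> {
        // Drop pending changes and reread the source of truth.
        self.pending = self.committed.clone();
        Ok(())
    }

    fn persist(&mut self, _lock: &Self::Lock) -> Result<()> {
        // Write pending changes back to the source of truth.
        self.committed = self.pending.clone();
        Ok(())
    }
}
```

With this in place, the trait's default `prepare_filesystem_sync()` would lock, reload, and hand back a `Locked<MemStore>` guard for the write.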
ops.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This software may be used and distributed according to the terms of the * GNU General Public License version 2. */ //! DAG and Id operations (mostly traits) use crate::clone::CloneData; use crate::default_impl; use crate::errors::NotFoundError; use crate::id::Group; use crate::id::Id; use crate::id::VertexName; use crate::locked::Locked; use crate::namedag::MemNameDag; use crate::nameset::id_lazy::IdLazySet; use crate::nameset::id_static::IdStaticSet; use crate::nameset::NameSet; use crate::nameset::SyncNameSetQuery; use crate::IdSet; use crate::Result; use crate::VerLink; use std::sync::Arc; /// DAG related read-only algorithms. #[async_trait::async_trait] pub trait DagAlgorithm: Send + Sync { /// Sort a `NameSet` topologically. async fn sort(&self, set: &NameSet) -> Result<NameSet>; /// Re-create the graph so it looks better when rendered. async fn beautify(&self, main_branch: Option<NameSet>) -> Result<MemNameDag> { default_impl::beautify(self, main_branch).await } /// Get ordered parent vertexes. async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; /// Returns a set that covers all vertexes tracked by this DAG. async fn all(&self) -> Result<NameSet>; /// Calculates all ancestors reachable from any name from the given set. async fn ancestors(&self, set: NameSet) -> Result<NameSet>; /// Calculates parents of the given set. /// /// Note: Parent order is not preserved. Use [`NameDag::parent_names`] /// to preserve order. async fn parents(&self, set: NameSet) -> Result<NameSet> { default_impl::parents(self, set).await } /// Calculates the n-th first ancestor. async fn first_ancestor_nth(&self, name: VertexName, n: u64) -> Result<Option<VertexName>> { default_impl::first_ancestor_nth(self, name, n).await } /// Calculates ancestors but only follows the first parent. async fn first_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::first_ancestors(self, set).await } /// Calculates heads of the given set. async fn heads(&self, set: NameSet) -> Result<NameSet> { default_impl::heads(self, set).await } /// Calculates children of the given set. async fn children(&self, set: NameSet) -> Result<NameSet>; /// Calculates roots of the given set. async fn roots(&self, set: NameSet) -> Result<NameSet> { default_impl::roots(self, set).await } /// Calculates merges of the selected set (vertexes with >=2 parents). async fn merges(&self, set: NameSet) -> Result<NameSet> { default_impl::merges(self, set).await } /// Calculates one "greatest common ancestor" of the given set. /// /// If there are no common ancestors, return None. /// If there are multiple greatest common ancestors, pick one arbitrarily. /// Use `gca_all` to get all of them. async fn gca_one(&self, set: NameSet) -> Result<Option<VertexName>> { default_impl::gca_one(self, set).await } /// Calculates all "greatest common ancestor"s of the given set. /// `gca_one` is faster if an arbitrary answer is ok. async fn gca_all(&self, set: NameSet) -> Result<NameSet> { default_impl::gca_all(self, set).await } /// Calculates all common ancestors of the given set. async fn common_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::common_ancestors(self, set).await } /// Tests if `ancestor` is an ancestor of `descendant`. async fn is_ancestor(&self, ancestor: VertexName, descendant: VertexName) -> Result<bool> { default_impl::is_ancestor(self, ancestor, descendant).await } /// Calculates "heads" of the ancestors of the given set. 
That is, /// Find Y, which is the smallest subset of set X, where `ancestors(Y)` is /// `ancestors(X)`. /// /// This is faster than calculating `heads(ancestors(set))` in certain /// implementations like segmented changelog. /// /// This is different from `heads`. In case set contains X and Y, and Y is /// an ancestor of X, but not the immediate ancestor, `heads` will include /// Y while this function won't. async fn heads_ancestors(&self, set: NameSet) -> Result<NameSet> { default_impl::heads_ancestors(self, set).await } /// Calculates the "dag range" - vertexes reachable from both sides. async fn range(&self, roots: NameSet, heads: NameSet) -> Result<NameSet>; /// Calculates `ancestors(reachable) - ancestors(unreachable)`. async fn only(&self, reachable: NameSet, unreachable: NameSet) -> Result<NameSet> { default_impl::only(self, reachable, unreachable).await } /// Calculates `ancestors(reachable) - ancestors(unreachable)`, and /// `ancestors(unreachable)`. /// This might be faster in some implementations than calculating `only` and /// `ancestors` separately. async fn only_both( &self, reachable: NameSet, unreachable: NameSet, ) -> Result<(NameSet, NameSet)> { default_impl::only_both(self, reachable, unreachable).await } /// Calculates the descendants of the given set. async fn descendants(&self, set: NameSet) -> Result<NameSet>; /// Calculates `roots` that are reachable from `heads` without going /// through other `roots`. For example, given the following graph: /// /// ```plain,ignore /// F /// |\ /// C E /// | | /// B D /// |/ /// A /// ``` /// /// `reachable_roots(roots=[A, B, C], heads=[F])` returns `[A, C]`. /// `B` is not included because it cannot be reached without going /// through another root `C` from `F`. `A` is included because it /// can be reached via `F -> E -> D -> A` that does not go through /// other roots. /// /// The can be calculated as /// `roots & (heads | parents(only(heads, roots & ancestors(heads))))`. /// Actual implementation might have faster paths. /// /// The `roots & ancestors(heads)` portion filters out bogus roots for /// compatibility, if the callsite does not provide bogus roots, it /// could be simplified to just `roots`. async fn reachable_roots(&self, roots: NameSet, heads: NameSet) -> Result<NameSet> { default_impl::reachable_roots(self, roots, heads).await } /// Get a snapshot of the current graph that can operate separately. /// /// This makes it easier to fight with borrowck. fn dag_snapshot(&self) -> Result<Arc<dyn DagAlgorithm + Send + Sync>>; /// Identity of the dag. fn dag_id(&self) -> &str; /// Version of the dag. Useful to figure out compatibility between two dags. fn dag_version(&self) -> &VerLink; } #[async_trait::async_trait] pub trait Parents: Send + Sync { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>; } #[async_trait::async_trait] impl Parents for Arc<dyn DagAlgorithm + Send + Sync> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>>
} #[async_trait::async_trait] impl Parents for &(dyn DagAlgorithm + Send + Sync) { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { DagAlgorithm::parent_names(*self, name).await } } #[async_trait::async_trait] impl<'a> Parents for Box<dyn Fn(VertexName) -> Result<Vec<VertexName>> + Send + Sync + 'a> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { (self)(name) } } #[async_trait::async_trait] impl Parents for std::collections::HashMap<VertexName, Vec<VertexName>> { async fn parent_names(&self, name: VertexName) -> Result<Vec<VertexName>> { match self.get(&name) { Some(v) => Ok(v.clone()), None => name.not_found(), } } } /// Add vertexes recursively to the DAG. #[async_trait::async_trait] pub trait DagAddHeads { /// Add vertexes and their ancestors to the DAG. This does not persistent /// changes to disk. async fn add_heads(&mut self, parents: &dyn Parents, heads: &[VertexName]) -> Result<()>; } /// Import a generated `CloneData` object into an empty DAG. #[async_trait::async_trait] pub trait DagImportCloneData { /// Updates the DAG using a `CloneData` object. async fn import_clone_data(&mut self, clone_data: CloneData<VertexName>) -> Result<()>; } #[async_trait::async_trait] pub trait DagExportCloneData { /// Export `CloneData` for vertexes in the master group. async fn export_clone_data(&self) -> Result<CloneData<VertexName>>; } /// Persistent the DAG on disk. #[async_trait::async_trait] pub trait DagPersistent { /// Write in-memory DAG to disk. This might also pick up changes to /// the DAG by other processes. async fn flush(&mut self, master_heads: &[VertexName]) -> Result<()>; /// Write in-memory IdMap that caches Id <-> Vertex translation from /// remote service to disk. async fn flush_cached_idmap(&self) -> Result<()>; /// A faster path for add_heads, followed by flush. async fn add_heads_and_flush( &mut self, parent_names_func: &dyn Parents, master_names: &[VertexName], non_master_names: &[VertexName], ) -> Result<()>; /// Import from another (potentially large) DAG. Write to disk immediately. async fn import_and_flush( &mut self, dag: &dyn DagAlgorithm, master_heads: NameSet, ) -> Result<()> { let heads = dag.heads(dag.all().await?).await?; let non_master_heads = heads - master_heads.clone(); let master_heads: Vec<VertexName> = master_heads.iter()?.collect::<Result<Vec<_>>>()?; let non_master_heads: Vec<VertexName> = non_master_heads.iter()?.collect::<Result<Vec<_>>>()?; self.add_heads_and_flush(&dag.dag_snapshot()?, &master_heads, &non_master_heads) .await } } /// Import ASCII graph to DAG. pub trait ImportAscii { /// Import vertexes described in an ASCII graph. /// `heads` optionally specifies the order of heads to insert. /// Useful for testing. Panic if the input is invalid. fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()>; /// Import vertexes described in an ASCII graph. fn import_ascii(&mut self, text: &str) -> Result<()> { self.import_ascii_with_heads(text, <Option<&[&str]>>::None) } } /// Lookup vertexes by prefixes. #[async_trait::async_trait] pub trait PrefixLookup { /// Lookup vertexes by hex prefix. async fn vertexes_by_hex_prefix( &self, hex_prefix: &[u8], limit: usize, ) -> Result<Vec<VertexName>>; } /// Convert between `Vertex` and `Id`. 
#[async_trait::async_trait] pub trait IdConvert: PrefixLookup + Sync { async fn vertex_id(&self, name: VertexName) -> Result<Id>; async fn vertex_id_with_max_group( &self, name: &VertexName, max_group: Group, ) -> Result<Option<Id>>; async fn vertex_name(&self, id: Id) -> Result<VertexName>; async fn contains_vertex_name(&self, name: &VertexName) -> Result<bool>; /// Test if an `id` is present locally. Do not trigger remote fetching. async fn contains_vertex_id_locally(&self, id: &[Id]) -> Result<Vec<bool>>; /// Test if an `name` is present locally. Do not trigger remote fetching. async fn contains_vertex_name_locally(&self, name: &[VertexName]) -> Result<Vec<bool>>; async fn vertex_id_optional(&self, name: &VertexName) -> Result<Option<Id>> { self.vertex_id_with_max_group(name, Group::NON_MASTER).await } /// Convert [`Id`]s to [`VertexName`]s in batch. async fn vertex_name_batch(&self, ids: &[Id]) -> Result<Vec<Result<VertexName>>> { // This is not an efficient implementation in an async context. let mut names = Vec::with_capacity(ids.len()); for &id in ids { names.push(self.vertex_name(id).await); } Ok(names) } /// Convert [`VertexName`]s to [`Id`]s in batch. async fn vertex_id_batch(&self, names: &[VertexName]) -> Result<Vec<Result<Id>>> { // This is not an efficient implementation in an async context. let mut ids = Vec::with_capacity(names.len()); for name in names { ids.push(self.vertex_id(name.clone()).await); } Ok(ids) } /// Identity of the map. fn map_id(&self) -> &str; /// Version of the map. Useful to figure out compatibility between two maps. fn map_version(&self) -> &VerLink; } impl<T> ImportAscii for T where T: DagAddHeads, { fn import_ascii_with_heads( &mut self, text: &str, heads: Option<&[impl AsRef<str>]>, ) -> Result<()> { let parents = drawdag::parse(&text); let heads: Vec<_> = match heads { Some(heads) => heads .iter() .map(|s| VertexName::copy_from(s.as_ref().as_bytes())) .collect(), None => { let mut heads: Vec<_> = parents .keys() .map(|s| VertexName::copy_from(s.as_bytes())) .collect(); heads.sort(); heads } }; let v = |s: String| VertexName::copy_from(s.as_bytes()); let parents: std::collections::HashMap<VertexName, Vec<VertexName>> = parents .into_iter() .map(|(k, vs)| (v(k), vs.into_iter().map(v).collect())) .collect(); nonblocking::non_blocking_result(self.add_heads(&parents, &heads[..]))?; Ok(()) } } #[async_trait::async_trait] pub trait ToIdSet { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet>; } pub trait ToSet { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet>; } pub trait IdMapSnapshot { /// Get a snapshot of IdMap. fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>>; } /// Describes how to persist state to disk. pub trait Persist { /// Return type of `lock()`. type Lock: Send + Sync; /// Obtain an exclusive lock for writing. /// This should prevent other writers. fn lock(&mut self) -> Result<Self::Lock>; /// Reload from the source of truth. Drop pending changes. /// /// This requires a lock and is usually called before `persist()`. fn reload(&mut self, _lock: &Self::Lock) -> Result<()>; /// Write pending changes to the source of truth. /// /// This requires a lock. fn persist(&mut self, _lock: &Self::Lock) -> Result<()>; /// Return a [`Locked`] instance that provides race-free filesytem read and /// write access by taking an exclusive lock. 
fn prepare_filesystem_sync(&mut self) -> Result<Locked<Self>> where Self: Sized, { let lock = self.lock()?; self.reload(&lock)?; Ok(Locked { inner: self, lock }) } } /// Address that can be used to open things. /// /// The address type decides the return type of `open`. pub trait Open: Clone { type OpenTarget; fn open(&self) -> Result<Self::OpenTarget>; } /// Fallible clone. pub trait TryClone { fn try_clone(&self) -> Result<Self> where Self: Sized; } impl<T: Clone> TryClone for T { fn try_clone(&self) -> Result<Self> { Ok(self.clone()) } } #[async_trait::async_trait] impl<T: IdConvert + IdMapSnapshot> ToIdSet for T { /// Converts [`NameSet`] to [`IdSet`]. async fn to_id_set(&self, set: &NameSet) -> Result<IdSet> { let version = set.hints().id_map_version(); // Fast path: extract IdSet from IdStaticSet. if let Some(set) = set.as_any().downcast_ref::<IdStaticSet>() { if None < version && version <= Some(self.map_version()) { return Ok(set.spans.clone()); } } // Convert IdLazySet to IdStaticSet. Bypass hash lookups. if let Some(set) = set.as_any().downcast_ref::<IdLazySet>() { if None < version && version <= Some(self.map_version()) { let set: IdStaticSet = set.to_static()?; return Ok(set.spans); } } // Slow path: iterate through the set and convert it to a non-lazy // IdSet. Does not bypass hash lookups. let mut spans = IdSet::empty(); for name in set.iter()? { let name = name?; let id = self.vertex_id(name).await?; spans.push(id); } Ok(spans) } } impl IdMapSnapshot for Arc<dyn IdConvert + Send + Sync> { fn id_map_snapshot(&self) -> Result<Arc<dyn IdConvert + Send + Sync>> { Ok(self.clone()) } } impl<T: IdMapSnapshot + DagAlgorithm> ToSet for T { /// Converts [`IdSet`] to [`NameSet`]. fn to_set(&self, set: &IdSet) -> Result<NameSet> { NameSet::from_spans_dag(set.clone(), self) } }
{ DagAlgorithm::parent_names(self, name).await }
identifier_body
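The `IdConvert` trait in this row ships default `vertex_name_batch` / `vertex_id_batch` methods that, as their own comments note, are not efficient in an async context: they await each conversion one at a time. Purely as a hedged sketch (toy types, not the crate's API), such a batch could be made concurrent with `futures::future::join_all`, which still preserves one `Result` per input:

```rust
// Sketch of a concurrent batch lookup; `lookup` is a stand-in for a call
// such as `vertex_name(id)`, not a real method of the dag crate.
use futures::future::join_all;

async fn lookup(id: u64) -> Result<String, String> {
    Ok(format!("vertex-{}", id))
}

async fn lookup_batch(ids: &[u64]) -> Vec<Result<String, String>> {
    // Build every future first, then await them together instead of serially.
    join_all(ids.iter().map(|&id| lookup(id))).await
}

fn main() {
    let names = futures::executor::block_on(lookup_batch(&[1, 2, 3]));
    assert_eq!(names.len(), 3);
}
```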
mouse.rs
use nalgebra::Point2; use num::Zero; use std::collections::HashSet; use std::ops::Deref; use event::{ElementState, Event, MouseButton, React}; use input::state::{CompositeState, Element, Input, InputDifference, InputState, InputTransition, Snapshot, State}; use BoolExt; impl Element for MouseButton { type State = ElementState; } /// Mouse position (pointer) element. #[derive(Clone, Copy, Debug)] pub struct MousePosition; impl Element for MousePosition { type State = Point2<i32>; } /// Mouse proximity element. Indicates whether or not the mouse position /// (pointer) is within the bounds of the window. #[derive(Clone, Copy, Debug)] pub struct MouseProximity; impl Element for MouseProximity { type State = bool; } /// Mouse (pointer) input device. pub struct Mouse { live: MouseState, snapshot: MouseState, } impl Mouse { pub fn new() -> Self { Mouse::default() } } impl Default for Mouse { fn default() -> Self { Mouse { live: MouseState::new(), snapshot: MouseState::new(), } } } impl Deref for Mouse { type Target = MouseState; fn deref(&self) -> &Self::Target { &self.live } } impl Input for Mouse { type State = MouseState; fn live(&self) -> &Self::State { &self.live } fn snapshot(&self) -> &Self::State { &self.snapshot } } impl InputDifference<MousePosition> for Mouse { type Difference = Option<( MousePosition, <<MousePosition as Element>::State as State>::Difference, )>; // This is distinct from `InputTransition::transition`. That function // indicates whether or not a change has occurred and yields the current // state. This function instead yields a *difference*, for which the type // representing the change in state can be entirely different than the type // of the state itself. For mouse position, `transition` yields a point and // `difference` yields a vector. fn difference(&self) -> Self::Difference { let difference = self.live.state(MousePosition) - self.snapshot.state(MousePosition); (!difference.is_zero()).into_some((MousePosition, difference)) } } impl InputDifference<MouseProximity> for Mouse { type Difference = Option<( MouseProximity, <<MouseProximity as Element>::State as State>::Difference, )>; fn difference(&self) -> Self::Difference { self.transition(MouseProximity) .map(|state| (MouseProximity, state)) } } impl React for Mouse { fn react(&mut self, event: &Event) { match *event { Event::MouseEntered => { self.live.proximity = true; } Event::MouseInput(ElementState::Pressed, button) => { self.live.buttons.insert(button);
self.live.proximity = false; } Event::MouseMoved(x, y) => { self.live.position = Point2::new(x, y); } _ => {} } } } impl Snapshot for Mouse { fn snapshot(&mut self) { self.snapshot = self.live.clone(); } } #[derive(Clone)] pub struct MouseState { buttons: HashSet<MouseButton>, position: Point2<i32>, proximity: bool, } impl MouseState { fn new() -> Self { MouseState { buttons: HashSet::new(), position: Point2::origin(), proximity: false, } } } impl CompositeState<MouseButton> for MouseState { type Composite = HashSet<MouseButton>; fn composite(&self) -> &Self::Composite { &self.buttons } } impl InputState<MousePosition> for MouseState { fn state(&self, _: MousePosition) -> <MousePosition as Element>::State { self.position } } impl InputState<MouseProximity> for MouseState { fn state(&self, _: MouseProximity) -> <MouseProximity as Element>::State { self.proximity } } #[cfg(test)] mod tests { use nalgebra::Vector2; use super::super::*; use event::{Event, React}; #[test] fn position_difference_some() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); mouse.react(&Event::MouseMoved(1, 1)); // Move 1 LU along the Y axis. assert_eq!( Vector2::<i32>::new(0, 1), <Mouse as InputDifference<MousePosition>>::difference(&mouse) .unwrap() .1 ); } #[test] fn position_difference_none() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); assert!(<Mouse as InputDifference<MousePosition>>::difference(&mouse).is_none()); } }
} Event::MouseInput(ElementState::Released, button) => { self.live.buttons.remove(&button); } Event::MouseLeft => {
random_line_split
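This `mouse.rs` row is built around a live/snapshot pair: `react` mutates only the live state, `snapshot` copies it wholesale, and differences are computed between the two. A minimal sketch of that pattern, with a single `i32` standing in for the real `Point2<i32>` position and std's `bool::then_some` playing the role of the crate's `BoolExt::into_some` (all names here are illustrative):

```rust
#[derive(Clone, Default)]
struct Live {
    position: i32,
}

struct Device {
    live: Live,
    snapshot: Live,
}

impl Device {
    fn react(&mut self, moved_to: i32) {
        // Mirrors Mouse::react: events touch only the live state.
        self.live.position = moved_to;
    }
    fn snapshot(&mut self) {
        // Mirrors Snapshot for Mouse: copy the live state wholesale.
        self.snapshot = self.live.clone();
    }
    fn difference(&self) -> Option<i32> {
        // Mirrors InputDifference: None when nothing changed since the snapshot.
        let d = self.live.position - self.snapshot.position;
        (d != 0).then_some(d)
    }
}

fn main() {
    let mut dev = Device { live: Live::default(), snapshot: Live::default() };
    dev.react(1);
    dev.snapshot();
    dev.react(3);
    assert_eq!(dev.difference(), Some(2));
}
```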
mouse.rs
use nalgebra::Point2; use num::Zero; use std::collections::HashSet; use std::ops::Deref; use event::{ElementState, Event, MouseButton, React}; use input::state::{CompositeState, Element, Input, InputDifference, InputState, InputTransition, Snapshot, State}; use BoolExt; impl Element for MouseButton { type State = ElementState; } /// Mouse position (pointer) element. #[derive(Clone, Copy, Debug)] pub struct MousePosition; impl Element for MousePosition { type State = Point2<i32>; } /// Mouse proximity element. Indicates whether or not the mouse position /// (pointer) is within the bounds of the window. #[derive(Clone, Copy, Debug)] pub struct MouseProximity; impl Element for MouseProximity { type State = bool; } /// Mouse (pointer) input device. pub struct Mouse { live: MouseState, snapshot: MouseState, } impl Mouse { pub fn new() -> Self { Mouse::default() } } impl Default for Mouse { fn default() -> Self { Mouse { live: MouseState::new(), snapshot: MouseState::new(), } } } impl Deref for Mouse { type Target = MouseState; fn deref(&self) -> &Self::Target { &self.live } } impl Input for Mouse { type State = MouseState; fn live(&self) -> &Self::State { &self.live } fn snapshot(&self) -> &Self::State { &self.snapshot } } impl InputDifference<MousePosition> for Mouse { type Difference = Option<( MousePosition, <<MousePosition as Element>::State as State>::Difference, )>; // This is distinct from `InputTransition::transition`. That function // indicates whether or not a change has occurred and yields the current // state. This function instead yields a *difference*, for which the type // representing the change in state can be entirely different than the type // of the state itself. For mouse position, `transition` yields a point and // `difference` yields a vector. fn difference(&self) -> Self::Difference { let difference = self.live.state(MousePosition) - self.snapshot.state(MousePosition); (!difference.is_zero()).into_some((MousePosition, difference)) } } impl InputDifference<MouseProximity> for Mouse { type Difference = Option<( MouseProximity, <<MouseProximity as Element>::State as State>::Difference, )>; fn difference(&self) -> Self::Difference { self.transition(MouseProximity) .map(|state| (MouseProximity, state)) } } impl React for Mouse { fn react(&mut self, event: &Event) { match *event { Event::MouseEntered => { self.live.proximity = true; } Event::MouseInput(ElementState::Pressed, button) => { self.live.buttons.insert(button); } Event::MouseInput(ElementState::Released, button) => { self.live.buttons.remove(&button); } Event::MouseLeft => { self.live.proximity = false; } Event::MouseMoved(x, y) => { self.live.position = Point2::new(x, y); } _ =>
} } } impl Snapshot for Mouse { fn snapshot(&mut self) { self.snapshot = self.live.clone(); } } #[derive(Clone)] pub struct MouseState { buttons: HashSet<MouseButton>, position: Point2<i32>, proximity: bool, } impl MouseState { fn new() -> Self { MouseState { buttons: HashSet::new(), position: Point2::origin(), proximity: false, } } } impl CompositeState<MouseButton> for MouseState { type Composite = HashSet<MouseButton>; fn composite(&self) -> &Self::Composite { &self.buttons } } impl InputState<MousePosition> for MouseState { fn state(&self, _: MousePosition) -> <MousePosition as Element>::State { self.position } } impl InputState<MouseProximity> for MouseState { fn state(&self, _: MouseProximity) -> <MouseProximity as Element>::State { self.proximity } } #[cfg(test)] mod tests { use nalgebra::Vector2; use super::super::*; use event::{Event, React}; #[test] fn position_difference_some() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); mouse.react(&Event::MouseMoved(1, 1)); // Move 1 LU along the Y axis. assert_eq!( Vector2::<i32>::new(0, 1), <Mouse as InputDifference<MousePosition>>::difference(&mouse) .unwrap() .1 ); } #[test] fn position_difference_none() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); assert!(<Mouse as InputDifference<MousePosition>>::difference(&mouse).is_none()); } }
{}
conditional_block
mouse.rs
use nalgebra::Point2; use num::Zero; use std::collections::HashSet; use std::ops::Deref; use event::{ElementState, Event, MouseButton, React}; use input::state::{CompositeState, Element, Input, InputDifference, InputState, InputTransition, Snapshot, State}; use BoolExt; impl Element for MouseButton { type State = ElementState; } /// Mouse position (pointer) element. #[derive(Clone, Copy, Debug)] pub struct MousePosition; impl Element for MousePosition { type State = Point2<i32>; } /// Mouse proximity element. Indicates whether or not the mouse position /// (pointer) is within the bounds of the window. #[derive(Clone, Copy, Debug)] pub struct MouseProximity; impl Element for MouseProximity { type State = bool; } /// Mouse (pointer) input device. pub struct Mouse { live: MouseState, snapshot: MouseState, } impl Mouse { pub fn new() -> Self { Mouse::default() } } impl Default for Mouse { fn default() -> Self { Mouse { live: MouseState::new(), snapshot: MouseState::new(), } } } impl Deref for Mouse { type Target = MouseState; fn deref(&self) -> &Self::Target { &self.live } } impl Input for Mouse { type State = MouseState; fn live(&self) -> &Self::State { &self.live } fn snapshot(&self) -> &Self::State { &self.snapshot } } impl InputDifference<MousePosition> for Mouse { type Difference = Option<( MousePosition, <<MousePosition as Element>::State as State>::Difference, )>; // This is distinct from `InputTransition::transition`. That function // indicates whether or not a change has occurred and yields the current // state. This function instead yields a *difference*, for which the type // representing the change in state can be entirely different than the type // of the state itself. For mouse position, `transition` yields a point and // `difference` yields a vector. fn difference(&self) -> Self::Difference { let difference = self.live.state(MousePosition) - self.snapshot.state(MousePosition); (!difference.is_zero()).into_some((MousePosition, difference)) } } impl InputDifference<MouseProximity> for Mouse { type Difference = Option<( MouseProximity, <<MouseProximity as Element>::State as State>::Difference, )>; fn difference(&self) -> Self::Difference
} impl React for Mouse { fn react(&mut self, event: &Event) { match *event { Event::MouseEntered => { self.live.proximity = true; } Event::MouseInput(ElementState::Pressed, button) => { self.live.buttons.insert(button); } Event::MouseInput(ElementState::Released, button) => { self.live.buttons.remove(&button); } Event::MouseLeft => { self.live.proximity = false; } Event::MouseMoved(x, y) => { self.live.position = Point2::new(x, y); } _ => {} } } } impl Snapshot for Mouse { fn snapshot(&mut self) { self.snapshot = self.live.clone(); } } #[derive(Clone)] pub struct MouseState { buttons: HashSet<MouseButton>, position: Point2<i32>, proximity: bool, } impl MouseState { fn new() -> Self { MouseState { buttons: HashSet::new(), position: Point2::origin(), proximity: false, } } } impl CompositeState<MouseButton> for MouseState { type Composite = HashSet<MouseButton>; fn composite(&self) -> &Self::Composite { &self.buttons } } impl InputState<MousePosition> for MouseState { fn state(&self, _: MousePosition) -> <MousePosition as Element>::State { self.position } } impl InputState<MouseProximity> for MouseState { fn state(&self, _: MouseProximity) -> <MouseProximity as Element>::State { self.proximity } } #[cfg(test)] mod tests { use nalgebra::Vector2; use super::super::*; use event::{Event, React}; #[test] fn position_difference_some() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); mouse.react(&Event::MouseMoved(1, 1)); // Move 1 LU along the Y axis. assert_eq!( Vector2::<i32>::new(0, 1), <Mouse as InputDifference<MousePosition>>::difference(&mouse) .unwrap() .1 ); } #[test] fn position_difference_none() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); assert!(<Mouse as InputDifference<MousePosition>>::difference(&mouse).is_none()); } }
{ self.transition(MouseProximity) .map(|state| (MouseProximity, state)) }
identifier_body
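The long comment in this row draws the line between a *transition* (the new state) and a *difference* (the change), and points out that the two can have different types. The snippet below shows the type split the mouse position relies on: with nalgebra, subtracting one `Point2` from another yields a `Vector2`, which is exactly the `Vector2::<i32>::new(0, 1)` the row's test asserts.

```rust
use nalgebra::{Point2, Vector2};

fn main() {
    let snapshot = Point2::new(1, 0);
    let live = Point2::new(1, 1);
    // point - point = vector: the state is a position, the difference is a displacement.
    let diff: Vector2<i32> = live - snapshot;
    assert_eq!(diff, Vector2::new(0, 1));
}
```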
mouse.rs
use nalgebra::Point2; use num::Zero; use std::collections::HashSet; use std::ops::Deref; use event::{ElementState, Event, MouseButton, React}; use input::state::{CompositeState, Element, Input, InputDifference, InputState, InputTransition, Snapshot, State}; use BoolExt; impl Element for MouseButton { type State = ElementState; } /// Mouse position (pointer) element. #[derive(Clone, Copy, Debug)] pub struct MousePosition; impl Element for MousePosition { type State = Point2<i32>; } /// Mouse proximity element. Indicates whether or not the mouse position /// (pointer) is within the bounds of the window. #[derive(Clone, Copy, Debug)] pub struct MouseProximity; impl Element for MouseProximity { type State = bool; } /// Mouse (pointer) input device. pub struct Mouse { live: MouseState, snapshot: MouseState, } impl Mouse { pub fn
() -> Self { Mouse::default() } } impl Default for Mouse { fn default() -> Self { Mouse { live: MouseState::new(), snapshot: MouseState::new(), } } } impl Deref for Mouse { type Target = MouseState; fn deref(&self) -> &Self::Target { &self.live } } impl Input for Mouse { type State = MouseState; fn live(&self) -> &Self::State { &self.live } fn snapshot(&self) -> &Self::State { &self.snapshot } } impl InputDifference<MousePosition> for Mouse { type Difference = Option<( MousePosition, <<MousePosition as Element>::State as State>::Difference, )>; // This is distinct from `InputTransition::transition`. That function // indicates whether or not a change has occurred and yields the current // state. This function instead yields a *difference*, for which the type // representing the change in state can be entirely different than the type // of the state itself. For mouse position, `transition` yields a point and // `difference` yields a vector. fn difference(&self) -> Self::Difference { let difference = self.live.state(MousePosition) - self.snapshot.state(MousePosition); (!difference.is_zero()).into_some((MousePosition, difference)) } } impl InputDifference<MouseProximity> for Mouse { type Difference = Option<( MouseProximity, <<MouseProximity as Element>::State as State>::Difference, )>; fn difference(&self) -> Self::Difference { self.transition(MouseProximity) .map(|state| (MouseProximity, state)) } } impl React for Mouse { fn react(&mut self, event: &Event) { match *event { Event::MouseEntered => { self.live.proximity = true; } Event::MouseInput(ElementState::Pressed, button) => { self.live.buttons.insert(button); } Event::MouseInput(ElementState::Released, button) => { self.live.buttons.remove(&button); } Event::MouseLeft => { self.live.proximity = false; } Event::MouseMoved(x, y) => { self.live.position = Point2::new(x, y); } _ => {} } } } impl Snapshot for Mouse { fn snapshot(&mut self) { self.snapshot = self.live.clone(); } } #[derive(Clone)] pub struct MouseState { buttons: HashSet<MouseButton>, position: Point2<i32>, proximity: bool, } impl MouseState { fn new() -> Self { MouseState { buttons: HashSet::new(), position: Point2::origin(), proximity: false, } } } impl CompositeState<MouseButton> for MouseState { type Composite = HashSet<MouseButton>; fn composite(&self) -> &Self::Composite { &self.buttons } } impl InputState<MousePosition> for MouseState { fn state(&self, _: MousePosition) -> <MousePosition as Element>::State { self.position } } impl InputState<MouseProximity> for MouseState { fn state(&self, _: MouseProximity) -> <MouseProximity as Element>::State { self.proximity } } #[cfg(test)] mod tests { use nalgebra::Vector2; use super::super::*; use event::{Event, React}; #[test] fn position_difference_some() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); mouse.react(&Event::MouseMoved(1, 1)); // Move 1 LU along the Y axis. assert_eq!( Vector2::<i32>::new(0, 1), <Mouse as InputDifference<MousePosition>>::difference(&mouse) .unwrap() .1 ); } #[test] fn position_difference_none() { let mut mouse = Mouse::new(); mouse.react(&Event::MouseMoved(1, 0)); mouse.snapshot(); assert!(<Mouse as InputDifference<MousePosition>>::difference(&mouse).is_none()); } }
new
identifier_name
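For this row the `fim_type` is `identifier_name`: the masked span is just the identifier `new`, so the original source comes back by splicing prefix + middle + suffix. A tiny illustration of that reassembly (the string literals are abbreviated stand-ins for the row's actual fields, not verbatim copies):

```rust
fn main() {
    let prefix = "impl Mouse { pub fn ";
    let middle = "new";
    let suffix = "() -> Self { Mouse::default() } }";
    // Concatenating the three fields restores the masked method header.
    let restored = format!("{}{}{}", prefix, middle, suffix);
    assert!(restored.contains("pub fn new() -> Self"));
}
```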
config.rs
use std::io::prelude::*; use std::{env, fs}; use std::fs::{File, OpenOptions}; use std::path::PathBuf;
use serde_json; use serde_json::Error; #[derive(Serialize, Deserialize)] pub struct Config { pub username: String, pub access_token: String } impl Clone for Config { fn clone(&self) -> Config { Config { username: self.username.clone(), access_token: self.access_token.clone() } } } fn config_from_args(matches: &ArgMatches) -> Config { Config { username: matches.value_of("username").unwrap().to_string(), access_token: matches.value_of("access_token").unwrap().to_string() } } pub fn load_config() -> Config { let credentials_path = { let home_dir = get_home_dir(); let mut xs = home_dir; xs.push(".config"); xs.push("gh"); xs.push("credentials"); xs }; if!credentials_path.exists() { panic!("no configuration found"); } let file = match File::open(&credentials_path) { Ok(f) => f, Err(e) => panic!("could not open credentials file {}", e) }; let mut buf_reader = BufReader::new(file); let mut contents = String::new(); let _ = buf_reader.read_to_string(&mut contents); let config: Config = json_ops::from_str_or_die(&contents, "Unable to parse Credentials file!"); config } pub fn show_config(matches: &ArgMatches) -> () { //let credentials_path = { // let home_dir = get_home_dir(); // let mut xs = home_dir; // xs.push(".config"); // xs.push("gh"); // xs.push("credentials"); // xs //}; //if!credentials_path.exists() { // panic!("no configuration found"); //} //let file = match File::open(&credentials_path) { // Ok(f) => f, // Err(e) => panic!("could not open credentials file {}", e) //}; //let mut buf_reader = BufReader::new(file); //let mut contents = String::new(); //let _ = buf_reader.read_to_string(&mut contents); //let decoded: Config = json::decode(&contents).unwrap(); let config = load_config(); match matches.value_of("format") { None => print_config(&config), Some(format) => if format == "json" { //let config_json = json::as_pretty_json(&decoded); let config_json = match serde_json::to_string_pretty(&config) { Ok(json) => json, Err(_) => panic!("Error serializing config json"), }; println!("{}", config_json); } else { panic!("unknown format request {}", format); } } } fn print_config(config: &Config) -> () { println!("{0: <10} {1: <40}", "login", "access token"); println!("{0: <10} {1: <10}", config.username, config.access_token); } pub fn set_config(matches: &ArgMatches) -> () { let config = config_from_args(matches); let config_json = match serde_json::to_string_pretty(&config) { Ok(json) => json, Err(_) => panic!("Error serializing config json"), }; let st = config_json.to_string(); let home_dir = get_home_dir(); let config_dir = ensure_config_dir_exists(home_dir); let gh_dir = ensure_gh_dir_exists(config_dir); let credentials = { let mut xs = gh_dir; xs.push("credentials"); xs }; let file = match OpenOptions::new().read(true) .write(true) .create(true) .open(&credentials) { Ok(f) => f, Err(e) => panic!(e) }; if credentials.exists() { let mut buf = BufWriter::new(&file); buf.write_all(st.as_bytes()).expect("Unable to write config"); } else { let mut buf = BufWriter::new(&file); buf.write_all(st.as_bytes()).expect("Unable to write config"); } let len = { let x: usize = st.as_bytes().len(); x as u64 }; let _ = file.set_len(len); println!("Completed set_config!"); } // Returns the $HOME directory or panics if it can't find it. fn get_home_dir() -> PathBuf { match env::home_dir() { Some(path) => path, None => panic!("$HOME directory not found") } } // Ensures the config directory exists. Creates the directory if it doesn't. // $HOME/.config is the expected config directory. 
Returns the // PathBuf after ensuring it exists. fn ensure_config_dir_exists(home_path: PathBuf) -> PathBuf { let config_path = { let mut xs = home_path; xs.push(".config"); xs }; if !config_path.exists() { let _ = fs::create_dir(&config_path); } config_path } // Ensures the gh directory exists. Creates the directory if it doesn't. // $HOME/.config/gh is the expected gh directory. Returns the // PathBuf after ensuring it exists. fn ensure_gh_dir_exists(config_path: PathBuf) -> PathBuf { let gh_path = { let mut xs = config_path; xs.push("gh"); xs }; if !gh_path.exists() { let _ = fs::create_dir(&gh_path); } gh_path }
use clap::ArgMatches; use std::io::{BufReader, BufWriter, Write}; use evidence::json_ops;
random_line_split
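The `config.rs` rows read and write the credentials file by running a `#[derive(Serialize, Deserialize)]` `Config` through `serde_json`. A minimal round-trip sketch of that serialization, assuming serde's derive feature is enabled; the username and token values are made up:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Config {
    username: String,
    access_token: String,
}

fn main() -> Result<(), serde_json::Error> {
    let config = Config {
        username: "octocat".to_string(),
        access_token: "t0ken".to_string(),
    };
    // Same call the file uses before writing $HOME/.config/gh/credentials.
    let pretty = serde_json::to_string_pretty(&config)?;
    let parsed: Config = serde_json::from_str(&pretty)?;
    assert_eq!(parsed, config);
    Ok(())
}
```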
config.rs
use std::io::prelude::*; use std::{env, fs}; use std::fs::{File, OpenOptions}; use std::path::PathBuf; use clap::ArgMatches; use std::io::{BufReader, BufWriter, Write}; use evidence::json_ops; use serde_json; use serde_json::Error; #[derive(Serialize, Deserialize)] pub struct Config { pub username: String, pub access_token: String } impl Clone for Config { fn clone(&self) -> Config { Config { username: self.username.clone(), access_token: self.access_token.clone() } } } fn config_from_args(matches: &ArgMatches) -> Config { Config { username: matches.value_of("username").unwrap().to_string(), access_token: matches.value_of("access_token").unwrap().to_string() } } pub fn load_config() -> Config { let credentials_path = { let home_dir = get_home_dir(); let mut xs = home_dir; xs.push(".config"); xs.push("gh"); xs.push("credentials"); xs }; if!credentials_path.exists() { panic!("no configuration found"); } let file = match File::open(&credentials_path) { Ok(f) => f, Err(e) => panic!("could not open credentials file {}", e) }; let mut buf_reader = BufReader::new(file); let mut contents = String::new(); let _ = buf_reader.read_to_string(&mut contents); let config: Config = json_ops::from_str_or_die(&contents, "Unable to parse Credentials file!"); config } pub fn show_config(matches: &ArgMatches) -> () { //let credentials_path = { // let home_dir = get_home_dir(); // let mut xs = home_dir; // xs.push(".config"); // xs.push("gh"); // xs.push("credentials"); // xs //}; //if!credentials_path.exists() { // panic!("no configuration found"); //} //let file = match File::open(&credentials_path) { // Ok(f) => f, // Err(e) => panic!("could not open credentials file {}", e) //}; //let mut buf_reader = BufReader::new(file); //let mut contents = String::new(); //let _ = buf_reader.read_to_string(&mut contents); //let decoded: Config = json::decode(&contents).unwrap(); let config = load_config(); match matches.value_of("format") { None => print_config(&config), Some(format) => if format == "json" { //let config_json = json::as_pretty_json(&decoded); let config_json = match serde_json::to_string_pretty(&config) { Ok(json) => json, Err(_) => panic!("Error serializing config json"), }; println!("{}", config_json); } else { panic!("unknown format request {}", format); } } } fn print_config(config: &Config) -> () { println!("{0: <10} {1: <40}", "login", "access token"); println!("{0: <10} {1: <10}", config.username, config.access_token); } pub fn set_config(matches: &ArgMatches) -> () { let config = config_from_args(matches); let config_json = match serde_json::to_string_pretty(&config) { Ok(json) => json, Err(_) => panic!("Error serializing config json"), }; let st = config_json.to_string(); let home_dir = get_home_dir(); let config_dir = ensure_config_dir_exists(home_dir); let gh_dir = ensure_gh_dir_exists(config_dir); let credentials = { let mut xs = gh_dir; xs.push("credentials"); xs }; let file = match OpenOptions::new().read(true) .write(true) .create(true) .open(&credentials) { Ok(f) => f, Err(e) => panic!(e) }; if credentials.exists() { let mut buf = BufWriter::new(&file); buf.write_all(st.as_bytes()).expect("Unable to write config"); } else { let mut buf = BufWriter::new(&file); buf.write_all(st.as_bytes()).expect("Unable to write config"); } let len = { let x: usize = st.as_bytes().len(); x as u64 }; let _ = file.set_len(len); println!("Completed set_config!"); } // Returns the $HOME directory or panics if it can't find it. 
fn get_home_dir() -> PathBuf { match env::home_dir() { Some(path) => path, None => panic!("$HOME directory not found") } } // Ensures the config directory exists. Creates the directory if it doesn't. // $HOME/.config is the expected config directory. Returns the // PathBuf after ensuring it exists. fn ensure_config_dir_exists(home_path: PathBuf) -> PathBuf { let config_path = { let mut xs = home_path; xs.push(".config"); xs }; if !config_path.exists() { let _ = fs::create_dir(&config_path); } config_path } // Ensures the gh directory exists. Creates the directory if it doesn't. // $HOME/.config/gh is the expected gh directory. Returns the // PathBuf after ensuring it exists. fn ensure_gh_dir_exists(config_path: PathBuf) -> PathBuf
{ let gh_path = { let mut xs = config_path; xs.push("gh"); xs }; if !gh_path.exists() { let _ = fs::create_dir(&gh_path); } gh_path }
identifier_body
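The middle of this row is the body of `ensure_gh_dir_exists`, which creates `$HOME/.config` and then `$HOME/.config/gh` one directory at a time with `fs::create_dir`. As a hedged aside, `std::fs::create_dir_all` can create the whole chain in one call; the sketch below is a possible simplification rather than what the file actually does, and it points at a temporary directory so it never touches a real home directory:

```rust
use std::fs;
use std::path::PathBuf;

fn ensure_gh_dir(home: PathBuf) -> std::io::Result<PathBuf> {
    let mut gh = home;
    gh.push(".config");
    gh.push("gh");
    // Creates .config and .config/gh in one call; a no-op if they already exist.
    fs::create_dir_all(&gh)?;
    Ok(gh)
}

fn main() -> std::io::Result<()> {
    // A temp path stands in for $HOME in this sketch.
    let fake_home = std::env::temp_dir().join("gh-config-sketch");
    let gh = ensure_gh_dir(fake_home)?;
    assert!(gh.ends_with(".config/gh"));
    Ok(())
}
```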
config.rs
use std::io::prelude::*; use std::{env, fs}; use std::fs::{File, OpenOptions}; use std::path::PathBuf; use clap::ArgMatches; use std::io::{BufReader, BufWriter, Write}; use evidence::json_ops; use serde_json; use serde_json::Error; #[derive(Serialize, Deserialize)] pub struct Config { pub username: String, pub access_token: String } impl Clone for Config { fn clone(&self) -> Config { Config { username: self.username.clone(), access_token: self.access_token.clone() } } } fn config_from_args(matches: &ArgMatches) -> Config { Config { username: matches.value_of("username").unwrap().to_string(), access_token: matches.value_of("access_token").unwrap().to_string() } } pub fn load_config() -> Config { let credentials_path = { let home_dir = get_home_dir(); let mut xs = home_dir; xs.push(".config"); xs.push("gh"); xs.push("credentials"); xs }; if!credentials_path.exists() { panic!("no configuration found"); } let file = match File::open(&credentials_path) { Ok(f) => f, Err(e) => panic!("could not open credentials file {}", e) }; let mut buf_reader = BufReader::new(file); let mut contents = String::new(); let _ = buf_reader.read_to_string(&mut contents); let config: Config = json_ops::from_str_or_die(&contents, "Unable to parse Credentials file!"); config } pub fn show_config(matches: &ArgMatches) -> () { //let credentials_path = { // let home_dir = get_home_dir(); // let mut xs = home_dir; // xs.push(".config"); // xs.push("gh"); // xs.push("credentials"); // xs //}; //if!credentials_path.exists() { // panic!("no configuration found"); //} //let file = match File::open(&credentials_path) { // Ok(f) => f, // Err(e) => panic!("could not open credentials file {}", e) //}; //let mut buf_reader = BufReader::new(file); //let mut contents = String::new(); //let _ = buf_reader.read_to_string(&mut contents); //let decoded: Config = json::decode(&contents).unwrap(); let config = load_config(); match matches.value_of("format") { None => print_config(&config), Some(format) => if format == "json" { //let config_json = json::as_pretty_json(&decoded); let config_json = match serde_json::to_string_pretty(&config) { Ok(json) => json, Err(_) => panic!("Error serializing config json"), }; println!("{}", config_json); } else { panic!("unknown format request {}", format); } } } fn print_config(config: &Config) -> () { println!("{0: <10} {1: <40}", "login", "access token"); println!("{0: <10} {1: <10}", config.username, config.access_token); } pub fn set_config(matches: &ArgMatches) -> () { let config = config_from_args(matches); let config_json = match serde_json::to_string_pretty(&config) { Ok(json) => json, Err(_) => panic!("Error serializing config json"), }; let st = config_json.to_string(); let home_dir = get_home_dir(); let config_dir = ensure_config_dir_exists(home_dir); let gh_dir = ensure_gh_dir_exists(config_dir); let credentials = { let mut xs = gh_dir; xs.push("credentials"); xs }; let file = match OpenOptions::new().read(true) .write(true) .create(true) .open(&credentials) { Ok(f) => f, Err(e) => panic!(e) }; if credentials.exists() { let mut buf = BufWriter::new(&file); buf.write_all(st.as_bytes()).expect("Unable to write config"); } else { let mut buf = BufWriter::new(&file); buf.write_all(st.as_bytes()).expect("Unable to write config"); } let len = { let x: usize = st.as_bytes().len(); x as u64 }; let _ = file.set_len(len); println!("Completed set_config!"); } // Returns the $HOME directory or panics if it can't find it. fn
() -> PathBuf { match env::home_dir() { Some(path) => path, None => panic!("$HOME directory not found") } } // Ensures the config directory exists. Creates the directory if it doesn't. // $HOME/.config is the expected config directory. Returns the // PathBuf after ensuring it exists. fn ensure_config_dir_exists(home_path: PathBuf) -> PathBuf { let config_path = { let mut xs = home_path; xs.push(".config"); xs }; if !config_path.exists() { let _ = fs::create_dir(&config_path); } config_path } // Ensures the gh directory exists. Creates the directory if it doesn't. // $HOME/.config/gh is the expected gh directory. Returns the // PathBuf after ensuring it exists. fn ensure_gh_dir_exists(config_path: PathBuf) -> PathBuf { let gh_path = { let mut xs = config_path; xs.push("gh"); xs }; if !gh_path.exists() { let _ = fs::create_dir(&gh_path); } gh_path }
get_home_dir
identifier_name
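This row masks the name `get_home_dir`, whose body calls `std::env::home_dir()`. That function has been deprecated since Rust 1.29 because it can return misleading values on Windows; a common replacement is the `dirs` crate. A sketch of that swap, assuming `dirs` is added as a dependency (an alternative, not what the dataset's file does):

```rust
// Hypothetical replacement for the get_home_dir above, using the dirs crate.
fn get_home_dir() -> std::path::PathBuf {
    dirs::home_dir().expect("$HOME directory not found")
}

fn main() {
    println!("{}", get_home_dir().display());
}
```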
tray.rs
//! This module only contains a function to create a tray icon. extern crate systray; extern crate tempfile; use std::sync::{Arc, Mutex}; use library::Queue; use command::Command; /// Creates a tray icon with some buttons to send commands to the server. pub fn start_tray(queue: &Arc<Mutex<Queue>>) { if let Ok(mut tray) = systray::Application::new()
tray.add_menu_item(&"Stop".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::Stop, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Previous album".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PreviousAlbum, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Next album".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::NextAlbum, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Previous song".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PreviousSong, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Next song".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::NextSong, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Quit".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::StopServer, None); }).ok(); tray.wait_for_message(); } }
{ // This is stupid, but I have no other way to do this // Read the icon file at compilation into an array ... const BYTES: &[u8] = include_bytes!("../assets/icons/icon.ico"); // ... then write this icon file to a temporary file ... use std::io::Write; let mut tmp_file = tempfile::NamedTempFile::new().unwrap(); tmp_file.write_all(BYTES).unwrap(); // ... and set the icon from the temp file tray.set_icon_from_file(&tmp_file.path().to_str().unwrap().to_string()).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Play / Pause".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PlayPause, None); }).ok(); let q = Arc::clone(queue);
conditional_block
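The middle of this `tray.rs` row embeds the icon with `include_bytes!` and writes it to a `tempfile::NamedTempFile` so that `set_icon_from_file` has a path to read. A self-contained sketch of that embed-then-write step; the byte slice is a placeholder for the real `../assets/icons/icon.ico` contents and `write_icon_to_temp` is a hypothetical helper name. Keeping the returned handle alive matters, because the temporary file is removed when the `NamedTempFile` is dropped.

```rust
use std::io::Write;

fn write_icon_to_temp(bytes: &[u8]) -> std::io::Result<tempfile::NamedTempFile> {
    let mut tmp = tempfile::NamedTempFile::new()?;
    tmp.write_all(bytes)?;
    Ok(tmp) // the file disappears once this handle is dropped
}

fn main() -> std::io::Result<()> {
    // Placeholder bytes; the real code uses include_bytes!("../assets/icons/icon.ico").
    let tmp = write_icon_to_temp(&[0u8; 4])?;
    println!("icon written to {}", tmp.path().display());
    Ok(())
}
```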
tray.rs
//! This module only contains a function to create a tray icon. extern crate systray; extern crate tempfile; use std::sync::{Arc, Mutex}; use library::Queue; use command::Command; /// Creates a tray icon with some buttons to send commands to the server. pub fn start_tray(queue: &Arc<Mutex<Queue>>)
let q = Arc::clone(queue); tray.add_menu_item(&"Stop".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::Stop, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Previous album".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PreviousAlbum, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Next album".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::NextAlbum, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Previous song".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PreviousSong, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Next song".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::NextSong, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Quit".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::StopServer, None); }).ok(); tray.wait_for_message(); } }
{ if let Ok(mut tray) = systray::Application::new() { // This is stupid, but I have no other way to do this // Read the icon file at compilation into an array ... const BYTES: &[u8] = include_bytes!("../assets/icons/icon.ico"); // ... then write this icon file to a temporary file ... use std::io::Write; let mut tmp_file = tempfile::NamedTempFile::new().unwrap(); tmp_file.write_all(BYTES).unwrap(); // ... and set the icon from the temp file tray.set_icon_from_file(&tmp_file.path().to_str().unwrap().to_string()).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Play / Pause".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PlayPause, None); }).ok();
identifier_body
tray.rs
//! This module only contains a function to create a tray icon. extern crate systray; extern crate tempfile; use std::sync::{Arc, Mutex}; use library::Queue; use command::Command; /// Creates a tray icon with some buttons to send commands to the server. pub fn start_tray(queue: &Arc<Mutex<Queue>>) { if let Ok(mut tray) = systray::Application::new() { // This is stupid, but I have no other way to do this // Read the icon file at compilation into an array ... const BYTES: &[u8] = include_bytes!("../assets/icons/icon.ico"); // ... then write this icon file to a temporary file ... use std::io::Write; let mut tmp_file = tempfile::NamedTempFile::new().unwrap(); tmp_file.write_all(BYTES).unwrap(); // ... and set the icon from the temp file tray.set_icon_from_file(&tmp_file.path().to_str().unwrap().to_string()).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Play / Pause".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PlayPause, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Stop".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::Stop, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Previous album".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PreviousAlbum, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Next album".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::NextAlbum, None); }).ok(); tray.add_menu_separator().ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Previous song".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::PreviousSong, None); }).ok(); let q = Arc::clone(queue); tray.add_menu_item(&"Next song".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::NextSong, None); }).ok(); tray.add_menu_separator().ok();
tray.add_menu_item(&"Quit".to_string(), move |_| { let mut q = q.lock().unwrap(); q.push(Command::StopServer, None); }).ok(); tray.wait_for_message(); } }
let q = Arc::clone(queue);
random_line_split
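Every menu item in these `tray.rs` rows repeats `let q = Arc::clone(queue);` so that each `move` closure owns its own handle to the shared queue. A minimal sketch of that clone-per-closure pattern, with a `Vec<&str>` standing in for the crate's `Queue` and plain closures standing in for `add_menu_item` callbacks:

```rust
use std::sync::{Arc, Mutex};

fn main() {
    let queue = Arc::new(Mutex::new(Vec::<&str>::new()));

    // One clone per callback, exactly as the tray code does before each add_menu_item.
    let q = Arc::clone(&queue);
    let on_play_pause = move || q.lock().unwrap().push("PlayPause");

    let q = Arc::clone(&queue);
    let on_stop = move || q.lock().unwrap().push("Stop");

    on_play_pause();
    on_stop();
    assert_eq!(*queue.lock().unwrap(), vec!["PlayPause", "Stop"]);
}
```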