Dataset columns (name, dtype, value statistics):

file_name   large_string   lengths 4 to 69
prefix      large_string   lengths 0 to 26.7k
suffix      large_string   lengths 0 to 24.8k
middle      large_string   lengths 0 to 2.12k
fim_type    large_string   4 classes
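Each record below is one fill-in-the-middle example: a Rust source file is split into a prefix, a held-out middle, and a suffix, and fim_type records how the split was chosen. The four classes that appear in this section are identifier_body, identifier_name, conditional_block, and random_line_split. As a minimal sketch of how a record fits back together (the FimRow struct and reconstruct helper are illustrative names, not part of the dataset, and the field values are abbreviated from the first maildir.rs record below), concatenating the three pieces reproduces the original file:

```rust
/// Illustrative layout for one record; field names mirror the dataset
/// columns, but this struct is not provided by the dataset itself.
struct FimRow {
    file_name: String,
    prefix: String,
    middle: String, // the span a model is asked to fill in
    suffix: String,
    fim_type: String, // e.g. "identifier_body", "conditional_block"
}

/// Reassemble the original source text from one record.
fn reconstruct(row: &FimRow) -> String {
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    // Abbreviated values taken from the first maildir.rs example below.
    let row = FimRow {
        file_name: "maildir.rs".to_string(),
        prefix: "fn view(&self) -> Vec<&I3BarWidget> ".to_string(),
        middle: "{ vec![&self.text] }".to_string(),
        suffix: " fn id(&self) -> &str { &self.id } }".to_string(),
        fim_type: "identifier_body".to_string(),
    };
    assert!(reconstruct(&row).contains("vec![&self.text]"));
    println!("{} ({})", row.file_name, row.fim_type);
}
```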
maildir.rs
use std::time::Duration; use chan::Sender; use block::{Block, ConfigBlock}; use config::Config; use de::deserialize_duration; use errors::*; use widgets::text::TextWidget; use widget::{I3BarWidget, State}; use input::I3BarEvent; use scheduler::Task; use maildir::Maildir as ExtMaildir; use uuid::Uuid; pub struct Maildir { text: TextWidget, id: String, update_interval: Duration, inboxes: Vec<String>, threshold_warning: usize, threshold_critical: usize, } #[derive(Deserialize, Debug, Default, Clone)] #[serde(deny_unknown_fields)] pub struct MaildirConfig { /// Update interval in seconds #[serde(default = "MaildirConfig::default_interval", deserialize_with = "deserialize_duration")] pub interval: Duration, pub inboxes: Vec<String>, #[serde(default = "MaildirConfig::default_threshold_warning")] pub threshold_warning: usize, #[serde(default = "MaildirConfig::default_threshold_critical")] pub threshold_critical: usize, } impl MaildirConfig { fn default_interval() -> Duration { Duration::from_secs(5) } fn default_threshold_warning() -> usize { 1 as usize } fn default_threshold_critical() -> usize { 10 as usize } } impl ConfigBlock for Maildir { type Config = MaildirConfig; fn new(block_config: Self::Config, config: Config, _tx_update_request: Sender<Task>) -> Result<Self> { Ok(Maildir { id: Uuid::new_v4().simple().to_string(), update_interval: block_config.interval, text: TextWidget::new(config.clone()) .with_icon("mail") .with_text(""), inboxes: block_config.inboxes, threshold_warning: block_config.threshold_warning, threshold_critical: block_config.threshold_critical, }) } } impl Block for Maildir { fn update(&mut self) -> Result<Option<Duration>> { let mut newmails = 0; for inbox in &self.inboxes { let isl: &str = &inbox[..]; let maildir = ExtMaildir::from(isl); newmails += maildir.count_new(); } let mut state = { State::Idle }; if newmails >= self.threshold_critical { state = { State::Critical }; } else if newmails >= self.threshold_warning { state = { State::Warning }; } self.text.set_state(state); self.text.set_text(format!("{}", newmails)); Ok(Some(self.update_interval)) } fn view(&self) -> Vec<&I3BarWidget>
fn click(&mut self, _: &I3BarEvent) -> Result<()> { Ok(()) } fn id(&self) -> &str { &self.id } }
{ vec![&self.text] }
identifier_body
maildir.rs
use std::time::Duration; use chan::Sender; use block::{Block, ConfigBlock}; use config::Config; use de::deserialize_duration; use errors::*; use widgets::text::TextWidget; use widget::{I3BarWidget, State}; use input::I3BarEvent; use scheduler::Task; use maildir::Maildir as ExtMaildir; use uuid::Uuid; pub struct Maildir { text: TextWidget, id: String, update_interval: Duration, inboxes: Vec<String>, threshold_warning: usize, threshold_critical: usize, } #[derive(Deserialize, Debug, Default, Clone)] #[serde(deny_unknown_fields)] pub struct MaildirConfig { /// Update interval in seconds #[serde(default = "MaildirConfig::default_interval", deserialize_with = "deserialize_duration")] pub interval: Duration, pub inboxes: Vec<String>, #[serde(default = "MaildirConfig::default_threshold_warning")] pub threshold_warning: usize, #[serde(default = "MaildirConfig::default_threshold_critical")] pub threshold_critical: usize, } impl MaildirConfig { fn default_interval() -> Duration { Duration::from_secs(5) } fn default_threshold_warning() -> usize { 1 as usize } fn default_threshold_critical() -> usize { 10 as usize } } impl ConfigBlock for Maildir { type Config = MaildirConfig; fn new(block_config: Self::Config, config: Config, _tx_update_request: Sender<Task>) -> Result<Self> { Ok(Maildir { id: Uuid::new_v4().simple().to_string(), update_interval: block_config.interval, text: TextWidget::new(config.clone()) .with_icon("mail") .with_text(""), inboxes: block_config.inboxes, threshold_warning: block_config.threshold_warning, threshold_critical: block_config.threshold_critical,
fn update(&mut self) -> Result<Option<Duration>> { let mut newmails = 0; for inbox in &self.inboxes { let isl: &str = &inbox[..]; let maildir = ExtMaildir::from(isl); newmails += maildir.count_new(); } let mut state = { State::Idle }; if newmails >= self.threshold_critical { state = { State::Critical }; } else if newmails >= self.threshold_warning { state = { State::Warning }; } self.text.set_state(state); self.text.set_text(format!("{}", newmails)); Ok(Some(self.update_interval)) } fn view(&self) -> Vec<&I3BarWidget> { vec![&self.text] } fn click(&mut self, _: &I3BarEvent) -> Result<()> { Ok(()) } fn id(&self) -> &str { &self.id } }
}) } } impl Block for Maildir {
random_line_split
maildir.rs
use std::time::Duration; use chan::Sender; use block::{Block, ConfigBlock}; use config::Config; use de::deserialize_duration; use errors::*; use widgets::text::TextWidget; use widget::{I3BarWidget, State}; use input::I3BarEvent; use scheduler::Task; use maildir::Maildir as ExtMaildir; use uuid::Uuid; pub struct Maildir { text: TextWidget, id: String, update_interval: Duration, inboxes: Vec<String>, threshold_warning: usize, threshold_critical: usize, } #[derive(Deserialize, Debug, Default, Clone)] #[serde(deny_unknown_fields)] pub struct MaildirConfig { /// Update interval in seconds #[serde(default = "MaildirConfig::default_interval", deserialize_with = "deserialize_duration")] pub interval: Duration, pub inboxes: Vec<String>, #[serde(default = "MaildirConfig::default_threshold_warning")] pub threshold_warning: usize, #[serde(default = "MaildirConfig::default_threshold_critical")] pub threshold_critical: usize, } impl MaildirConfig { fn default_interval() -> Duration { Duration::from_secs(5) } fn default_threshold_warning() -> usize { 1 as usize } fn default_threshold_critical() -> usize { 10 as usize } } impl ConfigBlock for Maildir { type Config = MaildirConfig; fn new(block_config: Self::Config, config: Config, _tx_update_request: Sender<Task>) -> Result<Self> { Ok(Maildir { id: Uuid::new_v4().simple().to_string(), update_interval: block_config.interval, text: TextWidget::new(config.clone()) .with_icon("mail") .with_text(""), inboxes: block_config.inboxes, threshold_warning: block_config.threshold_warning, threshold_critical: block_config.threshold_critical, }) } } impl Block for Maildir { fn update(&mut self) -> Result<Option<Duration>> { let mut newmails = 0; for inbox in &self.inboxes { let isl: &str = &inbox[..]; let maildir = ExtMaildir::from(isl); newmails += maildir.count_new(); } let mut state = { State::Idle }; if newmails >= self.threshold_critical { state = { State::Critical }; } else if newmails >= self.threshold_warning { state = { State::Warning }; } self.text.set_state(state); self.text.set_text(format!("{}", newmails)); Ok(Some(self.update_interval)) } fn
(&self) -> Vec<&I3BarWidget> { vec![&self.text] } fn click(&mut self, _: &I3BarEvent) -> Result<()> { Ok(()) } fn id(&self) -> &str { &self.id } }
view
identifier_name
memory.rs
//! Memory logic for the context sub-system. //! //! Some parts of this code are based on the Redox OS. use alloc::arc::{Arc, Weak}; use spin::Mutex; use arch::memory::paging::{ActivePageTable, Page, PageIter, VirtualAddress}; use arch::memory::paging::entry::EntryFlags; use arch::start; #[derive(Clone, Debug)] pub enum SharedMemory { Owned(Arc<Mutex<Memory>>), Borrowed(Weak<Mutex<Memory>>) } impl SharedMemory { /// Mark as borrowed. pub fn borrow(&self) -> SharedMemory { match *self { SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)), SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone()) } } } #[derive(Debug)] pub struct Memory { /// Start address for the memory zone. start: VirtualAddress, /// Size of the address space. size: usize, /// Flags for this address space. flags: EntryFlags } impl Memory { /// Create a new Memory instance. pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self { let mut memory = Memory { start, size, flags }; // map the memory and clean it if requested memory.map(clear); memory } /// Get the start address for this memory space. pub fn start_address(&self) -> VirtualAddress { self.start } /// Get the size of the address space. pub fn size(&self) -> usize { self.size } /// Get the flags associated to this memory zone. pub fn flags(&self) -> EntryFlags { self.flags } /// Get an iterator with the page range for this memory zone. pub fn pages(&self) -> PageIter { let start_page = Page::containing_address(self.start); let end_page = Page::containing_address((self.start as usize + self.size - 1) as VirtualAddress); Page::range_inclusive(start_page, end_page) } /// Convert this memory zone to a shared one. pub fn
(self) -> SharedMemory { SharedMemory::Owned(Arc::new(Mutex::new(self))) } /// Map a new space on the virtual memory for this memory zone. fn map(&mut self, clean: bool) { // create a new active page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { for page in self.pages() { memory_controller.map(&mut active_table, page, self.flags); } } else { panic!("Memory controller required"); } } /// Remap a memory area to another region pub fn remap(&mut self, new_flags: EntryFlags) { // create a new page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { // remap all pages for page in self.pages() { memory_controller.remap(&mut active_table, page, new_flags); } // flush TLB memory_controller.flush_all(); self.flags = new_flags; } else { panic!("Memory controller required"); } } }
to_shared
identifier_name
memory.rs
//! Memory logic for the context sub-system. //! //! Some parts of this code are based on the Redox OS. use alloc::arc::{Arc, Weak}; use spin::Mutex; use arch::memory::paging::{ActivePageTable, Page, PageIter, VirtualAddress}; use arch::memory::paging::entry::EntryFlags; use arch::start; #[derive(Clone, Debug)] pub enum SharedMemory { Owned(Arc<Mutex<Memory>>), Borrowed(Weak<Mutex<Memory>>) } impl SharedMemory { /// Mark as borrowed. pub fn borrow(&self) -> SharedMemory { match *self { SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)), SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone()) } } } #[derive(Debug)] pub struct Memory { /// Start address for the memory zone. start: VirtualAddress, /// Size of the address space. size: usize, /// Flags for this address space. flags: EntryFlags } impl Memory { /// Create a new Memory instance. pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self { let mut memory = Memory { start, size, flags }; // map the memory and clean it if requested memory.map(clear); memory } /// Get the start address for this memory space. pub fn start_address(&self) -> VirtualAddress { self.start } /// Get the size of the address space. pub fn size(&self) -> usize { self.size } /// Get the flags associated to this memory zone. pub fn flags(&self) -> EntryFlags { self.flags } /// Get an iterator with the page range for this memory zone. pub fn pages(&self) -> PageIter { let start_page = Page::containing_address(self.start); let end_page = Page::containing_address((self.start as usize + self.size - 1) as VirtualAddress); Page::range_inclusive(start_page, end_page) } /// Convert this memory zone to a shared one. pub fn to_shared(self) -> SharedMemory { SharedMemory::Owned(Arc::new(Mutex::new(self))) } /// Map a new space on the virtual memory for this memory zone. fn map(&mut self, clean: bool) { // create a new active page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock()
else { panic!("Memory controller required"); } } /// Remap a memory area to another region pub fn remap(&mut self, new_flags: EntryFlags) { // create a new page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { // remap all pages for page in self.pages() { memory_controller.remap(&mut active_table, page, new_flags); } // flush TLB memory_controller.flush_all(); self.flags = new_flags; } else { panic!("Memory controller required"); } } }
{ for page in self.pages() { memory_controller.map(&mut active_table, page, self.flags); } }
conditional_block
memory.rs
//! Memory logic for the context sub-system. //! //! Some parts of this code are based on the Redox OS. use alloc::arc::{Arc, Weak}; use spin::Mutex; use arch::memory::paging::{ActivePageTable, Page, PageIter, VirtualAddress}; use arch::memory::paging::entry::EntryFlags; use arch::start; #[derive(Clone, Debug)] pub enum SharedMemory { Owned(Arc<Mutex<Memory>>), Borrowed(Weak<Mutex<Memory>>) } impl SharedMemory { /// Mark as borrowed. pub fn borrow(&self) -> SharedMemory { match *self { SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)), SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone()) } } } #[derive(Debug)] pub struct Memory { /// Start address for the memory zone. start: VirtualAddress, /// Size of the address space. size: usize, /// Flags for this address space. flags: EntryFlags } impl Memory { /// Create a new Memory instance. pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self { let mut memory = Memory { start, size, flags }; // map the memory and clean it if requested memory.map(clear); memory } /// Get the start address for this memory space. pub fn start_address(&self) -> VirtualAddress { self.start } /// Get the size of the address space. pub fn size(&self) -> usize { self.size } /// Get the flags associated to this memory zone. pub fn flags(&self) -> EntryFlags
/// Get an iterator with the page range for this memory zone. pub fn pages(&self) -> PageIter { let start_page = Page::containing_address(self.start); let end_page = Page::containing_address((self.start as usize + self.size - 1) as VirtualAddress); Page::range_inclusive(start_page, end_page) } /// Convert this memory zone to a shared one. pub fn to_shared(self) -> SharedMemory { SharedMemory::Owned(Arc::new(Mutex::new(self))) } /// Map a new space on the virtual memory for this memory zone. fn map(&mut self, clean: bool) { // create a new active page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { for page in self.pages() { memory_controller.map(&mut active_table, page, self.flags); } } else { panic!("Memory controller required"); } } /// Remap a memory area to another region pub fn remap(&mut self, new_flags: EntryFlags) { // create a new page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { // remap all pages for page in self.pages() { memory_controller.remap(&mut active_table, page, new_flags); } // flush TLB memory_controller.flush_all(); self.flags = new_flags; } else { panic!("Memory controller required"); } } }
{ self.flags }
identifier_body
memory.rs
//! Memory logic for the context sub-system. //! //! Some parts of this code are based on the Redox OS. use alloc::arc::{Arc, Weak}; use spin::Mutex; use arch::memory::paging::{ActivePageTable, Page, PageIter, VirtualAddress}; use arch::memory::paging::entry::EntryFlags; use arch::start; #[derive(Clone, Debug)] pub enum SharedMemory { Owned(Arc<Mutex<Memory>>), Borrowed(Weak<Mutex<Memory>>) } impl SharedMemory { /// Mark as borrowed. pub fn borrow(&self) -> SharedMemory { match *self { SharedMemory::Owned(ref memory_lock) => SharedMemory::Borrowed(Arc::downgrade(memory_lock)), SharedMemory::Borrowed(ref memory_lock) => SharedMemory::Borrowed(memory_lock.clone()) } } } #[derive(Debug)] pub struct Memory { /// Start address for the memory zone. start: VirtualAddress, /// Size of the address space. size: usize, /// Flags for this address space. flags: EntryFlags }
start, size, flags }; // map the memory and clean it if requested memory.map(clear); memory } /// Get the start address for this memory space. pub fn start_address(&self) -> VirtualAddress { self.start } /// Get the size of the address space. pub fn size(&self) -> usize { self.size } /// Get the flags associated to this memory zone. pub fn flags(&self) -> EntryFlags { self.flags } /// Get an iterator with the page range for this memory zone. pub fn pages(&self) -> PageIter { let start_page = Page::containing_address(self.start); let end_page = Page::containing_address((self.start as usize + self.size - 1) as VirtualAddress); Page::range_inclusive(start_page, end_page) } /// Convert this memory zone to a shared one. pub fn to_shared(self) -> SharedMemory { SharedMemory::Owned(Arc::new(Mutex::new(self))) } /// Map a new space on the virtual memory for this memory zone. fn map(&mut self, clean: bool) { // create a new active page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { for page in self.pages() { memory_controller.map(&mut active_table, page, self.flags); } } else { panic!("Memory controller required"); } } /// Remap a memory area to another region pub fn remap(&mut self, new_flags: EntryFlags) { // create a new page table let mut active_table = unsafe { ActivePageTable::new() }; // get memory controller if let Some(ref mut memory_controller) = *::MEMORY_CONTROLLER.lock() { // remap all pages for page in self.pages() { memory_controller.remap(&mut active_table, page, new_flags); } // flush TLB memory_controller.flush_all(); self.flags = new_flags; } else { panic!("Memory controller required"); } } }
impl Memory { /// Create a new Memory instance. pub fn new(start: VirtualAddress, size: usize, flags: EntryFlags, clear: bool) -> Self { let mut memory = Memory {
random_line_split
grammar.rs
#[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum AddOp { Add, Subtract, Start, } #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum MultOp { Multiply, Divide, Modulo, Start, } #[derive(Clone)] #[derive(Debug)] #[derive(PartialEq)] pub struct AddTerm(pub AddOp, pub Expr); #[derive(Clone)] #[derive(Debug)] #[derive(PartialEq)] pub struct MultTerm(pub MultOp, pub Expr); #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum Expr { Variable(String), Num(i32), AddSub(Vec<AddTerm>), //a + b - c + d becomes [(+ a) (+ b) (- c) (+ d)] MultDiv(Vec<MultTerm>), } //for now this is it's own type and not a statement #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub struct Block(pub Vec<Statement>); #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum Statement { Assign(String, Expr), Output(Expr), If(Expr, Comparator, Expr, Block, Option<Block>), While(Expr, Comparator, Expr, Block), Loop(Expr, Block), } #[derive(Debug)] #[derive(Clone)] #[derive(Eq)] #[derive(PartialEq)]
CLt, // < CNeq, //!= CGeq, // >= CLeq, // <= }
pub enum Comparator { CEq, // == CGt, // >
random_line_split
grammar.rs
#[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum
{ Add, Subtract, Start, } #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum MultOp { Multiply, Divide, Modulo, Start, } #[derive(Clone)] #[derive(Debug)] #[derive(PartialEq)] pub struct AddTerm(pub AddOp, pub Expr); #[derive(Clone)] #[derive(Debug)] #[derive(PartialEq)] pub struct MultTerm(pub MultOp, pub Expr); #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum Expr { Variable(String), Num(i32), AddSub(Vec<AddTerm>), //a + b - c + d becomes [(+ a) (+ b) (- c) (+ d)] MultDiv(Vec<MultTerm>), } //for now this is it's own type and not a statement #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub struct Block(pub Vec<Statement>); #[derive(Debug)] #[derive(Clone)] #[derive(PartialEq)] pub enum Statement { Assign(String, Expr), Output(Expr), If(Expr, Comparator, Expr, Block, Option<Block>), While(Expr, Comparator, Expr, Block), Loop(Expr, Block), } #[derive(Debug)] #[derive(Clone)] #[derive(Eq)] #[derive(PartialEq)] pub enum Comparator { CEq, // == CGt, // > CLt, // < CNeq, //!= CGeq, // >= CLeq, // <= }
AddOp
identifier_name
arena.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Dynamic arenas. // Arenas are used to quickly allocate objects that share a // lifetime. The arena uses ~[u8] vectors as a backing store to // allocate objects from. For each allocated object, the arena stores // a pointer to the type descriptor followed by the // object. (Potentially with alignment padding after each of them.) // When the arena is destroyed, it iterates through all of its chunks, // and uses the tydesc information to trace through the objects, // calling the destructors on them. // One subtle point that needs to be addressed is how to handle // failures while running the user provided initializer function. It // is important to not run the destructor on uninitialized objects, but // how to detect them is somewhat subtle. Since alloc() can be invoked // recursively, it is not sufficient to simply exclude the most recent // object. To solve this without requiring extra space, we use the low // order bit of the tydesc pointer to encode whether the object it // describes has been fully initialized. // As an optimization, objects with destructors are stored in // different chunks than objects without destructors. This reduces // overhead when initializing plain-old-data and means we don't need // to waste time running the destructors of POD. use list::{MutList, MutCons, MutNil}; use core::at_vec; use core::cast::{transmute, transmute_mut_region}; use core::cast; use core::libc::size_t; use core::ptr; use core::sys::TypeDesc; use core::sys; use core::uint; use core::vec; pub mod rusti { #[abi = "rust-intrinsic"] pub extern "rust-intrinsic" { fn move_val_init<T>(dst: &mut T, src: T); fn needs_drop<T>() -> bool; } } pub mod rustrt { use core::libc::size_t; use core::sys::TypeDesc; pub extern { #[rust_stack] unsafe fn rust_call_tydesc_glue(root: *u8, tydesc: *TypeDesc, field: size_t); } } // This probably belongs somewhere else. Needs to be kept in sync with // changes to glue... static tydesc_drop_glue_index: size_t = 3 as size_t; // The way arena uses arrays is really deeply awful. The arrays are // allocated, and have capacities reserved, but the fill for the array // will always stay at 0. struct Chunk { data: @[u8], fill: uint, is_pod: bool, } pub struct Arena { // The head is separated out from the list as a unbenchmarked // microoptimization, to avoid needing to case on the list to // access the head. priv head: Chunk, priv pod_head: Chunk, priv chunks: @mut MutList<Chunk>, } #[unsafe_destructor] impl Drop for Arena { fn finalize(&self) { unsafe { destroy_chunk(&self.head); for self.chunks.each |chunk| { if!chunk.is_pod { destroy_chunk(chunk); } } } } } fn chunk(size: uint, is_pod: bool) -> Chunk { let mut v: @[u8] = @[]; unsafe { at_vec::raw::reserve(&mut v, size); } Chunk { data: unsafe { cast::transmute(v) }, fill: 0u, is_pod: is_pod, } } pub fn arena_with_size(initial_size: uint) -> Arena { Arena { head: chunk(initial_size, false), pod_head: chunk(initial_size, true), chunks: @mut MutNil, } } pub fn Arena() -> Arena { arena_with_size(32u) } #[inline(always)] fn
(base: uint, align: uint) -> uint { (base + (align - 1)) &!(align - 1) } // Walk down a chunk, running the destructors for any objects stored // in it. unsafe fn destroy_chunk(chunk: &Chunk) { let mut idx = 0; let buf = vec::raw::to_ptr(chunk.data); let fill = chunk.fill; while idx < fill { let tydesc_data: *uint = transmute(ptr::offset(buf, idx)); let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data); let size = (*tydesc).size, align = (*tydesc).align; let after_tydesc = idx + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); //debug!("freeing object: idx = %u, size = %u, align = %u, done = %b", // start, size, align, is_done); if is_done { rustrt::rust_call_tydesc_glue( ptr::offset(buf, start), tydesc, tydesc_drop_glue_index); } // Find where the next tydesc lives idx = round_up_to(start + size, sys::pref_align_of::<*TypeDesc>()); } } // We encode whether the object a tydesc describes has been // initialized in the arena in the low bit of the tydesc pointer. This // is necessary in order to properly do cleanup if a failure occurs // during an initializer. #[inline(always)] unsafe fn bitpack_tydesc_ptr(p: *TypeDesc, is_done: bool) -> uint { let p_bits: uint = transmute(p); p_bits | (is_done as uint) } #[inline(always)] unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) { (transmute(p &!1), p & 1 == 1) } pub impl Arena { // Functions for the POD part of the arena priv fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 { // Allocate a new chunk. let chunk_size = at_vec::capacity(self.pod_head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.pod_head, self.chunks); self.pod_head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true); return self.alloc_pod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 { unsafe { // XXX: Borrow check let head = transmute_mut_region(&mut self.pod_head); let start = round_up_to(head.fill, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_pod_grow(n_bytes, align); } head.fill = end; //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); ptr::offset(vec::raw::to_ptr(head.data), start) } } #[inline(always)] priv fn alloc_pod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align); let ptr: *mut T = transmute(ptr); rusti::move_val_init(&mut (*ptr), op()); return transmute(ptr); } } // Functions for the non-POD part of the arena priv fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { // Allocate a new chunk. 
let chunk_size = at_vec::capacity(self.head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.head, self.chunks); self.head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false); return self.alloc_nonpod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { unsafe { let head = transmute_mut_region(&mut self.head); let tydesc_start = head.fill; let after_tydesc = head.fill + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_nonpod_grow(n_bytes, align); } head.fill = round_up_to(end, sys::pref_align_of::<*TypeDesc>()); //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); let buf = vec::raw::to_ptr(head.data); return (ptr::offset(buf, tydesc_start), ptr::offset(buf, start)); } } #[inline(always)] priv fn alloc_nonpod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let (ty_ptr, ptr) = self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align); let ty_ptr: *mut uint = transmute(ty_ptr); let ptr: *mut T = transmute(ptr); // Write in our tydesc along with a bit indicating that it // has *not* been initialized yet. *ty_ptr = transmute(tydesc); // Actually initialize it rusti::move_val_init(&mut(*ptr), op()); // Now that we are done, update the tydesc to indicate that // the object is there. *ty_ptr = bitpack_tydesc_ptr(tydesc, true); return transmute(ptr); } } // The external interface #[inline(always)] fn alloc<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { // XXX: Borrow check let this = transmute_mut_region(self); if!rusti::needs_drop::<T>() { return this.alloc_pod(op); } // XXX: Borrow check let this = transmute_mut_region(self); this.alloc_nonpod(op) } } } #[test] fn test_arena_destructors() { let mut arena = Arena(); for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_arena_destructors_fail() { let mut arena = Arena(); // Put some stuff in the arena. for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } // Now, fail while allocating do arena.alloc::<@int> { // Now fail. fail!(); }; }
round_up_to
identifier_name
arena.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Dynamic arenas. // Arenas are used to quickly allocate objects that share a // lifetime. The arena uses ~[u8] vectors as a backing store to // allocate objects from. For each allocated object, the arena stores // a pointer to the type descriptor followed by the // object. (Potentially with alignment padding after each of them.) // When the arena is destroyed, it iterates through all of its chunks, // and uses the tydesc information to trace through the objects, // calling the destructors on them. // One subtle point that needs to be addressed is how to handle // failures while running the user provided initializer function. It // is important to not run the destructor on uninitialized objects, but // how to detect them is somewhat subtle. Since alloc() can be invoked // recursively, it is not sufficient to simply exclude the most recent // object. To solve this without requiring extra space, we use the low // order bit of the tydesc pointer to encode whether the object it // describes has been fully initialized. // As an optimization, objects with destructors are stored in // different chunks than objects without destructors. This reduces // overhead when initializing plain-old-data and means we don't need // to waste time running the destructors of POD. use list::{MutList, MutCons, MutNil}; use core::at_vec; use core::cast::{transmute, transmute_mut_region}; use core::cast; use core::libc::size_t; use core::ptr; use core::sys::TypeDesc; use core::sys; use core::uint; use core::vec; pub mod rusti { #[abi = "rust-intrinsic"] pub extern "rust-intrinsic" { fn move_val_init<T>(dst: &mut T, src: T); fn needs_drop<T>() -> bool; } } pub mod rustrt { use core::libc::size_t; use core::sys::TypeDesc; pub extern { #[rust_stack] unsafe fn rust_call_tydesc_glue(root: *u8, tydesc: *TypeDesc, field: size_t); } } // This probably belongs somewhere else. Needs to be kept in sync with // changes to glue... static tydesc_drop_glue_index: size_t = 3 as size_t; // The way arena uses arrays is really deeply awful. The arrays are // allocated, and have capacities reserved, but the fill for the array // will always stay at 0. struct Chunk { data: @[u8], fill: uint, is_pod: bool, } pub struct Arena { // The head is separated out from the list as a unbenchmarked // microoptimization, to avoid needing to case on the list to // access the head. 
priv head: Chunk, priv pod_head: Chunk, priv chunks: @mut MutList<Chunk>, } #[unsafe_destructor] impl Drop for Arena { fn finalize(&self) { unsafe { destroy_chunk(&self.head); for self.chunks.each |chunk| { if!chunk.is_pod { destroy_chunk(chunk); } } } } } fn chunk(size: uint, is_pod: bool) -> Chunk { let mut v: @[u8] = @[]; unsafe { at_vec::raw::reserve(&mut v, size); } Chunk { data: unsafe { cast::transmute(v) }, fill: 0u, is_pod: is_pod, } } pub fn arena_with_size(initial_size: uint) -> Arena { Arena { head: chunk(initial_size, false), pod_head: chunk(initial_size, true), chunks: @mut MutNil, } } pub fn Arena() -> Arena { arena_with_size(32u) } #[inline(always)] fn round_up_to(base: uint, align: uint) -> uint { (base + (align - 1)) &!(align - 1) } // Walk down a chunk, running the destructors for any objects stored // in it. unsafe fn destroy_chunk(chunk: &Chunk) { let mut idx = 0; let buf = vec::raw::to_ptr(chunk.data); let fill = chunk.fill; while idx < fill { let tydesc_data: *uint = transmute(ptr::offset(buf, idx)); let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data); let size = (*tydesc).size, align = (*tydesc).align; let after_tydesc = idx + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); //debug!("freeing object: idx = %u, size = %u, align = %u, done = %b", // start, size, align, is_done); if is_done { rustrt::rust_call_tydesc_glue( ptr::offset(buf, start), tydesc, tydesc_drop_glue_index); } // Find where the next tydesc lives idx = round_up_to(start + size, sys::pref_align_of::<*TypeDesc>()); } } // We encode whether the object a tydesc describes has been // initialized in the arena in the low bit of the tydesc pointer. This // is necessary in order to properly do cleanup if a failure occurs // during an initializer. #[inline(always)] unsafe fn bitpack_tydesc_ptr(p: *TypeDesc, is_done: bool) -> uint { let p_bits: uint = transmute(p); p_bits | (is_done as uint) } #[inline(always)] unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) { (transmute(p &!1), p & 1 == 1) } pub impl Arena { // Functions for the POD part of the arena priv fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 { // Allocate a new chunk. let chunk_size = at_vec::capacity(self.pod_head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.pod_head, self.chunks); self.pod_head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true); return self.alloc_pod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 { unsafe { // XXX: Borrow check let head = transmute_mut_region(&mut self.pod_head); let start = round_up_to(head.fill, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_pod_grow(n_bytes, align); } head.fill = end; //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); ptr::offset(vec::raw::to_ptr(head.data), start) } } #[inline(always)] priv fn alloc_pod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align); let ptr: *mut T = transmute(ptr); rusti::move_val_init(&mut (*ptr), op()); return transmute(ptr); } } // Functions for the non-POD part of the arena priv fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { // Allocate a new chunk. 
let chunk_size = at_vec::capacity(self.head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.head, self.chunks); self.head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false); return self.alloc_nonpod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { unsafe { let head = transmute_mut_region(&mut self.head); let tydesc_start = head.fill; let after_tydesc = head.fill + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); let end = start + n_bytes; if end > at_vec::capacity(head.data)
head.fill = round_up_to(end, sys::pref_align_of::<*TypeDesc>()); //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); let buf = vec::raw::to_ptr(head.data); return (ptr::offset(buf, tydesc_start), ptr::offset(buf, start)); } } #[inline(always)] priv fn alloc_nonpod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let (ty_ptr, ptr) = self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align); let ty_ptr: *mut uint = transmute(ty_ptr); let ptr: *mut T = transmute(ptr); // Write in our tydesc along with a bit indicating that it // has *not* been initialized yet. *ty_ptr = transmute(tydesc); // Actually initialize it rusti::move_val_init(&mut(*ptr), op()); // Now that we are done, update the tydesc to indicate that // the object is there. *ty_ptr = bitpack_tydesc_ptr(tydesc, true); return transmute(ptr); } } // The external interface #[inline(always)] fn alloc<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { // XXX: Borrow check let this = transmute_mut_region(self); if!rusti::needs_drop::<T>() { return this.alloc_pod(op); } // XXX: Borrow check let this = transmute_mut_region(self); this.alloc_nonpod(op) } } } #[test] fn test_arena_destructors() { let mut arena = Arena(); for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_arena_destructors_fail() { let mut arena = Arena(); // Put some stuff in the arena. for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } // Now, fail while allocating do arena.alloc::<@int> { // Now fail. fail!(); }; }
{ return self.alloc_nonpod_grow(n_bytes, align); }
conditional_block
arena.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Dynamic arenas. // Arenas are used to quickly allocate objects that share a // lifetime. The arena uses ~[u8] vectors as a backing store to // allocate objects from. For each allocated object, the arena stores // a pointer to the type descriptor followed by the // object. (Potentially with alignment padding after each of them.) // When the arena is destroyed, it iterates through all of its chunks, // and uses the tydesc information to trace through the objects, // calling the destructors on them. // One subtle point that needs to be addressed is how to handle // failures while running the user provided initializer function. It // is important to not run the destructor on uninitialized objects, but // how to detect them is somewhat subtle. Since alloc() can be invoked // recursively, it is not sufficient to simply exclude the most recent // object. To solve this without requiring extra space, we use the low // order bit of the tydesc pointer to encode whether the object it // describes has been fully initialized. // As an optimization, objects with destructors are stored in // different chunks than objects without destructors. This reduces // overhead when initializing plain-old-data and means we don't need // to waste time running the destructors of POD. use list::{MutList, MutCons, MutNil}; use core::at_vec; use core::cast::{transmute, transmute_mut_region}; use core::cast; use core::libc::size_t; use core::ptr; use core::sys::TypeDesc; use core::sys; use core::uint; use core::vec; pub mod rusti { #[abi = "rust-intrinsic"] pub extern "rust-intrinsic" { fn move_val_init<T>(dst: &mut T, src: T); fn needs_drop<T>() -> bool; } } pub mod rustrt { use core::libc::size_t; use core::sys::TypeDesc; pub extern { #[rust_stack] unsafe fn rust_call_tydesc_glue(root: *u8, tydesc: *TypeDesc, field: size_t); } } // This probably belongs somewhere else. Needs to be kept in sync with // changes to glue... static tydesc_drop_glue_index: size_t = 3 as size_t; // The way arena uses arrays is really deeply awful. The arrays are // allocated, and have capacities reserved, but the fill for the array // will always stay at 0. struct Chunk { data: @[u8], fill: uint, is_pod: bool, } pub struct Arena { // The head is separated out from the list as a unbenchmarked // microoptimization, to avoid needing to case on the list to // access the head. 
priv head: Chunk, priv pod_head: Chunk, priv chunks: @mut MutList<Chunk>, } #[unsafe_destructor] impl Drop for Arena { fn finalize(&self) { unsafe { destroy_chunk(&self.head); for self.chunks.each |chunk| { if!chunk.is_pod { destroy_chunk(chunk); } } } } } fn chunk(size: uint, is_pod: bool) -> Chunk { let mut v: @[u8] = @[]; unsafe { at_vec::raw::reserve(&mut v, size); } Chunk { data: unsafe { cast::transmute(v) }, fill: 0u, is_pod: is_pod, } } pub fn arena_with_size(initial_size: uint) -> Arena { Arena { head: chunk(initial_size, false), pod_head: chunk(initial_size, true), chunks: @mut MutNil, } } pub fn Arena() -> Arena { arena_with_size(32u) } #[inline(always)] fn round_up_to(base: uint, align: uint) -> uint { (base + (align - 1)) &!(align - 1) } // Walk down a chunk, running the destructors for any objects stored // in it. unsafe fn destroy_chunk(chunk: &Chunk) { let mut idx = 0; let buf = vec::raw::to_ptr(chunk.data); let fill = chunk.fill; while idx < fill { let tydesc_data: *uint = transmute(ptr::offset(buf, idx)); let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data); let size = (*tydesc).size, align = (*tydesc).align; let after_tydesc = idx + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); //debug!("freeing object: idx = %u, size = %u, align = %u, done = %b", // start, size, align, is_done); if is_done { rustrt::rust_call_tydesc_glue( ptr::offset(buf, start), tydesc, tydesc_drop_glue_index); } // Find where the next tydesc lives idx = round_up_to(start + size, sys::pref_align_of::<*TypeDesc>()); } }
// is necessary in order to properly do cleanup if a failure occurs // during an initializer. #[inline(always)] unsafe fn bitpack_tydesc_ptr(p: *TypeDesc, is_done: bool) -> uint { let p_bits: uint = transmute(p); p_bits | (is_done as uint) } #[inline(always)] unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) { (transmute(p &!1), p & 1 == 1) } pub impl Arena { // Functions for the POD part of the arena priv fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 { // Allocate a new chunk. let chunk_size = at_vec::capacity(self.pod_head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.pod_head, self.chunks); self.pod_head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true); return self.alloc_pod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 { unsafe { // XXX: Borrow check let head = transmute_mut_region(&mut self.pod_head); let start = round_up_to(head.fill, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_pod_grow(n_bytes, align); } head.fill = end; //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); ptr::offset(vec::raw::to_ptr(head.data), start) } } #[inline(always)] priv fn alloc_pod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align); let ptr: *mut T = transmute(ptr); rusti::move_val_init(&mut (*ptr), op()); return transmute(ptr); } } // Functions for the non-POD part of the arena priv fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { // Allocate a new chunk. let chunk_size = at_vec::capacity(self.head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.head, self.chunks); self.head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false); return self.alloc_nonpod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { unsafe { let head = transmute_mut_region(&mut self.head); let tydesc_start = head.fill; let after_tydesc = head.fill + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_nonpod_grow(n_bytes, align); } head.fill = round_up_to(end, sys::pref_align_of::<*TypeDesc>()); //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); let buf = vec::raw::to_ptr(head.data); return (ptr::offset(buf, tydesc_start), ptr::offset(buf, start)); } } #[inline(always)] priv fn alloc_nonpod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let (ty_ptr, ptr) = self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align); let ty_ptr: *mut uint = transmute(ty_ptr); let ptr: *mut T = transmute(ptr); // Write in our tydesc along with a bit indicating that it // has *not* been initialized yet. *ty_ptr = transmute(tydesc); // Actually initialize it rusti::move_val_init(&mut(*ptr), op()); // Now that we are done, update the tydesc to indicate that // the object is there. 
*ty_ptr = bitpack_tydesc_ptr(tydesc, true); return transmute(ptr); } } // The external interface #[inline(always)] fn alloc<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { // XXX: Borrow check let this = transmute_mut_region(self); if!rusti::needs_drop::<T>() { return this.alloc_pod(op); } // XXX: Borrow check let this = transmute_mut_region(self); this.alloc_nonpod(op) } } } #[test] fn test_arena_destructors() { let mut arena = Arena(); for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_arena_destructors_fail() { let mut arena = Arena(); // Put some stuff in the arena. for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } // Now, fail while allocating do arena.alloc::<@int> { // Now fail. fail!(); }; }
// We encode whether the object a tydesc describes has been // initialized in the arena in the low bit of the tydesc pointer. This
random_line_split
arena.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Dynamic arenas. // Arenas are used to quickly allocate objects that share a // lifetime. The arena uses ~[u8] vectors as a backing store to // allocate objects from. For each allocated object, the arena stores // a pointer to the type descriptor followed by the // object. (Potentially with alignment padding after each of them.) // When the arena is destroyed, it iterates through all of its chunks, // and uses the tydesc information to trace through the objects, // calling the destructors on them. // One subtle point that needs to be addressed is how to handle // failures while running the user provided initializer function. It // is important to not run the destructor on uninitialized objects, but // how to detect them is somewhat subtle. Since alloc() can be invoked // recursively, it is not sufficient to simply exclude the most recent // object. To solve this without requiring extra space, we use the low // order bit of the tydesc pointer to encode whether the object it // describes has been fully initialized. // As an optimization, objects with destructors are stored in // different chunks than objects without destructors. This reduces // overhead when initializing plain-old-data and means we don't need // to waste time running the destructors of POD. use list::{MutList, MutCons, MutNil}; use core::at_vec; use core::cast::{transmute, transmute_mut_region}; use core::cast; use core::libc::size_t; use core::ptr; use core::sys::TypeDesc; use core::sys; use core::uint; use core::vec; pub mod rusti { #[abi = "rust-intrinsic"] pub extern "rust-intrinsic" { fn move_val_init<T>(dst: &mut T, src: T); fn needs_drop<T>() -> bool; } } pub mod rustrt { use core::libc::size_t; use core::sys::TypeDesc; pub extern { #[rust_stack] unsafe fn rust_call_tydesc_glue(root: *u8, tydesc: *TypeDesc, field: size_t); } } // This probably belongs somewhere else. Needs to be kept in sync with // changes to glue... static tydesc_drop_glue_index: size_t = 3 as size_t; // The way arena uses arrays is really deeply awful. The arrays are // allocated, and have capacities reserved, but the fill for the array // will always stay at 0. struct Chunk { data: @[u8], fill: uint, is_pod: bool, } pub struct Arena { // The head is separated out from the list as a unbenchmarked // microoptimization, to avoid needing to case on the list to // access the head. 
priv head: Chunk, priv pod_head: Chunk, priv chunks: @mut MutList<Chunk>, } #[unsafe_destructor] impl Drop for Arena { fn finalize(&self) { unsafe { destroy_chunk(&self.head); for self.chunks.each |chunk| { if!chunk.is_pod { destroy_chunk(chunk); } } } } } fn chunk(size: uint, is_pod: bool) -> Chunk { let mut v: @[u8] = @[]; unsafe { at_vec::raw::reserve(&mut v, size); } Chunk { data: unsafe { cast::transmute(v) }, fill: 0u, is_pod: is_pod, } } pub fn arena_with_size(initial_size: uint) -> Arena { Arena { head: chunk(initial_size, false), pod_head: chunk(initial_size, true), chunks: @mut MutNil, } } pub fn Arena() -> Arena { arena_with_size(32u) } #[inline(always)] fn round_up_to(base: uint, align: uint) -> uint { (base + (align - 1)) &!(align - 1) } // Walk down a chunk, running the destructors for any objects stored // in it. unsafe fn destroy_chunk(chunk: &Chunk) { let mut idx = 0; let buf = vec::raw::to_ptr(chunk.data); let fill = chunk.fill; while idx < fill { let tydesc_data: *uint = transmute(ptr::offset(buf, idx)); let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data); let size = (*tydesc).size, align = (*tydesc).align; let after_tydesc = idx + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); //debug!("freeing object: idx = %u, size = %u, align = %u, done = %b", // start, size, align, is_done); if is_done { rustrt::rust_call_tydesc_glue( ptr::offset(buf, start), tydesc, tydesc_drop_glue_index); } // Find where the next tydesc lives idx = round_up_to(start + size, sys::pref_align_of::<*TypeDesc>()); } } // We encode whether the object a tydesc describes has been // initialized in the arena in the low bit of the tydesc pointer. This // is necessary in order to properly do cleanup if a failure occurs // during an initializer. #[inline(always)] unsafe fn bitpack_tydesc_ptr(p: *TypeDesc, is_done: bool) -> uint { let p_bits: uint = transmute(p); p_bits | (is_done as uint) } #[inline(always)] unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) { (transmute(p &!1), p & 1 == 1) } pub impl Arena { // Functions for the POD part of the arena priv fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 { // Allocate a new chunk. let chunk_size = at_vec::capacity(self.pod_head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.pod_head, self.chunks); self.pod_head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true); return self.alloc_pod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 { unsafe { // XXX: Borrow check let head = transmute_mut_region(&mut self.pod_head); let start = round_up_to(head.fill, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_pod_grow(n_bytes, align); } head.fill = end; //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); ptr::offset(vec::raw::to_ptr(head.data), start) } } #[inline(always)] priv fn alloc_pod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { let tydesc = sys::get_type_desc::<T>(); let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align); let ptr: *mut T = transmute(ptr); rusti::move_val_init(&mut (*ptr), op()); return transmute(ptr); } } // Functions for the non-POD part of the arena priv fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { // Allocate a new chunk. 
let chunk_size = at_vec::capacity(self.head.data); let new_min_chunk_size = uint::max(n_bytes, chunk_size); self.chunks = @mut MutCons(copy self.head, self.chunks); self.head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false); return self.alloc_nonpod_inner(n_bytes, align); } #[inline(always)] priv fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint) -> (*u8, *u8) { unsafe { let head = transmute_mut_region(&mut self.head); let tydesc_start = head.fill; let after_tydesc = head.fill + sys::size_of::<*TypeDesc>(); let start = round_up_to(after_tydesc, align); let end = start + n_bytes; if end > at_vec::capacity(head.data) { return self.alloc_nonpod_grow(n_bytes, align); } head.fill = round_up_to(end, sys::pref_align_of::<*TypeDesc>()); //debug!("idx = %u, size = %u, align = %u, fill = %u", // start, n_bytes, align, head.fill); let buf = vec::raw::to_ptr(head.data); return (ptr::offset(buf, tydesc_start), ptr::offset(buf, start)); } } #[inline(always)] priv fn alloc_nonpod<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T
// The external interface #[inline(always)] fn alloc<'a, T>(&'a mut self, op: &fn() -> T) -> &'a T { unsafe { // XXX: Borrow check let this = transmute_mut_region(self); if!rusti::needs_drop::<T>() { return this.alloc_pod(op); } // XXX: Borrow check let this = transmute_mut_region(self); this.alloc_nonpod(op) } } } #[test] fn test_arena_destructors() { let mut arena = Arena(); for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } } #[test] #[should_fail] #[ignore(cfg(windows))] fn test_arena_destructors_fail() { let mut arena = Arena(); // Put some stuff in the arena. for uint::range(0, 10) |i| { // Arena allocate something with drop glue to make sure it // doesn't leak. do arena.alloc { @i }; // Allocate something with funny size and alignment, to keep // things interesting. do arena.alloc { [0u8, 1u8, 2u8] }; } // Now, fail while allocating do arena.alloc::<@int> { // Now fail. fail!(); }; }
{ unsafe { let tydesc = sys::get_type_desc::<T>(); let (ty_ptr, ptr) = self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align); let ty_ptr: *mut uint = transmute(ty_ptr); let ptr: *mut T = transmute(ptr); // Write in our tydesc along with a bit indicating that it // has *not* been initialized yet. *ty_ptr = transmute(tydesc); // Actually initialize it rusti::move_val_init(&mut(*ptr), op()); // Now that we are done, update the tydesc to indicate that // the object is there. *ty_ptr = bitpack_tydesc_ptr(tydesc, true); return transmute(ptr); } }
identifier_body
discord.rs
use inth_oauth2::provider::Provider; use inth_oauth2::token::{Bearer, Refresh}; use inth_oauth2::Client; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Discord; #[derive(RustcDecodable)] pub struct DiscordUser { pub username: String, pub verified: bool, pub mfa_enabled: bool, pub id: String, pub avatar: String, pub discriminator: String, pub email: String } impl Provider for Discord { type Lifetime = Refresh; type Token = Bearer<Refresh>; fn auth_uri() -> &'static str { "https://discordapp.com/api/oauth2/authorize" } fn token_uri() -> &'static str { "https://discordapp.com/api/oauth2/token" } } pub const DISCORD_SCOPES: &'static str = "identify email guilds"; pub fn get_client() -> Client<Discord>
{
    Client::<Discord>::new(
        // XXX don't commit these to git that would be very bad
        // String::from(""),
        // String::from(""),
        // Some(String::from(""))
    )
}
identifier_body
discord.rs
use inth_oauth2::provider::Provider; use inth_oauth2::token::{Bearer, Refresh}; use inth_oauth2::Client; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Discord; #[derive(RustcDecodable)] pub struct DiscordUser { pub username: String, pub verified: bool, pub mfa_enabled: bool, pub id: String, pub avatar: String, pub discriminator: String, pub email: String } impl Provider for Discord { type Lifetime = Refresh; type Token = Bearer<Refresh>; fn
() -> &'static str { "https://discordapp.com/api/oauth2/authorize" } fn token_uri() -> &'static str { "https://discordapp.com/api/oauth2/token" } } pub const DISCORD_SCOPES: &'static str = "identify email guilds"; pub fn get_client() -> Client<Discord> {
    Client::<Discord>::new(
        // XXX don't commit these to git that would be very bad
        // String::from(""),
        // String::from(""),
        // Some(String::from(""))
    )
}
auth_uri
identifier_name
discord.rs
use inth_oauth2::provider::Provider; use inth_oauth2::token::{Bearer, Refresh}; use inth_oauth2::Client; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Discord; #[derive(RustcDecodable)] pub struct DiscordUser { pub username: String, pub verified: bool, pub mfa_enabled: bool, pub id: String, pub avatar: String, pub discriminator: String, pub email: String } impl Provider for Discord { type Lifetime = Refresh; type Token = Bearer<Refresh>; fn auth_uri() -> &'static str { "https://discordapp.com/api/oauth2/authorize" } fn token_uri() -> &'static str { "https://discordapp.com/api/oauth2/token" }
pub const DISCORD_SCOPES: &'static str = "identify email guilds"; pub fn get_client() -> Client<Discord> {
    Client::<Discord>::new(
        // XXX don't commit these to git that would be very bad
        // String::from(""),
        // String::from(""),
        // Some(String::from(""))
    )
}
}
random_line_split
addition.rs
extern crate basiccms; #[cfg(test)] mod tests { use basiccms::*; #[test] #[should_panic] fn you_cannot_add_two_sketches_together_if_they_have_different_hashers () { let mut left = Sketch::new(0.0001, 0.99); let mut right = Sketch::new(0.0001, 0.99); left.add(1); right.add(1);
#[test] fn but_you_can_add_together_two_sketches_from_a_common_base () { let mut left = Sketch::new(0.0001, 0.99); let mut right = left.clone(); left.add(1); right.add(1); let mut third = &left + &right; assert_eq!(1, left.point(1)); assert_eq!(1, right.point(1)); assert_eq!(2, third.point(1)); } }
let mut third = &left + &right; third.point(1); }
random_line_split
addition.rs
extern crate basiccms; #[cfg(test)] mod tests { use basiccms::*; #[test] #[should_panic] fn you_cannot_add_two_sketches_together_if_they_have_different_hashers ()
#[test] fn but_you_can_add_together_two_sketches_from_a_common_base () { let mut left = Sketch::new(0.0001, 0.99); let mut right = left.clone(); left.add(1); right.add(1); let mut third = &left + &right; assert_eq!(1, left.point(1)); assert_eq!(1, right.point(1)); assert_eq!(2, third.point(1)); } }
{ let mut left = Sketch::new(0.0001, 0.99); let mut right = Sketch::new(0.0001, 0.99); left.add(1); right.add(1); let mut third = &left + &right; third.point(1); }
identifier_body
addition.rs
extern crate basiccms; #[cfg(test)] mod tests { use basiccms::*; #[test] #[should_panic] fn
() { let mut left = Sketch::new(0.0001, 0.99); let mut right = Sketch::new(0.0001, 0.99); left.add(1); right.add(1); let mut third = &left + &right; third.point(1); } #[test] fn but_you_can_add_together_two_sketches_from_a_common_base () { let mut left = Sketch::new(0.0001, 0.99); let mut right = left.clone(); left.add(1); right.add(1); let mut third = &left + &right; assert_eq!(1, left.point(1)); assert_eq!(1, right.point(1)); assert_eq!(2, third.point(1)); } }
you_cannot_add_two_sketches_together_if_they_have_different_hashers
identifier_name
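Editor's note (illustration only, not a dataset row): the `addition.rs` tests above encode the rule that two `basiccms::Sketch` values can be summed only when one was cloned from the other, that is, when they share hash functions. A toy counting table, with a trivial modulo "hasher" standing in for the real ones, shows why the layouts must match before cells can be added:

```rust
// Toy illustration: two counting tables can only be summed cell-by-cell if
// both bucket items identically; here the "hasher" is just modulo width.
#[derive(Clone)]
struct ToySketch {
    width: usize,
    counts: Vec<u64>,
}

impl ToySketch {
    fn new(width: usize) -> Self {
        ToySketch { width, counts: vec![0; width] }
    }
    fn add(&mut self, item: u64) {
        let bucket = (item as usize) % self.width;
        self.counts[bucket] += 1;
    }
    fn point(&self, item: u64) -> u64 {
        self.counts[(item as usize) % self.width]
    }
    fn merge(&self, other: &ToySketch) -> ToySketch {
        // Only meaningful when both sides use the same bucketing.
        assert_eq!(self.width, other.width, "sketches must share the same layout");
        let counts = self.counts.iter().zip(&other.counts).map(|(a, b)| a + b).collect();
        ToySketch { width: self.width, counts }
    }
}

fn main() {
    let mut left = ToySketch::new(16);
    let mut right = left.clone(); // shared layout, like cloning from a common base
    left.add(1);
    right.add(1);
    assert_eq!(left.merge(&right).point(1), 2);
}
```

Cloning, as in the passing test above, is what guarantees both sides bucket items identically.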
quote-tokens.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android
#![feature(quote, rustc_private)] extern crate syntax; use syntax::ext::base::ExtCtxt; use syntax::ptr::P; fn syntax_extension(cx: &ExtCtxt) { let e_toks : Vec<syntax::ast::TokenTree> = quote_tokens!(cx, 1 + 2); let p_toks : Vec<syntax::ast::TokenTree> = quote_tokens!(cx, (x, 1.. 4, *)); let a: P<syntax::ast::Expr> = quote_expr!(cx, 1 + 2); let _b: Option<P<syntax::ast::Item>> = quote_item!(cx, static foo : isize = $e_toks; ); let _c: P<syntax::ast::Pat> = quote_pat!(cx, (x, 1.. 4, *) ); let _d: Option<P<syntax::ast::Stmt>> = quote_stmt!(cx, let x = $a; ); let _d: syntax::ast::Arm = quote_arm!(cx, (ref x, ref y) = (x, y) ); let _e: P<syntax::ast::Expr> = quote_expr!(cx, match foo { $p_toks => 10 } ); let _f: P<syntax::ast::Expr> = quote_expr!(cx, ()); let _g: P<syntax::ast::Expr> = quote_expr!(cx, true); let _h: P<syntax::ast::Expr> = quote_expr!(cx, 'a'); let i: Option<P<syntax::ast::Item>> = quote_item!(cx, #[derive(Eq)] struct Foo; ); assert!(i.is_some()); let _l: P<syntax::ast::Ty> = quote_ty!(cx, &isize); let _m: Vec<syntax::ast::TokenTree> = quote_matcher!(cx, $($foo:tt,)* bar); let _n: syntax::ast::Attribute = quote_attr!(cx, #![cfg(foo, bar = "baz")]); let _o: Option<P<syntax::ast::Item>> = quote_item!(cx, fn foo<T:?Sized>() {}); } fn main() { }
// ignore-pretty: does not work well with `--test`
random_line_split
quote-tokens.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android // ignore-pretty: does not work well with `--test` #![feature(quote, rustc_private)] extern crate syntax; use syntax::ext::base::ExtCtxt; use syntax::ptr::P; fn
(cx: &ExtCtxt) { let e_toks : Vec<syntax::ast::TokenTree> = quote_tokens!(cx, 1 + 2); let p_toks : Vec<syntax::ast::TokenTree> = quote_tokens!(cx, (x, 1.. 4, *)); let a: P<syntax::ast::Expr> = quote_expr!(cx, 1 + 2); let _b: Option<P<syntax::ast::Item>> = quote_item!(cx, static foo : isize = $e_toks; ); let _c: P<syntax::ast::Pat> = quote_pat!(cx, (x, 1.. 4, *) ); let _d: Option<P<syntax::ast::Stmt>> = quote_stmt!(cx, let x = $a; ); let _d: syntax::ast::Arm = quote_arm!(cx, (ref x, ref y) = (x, y) ); let _e: P<syntax::ast::Expr> = quote_expr!(cx, match foo { $p_toks => 10 } ); let _f: P<syntax::ast::Expr> = quote_expr!(cx, ()); let _g: P<syntax::ast::Expr> = quote_expr!(cx, true); let _h: P<syntax::ast::Expr> = quote_expr!(cx, 'a'); let i: Option<P<syntax::ast::Item>> = quote_item!(cx, #[derive(Eq)] struct Foo; ); assert!(i.is_some()); let _l: P<syntax::ast::Ty> = quote_ty!(cx, &isize); let _m: Vec<syntax::ast::TokenTree> = quote_matcher!(cx, $($foo:tt,)* bar); let _n: syntax::ast::Attribute = quote_attr!(cx, #![cfg(foo, bar = "baz")]); let _o: Option<P<syntax::ast::Item>> = quote_item!(cx, fn foo<T:?Sized>() {}); } fn main() { }
syntax_extension
identifier_name
quote-tokens.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android // ignore-pretty: does not work well with `--test` #![feature(quote, rustc_private)] extern crate syntax; use syntax::ext::base::ExtCtxt; use syntax::ptr::P; fn syntax_extension(cx: &ExtCtxt) { let e_toks : Vec<syntax::ast::TokenTree> = quote_tokens!(cx, 1 + 2); let p_toks : Vec<syntax::ast::TokenTree> = quote_tokens!(cx, (x, 1.. 4, *)); let a: P<syntax::ast::Expr> = quote_expr!(cx, 1 + 2); let _b: Option<P<syntax::ast::Item>> = quote_item!(cx, static foo : isize = $e_toks; ); let _c: P<syntax::ast::Pat> = quote_pat!(cx, (x, 1.. 4, *) ); let _d: Option<P<syntax::ast::Stmt>> = quote_stmt!(cx, let x = $a; ); let _d: syntax::ast::Arm = quote_arm!(cx, (ref x, ref y) = (x, y) ); let _e: P<syntax::ast::Expr> = quote_expr!(cx, match foo { $p_toks => 10 } ); let _f: P<syntax::ast::Expr> = quote_expr!(cx, ()); let _g: P<syntax::ast::Expr> = quote_expr!(cx, true); let _h: P<syntax::ast::Expr> = quote_expr!(cx, 'a'); let i: Option<P<syntax::ast::Item>> = quote_item!(cx, #[derive(Eq)] struct Foo; ); assert!(i.is_some()); let _l: P<syntax::ast::Ty> = quote_ty!(cx, &isize); let _m: Vec<syntax::ast::TokenTree> = quote_matcher!(cx, $($foo:tt,)* bar); let _n: syntax::ast::Attribute = quote_attr!(cx, #![cfg(foo, bar = "baz")]); let _o: Option<P<syntax::ast::Item>> = quote_item!(cx, fn foo<T:?Sized>() {}); } fn main()
{ }
identifier_body
tree_id.rs
use std::cmp::Ordering; #[derive(Debug, Clone)] pub struct TreeId { id: String } static TREE_ID_STEP: usize = 2; impl TreeId { pub fn new(s: String) -> Self { TreeId { id: s } } pub fn id(&self) -> String { self.id.clone() } pub fn len(&self) -> usize { self.id.len() } } impl Ord for TreeId { fn cmp(&self, other: &TreeId) -> Ordering { self.id.cmp(&other.id) } } impl PartialOrd for TreeId { fn partial_cmp(&self, other: &TreeId) -> Option<Ordering>
} impl Eq for TreeId {} impl PartialEq for TreeId { fn eq(&self, other: &TreeId) -> bool { self.id == other.id } } pub fn level(ti: &TreeId) -> usize { ti.id.len() / TREE_ID_STEP } pub fn key(ti: &TreeId, level: usize) -> Option<String> { let sub_index = level*TREE_ID_STEP; let ref s = ti.id; if sub_index == s.len() { Some(s.clone()) } else if sub_index < s.len() { Some(String::from(&s[..sub_index])) } else { None } }
{ Some(self.id.cmp(&other.id)) }
identifier_body
tree_id.rs
use std::cmp::Ordering; #[derive(Debug, Clone)] pub struct TreeId { id: String } static TREE_ID_STEP: usize = 2; impl TreeId { pub fn new(s: String) -> Self { TreeId { id: s } } pub fn id(&self) -> String { self.id.clone() } pub fn len(&self) -> usize { self.id.len() } } impl Ord for TreeId { fn cmp(&self, other: &TreeId) -> Ordering { self.id.cmp(&other.id) } } impl PartialOrd for TreeId { fn
(&self, other: &TreeId) -> Option<Ordering> { Some(self.id.cmp(&other.id)) } } impl Eq for TreeId {} impl PartialEq for TreeId { fn eq(&self, other: &TreeId) -> bool { self.id == other.id } } pub fn level(ti: &TreeId) -> usize { ti.id.len() / TREE_ID_STEP } pub fn key(ti: &TreeId, level: usize) -> Option<String> { let sub_index = level*TREE_ID_STEP; let ref s = ti.id; if sub_index == s.len() { Some(s.clone()) } else if sub_index < s.len() { Some(String::from(&s[..sub_index])) } else { None } }
partial_cmp
identifier_name
tree_id.rs
use std::cmp::Ordering; #[derive(Debug, Clone)] pub struct TreeId { id: String } static TREE_ID_STEP: usize = 2; impl TreeId { pub fn new(s: String) -> Self { TreeId { id: s } } pub fn id(&self) -> String { self.id.clone() } pub fn len(&self) -> usize { self.id.len() } } impl Ord for TreeId { fn cmp(&self, other: &TreeId) -> Ordering {
} } impl PartialOrd for TreeId { fn partial_cmp(&self, other: &TreeId) -> Option<Ordering> { Some(self.id.cmp(&other.id)) } } impl Eq for TreeId {} impl PartialEq for TreeId { fn eq(&self, other: &TreeId) -> bool { self.id == other.id } } pub fn level(ti: &TreeId) -> usize { ti.id.len() / TREE_ID_STEP } pub fn key(ti: &TreeId, level: usize) -> Option<String> { let sub_index = level*TREE_ID_STEP; let ref s = ti.id; if sub_index == s.len() { Some(s.clone()) } else if sub_index < s.len() { Some(String::from(&s[..sub_index])) } else { None } }
self.id.cmp(&other.id)
random_line_split
tree_id.rs
use std::cmp::Ordering; #[derive(Debug, Clone)] pub struct TreeId { id: String } static TREE_ID_STEP: usize = 2; impl TreeId { pub fn new(s: String) -> Self { TreeId { id: s } } pub fn id(&self) -> String { self.id.clone() } pub fn len(&self) -> usize { self.id.len() } } impl Ord for TreeId { fn cmp(&self, other: &TreeId) -> Ordering { self.id.cmp(&other.id) } } impl PartialOrd for TreeId { fn partial_cmp(&self, other: &TreeId) -> Option<Ordering> { Some(self.id.cmp(&other.id)) } } impl Eq for TreeId {} impl PartialEq for TreeId { fn eq(&self, other: &TreeId) -> bool { self.id == other.id } } pub fn level(ti: &TreeId) -> usize { ti.id.len() / TREE_ID_STEP } pub fn key(ti: &TreeId, level: usize) -> Option<String> { let sub_index = level*TREE_ID_STEP; let ref s = ti.id; if sub_index == s.len()
else if sub_index < s.len() { Some(String::from(&s[..sub_index])) } else { None } }
{ Some(s.clone()) }
conditional_block
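Editor's note (illustration only, not a dataset row): `tree_id.rs` above encodes depth directly in the id string, `TREE_ID_STEP` (two) characters per level, so `level` is `len / 2` and `key` truncates the id to a level-sized prefix. A small sketch of the same scheme on plain ASCII `&str` ids, restated as free functions for brevity:

```rust
const TREE_ID_STEP: usize = 2;

fn level(id: &str) -> usize {
    id.len() / TREE_ID_STEP
}

/// Prefix of `id` identifying its ancestor at `level`, or None if the id is
/// not deep enough. Assumes ASCII ids, as in the rows above.
fn key(id: &str, level: usize) -> Option<&str> {
    let end = level * TREE_ID_STEP;
    if end <= id.len() { Some(&id[..end]) } else { None }
}

fn main() {
    let id = "00010203"; // four levels, two characters each
    assert_eq!(level(id), 4);
    assert_eq!(key(id, 2), Some("0001"));
    assert_eq!(key(id, 4), Some(id));
    assert_eq!(key(id, 5), None);
}
```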
metadata.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors; use crate::providers; use crate::providers::aliyun::AliyunProvider; use crate::providers::aws::AwsProvider; use crate::providers::azure::Azure; use crate::providers::cloudstack::configdrive::ConfigDrive; use crate::providers::cloudstack::network::CloudstackNetwork; use crate::providers::digitalocean::DigitalOceanProvider; use crate::providers::exoscale::ExoscaleProvider; use crate::providers::gcp::GcpProvider; use crate::providers::ibmcloud::IBMGen2Provider; use crate::providers::ibmcloud_classic::IBMClassicProvider; use crate::providers::openstack; use crate::providers::openstack::network::OpenstackProviderNetwork; use crate::providers::packet::PacketProvider; #[cfg(feature = "cl-legacy")] use crate::providers::vagrant_virtualbox::VagrantVirtualboxProvider; use crate::providers::vmware::VmwareProvider; use crate::providers::vultr::VultrProvider; macro_rules! box_result { ($exp:expr) => { Ok(Box::new($exp)) }; } /// Fetch metadata for the given provider. /// /// This is the generic, top-level function to fetch provider metadata. /// The configured provider is passed in and this function dispatches the call /// to the provider-specific fetch logic.
"aws" => box_result!(AwsProvider::try_new()?), "azure" => box_result!(Azure::try_new()?), "cloudstack-metadata" => box_result!(CloudstackNetwork::try_new()?), "cloudstack-configdrive" => box_result!(ConfigDrive::try_new()?), "digitalocean" => box_result!(DigitalOceanProvider::try_new()?), "exoscale" => box_result!(ExoscaleProvider::try_new()?), #[cfg(feature = "cl-legacy")] "ec2" => box_result!(AwsProvider::try_new()?), #[cfg(feature = "cl-legacy")] "gce" => box_result!(GcpProvider::try_new()?), #[cfg(not(feature = "cl-legacy"))] "gcp" => box_result!(GcpProvider::try_new()?), // IBM Cloud - VPC Generation 2. "ibmcloud" => box_result!(IBMGen2Provider::try_new()?), // IBM Cloud - Classic infrastructure. "ibmcloud-classic" => box_result!(IBMClassicProvider::try_new()?), "openstack" => openstack::try_config_drive_else_network(), "openstack-metadata" => box_result!(OpenstackProviderNetwork::try_new()?), "packet" => box_result!(PacketProvider::try_new()?), #[cfg(feature = "cl-legacy")] "vagrant-virtualbox" => box_result!(VagrantVirtualboxProvider::new()), "vmware" => box_result!(VmwareProvider::try_new()?), "vultr" => box_result!(VultrProvider::try_new()?), _ => Err(errors::ErrorKind::UnknownProvider(provider.to_owned()).into()), } }
pub fn fetch_metadata(provider: &str) -> errors::Result<Box<dyn providers::MetadataProvider>> { match provider { "aliyun" => box_result!(AliyunProvider::try_new()?), #[cfg(not(feature = "cl-legacy"))]
random_line_split
metadata.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors; use crate::providers; use crate::providers::aliyun::AliyunProvider; use crate::providers::aws::AwsProvider; use crate::providers::azure::Azure; use crate::providers::cloudstack::configdrive::ConfigDrive; use crate::providers::cloudstack::network::CloudstackNetwork; use crate::providers::digitalocean::DigitalOceanProvider; use crate::providers::exoscale::ExoscaleProvider; use crate::providers::gcp::GcpProvider; use crate::providers::ibmcloud::IBMGen2Provider; use crate::providers::ibmcloud_classic::IBMClassicProvider; use crate::providers::openstack; use crate::providers::openstack::network::OpenstackProviderNetwork; use crate::providers::packet::PacketProvider; #[cfg(feature = "cl-legacy")] use crate::providers::vagrant_virtualbox::VagrantVirtualboxProvider; use crate::providers::vmware::VmwareProvider; use crate::providers::vultr::VultrProvider; macro_rules! box_result { ($exp:expr) => { Ok(Box::new($exp)) }; } /// Fetch metadata for the given provider. /// /// This is the generic, top-level function to fetch provider metadata. /// The configured provider is passed in and this function dispatches the call /// to the provider-specific fetch logic. pub fn
(provider: &str) -> errors::Result<Box<dyn providers::MetadataProvider>> { match provider { "aliyun" => box_result!(AliyunProvider::try_new()?), #[cfg(not(feature = "cl-legacy"))] "aws" => box_result!(AwsProvider::try_new()?), "azure" => box_result!(Azure::try_new()?), "cloudstack-metadata" => box_result!(CloudstackNetwork::try_new()?), "cloudstack-configdrive" => box_result!(ConfigDrive::try_new()?), "digitalocean" => box_result!(DigitalOceanProvider::try_new()?), "exoscale" => box_result!(ExoscaleProvider::try_new()?), #[cfg(feature = "cl-legacy")] "ec2" => box_result!(AwsProvider::try_new()?), #[cfg(feature = "cl-legacy")] "gce" => box_result!(GcpProvider::try_new()?), #[cfg(not(feature = "cl-legacy"))] "gcp" => box_result!(GcpProvider::try_new()?), // IBM Cloud - VPC Generation 2. "ibmcloud" => box_result!(IBMGen2Provider::try_new()?), // IBM Cloud - Classic infrastructure. "ibmcloud-classic" => box_result!(IBMClassicProvider::try_new()?), "openstack" => openstack::try_config_drive_else_network(), "openstack-metadata" => box_result!(OpenstackProviderNetwork::try_new()?), "packet" => box_result!(PacketProvider::try_new()?), #[cfg(feature = "cl-legacy")] "vagrant-virtualbox" => box_result!(VagrantVirtualboxProvider::new()), "vmware" => box_result!(VmwareProvider::try_new()?), "vultr" => box_result!(VultrProvider::try_new()?), _ => Err(errors::ErrorKind::UnknownProvider(provider.to_owned()).into()), } }
fetch_metadata
identifier_name
metadata.rs
// Copyright 2017 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors; use crate::providers; use crate::providers::aliyun::AliyunProvider; use crate::providers::aws::AwsProvider; use crate::providers::azure::Azure; use crate::providers::cloudstack::configdrive::ConfigDrive; use crate::providers::cloudstack::network::CloudstackNetwork; use crate::providers::digitalocean::DigitalOceanProvider; use crate::providers::exoscale::ExoscaleProvider; use crate::providers::gcp::GcpProvider; use crate::providers::ibmcloud::IBMGen2Provider; use crate::providers::ibmcloud_classic::IBMClassicProvider; use crate::providers::openstack; use crate::providers::openstack::network::OpenstackProviderNetwork; use crate::providers::packet::PacketProvider; #[cfg(feature = "cl-legacy")] use crate::providers::vagrant_virtualbox::VagrantVirtualboxProvider; use crate::providers::vmware::VmwareProvider; use crate::providers::vultr::VultrProvider; macro_rules! box_result { ($exp:expr) => { Ok(Box::new($exp)) }; } /// Fetch metadata for the given provider. /// /// This is the generic, top-level function to fetch provider metadata. /// The configured provider is passed in and this function dispatches the call /// to the provider-specific fetch logic. pub fn fetch_metadata(provider: &str) -> errors::Result<Box<dyn providers::MetadataProvider>>
"openstack" => openstack::try_config_drive_else_network(), "openstack-metadata" => box_result!(OpenstackProviderNetwork::try_new()?), "packet" => box_result!(PacketProvider::try_new()?), #[cfg(feature = "cl-legacy")] "vagrant-virtualbox" => box_result!(VagrantVirtualboxProvider::new()), "vmware" => box_result!(VmwareProvider::try_new()?), "vultr" => box_result!(VultrProvider::try_new()?), _ => Err(errors::ErrorKind::UnknownProvider(provider.to_owned()).into()), } }
{ match provider { "aliyun" => box_result!(AliyunProvider::try_new()?), #[cfg(not(feature = "cl-legacy"))] "aws" => box_result!(AwsProvider::try_new()?), "azure" => box_result!(Azure::try_new()?), "cloudstack-metadata" => box_result!(CloudstackNetwork::try_new()?), "cloudstack-configdrive" => box_result!(ConfigDrive::try_new()?), "digitalocean" => box_result!(DigitalOceanProvider::try_new()?), "exoscale" => box_result!(ExoscaleProvider::try_new()?), #[cfg(feature = "cl-legacy")] "ec2" => box_result!(AwsProvider::try_new()?), #[cfg(feature = "cl-legacy")] "gce" => box_result!(GcpProvider::try_new()?), #[cfg(not(feature = "cl-legacy"))] "gcp" => box_result!(GcpProvider::try_new()?), // IBM Cloud - VPC Generation 2. "ibmcloud" => box_result!(IBMGen2Provider::try_new()?), // IBM Cloud - Classic infrastructure. "ibmcloud-classic" => box_result!(IBMClassicProvider::try_new()?),
identifier_body
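Editor's note (illustration only, not a dataset row): `metadata.rs` above maps a provider name string to a boxed `MetadataProvider` trait object, with a small `box_result!` macro wrapping each constructor in `Ok(Box::new(..))`. A stripped-down sketch of that dispatch shape, using hypothetical `Aws`/`Gcp` stand-ins rather than the real provider types:

```rust
trait MetadataProvider {
    fn name(&self) -> &'static str;
}

struct Aws;
struct Gcp;

impl MetadataProvider for Aws { fn name(&self) -> &'static str { "aws" } }
impl MetadataProvider for Gcp { fn name(&self) -> &'static str { "gcp" } }

// Mirrors the box_result! pattern: wrap a constructor expression in Ok(Box::new(..)).
macro_rules! box_result {
    ($exp:expr) => { Ok(Box::new($exp)) };
}

fn fetch(provider: &str) -> Result<Box<dyn MetadataProvider>, String> {
    match provider {
        "aws" => box_result!(Aws),
        "gcp" => box_result!(Gcp),
        other => Err(format!("unknown provider: {}", other)),
    }
}

fn main() {
    assert_eq!(fetch("aws").unwrap().name(), "aws");
    assert!(fetch("digitalocean").is_err());
}
```

Returning `Box<dyn MetadataProvider>` keeps the caller independent of which concrete provider was selected at runtime, which is the point of the string-keyed match.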
lib.rs
// This thread waits for requests to calculate the currently visible nodes, runs a // calculation and sends the visible nodes back to the drawing thread. If multiple requests // queue up while it is processing one, it will drop all but the latest one before // restarting the next calculation. let (get_visible_nodes_params_tx, rx) = mpsc::channel::<Matrix4<f64>>(); let (tx, get_visible_nodes_result_rx) = mpsc::channel(); let octree_clone = octree.clone(); thread::spawn(move || { while let Ok(mut matrix) = rx.recv() { // Drain the channel, we only ever want to update the latest. while let Ok(newer_matrix) = rx.try_recv() { matrix = newer_matrix; } let visible_nodes = octree_clone.get_visible_nodes(&matrix); tx.send(visible_nodes).unwrap(); } }); Self { last_moving: now, last_log: now, visible_nodes: Vec::new(), node_drawer: NodeDrawer::new(&Rc::clone(&gl)), num_frames: 0, point_size: 1., gamma: 1., get_visible_nodes_params_tx, get_visible_nodes_result_rx, max_nodes_moving: max_nodes_in_memory, needs_drawing: true, show_octree_nodes: false, max_nodes_in_memory, node_views: NodeViewContainer::new(octree, max_nodes_in_memory), box_drawer: BoxDrawer::new(&Rc::clone(&gl)), world_to_gl: Matrix4::identity(), gl, } } pub fn camera_changed(&mut self, world_to_gl: &Matrix4<f64>) { self.last_moving = time::Instant::now(); self.needs_drawing = true; self.node_drawer.update_world_to_gl(world_to_gl); self.get_visible_nodes_params_tx.send(*world_to_gl).unwrap(); self.last_moving = time::Instant::now(); self.world_to_gl = *world_to_gl; } pub fn toggle_show_octree_nodes(&mut self) { self.show_octree_nodes =!self.show_octree_nodes; } pub fn adjust_gamma(&mut self, delta: f32) { self.gamma += delta; self.needs_drawing = true; } pub fn adjust_point_size(&mut self, delta: f32) { // Point size == 1. is the smallest that is rendered. self.point_size = (self.point_size + delta).max(1.); self.needs_drawing = true; } pub fn draw(&mut self) -> DrawResult { let mut draw_result = DrawResult::NoChange; let mut num_points_drawn = 0; let mut num_nodes_drawn = 0; let now = time::Instant::now(); let moving = now - self.last_moving < time::Duration::milliseconds(150); self.needs_drawing |= self.node_views.consume_arrived_nodes(&self.node_drawer); while let Ok(visible_nodes) = self.get_visible_nodes_result_rx.try_recv() { self.visible_nodes.clear(); self.visible_nodes.extend(visible_nodes); self.needs_drawing = true; } if self.needs_drawing { unsafe { self.gl.ClearColor(0., 0., 0., 1.); self.gl .Clear(opengl::COLOR_BUFFER_BIT | opengl::DEPTH_BUFFER_BIT); } } // We use a heuristic to keep the frame rate as stable as possible by increasing/decreasing the number of nodes to draw. let max_nodes_to_display = if moving
else { self.max_nodes_in_memory }; let filtered_visible_nodes = self.visible_nodes.iter().take(max_nodes_to_display); for node_id in filtered_visible_nodes { let view = self.node_views.get_or_request(&node_id); if!self.needs_drawing || view.is_none() { continue; } let view = view.unwrap(); num_points_drawn += self.node_drawer.draw( view, 1, /* level of detail */ self.point_size, self.gamma, ); num_nodes_drawn += 1; if self.show_octree_nodes { self.box_drawer.draw_outlines( &view.meta.bounding_cube.to_aabb(), &self.world_to_gl, &YELLOW, ); } } if self.needs_drawing { draw_result = DrawResult::HasDrawn; } self.needs_drawing = moving; self.num_frames += 1; let now = time::Instant::now(); if now - self.last_log > time::Duration::seconds(1) { let duration_s = (now - self.last_log).as_seconds_f64(); let fps = f64::from(self.num_frames) / duration_s; if moving { if fps < 20. { self.max_nodes_moving = (self.max_nodes_moving as f32 * 0.9) as usize; } if fps > 25. && self.max_nodes_moving < self.max_nodes_in_memory { self.max_nodes_moving = (self.max_nodes_moving as f32 * 1.1) as usize; } } self.num_frames = 0; self.last_log = now; eprintln!( "FPS: {:.2}, Drew {} points from {} loaded nodes. {} nodes \ should be shown, Cache {} MB", fps, num_points_drawn, num_nodes_drawn, self.visible_nodes.len(), self.node_views.get_used_memory_bytes() as f32 / 1024. / 1024., ); } draw_result } } #[derive(Debug, Serialize, Deserialize)] pub struct CameraStates { states: Vec<camera::State>, } fn save_camera(index: usize, pose_path: &Option<PathBuf>, camera: &Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. Cannot save camera."); return; } assert!(index < 10); let mut states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); states.states[index] = camera.state(); match std::fs::write( pose_path.as_ref().unwrap(), serde_json::to_string_pretty(&states).unwrap().as_bytes(), ) { Ok(_) => (), Err(e) => eprintln!( "Could not write {}: {}", pose_path.as_ref().unwrap().display(), e ), } eprintln!("Saved current camera position as {}.", index); } fn load_camera(index: usize, pose_path: &Option<PathBuf>, camera: &mut Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. 
Cannot load camera."); return; } assert!(index < 10); let states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); camera.set_state(states.states[index]); } pub trait Extension { fn pre_init(app: clap::App) -> clap::App; fn new(matches: &clap::ArgMatches, opengl: Rc<opengl::Gl>) -> Self; fn local_from_global(matches: &clap::ArgMatches, octree: &Octree) -> Option<Isometry3<f64>>; fn camera_changed(&mut self, transform: &Matrix4<f64>); fn draw(&mut self); } trait Joystick { fn act(&self, camera: &mut Camera); fn joystick(&self) -> &sdl2::joystick::Joystick; } struct XBoxJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for XBoxJoystick { fn act(&self, camera: &mut Camera) { let right = f64::from(self.joystick.axis(0).unwrap()) / 1000.; let forward = f64::from(self.joystick.axis(1).unwrap()) / 1000.; let turning_right = -f64::from(self.joystick.axis(3).unwrap()) / 32000.; let turning_up = -f64::from(self.joystick.axis(4).unwrap()) / 32000.; camera.pan(right, 0., forward); camera.rotate(turning_up, turning_right); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } struct SpaceMouseJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for SpaceMouseJoystick { fn act(&self, camera: &mut Camera) { let x = f64::from(self.joystick.axis(0).unwrap()) / 500.; let y = f64::from(-self.joystick.axis(1).unwrap()) / 500.; let z = f64::from(-self.joystick.axis(2).unwrap()) / 500.; let up = f64::from(self.joystick.axis(3).unwrap()) / 500.; // Combine tilting and turning on the knob. let around = f64::from(self.joystick.axis(4).unwrap()) / 500. - f64::from(self.joystick.axis(5).unwrap()) / 500.; camera.pan(x, y, z); camera.rotate(up, around); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } pub fn run<T: Extension>(data_provider_factory: DataProviderFactory) { let mut app = clap::App::new("sdl_viewer").args(&[ clap::Arg::with_name("octree") .about("Input path of the octree.") .index(1) .required(true), clap::Arg::with_name("terrain") .long("terrain") .takes_value(true) .multiple(true) .about("Terrain directories (multiple possible)."), clap::Arg::with_name("cache_size_mb") .about( "Maximum cache size in MB for octree nodes in GPU memory. \ The default value is 2000 MB and the valid range is 1000 MB to 16000 MB.", ) .required(false), ]); app = T::pre_init(app); let matches = app.get_matches(); let octree_argument = matches.value_of("octree").unwrap(); // Maximum number of MB for the octree node cache. The default is 2 GB let cache_size_mb: usize = matches .value_of("cache_size_mb") .unwrap_or("2000") .parse() .expect("Could not parse 'cache_size_mb' option."); // Maximum number of MB for the octree node cache in range 1..16 GB. 
The default is 2 GB let limit_cache_size_mb = cmp::max(1000, cmp::min(16_000, cache_size_mb)); // Assuming about 200 KB per octree node on average let max_nodes_in_memory = limit_cache_size_mb * 5; // If no octree was generated create a FromDisk loader let octree: Arc<Octree> = Arc::from( data_provider_factory .generate_data_provider(octree_argument) .and_then(|provider| Octree::from_data_provider(provider)) .unwrap_or_else(|_| panic!("Couldn't create octree from path '{}'.", octree_argument)), ); let mut pose_path = None; let pose_path_buf = PathBuf::from(&octree_argument).join("poses.json"); if pose_path_buf.exists() { pose_path = Some(pose_path_buf); } let ctx = sdl2::init().unwrap(); let video_subsystem = ctx.video().unwrap(); // We need to open the joysticks we are interested in and keep the object alive to receive // input from it. We just open the first we find. let joystick_subsystem = ctx.joystick().unwrap(); let mut joysticks = Vec::new(); for idx in 0..joystick_subsystem .num_joysticks() .expect("Should be able to enumerate joysticks.") { if let Ok(joystick) = joystick_subsystem.open(idx) { let (kind, j) = if joystick.name().contains("Xbox") { ( "XBox controller", Box::new(XBoxJoystick { joystick }) as Box<dyn Joystick>, ) } else { ( "Space mouse", Box::new(SpaceMouseJoystick { joystick }) as Box<dyn Joystick>, ) }; eprintln!( "Found a joystick named '{}' ({} axes, {} buttons, {} balls, {} hats). Will treat it as a {}.", j.joystick().name(), j.joystick().num_axes(), j.joystick().num_buttons(), j.joystick().num_balls(), j.joystick().num_hats(), kind ); joysticks.push(j); } } let gl_attr = video_subsystem.gl_attr(); // TODO(hrapp): This should use OpenGL ES 2.0 to be compatible with WebGL, so this can be made // to work with emscripten. gl_attr.set_context_profile(GLProfile::Core); gl_attr.set_context_version(4, 1); const WINDOW_WIDTH: i32 = 800; const WINDOW_HEIGHT: i32 = 600; let window = match video_subsystem .window("sdl2_viewer", WINDOW_WIDTH as u32, WINDOW_HEIGHT as u32) .position_centered() .resizable() .opengl() .build() { Ok(window) => window, Err(err) => panic!("failed to create window: {}", err), }; // We need to create a context now, only after can we actually legally load the gl functions // and query 'gl_attr'. let _context = window.gl_create_context().unwrap(); let _swap_interval = video_subsystem.gl_set_swap_interval(SwapInterval::VSync); assert_eq!(gl_attr.context_profile(), GLProfile::Core); let gl = Rc::new(opengl::Gl::load_with(|s| { let ptr = video_subsystem.gl_get_proc_address(s); ptr as *const std::ffi::c_void })); let mut extension = T::new(&matches, Rc::clone(&gl)); let ext_local_from_global = T::local_from_global(&matches, &octree); let mut renderer = PointCloudRenderer::new(max_nodes_in_memory, Rc::clone(&gl), octree); let terrain_paths = matches.values_of("terrain").unwrap_or_default(); let mut terrain_renderer = TerrainRenderer::new(Rc::clone(&gl), terrain_paths); let local_from_global = ext_local_from_global.or_else(|| terrain_renderer.local_from_global()); let mut camera = Camera::new(&gl, WINDOW_WIDTH, WINDOW_HEIGHT, local_from_global); let mut events = ctx.event_pump().unwrap(); let mut last_frame_time = time::Instant::now(); 'outer_loop: loop { for event in events.poll_iter() { match event { Event::Quit {.. } => break 'outer_loop, Event::KeyDown { scancode: Some(code), keymod, .. 
} => { if keymod.is_empty() || keymod == Mod::NUMMOD { match code { Scancode::Escape => break 'outer_loop, Scancode::W => camera.moving_forward = true, Scancode::S => camera.moving_backward = true, Scancode::A => camera.moving_left = true, Scancode::D => camera.moving_right = true, Scancode::Z => camera.moving_down = true, Scancode::Q => camera.moving_up = true, Scancode::T => camera.toggle_ct_mode(&gl), Scancode::U => camera.move_ct(-0.5, &gl), Scancode::I => camera.move_ct(0.5, &gl), Scancode::J => camera.move_far_plane_ct(-0.5, &gl), Scancode::K => camera.move_far_plane_ct(0.5, &gl), Scancode::Left => camera.turning_left = true, Scancode::Right => camera.turning_right = true, Scancode::Down => camera.turning_down = true, Scancode::Up => camera.turning_up = true, Scancode::O => renderer.toggle_show_octree_nodes(), Scancode::Num7 => renderer.adjust_gamma(-0.1), Scancode::Num8 => renderer.adjust_gamma(0.1), Scancode::Num9 => renderer.adjust_point_size(-0.1), Scancode::Num0 => renderer.adjust_point_size(0.1), _ => (), } } else if keymod.intersects(Mod::LCTRLMOD | Mod::RCTRLMOD) && keymod.intersects(Mod::LSHIFTMOD | Mod::RSHIFTMOD) { // CTRL + SHIFT is pressed. match code { Scancode::Num1 => save_camera(0, &pose_path, &camera), Scancode::Num2 => save_camera(1, &pose_path, &camera), Scancode::Num3 => save_camera(2, &pose_path, &camera), Scancode::Num4 => save_camera(3, &pose_path, &camera), Scancode::Num5 => save_camera(4, &pose_path, &camera), Scancode::Num6 => save_camera(5, &pose_path, &camera), Scancode::Num7 => save_camera(6, &pose_path, &camera),
{ self.max_nodes_moving }
conditional_block
lib.rs
// This thread waits for requests to calculate the currently visible nodes, runs a // calculation and sends the visible nodes back to the drawing thread. If multiple requests // queue up while it is processing one, it will drop all but the latest one before // restarting the next calculation. let (get_visible_nodes_params_tx, rx) = mpsc::channel::<Matrix4<f64>>(); let (tx, get_visible_nodes_result_rx) = mpsc::channel(); let octree_clone = octree.clone(); thread::spawn(move || { while let Ok(mut matrix) = rx.recv() { // Drain the channel, we only ever want to update the latest. while let Ok(newer_matrix) = rx.try_recv() { matrix = newer_matrix; } let visible_nodes = octree_clone.get_visible_nodes(&matrix); tx.send(visible_nodes).unwrap(); } }); Self { last_moving: now, last_log: now, visible_nodes: Vec::new(), node_drawer: NodeDrawer::new(&Rc::clone(&gl)), num_frames: 0, point_size: 1., gamma: 1., get_visible_nodes_params_tx, get_visible_nodes_result_rx, max_nodes_moving: max_nodes_in_memory, needs_drawing: true, show_octree_nodes: false, max_nodes_in_memory, node_views: NodeViewContainer::new(octree, max_nodes_in_memory), box_drawer: BoxDrawer::new(&Rc::clone(&gl)), world_to_gl: Matrix4::identity(), gl, } } pub fn camera_changed(&mut self, world_to_gl: &Matrix4<f64>) { self.last_moving = time::Instant::now(); self.needs_drawing = true; self.node_drawer.update_world_to_gl(world_to_gl); self.get_visible_nodes_params_tx.send(*world_to_gl).unwrap(); self.last_moving = time::Instant::now(); self.world_to_gl = *world_to_gl; } pub fn toggle_show_octree_nodes(&mut self) { self.show_octree_nodes =!self.show_octree_nodes; } pub fn adjust_gamma(&mut self, delta: f32) { self.gamma += delta; self.needs_drawing = true; } pub fn adjust_point_size(&mut self, delta: f32) { // Point size == 1. is the smallest that is rendered. self.point_size = (self.point_size + delta).max(1.); self.needs_drawing = true; } pub fn draw(&mut self) -> DrawResult { let mut draw_result = DrawResult::NoChange; let mut num_points_drawn = 0; let mut num_nodes_drawn = 0; let now = time::Instant::now(); let moving = now - self.last_moving < time::Duration::milliseconds(150); self.needs_drawing |= self.node_views.consume_arrived_nodes(&self.node_drawer); while let Ok(visible_nodes) = self.get_visible_nodes_result_rx.try_recv() { self.visible_nodes.clear(); self.visible_nodes.extend(visible_nodes); self.needs_drawing = true; } if self.needs_drawing { unsafe { self.gl.ClearColor(0., 0., 0., 1.); self.gl .Clear(opengl::COLOR_BUFFER_BIT | opengl::DEPTH_BUFFER_BIT); } } // We use a heuristic to keep the frame rate as stable as possible by increasing/decreasing the number of nodes to draw. 
let max_nodes_to_display = if moving { self.max_nodes_moving } else { self.max_nodes_in_memory }; let filtered_visible_nodes = self.visible_nodes.iter().take(max_nodes_to_display); for node_id in filtered_visible_nodes { let view = self.node_views.get_or_request(&node_id); if!self.needs_drawing || view.is_none() { continue; } let view = view.unwrap(); num_points_drawn += self.node_drawer.draw( view, 1, /* level of detail */ self.point_size, self.gamma, ); num_nodes_drawn += 1; if self.show_octree_nodes { self.box_drawer.draw_outlines( &view.meta.bounding_cube.to_aabb(), &self.world_to_gl, &YELLOW, ); } } if self.needs_drawing { draw_result = DrawResult::HasDrawn; } self.needs_drawing = moving; self.num_frames += 1; let now = time::Instant::now(); if now - self.last_log > time::Duration::seconds(1) { let duration_s = (now - self.last_log).as_seconds_f64(); let fps = f64::from(self.num_frames) / duration_s; if moving { if fps < 20. { self.max_nodes_moving = (self.max_nodes_moving as f32 * 0.9) as usize; } if fps > 25. && self.max_nodes_moving < self.max_nodes_in_memory { self.max_nodes_moving = (self.max_nodes_moving as f32 * 1.1) as usize; } } self.num_frames = 0; self.last_log = now; eprintln!( "FPS: {:.2}, Drew {} points from {} loaded nodes. {} nodes \ should be shown, Cache {} MB", fps, num_points_drawn, num_nodes_drawn, self.visible_nodes.len(), self.node_views.get_used_memory_bytes() as f32 / 1024. / 1024., ); } draw_result } } #[derive(Debug, Serialize, Deserialize)] pub struct CameraStates { states: Vec<camera::State>, } fn save_camera(index: usize, pose_path: &Option<PathBuf>, camera: &Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. Cannot save camera."); return; } assert!(index < 10); let mut states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); states.states[index] = camera.state(); match std::fs::write( pose_path.as_ref().unwrap(), serde_json::to_string_pretty(&states).unwrap().as_bytes(), ) { Ok(_) => (), Err(e) => eprintln!( "Could not write {}: {}", pose_path.as_ref().unwrap().display(), e ), } eprintln!("Saved current camera position as {}.", index); } fn load_camera(index: usize, pose_path: &Option<PathBuf>, camera: &mut Camera)
pub trait Extension { fn pre_init(app: clap::App) -> clap::App; fn new(matches: &clap::ArgMatches, opengl: Rc<opengl::Gl>) -> Self; fn local_from_global(matches: &clap::ArgMatches, octree: &Octree) -> Option<Isometry3<f64>>; fn camera_changed(&mut self, transform: &Matrix4<f64>); fn draw(&mut self); } trait Joystick { fn act(&self, camera: &mut Camera); fn joystick(&self) -> &sdl2::joystick::Joystick; } struct XBoxJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for XBoxJoystick { fn act(&self, camera: &mut Camera) { let right = f64::from(self.joystick.axis(0).unwrap()) / 1000.; let forward = f64::from(self.joystick.axis(1).unwrap()) / 1000.; let turning_right = -f64::from(self.joystick.axis(3).unwrap()) / 32000.; let turning_up = -f64::from(self.joystick.axis(4).unwrap()) / 32000.; camera.pan(right, 0., forward); camera.rotate(turning_up, turning_right); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } struct SpaceMouseJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for SpaceMouseJoystick { fn act(&self, camera: &mut Camera) { let x = f64::from(self.joystick.axis(0).unwrap()) / 500.; let y = f64::from(-self.joystick.axis(1).unwrap()) / 500.; let z = f64::from(-self.joystick.axis(2).unwrap()) / 500.; let up = f64::from(self.joystick.axis(3).unwrap()) / 500.; // Combine tilting and turning on the knob. let around = f64::from(self.joystick.axis(4).unwrap()) / 500. - f64::from(self.joystick.axis(5).unwrap()) / 500.; camera.pan(x, y, z); camera.rotate(up, around); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } pub fn run<T: Extension>(data_provider_factory: DataProviderFactory) { let mut app = clap::App::new("sdl_viewer").args(&[ clap::Arg::with_name("octree") .about("Input path of the octree.") .index(1) .required(true), clap::Arg::with_name("terrain") .long("terrain") .takes_value(true) .multiple(true) .about("Terrain directories (multiple possible)."), clap::Arg::with_name("cache_size_mb") .about( "Maximum cache size in MB for octree nodes in GPU memory. \ The default value is 2000 MB and the valid range is 1000 MB to 16000 MB.", ) .required(false), ]); app = T::pre_init(app); let matches = app.get_matches(); let octree_argument = matches.value_of("octree").unwrap(); // Maximum number of MB for the octree node cache. The default is 2 GB let cache_size_mb: usize = matches .value_of("cache_size_mb") .unwrap_or("2000") .parse() .expect("Could not parse 'cache_size_mb' option."); // Maximum number of MB for the octree node cache in range 1..16 GB. The default is 2 GB let limit_cache_size_mb = cmp::max(1000, cmp::min(16_000, cache_size_mb)); // Assuming about 200 KB per octree node on average let max_nodes_in_memory = limit_cache_size_mb * 5; // If no octree was generated create a FromDisk loader let octree: Arc<Octree> = Arc::from( data_provider_factory .generate_data_provider(octree_argument) .and_then(|provider| Octree::from_data_provider(provider)) .unwrap_or_else(|_| panic!("Couldn't create octree from path '{}'.", octree_argument)), ); let mut pose_path = None; let pose_path_buf = PathBuf::from(&octree_argument).join("poses.json"); if pose_path_buf.exists() { pose_path = Some(pose_path_buf); } let ctx = sdl2::init().unwrap(); let video_subsystem = ctx.video().unwrap(); // We need to open the joysticks we are interested in and keep the object alive to receive // input from it. We just open the first we find. 
let joystick_subsystem = ctx.joystick().unwrap(); let mut joysticks = Vec::new(); for idx in 0..joystick_subsystem .num_joysticks() .expect("Should be able to enumerate joysticks.") { if let Ok(joystick) = joystick_subsystem.open(idx) { let (kind, j) = if joystick.name().contains("Xbox") { ( "XBox controller", Box::new(XBoxJoystick { joystick }) as Box<dyn Joystick>, ) } else { ( "Space mouse", Box::new(SpaceMouseJoystick { joystick }) as Box<dyn Joystick>, ) }; eprintln!( "Found a joystick named '{}' ({} axes, {} buttons, {} balls, {} hats). Will treat it as a {}.", j.joystick().name(), j.joystick().num_axes(), j.joystick().num_buttons(), j.joystick().num_balls(), j.joystick().num_hats(), kind ); joysticks.push(j); } } let gl_attr = video_subsystem.gl_attr(); // TODO(hrapp): This should use OpenGL ES 2.0 to be compatible with WebGL, so this can be made // to work with emscripten. gl_attr.set_context_profile(GLProfile::Core); gl_attr.set_context_version(4, 1); const WINDOW_WIDTH: i32 = 800; const WINDOW_HEIGHT: i32 = 600; let window = match video_subsystem .window("sdl2_viewer", WINDOW_WIDTH as u32, WINDOW_HEIGHT as u32) .position_centered() .resizable() .opengl() .build() { Ok(window) => window, Err(err) => panic!("failed to create window: {}", err), }; // We need to create a context now, only after can we actually legally load the gl functions // and query 'gl_attr'. let _context = window.gl_create_context().unwrap(); let _swap_interval = video_subsystem.gl_set_swap_interval(SwapInterval::VSync); assert_eq!(gl_attr.context_profile(), GLProfile::Core); let gl = Rc::new(opengl::Gl::load_with(|s| { let ptr = video_subsystem.gl_get_proc_address(s); ptr as *const std::ffi::c_void })); let mut extension = T::new(&matches, Rc::clone(&gl)); let ext_local_from_global = T::local_from_global(&matches, &octree); let mut renderer = PointCloudRenderer::new(max_nodes_in_memory, Rc::clone(&gl), octree); let terrain_paths = matches.values_of("terrain").unwrap_or_default(); let mut terrain_renderer = TerrainRenderer::new(Rc::clone(&gl), terrain_paths); let local_from_global = ext_local_from_global.or_else(|| terrain_renderer.local_from_global()); let mut camera = Camera::new(&gl, WINDOW_WIDTH, WINDOW_HEIGHT, local_from_global); let mut events = ctx.event_pump().unwrap(); let mut last_frame_time = time::Instant::now(); 'outer_loop: loop { for event in events.poll_iter() { match event { Event::Quit {.. } => break 'outer_loop, Event::KeyDown { scancode: Some(code), keymod, .. 
} => { if keymod.is_empty() || keymod == Mod::NUMMOD { match code { Scancode::Escape => break 'outer_loop, Scancode::W => camera.moving_forward = true, Scancode::S => camera.moving_backward = true, Scancode::A => camera.moving_left = true, Scancode::D => camera.moving_right = true, Scancode::Z => camera.moving_down = true, Scancode::Q => camera.moving_up = true, Scancode::T => camera.toggle_ct_mode(&gl), Scancode::U => camera.move_ct(-0.5, &gl), Scancode::I => camera.move_ct(0.5, &gl), Scancode::J => camera.move_far_plane_ct(-0.5, &gl), Scancode::K => camera.move_far_plane_ct(0.5, &gl), Scancode::Left => camera.turning_left = true, Scancode::Right => camera.turning_right = true, Scancode::Down => camera.turning_down = true, Scancode::Up => camera.turning_up = true, Scancode::O => renderer.toggle_show_octree_nodes(), Scancode::Num7 => renderer.adjust_gamma(-0.1), Scancode::Num8 => renderer.adjust_gamma(0.1), Scancode::Num9 => renderer.adjust_point_size(-0.1), Scancode::Num0 => renderer.adjust_point_size(0.1), _ => (), } } else if keymod.intersects(Mod::LCTRLMOD | Mod::RCTRLMOD) && keymod.intersects(Mod::LSHIFTMOD | Mod::RSHIFTMOD) { // CTRL + SHIFT is pressed. match code { Scancode::Num1 => save_camera(0, &pose_path, &camera), Scancode::Num2 => save_camera(1, &pose_path, &camera), Scancode::Num3 => save_camera(2, &pose_path, &camera), Scancode::Num4 => save_camera(3, &pose_path, &camera), Scancode::Num5 => save_camera(4, &pose_path, &camera), Scancode::Num6 => save_camera(5, &pose_path, &camera), Scancode::Num7 => save_camera(6, &pose_path, &camera),
{ if pose_path.is_none() { eprintln!("Not serving from a local directory. Cannot load camera."); return; } assert!(index < 10); let states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); camera.set_state(states.states[index]); }
identifier_body
lib.rs
{ gl: Rc<opengl::Gl>, node_drawer: NodeDrawer, last_moving: time::Instant, // TODO(sirver): Logging does not fit into this classes responsibilities. last_log: time::Instant, visible_nodes: Vec<octree::NodeId>, get_visible_nodes_params_tx: mpsc::Sender<Matrix4<f64>>, get_visible_nodes_result_rx: mpsc::Receiver<Vec<octree::NodeId>>, num_frames: u32, point_size: f32, gamma: f32, needs_drawing: bool, max_nodes_in_memory: usize, world_to_gl: Matrix4<f64>, max_nodes_moving: usize, show_octree_nodes: bool, node_views: NodeViewContainer, box_drawer: BoxDrawer, } #[derive(Debug)] enum DrawResult { HasDrawn, NoChange, } impl PointCloudRenderer { pub fn new( max_nodes_in_memory: usize, gl: Rc<opengl::Gl>, octree: Arc<octree::Octree>, ) -> Self { let now = time::Instant::now(); // This thread waits for requests to calculate the currently visible nodes, runs a // calculation and sends the visible nodes back to the drawing thread. If multiple requests // queue up while it is processing one, it will drop all but the latest one before // restarting the next calculation. let (get_visible_nodes_params_tx, rx) = mpsc::channel::<Matrix4<f64>>(); let (tx, get_visible_nodes_result_rx) = mpsc::channel(); let octree_clone = octree.clone(); thread::spawn(move || { while let Ok(mut matrix) = rx.recv() { // Drain the channel, we only ever want to update the latest. while let Ok(newer_matrix) = rx.try_recv() { matrix = newer_matrix; } let visible_nodes = octree_clone.get_visible_nodes(&matrix); tx.send(visible_nodes).unwrap(); } }); Self { last_moving: now, last_log: now, visible_nodes: Vec::new(), node_drawer: NodeDrawer::new(&Rc::clone(&gl)), num_frames: 0, point_size: 1., gamma: 1., get_visible_nodes_params_tx, get_visible_nodes_result_rx, max_nodes_moving: max_nodes_in_memory, needs_drawing: true, show_octree_nodes: false, max_nodes_in_memory, node_views: NodeViewContainer::new(octree, max_nodes_in_memory), box_drawer: BoxDrawer::new(&Rc::clone(&gl)), world_to_gl: Matrix4::identity(), gl, } } pub fn camera_changed(&mut self, world_to_gl: &Matrix4<f64>) { self.last_moving = time::Instant::now(); self.needs_drawing = true; self.node_drawer.update_world_to_gl(world_to_gl); self.get_visible_nodes_params_tx.send(*world_to_gl).unwrap(); self.last_moving = time::Instant::now(); self.world_to_gl = *world_to_gl; } pub fn toggle_show_octree_nodes(&mut self) { self.show_octree_nodes =!self.show_octree_nodes; } pub fn adjust_gamma(&mut self, delta: f32) { self.gamma += delta; self.needs_drawing = true; } pub fn adjust_point_size(&mut self, delta: f32) { // Point size == 1. is the smallest that is rendered. self.point_size = (self.point_size + delta).max(1.); self.needs_drawing = true; } pub fn draw(&mut self) -> DrawResult { let mut draw_result = DrawResult::NoChange; let mut num_points_drawn = 0; let mut num_nodes_drawn = 0; let now = time::Instant::now(); let moving = now - self.last_moving < time::Duration::milliseconds(150); self.needs_drawing |= self.node_views.consume_arrived_nodes(&self.node_drawer); while let Ok(visible_nodes) = self.get_visible_nodes_result_rx.try_recv() { self.visible_nodes.clear(); self.visible_nodes.extend(visible_nodes); self.needs_drawing = true; } if self.needs_drawing { unsafe { self.gl.ClearColor(0., 0., 0., 1.); self.gl .Clear(opengl::COLOR_BUFFER_BIT | opengl::DEPTH_BUFFER_BIT); } } // We use a heuristic to keep the frame rate as stable as possible by increasing/decreasing the number of nodes to draw. 
let max_nodes_to_display = if moving { self.max_nodes_moving } else { self.max_nodes_in_memory }; let filtered_visible_nodes = self.visible_nodes.iter().take(max_nodes_to_display); for node_id in filtered_visible_nodes { let view = self.node_views.get_or_request(&node_id); if!self.needs_drawing || view.is_none() { continue; } let view = view.unwrap(); num_points_drawn += self.node_drawer.draw( view, 1, /* level of detail */ self.point_size, self.gamma, ); num_nodes_drawn += 1; if self.show_octree_nodes { self.box_drawer.draw_outlines( &view.meta.bounding_cube.to_aabb(), &self.world_to_gl, &YELLOW, ); } } if self.needs_drawing { draw_result = DrawResult::HasDrawn; } self.needs_drawing = moving; self.num_frames += 1; let now = time::Instant::now(); if now - self.last_log > time::Duration::seconds(1) { let duration_s = (now - self.last_log).as_seconds_f64(); let fps = f64::from(self.num_frames) / duration_s; if moving { if fps < 20. { self.max_nodes_moving = (self.max_nodes_moving as f32 * 0.9) as usize; } if fps > 25. && self.max_nodes_moving < self.max_nodes_in_memory { self.max_nodes_moving = (self.max_nodes_moving as f32 * 1.1) as usize; } } self.num_frames = 0; self.last_log = now; eprintln!( "FPS: {:.2}, Drew {} points from {} loaded nodes. {} nodes \ should be shown, Cache {} MB", fps, num_points_drawn, num_nodes_drawn, self.visible_nodes.len(), self.node_views.get_used_memory_bytes() as f32 / 1024. / 1024., ); } draw_result } } #[derive(Debug, Serialize, Deserialize)] pub struct CameraStates { states: Vec<camera::State>, } fn save_camera(index: usize, pose_path: &Option<PathBuf>, camera: &Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. Cannot save camera."); return; } assert!(index < 10); let mut states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); states.states[index] = camera.state(); match std::fs::write( pose_path.as_ref().unwrap(), serde_json::to_string_pretty(&states).unwrap().as_bytes(), ) { Ok(_) => (), Err(e) => eprintln!( "Could not write {}: {}", pose_path.as_ref().unwrap().display(), e ), } eprintln!("Saved current camera position as {}.", index); } fn load_camera(index: usize, pose_path: &Option<PathBuf>, camera: &mut Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. 
Cannot load camera."); return; } assert!(index < 10); let states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); camera.set_state(states.states[index]); } pub trait Extension { fn pre_init(app: clap::App) -> clap::App; fn new(matches: &clap::ArgMatches, opengl: Rc<opengl::Gl>) -> Self; fn local_from_global(matches: &clap::ArgMatches, octree: &Octree) -> Option<Isometry3<f64>>; fn camera_changed(&mut self, transform: &Matrix4<f64>); fn draw(&mut self); } trait Joystick { fn act(&self, camera: &mut Camera); fn joystick(&self) -> &sdl2::joystick::Joystick; } struct XBoxJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for XBoxJoystick { fn act(&self, camera: &mut Camera) { let right = f64::from(self.joystick.axis(0).unwrap()) / 1000.; let forward = f64::from(self.joystick.axis(1).unwrap()) / 1000.; let turning_right = -f64::from(self.joystick.axis(3).unwrap()) / 32000.; let turning_up = -f64::from(self.joystick.axis(4).unwrap()) / 32000.; camera.pan(right, 0., forward); camera.rotate(turning_up, turning_right); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } struct SpaceMouseJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for SpaceMouseJoystick { fn act(&self, camera: &mut Camera) { let x = f64::from(self.joystick.axis(0).unwrap()) / 500.; let y = f64::from(-self.joystick.axis(1).unwrap()) / 500.; let z = f64::from(-self.joystick.axis(2).unwrap()) / 500.; let up = f64::from(self.joystick.axis(3).unwrap()) / 500.; // Combine tilting and turning on the knob. let around = f64::from(self.joystick.axis(4).unwrap()) / 500. - f64::from(self.joystick.axis(5).unwrap()) / 500.; camera.pan(x, y, z); camera.rotate(up, around); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } pub fn run<T: Extension>(data_provider_factory: DataProviderFactory) { let mut app = clap::App::new("sdl_viewer").args(&[ clap::Arg::with_name("octree") .about("Input path of the octree.") .index(1) .required(true), clap::Arg::with_name("terrain") .long("terrain") .takes_value(true) .multiple(true) .about("Terrain directories (multiple possible)."), clap::Arg::with_name("cache_size_mb") .about( "Maximum cache size in MB for octree nodes in GPU memory. \ The default value is 2000 MB and the valid range is 1000 MB to 16000 MB.", ) .required(false), ]); app = T::pre_init(app); let matches = app.get_matches(); let octree_argument = matches.value_of("octree").unwrap(); // Maximum number of MB for the octree node cache. The default is 2 GB let cache_size_mb: usize = matches .value_of("cache_size_mb") .unwrap_or("2000") .parse() .expect("Could not parse 'cache_size_mb' option."); // Maximum number of MB for the octree node cache in range 1..16 GB. 
The default is 2 GB let limit_cache_size_mb = cmp::max(1000, cmp::min(16_000, cache_size_mb)); // Assuming about 200 KB per octree node on average let max_nodes_in_memory = limit_cache_size_mb * 5; // If no octree was generated create a FromDisk loader let octree: Arc<Octree> = Arc::from( data_provider_factory .generate_data_provider(octree_argument) .and_then(|provider| Octree::from_data_provider(provider)) .unwrap_or_else(|_| panic!("Couldn't create octree from path '{}'.", octree_argument)), ); let mut pose_path = None; let pose_path_buf = PathBuf::from(&octree_argument).join("poses.json"); if pose_path_buf.exists() { pose_path = Some(pose_path_buf); } let ctx = sdl2::init().unwrap(); let video_subsystem = ctx.video().unwrap(); // We need to open the joysticks we are interested in and keep the object alive to receive // input from it. We just open the first we find. let joystick_subsystem = ctx.joystick().unwrap(); let mut joysticks = Vec::new(); for idx in 0..joystick_subsystem .num_joysticks() .expect("Should be able to enumerate joysticks.") { if let Ok(joystick) = joystick_subsystem.open(idx) { let (kind, j) = if joystick.name().contains("Xbox") { ( "XBox controller", Box::new(XBoxJoystick { joystick }) as Box<dyn Joystick>, ) } else { ( "Space mouse", Box::new(SpaceMouseJoystick { joystick }) as Box<dyn Joystick>, ) }; eprintln!( "Found a joystick named '{}' ({} axes, {} buttons, {} balls, {} hats). Will treat it as a {}.", j.joystick().name(), j.joystick().num_axes(), j.joystick().num_buttons(), j.joystick().num_balls(), j.joystick().num_hats(), kind ); joysticks.push(j); } } let gl_attr = video_subsystem.gl_attr(); // TODO(hrapp): This should use OpenGL ES 2.0 to be compatible with WebGL, so this can be made // to work with emscripten. gl_attr.set_context_profile(GLProfile::Core); gl_attr.set_context_version(4, 1); const WINDOW_WIDTH: i32 = 800; const WINDOW_HEIGHT: i32 = 600; let window = match video_subsystem .window("sdl2_viewer", WINDOW_WIDTH as u32, WINDOW_HEIGHT as u32) .position_centered() .resizable() .opengl() .build() { Ok(window) => window, Err(err) => panic!("failed to create window: {}", err), }; // We need to create a context now, only after can we actually legally load the gl functions // and query 'gl_attr'. let _context = window.gl_create_context().unwrap(); let _swap_interval = video_subsystem.gl_set_swap_interval(SwapInterval::VSync); assert_eq!(gl_attr.context_profile(), GLProfile::Core); let gl = Rc::new(opengl::Gl::load_with(|s| { let ptr = video_subsystem.gl_get_proc_address(s); ptr as *const std::ffi::c_void })); let mut extension = T::new(&matches, Rc::clone(&gl)); let ext_local_from_global = T::local_from_global(&matches, &octree); let mut renderer = PointCloudRenderer::new(max_nodes_in_memory, Rc::clone(&gl), octree); let terrain_paths = matches.values_of("terrain").unwrap_or_default(); let mut terrain_renderer = TerrainRenderer::new(Rc::clone(&gl), terrain_paths); let local_from_global = ext_local_from_global.or_else(|| terrain_renderer.local_from_global()); let mut camera = Camera::new(&gl, WINDOW_WIDTH, WINDOW_HEIGHT, local_from_global); let mut events = ctx.event_pump().unwrap(); let mut last_frame_time = time::Instant::now(); 'outer_loop: loop { for event in events.poll_iter() { match event { Event::Quit {.. } => break 'outer_loop, Event::KeyDown { scancode: Some(code), keymod, .. 
} => { if keymod.is_empty() || keymod == Mod::NUMMOD { match code { Scancode::Escape => break 'outer_loop, Scancode::W => camera.moving_forward = true, Scancode::S => camera.moving_backward = true, Scancode::A => camera.moving_left = true, Scancode::D => camera.moving_right = true, Scancode::Z => camera.moving_down = true, Scancode::Q => camera.moving_up = true, Scancode::T => camera.toggle_ct_mode(&gl), Scancode::U => camera.move_ct(-0.5, &gl), Scancode::I => camera.move_ct(0.5, &gl), Scancode::J => camera.move_far_plane_ct(-0.5, &gl), Scancode::K => camera.move_far_plane_ct(0.5, &gl), Scancode::Left => camera.turning_left = true, Scancode::Right => camera.turning_right = true, Scancode::Down => camera.turning_down = true, Scancode::Up => camera.turning_up = true, Scancode::O => renderer.toggle_show_octree_nodes(), Scancode::Num7 => renderer.adjust_gamma(-0.1), Scancode::Num8 => renderer.adjust_gamma(0.1), Scancode::Num9 => renderer.adjust_point_size(-0.1), Scancode::Num0 => renderer.adjust_point_size(0.1), _ => (), } } else if keymod.intersects(Mod::LCTRLMOD | Mod::RCTRLMOD) && keymod.intersects(Mod::LSHIFTMOD | Mod::RSHIFTMOD) {
PointCloudRenderer
identifier_name
lib.rs
pub mod box_drawer; pub mod graphic; pub mod node_drawer; pub mod terrain_drawer; use crate::box_drawer::BoxDrawer; use crate::camera::Camera; use crate::node_drawer::{NodeDrawer, NodeViewContainer}; use crate::terrain_drawer::TerrainRenderer; use nalgebra::{Isometry3, Matrix4}; use point_viewer::color::YELLOW; use point_viewer::data_provider::DataProviderFactory; use point_viewer::octree::{self, Octree}; use sdl2::event::{Event, WindowEvent}; use sdl2::keyboard::{Mod, Scancode}; use sdl2::video::{GLProfile, SwapInterval}; use std::cmp; use std::io; use std::path::PathBuf; use std::rc::Rc; use std::sync::{mpsc, Arc}; use std::thread; struct PointCloudRenderer { gl: Rc<opengl::Gl>, node_drawer: NodeDrawer, last_moving: time::Instant, // TODO(sirver): Logging does not fit into this classes responsibilities. last_log: time::Instant, visible_nodes: Vec<octree::NodeId>, get_visible_nodes_params_tx: mpsc::Sender<Matrix4<f64>>, get_visible_nodes_result_rx: mpsc::Receiver<Vec<octree::NodeId>>, num_frames: u32, point_size: f32, gamma: f32, needs_drawing: bool, max_nodes_in_memory: usize, world_to_gl: Matrix4<f64>, max_nodes_moving: usize, show_octree_nodes: bool, node_views: NodeViewContainer, box_drawer: BoxDrawer, } #[derive(Debug)] enum DrawResult { HasDrawn, NoChange, } impl PointCloudRenderer { pub fn new( max_nodes_in_memory: usize, gl: Rc<opengl::Gl>, octree: Arc<octree::Octree>, ) -> Self { let now = time::Instant::now(); // This thread waits for requests to calculate the currently visible nodes, runs a // calculation and sends the visible nodes back to the drawing thread. If multiple requests // queue up while it is processing one, it will drop all but the latest one before // restarting the next calculation. let (get_visible_nodes_params_tx, rx) = mpsc::channel::<Matrix4<f64>>(); let (tx, get_visible_nodes_result_rx) = mpsc::channel(); let octree_clone = octree.clone(); thread::spawn(move || { while let Ok(mut matrix) = rx.recv() { // Drain the channel, we only ever want to update the latest. while let Ok(newer_matrix) = rx.try_recv() { matrix = newer_matrix; } let visible_nodes = octree_clone.get_visible_nodes(&matrix); tx.send(visible_nodes).unwrap(); } }); Self { last_moving: now, last_log: now, visible_nodes: Vec::new(), node_drawer: NodeDrawer::new(&Rc::clone(&gl)), num_frames: 0, point_size: 1., gamma: 1., get_visible_nodes_params_tx, get_visible_nodes_result_rx, max_nodes_moving: max_nodes_in_memory, needs_drawing: true, show_octree_nodes: false, max_nodes_in_memory, node_views: NodeViewContainer::new(octree, max_nodes_in_memory), box_drawer: BoxDrawer::new(&Rc::clone(&gl)), world_to_gl: Matrix4::identity(), gl, } } pub fn camera_changed(&mut self, world_to_gl: &Matrix4<f64>) { self.last_moving = time::Instant::now(); self.needs_drawing = true; self.node_drawer.update_world_to_gl(world_to_gl); self.get_visible_nodes_params_tx.send(*world_to_gl).unwrap(); self.last_moving = time::Instant::now(); self.world_to_gl = *world_to_gl; } pub fn toggle_show_octree_nodes(&mut self) { self.show_octree_nodes =!self.show_octree_nodes; } pub fn adjust_gamma(&mut self, delta: f32) { self.gamma += delta; self.needs_drawing = true; } pub fn adjust_point_size(&mut self, delta: f32) { // Point size == 1. is the smallest that is rendered. 
self.point_size = (self.point_size + delta).max(1.); self.needs_drawing = true; } pub fn draw(&mut self) -> DrawResult { let mut draw_result = DrawResult::NoChange; let mut num_points_drawn = 0; let mut num_nodes_drawn = 0; let now = time::Instant::now(); let moving = now - self.last_moving < time::Duration::milliseconds(150); self.needs_drawing |= self.node_views.consume_arrived_nodes(&self.node_drawer); while let Ok(visible_nodes) = self.get_visible_nodes_result_rx.try_recv() { self.visible_nodes.clear(); self.visible_nodes.extend(visible_nodes); self.needs_drawing = true; } if self.needs_drawing { unsafe { self.gl.ClearColor(0., 0., 0., 1.); self.gl .Clear(opengl::COLOR_BUFFER_BIT | opengl::DEPTH_BUFFER_BIT); } } // We use a heuristic to keep the frame rate as stable as possible by increasing/decreasing the number of nodes to draw. let max_nodes_to_display = if moving { self.max_nodes_moving } else { self.max_nodes_in_memory }; let filtered_visible_nodes = self.visible_nodes.iter().take(max_nodes_to_display); for node_id in filtered_visible_nodes { let view = self.node_views.get_or_request(&node_id); if!self.needs_drawing || view.is_none() { continue; } let view = view.unwrap(); num_points_drawn += self.node_drawer.draw( view, 1, /* level of detail */ self.point_size, self.gamma, ); num_nodes_drawn += 1; if self.show_octree_nodes { self.box_drawer.draw_outlines( &view.meta.bounding_cube.to_aabb(), &self.world_to_gl, &YELLOW, ); } } if self.needs_drawing { draw_result = DrawResult::HasDrawn; } self.needs_drawing = moving; self.num_frames += 1; let now = time::Instant::now(); if now - self.last_log > time::Duration::seconds(1) { let duration_s = (now - self.last_log).as_seconds_f64(); let fps = f64::from(self.num_frames) / duration_s; if moving { if fps < 20. { self.max_nodes_moving = (self.max_nodes_moving as f32 * 0.9) as usize; } if fps > 25. && self.max_nodes_moving < self.max_nodes_in_memory { self.max_nodes_moving = (self.max_nodes_moving as f32 * 1.1) as usize; } } self.num_frames = 0; self.last_log = now; eprintln!( "FPS: {:.2}, Drew {} points from {} loaded nodes. {} nodes \ should be shown, Cache {} MB", fps, num_points_drawn, num_nodes_drawn, self.visible_nodes.len(), self.node_views.get_used_memory_bytes() as f32 / 1024. / 1024., ); } draw_result } } #[derive(Debug, Serialize, Deserialize)] pub struct CameraStates { states: Vec<camera::State>, } fn save_camera(index: usize, pose_path: &Option<PathBuf>, camera: &Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. Cannot save camera."); return; } assert!(index < 10); let mut states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); states.states[index] = camera.state(); match std::fs::write( pose_path.as_ref().unwrap(), serde_json::to_string_pretty(&states).unwrap().as_bytes(), ) { Ok(_) => (), Err(e) => eprintln!( "Could not write {}: {}", pose_path.as_ref().unwrap().display(), e ), } eprintln!("Saved current camera position as {}.", index); } fn load_camera(index: usize, pose_path: &Option<PathBuf>, camera: &mut Camera) { if pose_path.is_none() { eprintln!("Not serving from a local directory. 
Cannot load camera."); return; } assert!(index < 10); let states = ::std::fs::read_to_string(pose_path.as_ref().unwrap()) .and_then(|data| { serde_json::from_str(&data) .map_err(|_| io::Error::new(io::ErrorKind::Other, "Could not read camera file.")) }) .unwrap_or_else(|_| CameraStates { states: vec![camera.state(); 10], }); camera.set_state(states.states[index]); } pub trait Extension { fn pre_init(app: clap::App) -> clap::App; fn new(matches: &clap::ArgMatches, opengl: Rc<opengl::Gl>) -> Self; fn local_from_global(matches: &clap::ArgMatches, octree: &Octree) -> Option<Isometry3<f64>>; fn camera_changed(&mut self, transform: &Matrix4<f64>); fn draw(&mut self); } trait Joystick { fn act(&self, camera: &mut Camera); fn joystick(&self) -> &sdl2::joystick::Joystick; } struct XBoxJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for XBoxJoystick { fn act(&self, camera: &mut Camera) { let right = f64::from(self.joystick.axis(0).unwrap()) / 1000.; let forward = f64::from(self.joystick.axis(1).unwrap()) / 1000.; let turning_right = -f64::from(self.joystick.axis(3).unwrap()) / 32000.; let turning_up = -f64::from(self.joystick.axis(4).unwrap()) / 32000.; camera.pan(right, 0., forward); camera.rotate(turning_up, turning_right); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } struct SpaceMouseJoystick { joystick: sdl2::joystick::Joystick, } impl Joystick for SpaceMouseJoystick { fn act(&self, camera: &mut Camera) { let x = f64::from(self.joystick.axis(0).unwrap()) / 500.; let y = f64::from(-self.joystick.axis(1).unwrap()) / 500.; let z = f64::from(-self.joystick.axis(2).unwrap()) / 500.; let up = f64::from(self.joystick.axis(3).unwrap()) / 500.; // Combine tilting and turning on the knob. let around = f64::from(self.joystick.axis(4).unwrap()) / 500. - f64::from(self.joystick.axis(5).unwrap()) / 500.; camera.pan(x, y, z); camera.rotate(up, around); } fn joystick(&self) -> &sdl2::joystick::Joystick { &self.joystick } } pub fn run<T: Extension>(data_provider_factory: DataProviderFactory) { let mut app = clap::App::new("sdl_viewer").args(&[ clap::Arg::with_name("octree") .about("Input path of the octree.") .index(1) .required(true), clap::Arg::with_name("terrain") .long("terrain") .takes_value(true) .multiple(true) .about("Terrain directories (multiple possible)."), clap::Arg::with_name("cache_size_mb") .about( "Maximum cache size in MB for octree nodes in GPU memory. \ The default value is 2000 MB and the valid range is 1000 MB to 16000 MB.", ) .required(false), ]); app = T::pre_init(app); let matches = app.get_matches(); let octree_argument = matches.value_of("octree").unwrap(); // Maximum number of MB for the octree node cache. The default is 2 GB let cache_size_mb: usize = matches .value_of("cache_size_mb") .unwrap_or("2000") .parse() .expect("Could not parse 'cache_size_mb' option."); // Maximum number of MB for the octree node cache in range 1..16 GB. 
The default is 2 GB let limit_cache_size_mb = cmp::max(1000, cmp::min(16_000, cache_size_mb)); // Assuming about 200 KB per octree node on average let max_nodes_in_memory = limit_cache_size_mb * 5; // If no octree was generated create a FromDisk loader let octree: Arc<Octree> = Arc::from( data_provider_factory .generate_data_provider(octree_argument) .and_then(|provider| Octree::from_data_provider(provider)) .unwrap_or_else(|_| panic!("Couldn't create octree from path '{}'.", octree_argument)), ); let mut pose_path = None; let pose_path_buf = PathBuf::from(&octree_argument).join("poses.json"); if pose_path_buf.exists() { pose_path = Some(pose_path_buf); } let ctx = sdl2::init().unwrap(); let video_subsystem = ctx.video().unwrap(); // We need to open the joysticks we are interested in and keep the object alive to receive // input from it. We just open the first we find. let joystick_subsystem = ctx.joystick().unwrap(); let mut joysticks = Vec::new(); for idx in 0..joystick_subsystem .num_joysticks() .expect("Should be able to enumerate joysticks.") { if let Ok(joystick) = joystick_subsystem.open(idx) { let (kind, j) = if joystick.name().contains("Xbox") { ( "XBox controller", Box::new(XBoxJoystick { joystick }) as Box<dyn Joystick>, ) } else { ( "Space mouse", Box::new(SpaceMouseJoystick { joystick }) as Box<dyn Joystick>, ) }; eprintln!( "Found a joystick named '{}' ({} axes, {} buttons, {} balls, {} hats). Will treat it as a {}.", j.joystick().name(), j.joystick().num_axes(), j.joystick().num_buttons(), j.joystick().num_balls(), j.joystick().num_hats(), kind ); joysticks.push(j); } } let gl_attr = video_subsystem.gl_attr(); // TODO(hrapp): This should use OpenGL ES 2.0 to be compatible with WebGL, so this can be made // to work with emscripten. gl_attr.set_context_profile(GLProfile::Core); gl_attr.set_context_version(4, 1); const WINDOW_WIDTH: i32 = 800; const WINDOW_HEIGHT: i32 = 600; let window = match video_subsystem .window("sdl2_viewer", WINDOW_WIDTH as u32, WINDOW_HEIGHT as u32) .position_centered() .resizable() .opengl() .build() { Ok(window) => window, Err(err) => panic!("failed to create window: {}", err), }; // We need to create a context now, only after can we actually legally load the gl functions // and query 'gl_attr'. let _context = window.gl_create_context().unwrap(); let _swap_interval = video_subsystem.gl_set_swap_interval(SwapInterval::VSync); assert_eq!(gl_attr.context_profile(), GLProfile::Core); let gl = Rc::new(opengl::Gl::load_with(|s| { let ptr = video_subsystem.gl_get_proc_address(s); ptr as *const std::ffi::c_void })); let mut extension = T::new(&matches, Rc::clone(&gl)); let ext_local_from_global = T::local_from_global(&matches, &octree); let mut renderer = PointCloudRenderer::new(max_nodes_in_memory, Rc::clone(&gl), octree); let terrain_paths = matches.values_of("terrain").unwrap_or_default(); let mut terrain_renderer = TerrainRenderer::new(Rc::clone(&gl), terrain_paths); let local_from_global = ext_local_from_global.or_else(|| terrain_renderer.local_from_global()); let mut camera = Camera::new(&gl, WINDOW_WIDTH, WINDOW_HEIGHT, local_from_global); let mut events = ctx.event_pump().unwrap(); let mut last_frame_time = time::Instant::now(); 'outer_loop: loop { for event in events.poll_iter() { match event { Event::Quit {.. } => break 'outer_loop, Event::KeyDown { scancode: Some(code), keymod, .. 
} => { if keymod.is_empty() || keymod == Mod::NUMMOD { match code { Scancode::Escape => break 'outer_loop, Scancode::W => camera.moving_forward = true, Scancode::S => camera.moving_backward = true, Scancode::A => camera.moving_left = true, Scancode::D => camera.moving_right = true, Scancode::Z => camera.moving_down = true, Scancode::Q => camera.moving_up = true, Scancode::T => camera.toggle_ct_mode(&gl), Scancode::U => camera.move_ct(-0.5, &gl), Scancode::I => camera.move_ct(0.5, &gl), Scancode::J => camera.move_far_plane_ct(-0.5, &gl), Scancode::K => camera.move_far_plane_ct(0.5, &gl), Scancode::Left => camera.turning_left = true, Scancode::Right => camera.turning_right = true, Scancode::Down => camera.turning_down = true, Scancode::Up => camera.turning_up = true, Scancode::O => renderer.toggle_show_octree_nodes(), Scancode::Num7 => renderer.adjust_gamma(-0.1), Scancode::Num8 => renderer.adjust_gamma(0.1),
#[rustversion::attr(since(1.45), allow(clippy::manual_non_exhaustive))] pub mod opengl { include!(concat!(env!("OUT_DIR"), "/bindings.rs")); }
random_line_split
problem-022.rs
// Copyright 2016 Peter Beard // Distributed under the GNU GPL v2. For full terms, see the LICENSE file. // // Using names.txt (right click and 'Save Link/Target As...'), a 46K text // file containing over five-thousand first names, begin by sorting it into // alphabetical order. Then working out the alphabetical value for each name, // multiply this value by its alphabetical position in the list to obtain a name score. // // For example, when the list is sorted into alphabetical order, COLIN, which // is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN // would obtain a score of 938 Γ— 53 = 49714. // // What is the total of all the name scores in the file? #![feature(test)] extern crate test; use std::io::prelude::*; use std::io::BufReader; use std::fs::File; /// Calculate the score for a name fn score(name: &str) -> u32 { let mut s = 0; for c in name.chars() { if c.is_alphabetic() { s += c as u32 - 64; } } s } pub fn solution(data_file: &str) -> u32 { let f = match File::open(data_file) { Ok(file) => file, Err(e) => panic!("Failed to read file: {}", e) }; let mut reader = BufReader::new(f); // We're only interested in the first line let mut line = String::new(); match reader.read_line(&mut line) { Ok(len) => len, Err(e) => panic!("Failed to read line: {}", e) }; let mut names: Vec<&str> = line.split(',').collect(); names.sort(); let scores: Vec<u32> = names.into_iter().map(score).collect(); let mut sum = 0; for i in 0..scores.len() { sum += scores[i] * (i as u32 + 1); } sum } fn main() { println!("The sum of the scores of the alphabetized names is {}", solution("../../../data/p022_names.txt") ); } #[cfg(test)] mod tests { use super::*; use test::Bencher; #[test] fn correct() { assert_eq!(871198282, solution("../data/p022_names.txt")); } #[bench] fn b
b: &mut Bencher) { b.iter(|| solution("../data/p022_names.txt")); } }
ench(
identifier_name
problem-022.rs
// Copyright 2016 Peter Beard // Distributed under the GNU GPL v2. For full terms, see the LICENSE file. // // Using names.txt (right click and 'Save Link/Target As...'), a 46K text // file containing over five-thousand first names, begin by sorting it into // alphabetical order. Then working out the alphabetical value for each name, // multiply this value by its alphabetical position in the list to obtain a name score. // // For example, when the list is sorted into alphabetical order, COLIN, which // is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN // would obtain a score of 938 Γ— 53 = 49714. // // What is the total of all the name scores in the file? #![feature(test)] extern crate test; use std::io::prelude::*; use std::io::BufReader; use std::fs::File; /// Calculate the score for a name fn score(name: &str) -> u32 { let mut s = 0; for c in name.chars() { if c.is_alphabetic() { s += c as u32 - 64; } } s } pub fn solution(data_file: &str) -> u32 { let f = match File::open(data_file) { Ok(file) => file, Err(e) => panic!("Failed to read file: {}", e) }; let mut reader = BufReader::new(f); // We're only interested in the first line let mut line = String::new(); match reader.read_line(&mut line) { Ok(len) => len, Err(e) => panic!("Failed to read line: {}", e) }; let mut names: Vec<&str> = line.split(',').collect(); names.sort(); let scores: Vec<u32> = names.into_iter().map(score).collect(); let mut sum = 0; for i in 0..scores.len() { sum += scores[i] * (i as u32 + 1); } sum } fn main() { println!("The sum of the scores of the alphabetized names is {}", solution("../../../data/p022_names.txt") ); } #[cfg(test)] mod tests { use super::*; use test::Bencher; #[test] fn correct() { assert_eq!(871198282, solution("../data/p022_names.txt")); } #[bench] fn bench(b: &mut Bencher) {
}
b.iter(|| solution("../data/p022_names.txt")); }
identifier_body
problem-022.rs
// Copyright 2016 Peter Beard // Distributed under the GNU GPL v2. For full terms, see the LICENSE file. // // Using names.txt (right click and 'Save Link/Target As...'), a 46K text // file containing over five-thousand first names, begin by sorting it into // alphabetical order. Then working out the alphabetical value for each name, // multiply this value by its alphabetical position in the list to obtain a name score. // // For example, when the list is sorted into alphabetical order, COLIN, which // is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN // would obtain a score of 938 Γ— 53 = 49714. // // What is the total of all the name scores in the file? #![feature(test)] extern crate test; use std::io::prelude::*; use std::io::BufReader; use std::fs::File; /// Calculate the score for a name fn score(name: &str) -> u32 {
} } s } pub fn solution(data_file: &str) -> u32 { let f = match File::open(data_file) { Ok(file) => file, Err(e) => panic!("Failed to read file: {}", e) }; let mut reader = BufReader::new(f); // We're only interested in the first line let mut line = String::new(); match reader.read_line(&mut line) { Ok(len) => len, Err(e) => panic!("Failed to read line: {}", e) }; let mut names: Vec<&str> = line.split(',').collect(); names.sort(); let scores: Vec<u32> = names.into_iter().map(score).collect(); let mut sum = 0; for i in 0..scores.len() { sum += scores[i] * (i as u32 + 1); } sum } fn main() { println!("The sum of the scores of the alphabetized names is {}", solution("../../../data/p022_names.txt") ); } #[cfg(test)] mod tests { use super::*; use test::Bencher; #[test] fn correct() { assert_eq!(871198282, solution("../data/p022_names.txt")); } #[bench] fn bench(b: &mut Bencher) { b.iter(|| solution("../data/p022_names.txt")); } }
let mut s = 0; for c in name.chars() { if c.is_alphabetic() { s += c as u32 - 64;
random_line_split
kind.rs
use std::convert::TryInto; use std::fmt; use crate::mir::interpret::{AllocId, ConstValue, Scalar}; use crate::mir::Promoted; use crate::ty::subst::{InternalSubsts, SubstsRef}; use crate::ty::ParamEnv; use crate::ty::{self, TyCtxt, TypeFoldable}; use rustc_errors::ErrorReported; use rustc_hir::def_id::DefId; use rustc_macros::HashStable; use rustc_target::abi::Size; use super::ScalarInt; /// An unevaluated, potentially generic, constant. /// /// If `substs_` is `None` it means that this anon const /// still has its default substs. /// /// We check for all possible substs in `fn default_anon_const_substs`, /// so refer to that check for more info. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)] #[derive(Hash, HashStable)] pub struct Unevaluated<'tcx, P = Option<Promoted>> { pub def: ty::WithOptConstParam<DefId>, pub substs_: Option<SubstsRef<'tcx>>, pub promoted: P, } impl<'tcx> Unevaluated<'tcx> { #[inline] pub fn shrink(self) -> Unevaluated<'tcx, ()> { debug_assert_eq!(self.promoted, None); Unevaluated { def: self.def, substs_: self.substs_, promoted: () } } } impl<'tcx> Unevaluated<'tcx, ()> { #[inline] pub fn expand(self) -> Unevaluated<'tcx> { Unevaluated { def: self.def, substs_: self.substs_, promoted: None } } } impl<'tcx, P: Default> Unevaluated<'tcx, P> { #[inline] pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> { Unevaluated { def, substs_: Some(substs), promoted: Default::default() } } } impl<'tcx, P: Default + PartialEq + fmt::Debug> Unevaluated<'tcx, P> { #[inline] pub fn substs(self, tcx: TyCtxt<'tcx>) -> SubstsRef<'tcx> { self.substs_.unwrap_or_else(|| { // We must not use the parents default substs for promoted constants // as that can result in incorrect substs and calls the `default_anon_const_substs` // for something that might not actually be a constant. debug_assert_eq!(self.promoted, Default::default()); tcx.default_anon_const_substs(self.def.did) }) } } /// Represents a constant in Rust. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)] #[derive(Hash, HashStable)] pub enum ConstKind<'tcx> { /// A const generic parameter. Param(ty::ParamConst), /// Infer the value of the const. Infer(InferConst<'tcx>), /// Bound const variable, used only when preparing a trait query. Bound(ty::DebruijnIndex, ty::BoundVar), /// A placeholder const - universally quantified higher-ranked const. Placeholder(ty::PlaceholderConst<'tcx>), /// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other /// variants when the code is monomorphic enough for that. Unevaluated(Unevaluated<'tcx>), /// Used to hold computed value. Value(ConstValue<'tcx>), /// A placeholder for a const which could not be computed; this is /// propagated to avoid useless error messages. Error(ty::DelaySpanBugEmitted), } #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] static_assert_size!(ConstKind<'_>, 40); impl<'tcx> ConstKind<'tcx> { #[inline] pub fn try_to_value(self) -> Option<ConstValue<'tcx>> { if let ConstKind::Value(val) = self { Some(val) } else { None } } #[inline] pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> { self.try_to_value()?.try_to_scalar() } #[inline] pub fn try_to_scalar_int(self) -> Option<ScalarInt> { Some(self.try_to_value()?.try_to_scalar()?.assert_int()) } #[inline] pub fn try_to_bits(self, size: Size) -> Option<u128> { self.try_to_scalar_int()?.to_bits(size).ok() } #[inline] pub fn
(self) -> Option<bool> { self.try_to_scalar_int()?.try_into().ok() } #[inline] pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> { self.try_to_value()?.try_to_machine_usize(tcx) } } /// An inference variable for a const, for use in const generics. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)] #[derive(HashStable)] pub enum InferConst<'tcx> { /// Infer the value of the const. Var(ty::ConstVid<'tcx>), /// A fresh const variable. See `infer::freshen` for more details. Fresh(u32), } impl<'tcx> ConstKind<'tcx> { #[inline] /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the /// unevaluated constant. pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self { self.try_eval(tcx, param_env).and_then(Result::ok).map_or(self, ConstKind::Value) } #[inline] /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary /// return `None`. pub(super) fn try_eval( self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ) -> Option<Result<ConstValue<'tcx>, ErrorReported>> { if let ConstKind::Unevaluated(unevaluated) = self { use crate::mir::interpret::ErrorHandled; // HACK(eddyb) this erases lifetimes even though `const_eval_resolve` // also does later, but we want to do it before checking for // inference variables. // Note that we erase regions *before* calling `with_reveal_all_normalized`, // so that we don't try to invoke this query with // any region variables. let param_env_and = tcx .erase_regions(param_env) .with_reveal_all_normalized(tcx) .and(tcx.erase_regions(unevaluated)); // HACK(eddyb) when the query key would contain inference variables, // attempt using identity substs and `ParamEnv` instead, that will succeed // when the expression doesn't depend on any parameters. // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that // we can call `infcx.const_eval_resolve` which handles inference variables. let param_env_and = if param_env_and.needs_infer() { tcx.param_env(unevaluated.def.did).and(ty::Unevaluated { def: unevaluated.def, substs_: Some(InternalSubsts::identity_for_item(tcx, unevaluated.def.did)), promoted: unevaluated.promoted, }) } else { param_env_and }; // FIXME(eddyb) maybe the `const_eval_*` methods should take // `ty::ParamEnvAnd` instead of having them separate. let (param_env, unevaluated) = param_env_and.into_parts(); // try to resolve e.g. associated constants to their definition on an impl, and then // evaluate the const. match tcx.const_eval_resolve(param_env, unevaluated, None) { // NOTE(eddyb) `val` contains no lifetimes/types/consts, // and we use the original type, so nothing from `substs` // (which may be identity substs, see above), // can leak through `val` into the const we return. Ok(val) => Some(Ok(val)), Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None, Err(ErrorHandled::Reported(e)) => Some(Err(e)), } } else { None } } }
try_to_bool
identifier_name
kind.rs
use std::convert::TryInto; use std::fmt; use crate::mir::interpret::{AllocId, ConstValue, Scalar}; use crate::mir::Promoted; use crate::ty::subst::{InternalSubsts, SubstsRef}; use crate::ty::ParamEnv; use crate::ty::{self, TyCtxt, TypeFoldable}; use rustc_errors::ErrorReported; use rustc_hir::def_id::DefId; use rustc_macros::HashStable; use rustc_target::abi::Size; use super::ScalarInt; /// An unevaluated, potentially generic, constant. /// /// If `substs_` is `None` it means that this anon const /// still has its default substs. /// /// We check for all possible substs in `fn default_anon_const_substs`, /// so refer to that check for more info. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)] #[derive(Hash, HashStable)] pub struct Unevaluated<'tcx, P = Option<Promoted>> { pub def: ty::WithOptConstParam<DefId>, pub substs_: Option<SubstsRef<'tcx>>, pub promoted: P, } impl<'tcx> Unevaluated<'tcx> { #[inline] pub fn shrink(self) -> Unevaluated<'tcx, ()> { debug_assert_eq!(self.promoted, None); Unevaluated { def: self.def, substs_: self.substs_, promoted: () } } } impl<'tcx> Unevaluated<'tcx, ()> { #[inline] pub fn expand(self) -> Unevaluated<'tcx> { Unevaluated { def: self.def, substs_: self.substs_, promoted: None } } } impl<'tcx, P: Default> Unevaluated<'tcx, P> { #[inline] pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> { Unevaluated { def, substs_: Some(substs), promoted: Default::default() } } } impl<'tcx, P: Default + PartialEq + fmt::Debug> Unevaluated<'tcx, P> { #[inline] pub fn substs(self, tcx: TyCtxt<'tcx>) -> SubstsRef<'tcx> { self.substs_.unwrap_or_else(|| { // We must not use the parents default substs for promoted constants // as that can result in incorrect substs and calls the `default_anon_const_substs` // for something that might not actually be a constant. debug_assert_eq!(self.promoted, Default::default()); tcx.default_anon_const_substs(self.def.did) }) } } /// Represents a constant in Rust. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)] #[derive(Hash, HashStable)] pub enum ConstKind<'tcx> { /// A const generic parameter. Param(ty::ParamConst), /// Infer the value of the const. Infer(InferConst<'tcx>), /// Bound const variable, used only when preparing a trait query. Bound(ty::DebruijnIndex, ty::BoundVar), /// A placeholder const - universally quantified higher-ranked const. Placeholder(ty::PlaceholderConst<'tcx>), /// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other /// variants when the code is monomorphic enough for that. Unevaluated(Unevaluated<'tcx>), /// Used to hold computed value. Value(ConstValue<'tcx>), /// A placeholder for a const which could not be computed; this is /// propagated to avoid useless error messages. 
Error(ty::DelaySpanBugEmitted), } #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] static_assert_size!(ConstKind<'_>, 40); impl<'tcx> ConstKind<'tcx> { #[inline] pub fn try_to_value(self) -> Option<ConstValue<'tcx>> { if let ConstKind::Value(val) = self { Some(val) } else { None } } #[inline] pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> { self.try_to_value()?.try_to_scalar() } #[inline] pub fn try_to_scalar_int(self) -> Option<ScalarInt> { Some(self.try_to_value()?.try_to_scalar()?.assert_int()) } #[inline] pub fn try_to_bits(self, size: Size) -> Option<u128> { self.try_to_scalar_int()?.to_bits(size).ok() } #[inline] pub fn try_to_bool(self) -> Option<bool> { self.try_to_scalar_int()?.try_into().ok() } #[inline] pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> { self.try_to_value()?.try_to_machine_usize(tcx) } } /// An inference variable for a const, for use in const generics. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)] #[derive(HashStable)] pub enum InferConst<'tcx> { /// Infer the value of the const. Var(ty::ConstVid<'tcx>), /// A fresh const variable. See `infer::freshen` for more details. Fresh(u32), } impl<'tcx> ConstKind<'tcx> { #[inline] /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the /// unevaluated constant. pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self { self.try_eval(tcx, param_env).and_then(Result::ok).map_or(self, ConstKind::Value) } #[inline] /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary /// return `None`. pub(super) fn try_eval( self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ) -> Option<Result<ConstValue<'tcx>, ErrorReported>> { if let ConstKind::Unevaluated(unevaluated) = self { use crate::mir::interpret::ErrorHandled; // HACK(eddyb) this erases lifetimes even though `const_eval_resolve` // also does later, but we want to do it before checking for // inference variables. // Note that we erase regions *before* calling `with_reveal_all_normalized`, // so that we don't try to invoke this query with // any region variables. let param_env_and = tcx .erase_regions(param_env) .with_reveal_all_normalized(tcx) .and(tcx.erase_regions(unevaluated)); // HACK(eddyb) when the query key would contain inference variables, // attempt using identity substs and `ParamEnv` instead, that will succeed // when the expression doesn't depend on any parameters. // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that // we can call `infcx.const_eval_resolve` which handles inference variables. let param_env_and = if param_env_and.needs_infer() { tcx.param_env(unevaluated.def.did).and(ty::Unevaluated { def: unevaluated.def, substs_: Some(InternalSubsts::identity_for_item(tcx, unevaluated.def.did)), promoted: unevaluated.promoted, }) } else { param_env_and
// `ty::ParamEnvAnd` instead of having them separate. let (param_env, unevaluated) = param_env_and.into_parts(); // try to resolve e.g. associated constants to their definition on an impl, and then // evaluate the const. match tcx.const_eval_resolve(param_env, unevaluated, None) { // NOTE(eddyb) `val` contains no lifetimes/types/consts, // and we use the original type, so nothing from `substs` // (which may be identity substs, see above), // can leak through `val` into the const we return. Ok(val) => Some(Ok(val)), Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None, Err(ErrorHandled::Reported(e)) => Some(Err(e)), } } else { None } } }
}; // FIXME(eddyb) maybe the `const_eval_*` methods should take
random_line_split
kind.rs
use std::convert::TryInto; use std::fmt; use crate::mir::interpret::{AllocId, ConstValue, Scalar}; use crate::mir::Promoted; use crate::ty::subst::{InternalSubsts, SubstsRef}; use crate::ty::ParamEnv; use crate::ty::{self, TyCtxt, TypeFoldable}; use rustc_errors::ErrorReported; use rustc_hir::def_id::DefId; use rustc_macros::HashStable; use rustc_target::abi::Size; use super::ScalarInt; /// An unevaluated, potentially generic, constant. /// /// If `substs_` is `None` it means that this anon const /// still has its default substs. /// /// We check for all possible substs in `fn default_anon_const_substs`, /// so refer to that check for more info. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)] #[derive(Hash, HashStable)] pub struct Unevaluated<'tcx, P = Option<Promoted>> { pub def: ty::WithOptConstParam<DefId>, pub substs_: Option<SubstsRef<'tcx>>, pub promoted: P, } impl<'tcx> Unevaluated<'tcx> { #[inline] pub fn shrink(self) -> Unevaluated<'tcx, ()>
} impl<'tcx> Unevaluated<'tcx, ()> { #[inline] pub fn expand(self) -> Unevaluated<'tcx> { Unevaluated { def: self.def, substs_: self.substs_, promoted: None } } } impl<'tcx, P: Default> Unevaluated<'tcx, P> { #[inline] pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> { Unevaluated { def, substs_: Some(substs), promoted: Default::default() } } } impl<'tcx, P: Default + PartialEq + fmt::Debug> Unevaluated<'tcx, P> { #[inline] pub fn substs(self, tcx: TyCtxt<'tcx>) -> SubstsRef<'tcx> { self.substs_.unwrap_or_else(|| { // We must not use the parents default substs for promoted constants // as that can result in incorrect substs and calls the `default_anon_const_substs` // for something that might not actually be a constant. debug_assert_eq!(self.promoted, Default::default()); tcx.default_anon_const_substs(self.def.did) }) } } /// Represents a constant in Rust. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)] #[derive(Hash, HashStable)] pub enum ConstKind<'tcx> { /// A const generic parameter. Param(ty::ParamConst), /// Infer the value of the const. Infer(InferConst<'tcx>), /// Bound const variable, used only when preparing a trait query. Bound(ty::DebruijnIndex, ty::BoundVar), /// A placeholder const - universally quantified higher-ranked const. Placeholder(ty::PlaceholderConst<'tcx>), /// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other /// variants when the code is monomorphic enough for that. Unevaluated(Unevaluated<'tcx>), /// Used to hold computed value. Value(ConstValue<'tcx>), /// A placeholder for a const which could not be computed; this is /// propagated to avoid useless error messages. Error(ty::DelaySpanBugEmitted), } #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] static_assert_size!(ConstKind<'_>, 40); impl<'tcx> ConstKind<'tcx> { #[inline] pub fn try_to_value(self) -> Option<ConstValue<'tcx>> { if let ConstKind::Value(val) = self { Some(val) } else { None } } #[inline] pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> { self.try_to_value()?.try_to_scalar() } #[inline] pub fn try_to_scalar_int(self) -> Option<ScalarInt> { Some(self.try_to_value()?.try_to_scalar()?.assert_int()) } #[inline] pub fn try_to_bits(self, size: Size) -> Option<u128> { self.try_to_scalar_int()?.to_bits(size).ok() } #[inline] pub fn try_to_bool(self) -> Option<bool> { self.try_to_scalar_int()?.try_into().ok() } #[inline] pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> { self.try_to_value()?.try_to_machine_usize(tcx) } } /// An inference variable for a const, for use in const generics. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)] #[derive(HashStable)] pub enum InferConst<'tcx> { /// Infer the value of the const. Var(ty::ConstVid<'tcx>), /// A fresh const variable. See `infer::freshen` for more details. Fresh(u32), } impl<'tcx> ConstKind<'tcx> { #[inline] /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the /// unevaluated constant. pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self { self.try_eval(tcx, param_env).and_then(Result::ok).map_or(self, ConstKind::Value) } #[inline] /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary /// return `None`. 
pub(super) fn try_eval( self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ) -> Option<Result<ConstValue<'tcx>, ErrorReported>> { if let ConstKind::Unevaluated(unevaluated) = self { use crate::mir::interpret::ErrorHandled; // HACK(eddyb) this erases lifetimes even though `const_eval_resolve` // also does later, but we want to do it before checking for // inference variables. // Note that we erase regions *before* calling `with_reveal_all_normalized`, // so that we don't try to invoke this query with // any region variables. let param_env_and = tcx .erase_regions(param_env) .with_reveal_all_normalized(tcx) .and(tcx.erase_regions(unevaluated)); // HACK(eddyb) when the query key would contain inference variables, // attempt using identity substs and `ParamEnv` instead, that will succeed // when the expression doesn't depend on any parameters. // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that // we can call `infcx.const_eval_resolve` which handles inference variables. let param_env_and = if param_env_and.needs_infer() { tcx.param_env(unevaluated.def.did).and(ty::Unevaluated { def: unevaluated.def, substs_: Some(InternalSubsts::identity_for_item(tcx, unevaluated.def.did)), promoted: unevaluated.promoted, }) } else { param_env_and }; // FIXME(eddyb) maybe the `const_eval_*` methods should take // `ty::ParamEnvAnd` instead of having them separate. let (param_env, unevaluated) = param_env_and.into_parts(); // try to resolve e.g. associated constants to their definition on an impl, and then // evaluate the const. match tcx.const_eval_resolve(param_env, unevaluated, None) { // NOTE(eddyb) `val` contains no lifetimes/types/consts, // and we use the original type, so nothing from `substs` // (which may be identity substs, see above), // can leak through `val` into the const we return. Ok(val) => Some(Ok(val)), Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None, Err(ErrorHandled::Reported(e)) => Some(Err(e)), } } else { None } } }
{ debug_assert_eq!(self.promoted, None); Unevaluated { def: self.def, substs_: self.substs_, promoted: () } }
identifier_body
htmlmeterelement.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::HTMLMeterElementBinding::{self, HTMLMeterElementMethods}; use dom::bindings::inheritance::Castable; use dom::bindings::root::DomRoot; use dom::document::Document; use dom::htmlelement::HTMLElement; use dom::node::Node; use dom::nodelist::NodeList; use dom_struct::dom_struct; use html5ever::{LocalName, Prefix}; #[dom_struct] pub struct HTMLMeterElement { htmlelement: HTMLElement } impl HTMLMeterElement { fn new_inherited(local_name: LocalName, prefix: Option<Prefix>, document: &Document) -> HTMLMeterElement { HTMLMeterElement { htmlelement: HTMLElement::new_inherited(local_name, prefix, document) } } #[allow(unrooted_must_root)] pub fn
(local_name: LocalName, prefix: Option<Prefix>, document: &Document) -> DomRoot<HTMLMeterElement> { Node::reflect_node(box HTMLMeterElement::new_inherited(local_name, prefix, document), document, HTMLMeterElementBinding::Wrap) } } impl HTMLMeterElementMethods for HTMLMeterElement { // https://html.spec.whatwg.org/multipage/#dom-lfe-labels fn Labels(&self) -> DomRoot<NodeList> { self.upcast::<HTMLElement>().labels() } }
new
identifier_name
htmlmeterelement.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ use dom::bindings::codegen::Bindings::HTMLMeterElementBinding::{self, HTMLMeterElementMethods}; use dom::bindings::inheritance::Castable; use dom::bindings::root::DomRoot; use dom::document::Document; use dom::htmlelement::HTMLElement; use dom::node::Node; use dom::nodelist::NodeList; use dom_struct::dom_struct; use html5ever::{LocalName, Prefix}; #[dom_struct] pub struct HTMLMeterElement { htmlelement: HTMLElement } impl HTMLMeterElement { fn new_inherited(local_name: LocalName, prefix: Option<Prefix>, document: &Document) -> HTMLMeterElement { HTMLMeterElement { htmlelement: HTMLElement::new_inherited(local_name, prefix, document) }
#[allow(unrooted_must_root)] pub fn new(local_name: LocalName, prefix: Option<Prefix>, document: &Document) -> DomRoot<HTMLMeterElement> { Node::reflect_node(box HTMLMeterElement::new_inherited(local_name, prefix, document), document, HTMLMeterElementBinding::Wrap) } } impl HTMLMeterElementMethods for HTMLMeterElement { // https://html.spec.whatwg.org/multipage/#dom-lfe-labels fn Labels(&self) -> DomRoot<NodeList> { self.upcast::<HTMLElement>().labels() } }
}
random_line_split
tunnel_client.rs
#[macro_use] extern crate log; extern crate env_logger; extern crate mig; use std::sync::{Arc}; use std::io::{Read, Write}; use std::net::TcpListener; use std::str; use std::thread; use mig::quic::threaded::QuicConnection; fn main() { env_logger::init().unwrap(); let args = ::std::env::args; if args().len() != 3 || args().nth(1) == Some("--help".to_string()) { println!("Usage: tunnel_client <serverip>:<port> <targetip>:<port>"); return; } let address = args().nth(1).unwrap(); let target = args().nth(2).unwrap(); let listener = match TcpListener::bind(&*address) { Ok(listener) => { listener }, Err(e) =>
, }; info!("Server on {}: listening for connections...", address); loop { let (mut connection, _) = match listener.accept() { Ok(connection) => { connection }, Err(e) => { error!("Cannot accept a connection: {}", e); return; }, }; let mut connection_worker = connection.try_clone().unwrap(); info!("Got a connection"); let quic_own = Arc::new(QuicConnection::new(target.clone()).unwrap()); let quic_writer = quic_own.clone(); let mut stream_own = quic_own.get_stream(2); let target_writer = thread::spawn(move || { let mut quic_writer = quic_writer.get_stream(2); let mut buf = vec![0; 4096]; loop { let size = connection_worker.read(&mut buf).unwrap();; if size == 0 { break; } quic_writer.write(&mut buf[..size]).unwrap(); } }); let mut buf = vec![0; 4096]; loop { let size = stream_own.read(&mut buf).unwrap();; if size == 0 { break; } connection.write(&mut buf[..size]).unwrap(); } target_writer.join().unwrap(); } }
{ error!("Cannot bind to the address: {}", e); return; }
conditional_block
tunnel_client.rs
#[macro_use] extern crate log; extern crate env_logger; extern crate mig; use std::sync::{Arc}; use std::io::{Read, Write}; use std::net::TcpListener; use std::str;
use mig::quic::threaded::QuicConnection; fn main() { env_logger::init().unwrap(); let args = ::std::env::args; if args().len() != 3 || args().nth(1) == Some("--help".to_string()) { println!("Usage: tunnel_client <serverip>:<port> <targetip>:<port>"); return; } let address = args().nth(1).unwrap(); let target = args().nth(2).unwrap(); let listener = match TcpListener::bind(&*address) { Ok(listener) => { listener }, Err(e) => { error!("Cannot bind to the address: {}", e); return; }, }; info!("Server on {}: listening for connections...", address); loop { let (mut connection, _) = match listener.accept() { Ok(connection) => { connection }, Err(e) => { error!("Cannot accept a connection: {}", e); return; }, }; let mut connection_worker = connection.try_clone().unwrap(); info!("Got a connection"); let quic_own = Arc::new(QuicConnection::new(target.clone()).unwrap()); let quic_writer = quic_own.clone(); let mut stream_own = quic_own.get_stream(2); let target_writer = thread::spawn(move || { let mut quic_writer = quic_writer.get_stream(2); let mut buf = vec![0; 4096]; loop { let size = connection_worker.read(&mut buf).unwrap();; if size == 0 { break; } quic_writer.write(&mut buf[..size]).unwrap(); } }); let mut buf = vec![0; 4096]; loop { let size = stream_own.read(&mut buf).unwrap();; if size == 0 { break; } connection.write(&mut buf[..size]).unwrap(); } target_writer.join().unwrap(); } }
use std::thread;
random_line_split
tunnel_client.rs
#[macro_use] extern crate log; extern crate env_logger; extern crate mig; use std::sync::{Arc}; use std::io::{Read, Write}; use std::net::TcpListener; use std::str; use std::thread; use mig::quic::threaded::QuicConnection; fn
() { env_logger::init().unwrap(); let args = ::std::env::args; if args().len() != 3 || args().nth(1) == Some("--help".to_string()) { println!("Usage: tunnel_client <serverip>:<port> <targetip>:<port>"); return; } let address = args().nth(1).unwrap(); let target = args().nth(2).unwrap(); let listener = match TcpListener::bind(&*address) { Ok(listener) => { listener }, Err(e) => { error!("Cannot bind to the address: {}", e); return; }, }; info!("Server on {}: listening for connections...", address); loop { let (mut connection, _) = match listener.accept() { Ok(connection) => { connection }, Err(e) => { error!("Cannot accept a connection: {}", e); return; }, }; let mut connection_worker = connection.try_clone().unwrap(); info!("Got a connection"); let quic_own = Arc::new(QuicConnection::new(target.clone()).unwrap()); let quic_writer = quic_own.clone(); let mut stream_own = quic_own.get_stream(2); let target_writer = thread::spawn(move || { let mut quic_writer = quic_writer.get_stream(2); let mut buf = vec![0; 4096]; loop { let size = connection_worker.read(&mut buf).unwrap();; if size == 0 { break; } quic_writer.write(&mut buf[..size]).unwrap(); } }); let mut buf = vec![0; 4096]; loop { let size = stream_own.read(&mut buf).unwrap();; if size == 0 { break; } connection.write(&mut buf[..size]).unwrap(); } target_writer.join().unwrap(); } }
main
identifier_name
tunnel_client.rs
#[macro_use] extern crate log; extern crate env_logger; extern crate mig; use std::sync::{Arc}; use std::io::{Read, Write}; use std::net::TcpListener; use std::str; use std::thread; use mig::quic::threaded::QuicConnection; fn main()
}; info!("Server on {}: listening for connections...", address); loop { let (mut connection, _) = match listener.accept() { Ok(connection) => { connection }, Err(e) => { error!("Cannot accept a connection: {}", e); return; }, }; let mut connection_worker = connection.try_clone().unwrap(); info!("Got a connection"); let quic_own = Arc::new(QuicConnection::new(target.clone()).unwrap()); let quic_writer = quic_own.clone(); let mut stream_own = quic_own.get_stream(2); let target_writer = thread::spawn(move || { let mut quic_writer = quic_writer.get_stream(2); let mut buf = vec![0; 4096]; loop { let size = connection_worker.read(&mut buf).unwrap();; if size == 0 { break; } quic_writer.write(&mut buf[..size]).unwrap(); } }); let mut buf = vec![0; 4096]; loop { let size = stream_own.read(&mut buf).unwrap();; if size == 0 { break; } connection.write(&mut buf[..size]).unwrap(); } target_writer.join().unwrap(); } }
{ env_logger::init().unwrap(); let args = ::std::env::args; if args().len() != 3 || args().nth(1) == Some("--help".to_string()) { println!("Usage: tunnel_client <serverip>:<port> <targetip>:<port>"); return; } let address = args().nth(1).unwrap(); let target = args().nth(2).unwrap(); let listener = match TcpListener::bind(&*address) { Ok(listener) => { listener }, Err(e) => { error!("Cannot bind to the address: {}", e); return; },
identifier_body
task-comm-14.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast pub fn main() { let po = comm::PortSet(); // Spawn 10 tasks each sending us back one int. let mut i = 10;
let (p, ch) = comm::stream(); po.add(p); task::spawn({let i = i; || child(i, &ch)}); i = i - 1; } // Spawned tasks are likely killed before they get a chance to send // anything back, so we deadlock here. i = 10; while (i > 0) { debug!(i); po.recv(); i = i - 1; } debug!("main thread exiting"); } fn child(x: int, ch: &comm::Chan<int>) { debug!(x); ch.send(x); }
while (i > 0) { debug!(i);
random_line_split
task-comm-14.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast pub fn main() { let po = comm::PortSet(); // Spawn 10 tasks each sending us back one int. let mut i = 10; while (i > 0) { debug!(i); let (p, ch) = comm::stream(); po.add(p); task::spawn({let i = i; || child(i, &ch)}); i = i - 1; } // Spawned tasks are likely killed before they get a chance to send // anything back, so we deadlock here. i = 10; while (i > 0) { debug!(i); po.recv(); i = i - 1; } debug!("main thread exiting"); } fn
(x: int, ch: &comm::Chan<int>) { debug!(x); ch.send(x); }
child
identifier_name
task-comm-14.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast pub fn main()
i = i - 1; } debug!("main thread exiting"); } fn child(x: int, ch: &comm::Chan<int>) { debug!(x); ch.send(x); }
{ let po = comm::PortSet(); // Spawn 10 tasks each sending us back one int. let mut i = 10; while (i > 0) { debug!(i); let (p, ch) = comm::stream(); po.add(p); task::spawn({let i = i; || child(i, &ch)}); i = i - 1; } // Spawned tasks are likely killed before they get a chance to send // anything back, so we deadlock here. i = 10; while (i > 0) { debug!(i); po.recv();
identifier_body
camera_calibration.rs
use {Cmcs, Point, Result}; use std::path::Path; /// A camera calibration. /// /// Only opencv camera calibrations are supported at this time. #[derive(Clone, Debug, PartialEq, Serialize)] #[allow(missing_docs)] pub struct CameraCalibration { /// The name of the calibration. pub name: String, pub cx: f64, pub cy: f64, pub fx: f64, pub fy: f64, pub k1: f64, pub k2: f64, pub k3: f64, pub k4: f64, pub p1: f64, pub p2: f64, pub tan_max_horz: f64, pub tan_max_vert: f64, pub tan_min_horz: f64, pub tan_min_vert: f64, pub width: usize, pub height: usize, } impl CameraCalibration { /// Retrieves all camera calibrations from a project. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibrations = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap(); /// assert_eq!(1, camera_calibrations.len()); /// ``` pub fn from_project_path<P: AsRef<Path>>(path: P) -> Result<Vec<CameraCalibration>> { use Project; let project = Project::from_path(path)?; Ok(project.camera_calibrations.values().cloned().collect()) } /// Converts a point in the camera's coordinate system to pixel values. /// /// The pixel values are floats, in case someone later wants to do more than a direct lookup. /// /// Returns None if: /// /// - The point is behind the camera (negative z). /// - The point is ouside the angle extents, as defined by `tan_{min|max}_{vert|horz}`. /// - The calculated pixel values are outside of the width/height of the image. /// /// These maths are taken from the `project.dtd` file in every RiSCAN Pro project. /// /// # Examples /// /// ``` /// use riscan_pro::{CameraCalibration, Point}; /// let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") /// .unwrap() /// .pop() /// .unwrap(); /// let cmcs = Point::cmcs(1.312, -0.641, 3.019); /// let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); /// ``` pub fn cmcs_to_ics(&self, point: &Point<Cmcs>) -> Option<(f64, f64)> { use nalgebra::Matrix3; use std::ops::Deref; if point.is_behind_camera() { return None; } let tan_horz = point.tan_horz(); let tan_vert = point.tan_vert(); if tan_horz < self.tan_min_horz || tan_horz > self.tan_max_horz || tan_vert < self.tan_min_vert || tan_vert > self.tan_max_vert { return None; } let a = Matrix3::new(self.fx, 0., self.cx, 0., self.fy, self.cy, 0., 0., 1.); let ud_prime = a * point.deref(); let u = ud_prime[0] / ud_prime[2]; let v = ud_prime[1] / ud_prime[2]; let x = (u - self.cx) / self.fx; let y = (v - self.cy) / self.fy; let r = (x.powi(2) + y.powi(2)).sqrt().atan().powi(2).sqrt(); let r_term = self.k1 * r.powi(2) + self.k2 * r.powi(4) + self.k3 * r.powi(6) + self.k4 * r.powi(8); let u = u + x * self.fx * r_term + 2. * self.fx * x * y * self.p1 + self.p2 * self.fx * (r.powi(2) + 2. * x.powi(2)); let v = v + y * self.fy * r_term + 2. * self.fy * x * y * self.p2 + self.p1 * self.fy * (r.powi(2) + 2. * y.powi(2)); if self.is_valid_pixel(u, v) { Some((u, v)) } else { None } } /// Returns true if this is a valid pixel value. 
/// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibration = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap() /// .pop() /// .unwrap(); /// // The camera calibration is 1024x768 /// assert!(camera_calibration.is_valid_pixel(0., 0.)); /// assert!(!camera_calibration.is_valid_pixel(1024., 0.)); /// assert!(!camera_calibration.is_valid_pixel(0., 768.)); /// ``` pub fn is_valid_pixel<T: Into<f64>>(&self, u: T, v: T) -> bool { let u = u.into(); let v = v.into(); u >= 0. && v >= 0. && u < self.width as f64 && v < self.height as f64 } } #[cfg(test)] mod tests { use super::*; #[test] fn cmcs_to_ics() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); let cmcs = Point::cmcs(1.312, -0.641, 3.019); let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); assert_relative_eq!(882.668, u, epsilon = 1e-3); assert_relative_eq!(228.443, v, epsilon = 1e-3); // Point is *way* low. let cmcs = Point::cmcs(-100., -0.641, 3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); // Point is behind camera. let cmcs = Point::cmcs(1.312, -0.641, -3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); } #[test] fn
() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); assert!(camera_calibration.is_valid_pixel(0, 0)); assert!(!camera_calibration.is_valid_pixel(-1, 0)); assert!(!camera_calibration.is_valid_pixel(0, -1)); assert!(!camera_calibration.is_valid_pixel(1024, 0)); assert!(!camera_calibration.is_valid_pixel(0, 768)); assert!(camera_calibration.is_valid_pixel(1023.9, 0.)); assert!(camera_calibration.is_valid_pixel(0., 767.9)); } }
is_valid_pixel
identifier_name
camera_calibration.rs
use {Cmcs, Point, Result}; use std::path::Path; /// A camera calibration. /// /// Only opencv camera calibrations are supported at this time. #[derive(Clone, Debug, PartialEq, Serialize)] #[allow(missing_docs)] pub struct CameraCalibration { /// The name of the calibration. pub name: String, pub cx: f64, pub cy: f64, pub fx: f64, pub fy: f64, pub k1: f64, pub k2: f64, pub k3: f64, pub k4: f64, pub p1: f64, pub p2: f64, pub tan_max_horz: f64, pub tan_max_vert: f64, pub tan_min_horz: f64, pub tan_min_vert: f64, pub width: usize, pub height: usize, } impl CameraCalibration { /// Retrieves all camera calibrations from a project. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibrations = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap(); /// assert_eq!(1, camera_calibrations.len()); /// ``` pub fn from_project_path<P: AsRef<Path>>(path: P) -> Result<Vec<CameraCalibration>> { use Project; let project = Project::from_path(path)?; Ok(project.camera_calibrations.values().cloned().collect()) } /// Converts a point in the camera's coordinate system to pixel values. /// /// The pixel values are floats, in case someone later wants to do more than a direct lookup. /// /// Returns None if: /// /// - The point is behind the camera (negative z). /// - The point is ouside the angle extents, as defined by `tan_{min|max}_{vert|horz}`. /// - The calculated pixel values are outside of the width/height of the image. /// /// These maths are taken from the `project.dtd` file in every RiSCAN Pro project. /// /// # Examples /// /// ``` /// use riscan_pro::{CameraCalibration, Point}; /// let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") /// .unwrap() /// .pop() /// .unwrap(); /// let cmcs = Point::cmcs(1.312, -0.641, 3.019); /// let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); /// ``` pub fn cmcs_to_ics(&self, point: &Point<Cmcs>) -> Option<(f64, f64)> { use nalgebra::Matrix3; use std::ops::Deref; if point.is_behind_camera() { return None; } let tan_horz = point.tan_horz(); let tan_vert = point.tan_vert(); if tan_horz < self.tan_min_horz || tan_horz > self.tan_max_horz || tan_vert < self.tan_min_vert || tan_vert > self.tan_max_vert
let a = Matrix3::new(self.fx, 0., self.cx, 0., self.fy, self.cy, 0., 0., 1.); let ud_prime = a * point.deref(); let u = ud_prime[0] / ud_prime[2]; let v = ud_prime[1] / ud_prime[2]; let x = (u - self.cx) / self.fx; let y = (v - self.cy) / self.fy; let r = (x.powi(2) + y.powi(2)).sqrt().atan().powi(2).sqrt(); let r_term = self.k1 * r.powi(2) + self.k2 * r.powi(4) + self.k3 * r.powi(6) + self.k4 * r.powi(8); let u = u + x * self.fx * r_term + 2. * self.fx * x * y * self.p1 + self.p2 * self.fx * (r.powi(2) + 2. * x.powi(2)); let v = v + y * self.fy * r_term + 2. * self.fy * x * y * self.p2 + self.p1 * self.fy * (r.powi(2) + 2. * y.powi(2)); if self.is_valid_pixel(u, v) { Some((u, v)) } else { None } } /// Returns true if this is a valid pixel value. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibration = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap() /// .pop() /// .unwrap(); /// // The camera calibration is 1024x768 /// assert!(camera_calibration.is_valid_pixel(0., 0.)); /// assert!(!camera_calibration.is_valid_pixel(1024., 0.)); /// assert!(!camera_calibration.is_valid_pixel(0., 768.)); /// ``` pub fn is_valid_pixel<T: Into<f64>>(&self, u: T, v: T) -> bool { let u = u.into(); let v = v.into(); u >= 0. && v >= 0. && u < self.width as f64 && v < self.height as f64 } } #[cfg(test)] mod tests { use super::*; #[test] fn cmcs_to_ics() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); let cmcs = Point::cmcs(1.312, -0.641, 3.019); let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); assert_relative_eq!(882.668, u, epsilon = 1e-3); assert_relative_eq!(228.443, v, epsilon = 1e-3); // Point is *way* low. let cmcs = Point::cmcs(-100., -0.641, 3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); // Point is behind camera. let cmcs = Point::cmcs(1.312, -0.641, -3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); } #[test] fn is_valid_pixel() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); assert!(camera_calibration.is_valid_pixel(0, 0)); assert!(!camera_calibration.is_valid_pixel(-1, 0)); assert!(!camera_calibration.is_valid_pixel(0, -1)); assert!(!camera_calibration.is_valid_pixel(1024, 0)); assert!(!camera_calibration.is_valid_pixel(0, 768)); assert!(camera_calibration.is_valid_pixel(1023.9, 0.)); assert!(camera_calibration.is_valid_pixel(0., 767.9)); } }
{ return None; }
conditional_block
camera_calibration.rs
use {Cmcs, Point, Result}; use std::path::Path; /// A camera calibration. /// /// Only opencv camera calibrations are supported at this time. #[derive(Clone, Debug, PartialEq, Serialize)] #[allow(missing_docs)] pub struct CameraCalibration { /// The name of the calibration. pub name: String, pub cx: f64, pub cy: f64, pub fx: f64, pub fy: f64, pub k1: f64, pub k2: f64, pub k3: f64, pub k4: f64, pub p1: f64, pub p2: f64, pub tan_max_horz: f64, pub tan_max_vert: f64, pub tan_min_horz: f64, pub tan_min_vert: f64, pub width: usize, pub height: usize, } impl CameraCalibration { /// Retrieves all camera calibrations from a project. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibrations = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap(); /// assert_eq!(1, camera_calibrations.len()); /// ``` pub fn from_project_path<P: AsRef<Path>>(path: P) -> Result<Vec<CameraCalibration>> { use Project; let project = Project::from_path(path)?; Ok(project.camera_calibrations.values().cloned().collect()) } /// Converts a point in the camera's coordinate system to pixel values. /// /// The pixel values are floats, in case someone later wants to do more than a direct lookup. /// /// Returns None if: /// /// - The point is behind the camera (negative z). /// - The point is ouside the angle extents, as defined by `tan_{min|max}_{vert|horz}`. /// - The calculated pixel values are outside of the width/height of the image. /// /// These maths are taken from the `project.dtd` file in every RiSCAN Pro project. /// /// # Examples /// /// ``` /// use riscan_pro::{CameraCalibration, Point}; /// let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") /// .unwrap() /// .pop() /// .unwrap(); /// let cmcs = Point::cmcs(1.312, -0.641, 3.019); /// let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); /// ``` pub fn cmcs_to_ics(&self, point: &Point<Cmcs>) -> Option<(f64, f64)> { use nalgebra::Matrix3; use std::ops::Deref; if point.is_behind_camera() { return None; } let tan_horz = point.tan_horz(); let tan_vert = point.tan_vert(); if tan_horz < self.tan_min_horz || tan_horz > self.tan_max_horz || tan_vert < self.tan_min_vert || tan_vert > self.tan_max_vert { return None; } let a = Matrix3::new(self.fx, 0., self.cx, 0., self.fy, self.cy, 0., 0., 1.); let ud_prime = a * point.deref(); let u = ud_prime[0] / ud_prime[2]; let v = ud_prime[1] / ud_prime[2]; let x = (u - self.cx) / self.fx; let y = (v - self.cy) / self.fy; let r = (x.powi(2) + y.powi(2)).sqrt().atan().powi(2).sqrt(); let r_term = self.k1 * r.powi(2) + self.k2 * r.powi(4) + self.k3 * r.powi(6) + self.k4 * r.powi(8); let u = u + x * self.fx * r_term + 2. * self.fx * x * y * self.p1 + self.p2 * self.fx * (r.powi(2) + 2. * x.powi(2)); let v = v + y * self.fy * r_term + 2. * self.fy * x * y * self.p2 + self.p1 * self.fy * (r.powi(2) + 2. * y.powi(2)); if self.is_valid_pixel(u, v) { Some((u, v)) } else { None } } /// Returns true if this is a valid pixel value. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibration = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap() /// .pop() /// .unwrap(); /// // The camera calibration is 1024x768 /// assert!(camera_calibration.is_valid_pixel(0., 0.)); /// assert!(!camera_calibration.is_valid_pixel(1024., 0.)); /// assert!(!camera_calibration.is_valid_pixel(0., 768.)); /// ``` pub fn is_valid_pixel<T: Into<f64>>(&self, u: T, v: T) -> bool
} #[cfg(test)] mod tests { use super::*; #[test] fn cmcs_to_ics() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); let cmcs = Point::cmcs(1.312, -0.641, 3.019); let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); assert_relative_eq!(882.668, u, epsilon = 1e-3); assert_relative_eq!(228.443, v, epsilon = 1e-3); // Point is *way* low. let cmcs = Point::cmcs(-100., -0.641, 3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); // Point is behind camera. let cmcs = Point::cmcs(1.312, -0.641, -3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); } #[test] fn is_valid_pixel() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); assert!(camera_calibration.is_valid_pixel(0, 0)); assert!(!camera_calibration.is_valid_pixel(-1, 0)); assert!(!camera_calibration.is_valid_pixel(0, -1)); assert!(!camera_calibration.is_valid_pixel(1024, 0)); assert!(!camera_calibration.is_valid_pixel(0, 768)); assert!(camera_calibration.is_valid_pixel(1023.9, 0.)); assert!(camera_calibration.is_valid_pixel(0., 767.9)); } }
{ let u = u.into(); let v = v.into(); u >= 0. && v >= 0. && u < self.width as f64 && v < self.height as f64 }
identifier_body
camera_calibration.rs
use {Cmcs, Point, Result}; use std::path::Path; /// A camera calibration. /// /// Only opencv camera calibrations are supported at this time. #[derive(Clone, Debug, PartialEq, Serialize)] #[allow(missing_docs)] pub struct CameraCalibration { /// The name of the calibration. pub name: String, pub cx: f64, pub cy: f64, pub fx: f64, pub fy: f64, pub k1: f64, pub k2: f64, pub k3: f64, pub k4: f64, pub p1: f64, pub p2: f64, pub tan_max_horz: f64,
} impl CameraCalibration { /// Retrieves all camera calibrations from a project. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibrations = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap(); /// assert_eq!(1, camera_calibrations.len()); /// ``` pub fn from_project_path<P: AsRef<Path>>(path: P) -> Result<Vec<CameraCalibration>> { use Project; let project = Project::from_path(path)?; Ok(project.camera_calibrations.values().cloned().collect()) } /// Converts a point in the camera's coordinate system to pixel values. /// /// The pixel values are floats, in case someone later wants to do more than a direct lookup. /// /// Returns None if: /// /// - The point is behind the camera (negative z). /// - The point is ouside the angle extents, as defined by `tan_{min|max}_{vert|horz}`. /// - The calculated pixel values are outside of the width/height of the image. /// /// These maths are taken from the `project.dtd` file in every RiSCAN Pro project. /// /// # Examples /// /// ``` /// use riscan_pro::{CameraCalibration, Point}; /// let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") /// .unwrap() /// .pop() /// .unwrap(); /// let cmcs = Point::cmcs(1.312, -0.641, 3.019); /// let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); /// ``` pub fn cmcs_to_ics(&self, point: &Point<Cmcs>) -> Option<(f64, f64)> { use nalgebra::Matrix3; use std::ops::Deref; if point.is_behind_camera() { return None; } let tan_horz = point.tan_horz(); let tan_vert = point.tan_vert(); if tan_horz < self.tan_min_horz || tan_horz > self.tan_max_horz || tan_vert < self.tan_min_vert || tan_vert > self.tan_max_vert { return None; } let a = Matrix3::new(self.fx, 0., self.cx, 0., self.fy, self.cy, 0., 0., 1.); let ud_prime = a * point.deref(); let u = ud_prime[0] / ud_prime[2]; let v = ud_prime[1] / ud_prime[2]; let x = (u - self.cx) / self.fx; let y = (v - self.cy) / self.fy; let r = (x.powi(2) + y.powi(2)).sqrt().atan().powi(2).sqrt(); let r_term = self.k1 * r.powi(2) + self.k2 * r.powi(4) + self.k3 * r.powi(6) + self.k4 * r.powi(8); let u = u + x * self.fx * r_term + 2. * self.fx * x * y * self.p1 + self.p2 * self.fx * (r.powi(2) + 2. * x.powi(2)); let v = v + y * self.fy * r_term + 2. * self.fy * x * y * self.p2 + self.p1 * self.fy * (r.powi(2) + 2. * y.powi(2)); if self.is_valid_pixel(u, v) { Some((u, v)) } else { None } } /// Returns true if this is a valid pixel value. /// /// # Examples /// /// ``` /// use riscan_pro::CameraCalibration; /// let camera_calibration = CameraCalibration::from_project_path("data/project.RiSCAN") /// .unwrap() /// .pop() /// .unwrap(); /// // The camera calibration is 1024x768 /// assert!(camera_calibration.is_valid_pixel(0., 0.)); /// assert!(!camera_calibration.is_valid_pixel(1024., 0.)); /// assert!(!camera_calibration.is_valid_pixel(0., 768.)); /// ``` pub fn is_valid_pixel<T: Into<f64>>(&self, u: T, v: T) -> bool { let u = u.into(); let v = v.into(); u >= 0. && v >= 0. && u < self.width as f64 && v < self.height as f64 } } #[cfg(test)] mod tests { use super::*; #[test] fn cmcs_to_ics() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); let cmcs = Point::cmcs(1.312, -0.641, 3.019); let (u, v) = camera_calibration.cmcs_to_ics(&cmcs).unwrap(); assert_relative_eq!(882.668, u, epsilon = 1e-3); assert_relative_eq!(228.443, v, epsilon = 1e-3); // Point is *way* low. 
let cmcs = Point::cmcs(-100., -0.641, 3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); // Point is behind camera. let cmcs = Point::cmcs(1.312, -0.641, -3.019); assert_eq!(None, camera_calibration.cmcs_to_ics(&cmcs)); } #[test] fn is_valid_pixel() { let camera_calibration = CameraCalibration::from_project_path("data/southpole.rsp") .unwrap() .pop() .unwrap(); assert!(camera_calibration.is_valid_pixel(0, 0)); assert!(!camera_calibration.is_valid_pixel(-1, 0)); assert!(!camera_calibration.is_valid_pixel(0, -1)); assert!(!camera_calibration.is_valid_pixel(1024, 0)); assert!(!camera_calibration.is_valid_pixel(0, 768)); assert!(camera_calibration.is_valid_pixel(1023.9, 0.)); assert!(camera_calibration.is_valid_pixel(0., 767.9)); } }
pub tan_max_vert: f64, pub tan_min_horz: f64, pub tan_min_vert: f64, pub width: usize, pub height: usize,
random_line_split
shared_vec_slice.rs
use std::sync::Arc; #[derive(Clone)] pub struct SharedVecSlice { pub data: Arc<Vec<u8>>, pub start: usize, pub len: usize, } impl SharedVecSlice { pub fn empty() -> SharedVecSlice { SharedVecSlice::new(Arc::new(Vec::new())) } pub fn new(data: Arc<Vec<u8>>) -> SharedVecSlice { let data_len = data.len(); SharedVecSlice { data: data, start: 0, len: data_len, } } pub fn as_slice(&self) -> &[u8] { &self.data[self.start..self.start + self.len]
pub fn slice(&self, from_offset: usize, to_offset: usize) -> SharedVecSlice { SharedVecSlice { data: self.data.clone(), start: self.start + from_offset, len: to_offset - from_offset, } } }
}
random_line_split
shared_vec_slice.rs
use std::sync::Arc; #[derive(Clone)] pub struct SharedVecSlice { pub data: Arc<Vec<u8>>, pub start: usize, pub len: usize, } impl SharedVecSlice { pub fn empty() -> SharedVecSlice { SharedVecSlice::new(Arc::new(Vec::new())) } pub fn new(data: Arc<Vec<u8>>) -> SharedVecSlice { let data_len = data.len(); SharedVecSlice { data: data, start: 0, len: data_len, } } pub fn as_slice(&self) -> &[u8]
pub fn slice(&self, from_offset: usize, to_offset: usize) -> SharedVecSlice { SharedVecSlice { data: self.data.clone(), start: self.start + from_offset, len: to_offset - from_offset, } } }
{ &self.data[self.start..self.start + self.len] }
identifier_body
shared_vec_slice.rs
use std::sync::Arc; #[derive(Clone)] pub struct SharedVecSlice { pub data: Arc<Vec<u8>>, pub start: usize, pub len: usize, } impl SharedVecSlice { pub fn empty() -> SharedVecSlice { SharedVecSlice::new(Arc::new(Vec::new())) } pub fn new(data: Arc<Vec<u8>>) -> SharedVecSlice { let data_len = data.len(); SharedVecSlice { data: data, start: 0, len: data_len, } } pub fn
(&self) -> &[u8] { &self.data[self.start..self.start + self.len] } pub fn slice(&self, from_offset: usize, to_offset: usize) -> SharedVecSlice { SharedVecSlice { data: self.data.clone(), start: self.start + from_offset, len: to_offset - from_offset, } } }
as_slice
identifier_name
lib.rs
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The Rust parser and macro expander. //! //! # Note //! //! This API is completely unstable and subject to change. #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://docs.rs/syntex_syntax/0.59.1", test(attr(deny(warnings))))] #![deny(warnings)] #[macro_use] extern crate log; #[macro_use] extern crate bitflags; pub extern crate syntex_errors as errors; extern crate syntex_pos as syntax_pos; mod rustc_data_structures; extern crate extprim; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_json; extern crate unicode_xid; // A variant of 'try!' that panics on an Err. This is used as a crutch on the // way towards a non-panic!-prone parser. It should be used for fatal parsing // errors; eventually we plan to convert all code using panictry to just use // normal try. // Exported for syntax_ext, not meant for general use. #[macro_export] macro_rules! panictry { ($e:expr) => ({ use std::result::Result::{Ok, Err}; use errors::FatalError; match $e { Ok(e) => e, Err(mut e) => { e.emit(); panic!(FatalError); } } }) } #[macro_export] macro_rules! unwrap_or { ($opt:expr, $default:expr) => { match $opt { Some(x) => x, None => $default, } } } #[macro_use] pub mod diagnostics { #[macro_use] pub mod macros; pub mod plugin; pub mod metadata; } // NB: This module needs to be declared first so diagnostics are // registered before they are used. pub mod diagnostic_list; pub mod util { pub mod lev_distance; pub mod node_count; pub mod parser; #[cfg(test)] pub mod parser_testing; pub mod small_vector; pub mod move_map; mod thin_vec; pub use self::thin_vec::ThinVec; mod rc_slice; pub use self::rc_slice::RcSlice; } pub mod json; pub mod syntax { pub use ext; pub use parse; pub use ast; } pub mod abi; pub mod ast; pub mod attr; pub mod codemap; #[macro_use] pub mod config; pub mod entry; pub mod feature_gate; pub mod fold; pub mod parse; pub mod ptr; pub mod show_span; pub mod std_inject; pub mod str; pub use syntax_pos::symbol; pub mod test; pub mod tokenstream; pub mod visit; pub mod print { pub mod pp; pub mod pprust; } pub mod ext { pub use syntax_pos::hygiene; pub mod base; pub mod build; pub mod derive; pub mod expand; pub mod placeholders; pub mod quote;
pub mod macro_parser; pub mod macro_rules; pub mod quoted; } } #[cfg(test)] mod test_snippet; // __build_diagnostic_array! { libsyntax, DIAGNOSTICS }
pub mod source_util; pub mod tt { pub mod transcribe;
random_line_split
url.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Common handling for the specified value CSS url() values. use cssparser::Parser; use gecko_bindings::bindings; use gecko_bindings::structs::root::mozilla::css::URLValue; use gecko_bindings::structs::root::mozilla::CORSMode; use gecko_bindings::structs::root::nsStyleImageRequest; use gecko_bindings::sugar::ownership::{FFIArcHelpers, HasArcFFI}; use gecko_bindings::sugar::refptr::RefPtr; use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use nsstring::nsCString; use parser::{Parse, ParserContext}; use servo_arc::Arc; use std::fmt::{self, Write}; use style_traits::{CssWriter, ParseError, ToCss}; use stylesheets::UrlExtraData; use values::computed::{Context, ToComputedValue}; /// A CSS url() value for gecko. #[css(function = "url")] #[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)] pub struct CssUrl(pub Arc<CssUrlData>); /// Data shared between CssUrls. #[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)] pub struct CssUrlData { /// The URL in unresolved string form. serialization: String, /// The URL extra data. #[css(skip)] pub extra_data: UrlExtraData, } impl CssUrl { /// Parse a URL from a string value that is a valid CSS token for a URL. pub fn
(url: String, context: &ParserContext) -> Self { CssUrl(Arc::new(CssUrlData { serialization: url, extra_data: context.url_data.clone(), })) } /// Returns true if the URL is definitely invalid. We don't eagerly resolve /// URLs in gecko, so we just return false here. /// use its |resolved| status. pub fn is_invalid(&self) -> bool { false } /// Returns true if this URL looks like a fragment. /// See https://drafts.csswg.org/css-values/#local-urls #[inline] pub fn is_fragment(&self) -> bool { self.0.is_fragment() } /// Return the unresolved url as string, or the empty string if it's /// invalid. #[inline] pub fn as_str(&self) -> &str { self.0.as_str() } } impl CssUrlData { /// Returns true if this URL looks like a fragment. /// See https://drafts.csswg.org/css-values/#local-urls pub fn is_fragment(&self) -> bool { self.as_str().chars().next().map_or(false, |c| c == '#') } /// Return the unresolved url as string, or the empty string if it's /// invalid. pub fn as_str(&self) -> &str { &*self.serialization } } impl Parse for CssUrl { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { let url = input.expect_url()?; Ok(Self::parse_from_string(url.as_ref().to_owned(), context)) } } impl Eq for CssUrl {} impl MallocSizeOf for CssUrl { fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { // XXX: measure `serialization` once bug 1397971 lands // We ignore `extra_data`, because RefPtr is tricky, and there aren't // many of them in practise (sharing is common). 0 } } /// A specified non-image `url()` value. #[derive(Clone, Debug, SpecifiedValueInfo, ToCss)] pub struct SpecifiedUrl { /// The specified url value. pub url: CssUrl, /// Gecko's URLValue so that we can reuse it while rematching a /// property with this specified value. #[css(skip)] pub url_value: RefPtr<URLValue>, } impl SpecifiedUrl { /// Parse a URL from a string value. pub fn parse_from_string(url: String, context: &ParserContext) -> Self { Self::from_css_url(CssUrl::parse_from_string(url, context)) } fn from_css_url_with_cors(url: CssUrl, cors: CORSMode) -> Self { let url_value = unsafe { let ptr = bindings::Gecko_URLValue_Create(url.0.clone().into_strong(), cors); // We do not expect Gecko_URLValue_Create returns null. debug_assert!(!ptr.is_null()); RefPtr::from_addrefed(ptr) }; Self { url, url_value } } fn from_css_url(url: CssUrl) -> Self { use gecko_bindings::structs::root::mozilla::CORSMode_CORS_NONE; Self::from_css_url_with_cors(url, CORSMode_CORS_NONE) } fn from_css_url_with_cors_anonymous(url: CssUrl) -> Self { use gecko_bindings::structs::root::mozilla::CORSMode_CORS_ANONYMOUS; Self::from_css_url_with_cors(url, CORSMode_CORS_ANONYMOUS) } } impl Parse for SpecifiedUrl { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { CssUrl::parse(context, input).map(Self::from_css_url) } } impl PartialEq for SpecifiedUrl { fn eq(&self, other: &Self) -> bool { self.url.eq(&other.url) } } impl Eq for SpecifiedUrl {} impl MallocSizeOf for SpecifiedUrl { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.url.size_of(ops); // Although this is a RefPtr, this is the primary reference because // SpecifiedUrl is responsible for creating the url_value. So we // measure unconditionally here. 
n += unsafe { bindings::Gecko_URLValue_SizeOfIncludingThis(self.url_value.get()) }; n } } impl ToComputedValue for SpecifiedUrl { type ComputedValue = ComputedUrl; #[inline] fn to_computed_value(&self, _: &Context) -> Self::ComputedValue { ComputedUrl(self.clone()) } #[inline] fn from_computed_value(computed: &Self::ComputedValue) -> Self { computed.0.clone() } } /// A specified image `url()` value. #[derive(Clone, Debug, Eq, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToCss)] pub struct SpecifiedImageUrl(pub SpecifiedUrl); impl SpecifiedImageUrl { /// Parse a URL from a string value that is a valid CSS token for a URL. pub fn parse_from_string(url: String, context: &ParserContext) -> Self { SpecifiedImageUrl(SpecifiedUrl::parse_from_string(url, context)) } /// Provides an alternate method for parsing that associates the URL /// with anonymous CORS headers. pub fn parse_with_cors_anonymous<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { CssUrl::parse(context, input) .map(SpecifiedUrl::from_css_url_with_cors_anonymous) .map(SpecifiedImageUrl) } } impl Parse for SpecifiedImageUrl { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { SpecifiedUrl::parse(context, input).map(SpecifiedImageUrl) } } impl ToComputedValue for SpecifiedImageUrl { type ComputedValue = ComputedImageUrl; #[inline] fn to_computed_value(&self, _: &Context) -> Self::ComputedValue { ComputedImageUrl(self.clone()) } #[inline] fn from_computed_value(computed: &Self::ComputedValue) -> Self { computed.0.clone() } } fn serialize_computed_url<W>( url_value: &URLValue, dest: &mut CssWriter<W>, get_url: unsafe extern "C" fn(*const URLValue, *mut nsCString), ) -> fmt::Result where W: Write, { dest.write_str("url(")?; unsafe { let mut string = nsCString::new(); get_url(url_value, &mut string); string.as_str_unchecked().to_css(dest)?; } dest.write_char(')') } /// The computed value of a CSS non-image `url()`. /// /// The only difference between specified and computed URLs is the /// serialization. #[derive(Clone, Debug, Eq, MallocSizeOf, PartialEq)] pub struct ComputedUrl(pub SpecifiedUrl); impl ToCss for ComputedUrl { fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: Write, { serialize_computed_url(&self.0.url_value, dest, bindings::Gecko_GetComputedURLSpec) } } impl ComputedUrl { /// Convert from RefPtr<URLValue> to ComputedUrl. pub unsafe fn from_url_value(url_value: RefPtr<URLValue>) -> Self { let css_url = &*url_value.mCssUrl.mRawPtr; let url = CssUrl(CssUrlData::as_arc(&css_url).clone_arc()); ComputedUrl(SpecifiedUrl { url, url_value }) } /// Get a raw pointer to the URLValue held by this ComputedUrl, for FFI. pub fn url_value_ptr(&self) -> *mut URLValue { self.0.url_value.get() } } /// The computed value of a CSS image `url()`. #[derive(Clone, Debug, Eq, MallocSizeOf, PartialEq)] pub struct ComputedImageUrl(pub SpecifiedImageUrl); impl ToCss for ComputedImageUrl { fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: Write, { serialize_computed_url( &(self.0).0.url_value, dest, bindings::Gecko_GetComputedImageURLSpec, ) } } impl ComputedImageUrl { /// Convert from nsStyleImageReques to ComputedImageUrl. 
pub unsafe fn from_image_request(image_request: &nsStyleImageRequest) -> Self { let url_value = image_request.mImageValue.to_safe(); let css_url = &*url_value.mCssUrl.mRawPtr; let url = CssUrl(CssUrlData::as_arc(&css_url).clone_arc()); ComputedImageUrl(SpecifiedImageUrl(SpecifiedUrl { url, url_value })) } /// Get a raw pointer to the URLValue held by this ComputedImageUrl, for FFI. pub fn url_value_ptr(&self) -> *mut URLValue { (self.0).0.url_value.get() } }
parse_from_string
identifier_name
url.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Common handling for the specified value CSS url() values. use cssparser::Parser; use gecko_bindings::bindings; use gecko_bindings::structs::root::mozilla::css::URLValue; use gecko_bindings::structs::root::mozilla::CORSMode; use gecko_bindings::structs::root::nsStyleImageRequest; use gecko_bindings::sugar::ownership::{FFIArcHelpers, HasArcFFI}; use gecko_bindings::sugar::refptr::RefPtr; use malloc_size_of::{MallocSizeOf, MallocSizeOfOps}; use nsstring::nsCString; use parser::{Parse, ParserContext}; use servo_arc::Arc; use std::fmt::{self, Write}; use style_traits::{CssWriter, ParseError, ToCss}; use stylesheets::UrlExtraData; use values::computed::{Context, ToComputedValue}; /// A CSS url() value for gecko. #[css(function = "url")] #[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)] pub struct CssUrl(pub Arc<CssUrlData>); /// Data shared between CssUrls. #[derive(Clone, Debug, PartialEq, SpecifiedValueInfo, ToCss)] pub struct CssUrlData { /// The URL in unresolved string form. serialization: String, /// The URL extra data. #[css(skip)] pub extra_data: UrlExtraData, } impl CssUrl { /// Parse a URL from a string value that is a valid CSS token for a URL. pub fn parse_from_string(url: String, context: &ParserContext) -> Self { CssUrl(Arc::new(CssUrlData { serialization: url, extra_data: context.url_data.clone(), })) } /// Returns true if the URL is definitely invalid. We don't eagerly resolve /// URLs in gecko, so we just return false here. /// use its |resolved| status. pub fn is_invalid(&self) -> bool { false } /// Returns true if this URL looks like a fragment. /// See https://drafts.csswg.org/css-values/#local-urls #[inline] pub fn is_fragment(&self) -> bool { self.0.is_fragment() } /// Return the unresolved url as string, or the empty string if it's /// invalid. #[inline] pub fn as_str(&self) -> &str { self.0.as_str() } } impl CssUrlData { /// Returns true if this URL looks like a fragment. /// See https://drafts.csswg.org/css-values/#local-urls pub fn is_fragment(&self) -> bool { self.as_str().chars().next().map_or(false, |c| c == '#') } /// Return the unresolved url as string, or the empty string if it's /// invalid. pub fn as_str(&self) -> &str { &*self.serialization } } impl Parse for CssUrl { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { let url = input.expect_url()?; Ok(Self::parse_from_string(url.as_ref().to_owned(), context)) } } impl Eq for CssUrl {} impl MallocSizeOf for CssUrl { fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { // XXX: measure `serialization` once bug 1397971 lands // We ignore `extra_data`, because RefPtr is tricky, and there aren't // many of them in practise (sharing is common). 0 } } /// A specified non-image `url()` value. #[derive(Clone, Debug, SpecifiedValueInfo, ToCss)] pub struct SpecifiedUrl { /// The specified url value. pub url: CssUrl, /// Gecko's URLValue so that we can reuse it while rematching a /// property with this specified value. #[css(skip)] pub url_value: RefPtr<URLValue>, } impl SpecifiedUrl { /// Parse a URL from a string value. 
pub fn parse_from_string(url: String, context: &ParserContext) -> Self { Self::from_css_url(CssUrl::parse_from_string(url, context)) } fn from_css_url_with_cors(url: CssUrl, cors: CORSMode) -> Self { let url_value = unsafe { let ptr = bindings::Gecko_URLValue_Create(url.0.clone().into_strong(), cors); // We do not expect Gecko_URLValue_Create returns null. debug_assert!(!ptr.is_null()); RefPtr::from_addrefed(ptr) }; Self { url, url_value } } fn from_css_url(url: CssUrl) -> Self { use gecko_bindings::structs::root::mozilla::CORSMode_CORS_NONE; Self::from_css_url_with_cors(url, CORSMode_CORS_NONE) } fn from_css_url_with_cors_anonymous(url: CssUrl) -> Self { use gecko_bindings::structs::root::mozilla::CORSMode_CORS_ANONYMOUS; Self::from_css_url_with_cors(url, CORSMode_CORS_ANONYMOUS) } } impl Parse for SpecifiedUrl { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { CssUrl::parse(context, input).map(Self::from_css_url) }
self.url.eq(&other.url) } } impl Eq for SpecifiedUrl {} impl MallocSizeOf for SpecifiedUrl { fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { let mut n = self.url.size_of(ops); // Although this is a RefPtr, this is the primary reference because // SpecifiedUrl is responsible for creating the url_value. So we // measure unconditionally here. n += unsafe { bindings::Gecko_URLValue_SizeOfIncludingThis(self.url_value.get()) }; n } } impl ToComputedValue for SpecifiedUrl { type ComputedValue = ComputedUrl; #[inline] fn to_computed_value(&self, _: &Context) -> Self::ComputedValue { ComputedUrl(self.clone()) } #[inline] fn from_computed_value(computed: &Self::ComputedValue) -> Self { computed.0.clone() } } /// A specified image `url()` value. #[derive(Clone, Debug, Eq, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToCss)] pub struct SpecifiedImageUrl(pub SpecifiedUrl); impl SpecifiedImageUrl { /// Parse a URL from a string value that is a valid CSS token for a URL. pub fn parse_from_string(url: String, context: &ParserContext) -> Self { SpecifiedImageUrl(SpecifiedUrl::parse_from_string(url, context)) } /// Provides an alternate method for parsing that associates the URL /// with anonymous CORS headers. pub fn parse_with_cors_anonymous<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { CssUrl::parse(context, input) .map(SpecifiedUrl::from_css_url_with_cors_anonymous) .map(SpecifiedImageUrl) } } impl Parse for SpecifiedImageUrl { fn parse<'i, 't>( context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<Self, ParseError<'i>> { SpecifiedUrl::parse(context, input).map(SpecifiedImageUrl) } } impl ToComputedValue for SpecifiedImageUrl { type ComputedValue = ComputedImageUrl; #[inline] fn to_computed_value(&self, _: &Context) -> Self::ComputedValue { ComputedImageUrl(self.clone()) } #[inline] fn from_computed_value(computed: &Self::ComputedValue) -> Self { computed.0.clone() } } fn serialize_computed_url<W>( url_value: &URLValue, dest: &mut CssWriter<W>, get_url: unsafe extern "C" fn(*const URLValue, *mut nsCString), ) -> fmt::Result where W: Write, { dest.write_str("url(")?; unsafe { let mut string = nsCString::new(); get_url(url_value, &mut string); string.as_str_unchecked().to_css(dest)?; } dest.write_char(')') } /// The computed value of a CSS non-image `url()`. /// /// The only difference between specified and computed URLs is the /// serialization. #[derive(Clone, Debug, Eq, MallocSizeOf, PartialEq)] pub struct ComputedUrl(pub SpecifiedUrl); impl ToCss for ComputedUrl { fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: Write, { serialize_computed_url(&self.0.url_value, dest, bindings::Gecko_GetComputedURLSpec) } } impl ComputedUrl { /// Convert from RefPtr<URLValue> to ComputedUrl. pub unsafe fn from_url_value(url_value: RefPtr<URLValue>) -> Self { let css_url = &*url_value.mCssUrl.mRawPtr; let url = CssUrl(CssUrlData::as_arc(&css_url).clone_arc()); ComputedUrl(SpecifiedUrl { url, url_value }) } /// Get a raw pointer to the URLValue held by this ComputedUrl, for FFI. pub fn url_value_ptr(&self) -> *mut URLValue { self.0.url_value.get() } } /// The computed value of a CSS image `url()`. 
#[derive(Clone, Debug, Eq, MallocSizeOf, PartialEq)] pub struct ComputedImageUrl(pub SpecifiedImageUrl); impl ToCss for ComputedImageUrl { fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result where W: Write, { serialize_computed_url( &(self.0).0.url_value, dest, bindings::Gecko_GetComputedImageURLSpec, ) } } impl ComputedImageUrl { /// Convert from nsStyleImageReques to ComputedImageUrl. pub unsafe fn from_image_request(image_request: &nsStyleImageRequest) -> Self { let url_value = image_request.mImageValue.to_safe(); let css_url = &*url_value.mCssUrl.mRawPtr; let url = CssUrl(CssUrlData::as_arc(&css_url).clone_arc()); ComputedImageUrl(SpecifiedImageUrl(SpecifiedUrl { url, url_value })) } /// Get a raw pointer to the URLValue held by this ComputedImageUrl, for FFI. pub fn url_value_ptr(&self) -> *mut URLValue { (self.0).0.url_value.get() } }
} impl PartialEq for SpecifiedUrl { fn eq(&self, other: &Self) -> bool {
random_line_split
lib.rs
// Copyright 2015 MaidSafe.net limited. // // This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License, // version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which // licence you accepted on initial access to the Software (the "Licences"). // // By contributing code to the SAFE Network Software, or to this project generally, you agree to be // bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the // Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR. // // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. // // Please review the Licences for the specific language governing permissions and limitations // relating to use of the SAFE Network Software. //! Sentinel cryptographically confirms the origin of a claim in a decentralised network. //! //! A claim has to implement Claimable in order to be verifiable and mergeable. //! A request has to implement GetSigningKeys such that Sentinel can acquire //! the necessary public signing keys. //! The request is passed immutably through sentinel //! and is used as a key to group claims and corresponding keys. //! When sentinel resolves a threshold on verified and merged message, //! it returns the requester key and the merged claim. //! Claimant names and associated signatures are discarded after successful resolution, //! as such abstracting the original request into a resolved claim. //! //! The keys_threshold specifies a minimal threshold on the number of independent mentions of //! a single public signing key needed to consider it for verifying a claim. This threshold //! can be one or higher. //! The claims_threshold specifies a minimal threshold on the number of verified claims before //! sentinel will attempt to merge these verified claims. 
#![doc(html_logo_url = "https://raw.githubusercontent.com/maidsafe/QA/master/Images/maidsafe_logo.png", html_favicon_url = "http://maidsafe.net/img/favicon.ico", html_root_url = "http://maidsafe.github.io/sentinel")] // For explanation of lint checks, run `rustc -W help` or see // https://github.com/maidsafe/QA/blob/master/Documentation/Rust%20Lint%20Checks.md #![forbid(bad_style, exceeding_bitshifts, mutable_transmutes, no_mangle_const_items, unknown_crate_types, warnings)] #![deny(deprecated, drop_with_repr_extern, improper_ctypes, missing_docs, non_shorthand_field_patterns, overflowing_literals, plugin_as_library, private_no_mangle_fns, private_no_mangle_statics, raw_pointer_derive, stable_features, unconditional_recursion, unknown_lints, unsafe_code, unused, unused_allocation, unused_attributes, unused_comparisons, unused_features, unused_parens, while_true)] #![warn(trivial_casts, trivial_numeric_casts, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results, variant_size_differences)] #![allow(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations)] extern crate rustc_serialize; extern crate accumulator; extern crate lru_time_cache; extern crate sodiumoxide; extern crate cbor; extern crate rand; use sodiumoxide::crypto; use sodiumoxide::crypto::sign::verify_detached; use sodiumoxide::crypto::sign::PublicKey; use sodiumoxide::crypto::sign::Signature; pub type SerialisedClaim = Vec<u8>; /// Sentinel provides a consensus mechanism on all content messages. /// The claims made must be identical and cryptographically signed. pub mod pure_sentinel; mod key_store; pub mod key_sentinel; mod wrappers; mod refresh_sentinel; mod statistics; fn
(signature: &Signature, public_key: &PublicKey, claim: &SerialisedClaim) -> Option<SerialisedClaim> { match crypto::sign::verify_detached(&signature, claim, public_key) { true => Some(claim.clone()), false => None, } }
verify_signature
identifier_name
lib.rs
// Copyright 2015 MaidSafe.net limited. // // This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License, // version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which // licence you accepted on initial access to the Software (the "Licences"). // // By contributing code to the SAFE Network Software, or to this project generally, you agree to be // bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the // Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR. // // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. // // Please review the Licences for the specific language governing permissions and limitations // relating to use of the SAFE Network Software. //! Sentinel cryptographically confirms the origin of a claim in a decentralised network. //! //! A claim has to implement Claimable in order to be verifiable and mergeable. //! A request has to implement GetSigningKeys such that Sentinel can acquire //! the necessary public signing keys. //! The request is passed immutably through sentinel //! and is used as a key to group claims and corresponding keys. //! When sentinel resolves a threshold on verified and merged message, //! it returns the requester key and the merged claim. //! Claimant names and associated signatures are discarded after successful resolution, //! as such abstracting the original request into a resolved claim. //! //! The keys_threshold specifies a minimal threshold on the number of independent mentions of //! a single public signing key needed to consider it for verifying a claim. This threshold //! can be one or higher. //! The claims_threshold specifies a minimal threshold on the number of verified claims before //! sentinel will attempt to merge these verified claims. #![doc(html_logo_url = "https://raw.githubusercontent.com/maidsafe/QA/master/Images/maidsafe_logo.png", html_favicon_url = "http://maidsafe.net/img/favicon.ico", html_root_url = "http://maidsafe.github.io/sentinel")] // For explanation of lint checks, run `rustc -W help` or see
private_no_mangle_fns, private_no_mangle_statics, raw_pointer_derive, stable_features, unconditional_recursion, unknown_lints, unsafe_code, unused, unused_allocation, unused_attributes, unused_comparisons, unused_features, unused_parens, while_true)] #![warn(trivial_casts, trivial_numeric_casts, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results, variant_size_differences)] #![allow(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations)] extern crate rustc_serialize; extern crate accumulator; extern crate lru_time_cache; extern crate sodiumoxide; extern crate cbor; extern crate rand; use sodiumoxide::crypto; use sodiumoxide::crypto::sign::verify_detached; use sodiumoxide::crypto::sign::PublicKey; use sodiumoxide::crypto::sign::Signature; pub type SerialisedClaim = Vec<u8>; /// Sentinel provides a consensus mechanism on all content messages. /// The claims made must be identical and cryptographically signed. pub mod pure_sentinel; mod key_store; pub mod key_sentinel; mod wrappers; mod refresh_sentinel; mod statistics; fn verify_signature(signature: &Signature, public_key: &PublicKey, claim: &SerialisedClaim) -> Option<SerialisedClaim> { match crypto::sign::verify_detached(&signature, claim, public_key) { true => Some(claim.clone()), false => None, } }
// https://github.com/maidsafe/QA/blob/master/Documentation/Rust%20Lint%20Checks.md #![forbid(bad_style, exceeding_bitshifts, mutable_transmutes, no_mangle_const_items, unknown_crate_types, warnings)] #![deny(deprecated, drop_with_repr_extern, improper_ctypes, missing_docs, non_shorthand_field_patterns, overflowing_literals, plugin_as_library,
random_line_split
lib.rs
// Copyright 2015 MaidSafe.net limited. // // This SAFE Network Software is licensed to you under (1) the MaidSafe.net Commercial License, // version 1.0 or later, or (2) The General Public License (GPL), version 3, depending on which // licence you accepted on initial access to the Software (the "Licences"). // // By contributing code to the SAFE Network Software, or to this project generally, you agree to be // bound by the terms of the MaidSafe Contributor Agreement, version 1.0. This, along with the // Licenses can be found in the root directory of this project at LICENSE, COPYING and CONTRIBUTOR. // // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. // // Please review the Licences for the specific language governing permissions and limitations // relating to use of the SAFE Network Software. //! Sentinel cryptographically confirms the origin of a claim in a decentralised network. //! //! A claim has to implement Claimable in order to be verifiable and mergeable. //! A request has to implement GetSigningKeys such that Sentinel can acquire //! the necessary public signing keys. //! The request is passed immutably through sentinel //! and is used as a key to group claims and corresponding keys. //! When sentinel resolves a threshold on verified and merged message, //! it returns the requester key and the merged claim. //! Claimant names and associated signatures are discarded after successful resolution, //! as such abstracting the original request into a resolved claim. //! //! The keys_threshold specifies a minimal threshold on the number of independent mentions of //! a single public signing key needed to consider it for verifying a claim. This threshold //! can be one or higher. //! The claims_threshold specifies a minimal threshold on the number of verified claims before //! sentinel will attempt to merge these verified claims. 
#![doc(html_logo_url = "https://raw.githubusercontent.com/maidsafe/QA/master/Images/maidsafe_logo.png", html_favicon_url = "http://maidsafe.net/img/favicon.ico", html_root_url = "http://maidsafe.github.io/sentinel")] // For explanation of lint checks, run `rustc -W help` or see // https://github.com/maidsafe/QA/blob/master/Documentation/Rust%20Lint%20Checks.md #![forbid(bad_style, exceeding_bitshifts, mutable_transmutes, no_mangle_const_items, unknown_crate_types, warnings)] #![deny(deprecated, drop_with_repr_extern, improper_ctypes, missing_docs, non_shorthand_field_patterns, overflowing_literals, plugin_as_library, private_no_mangle_fns, private_no_mangle_statics, raw_pointer_derive, stable_features, unconditional_recursion, unknown_lints, unsafe_code, unused, unused_allocation, unused_attributes, unused_comparisons, unused_features, unused_parens, while_true)] #![warn(trivial_casts, trivial_numeric_casts, unused_extern_crates, unused_import_braces, unused_qualifications, unused_results, variant_size_differences)] #![allow(box_pointers, fat_ptr_transmutes, missing_copy_implementations, missing_debug_implementations)] extern crate rustc_serialize; extern crate accumulator; extern crate lru_time_cache; extern crate sodiumoxide; extern crate cbor; extern crate rand; use sodiumoxide::crypto; use sodiumoxide::crypto::sign::verify_detached; use sodiumoxide::crypto::sign::PublicKey; use sodiumoxide::crypto::sign::Signature; pub type SerialisedClaim = Vec<u8>; /// Sentinel provides a consensus mechanism on all content messages. /// The claims made must be identical and cryptographically signed. pub mod pure_sentinel; mod key_store; pub mod key_sentinel; mod wrappers; mod refresh_sentinel; mod statistics; fn verify_signature(signature: &Signature, public_key: &PublicKey, claim: &SerialisedClaim) -> Option<SerialisedClaim>
{ match crypto::sign::verify_detached(&signature, claim, public_key) { true => Some(claim.clone()), false => None, } }
identifier_body
cargo_clean.rs
use std::default::Default; use std::fs; use std::path::Path; use core::{Package, Profiles}; use core::source::Source; use util::{CargoResult, human, ChainError, Config}; use ops::{self, Layout, Context, BuildConfig, Kind, Unit}; pub struct CleanOptions<'a> { pub spec: &'a [String], pub target: Option<&'a str>, pub config: &'a Config, pub release: bool, } /// Cleans the project from build artifacts. pub fn clean(manifest_path: &Path, opts: &CleanOptions) -> CargoResult<()> { let root = try!(Package::for_path(manifest_path, opts.config)); let target_dir = opts.config.target_dir(&root); // If we have a spec, then we need to delete some packages, otherwise, just // remove the whole target directory and be done with it! if opts.spec.is_empty() { return rm_rf(&target_dir); } let (resolve, packages) = try!(ops::fetch(manifest_path, opts.config)); let dest = if opts.release {"release"} else
; let host_layout = Layout::new(opts.config, &root, None, dest); let target_layout = opts.target.map(|target| { Layout::new(opts.config, &root, Some(target), dest) }); let cx = try!(Context::new(&resolve, &packages, opts.config, host_layout, target_layout, BuildConfig::default(), root.manifest().profiles())); // resolve package specs and remove the corresponding packages for spec in opts.spec { // Translate the spec to a Package let pkgid = try!(resolve.query(spec)); let pkg = try!(packages.get(&pkgid)); // And finally, clean everything out! for target in pkg.targets() { for kind in [Kind::Host, Kind::Target].iter() { let layout = cx.layout(&pkg, *kind); try!(rm_rf(&layout.proxy().fingerprint(&pkg))); try!(rm_rf(&layout.build(&pkg))); let Profiles { ref release, ref dev, ref test, ref bench, ref doc, ref custom_build, } = *root.manifest().profiles(); for profile in [release, dev, test, bench, doc, custom_build].iter() { let unit = Unit { pkg: &pkg, target: target, profile: profile, kind: *kind, }; let root = cx.out_dir(&unit); for filename in try!(cx.target_filenames(&unit)).iter() { try!(rm_rf(&root.join(&filename))); } } } } } Ok(()) } fn rm_rf(path: &Path) -> CargoResult<()> { let m = fs::metadata(path); if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { try!(fs::remove_dir_all(path).chain_error(|| { human("could not remove build directory") })); } else if m.is_ok() { try!(fs::remove_file(path).chain_error(|| { human("failed to remove build artifact") })); } Ok(()) }
{"debug"}
conditional_block
cargo_clean.rs
use std::default::Default; use std::fs; use std::path::Path; use core::{Package, Profiles}; use core::source::Source; use util::{CargoResult, human, ChainError, Config}; use ops::{self, Layout, Context, BuildConfig, Kind, Unit}; pub struct CleanOptions<'a> { pub spec: &'a [String], pub target: Option<&'a str>, pub config: &'a Config, pub release: bool, } /// Cleans the project from build artifacts. pub fn clean(manifest_path: &Path, opts: &CleanOptions) -> CargoResult<()> { let root = try!(Package::for_path(manifest_path, opts.config)); let target_dir = opts.config.target_dir(&root); // If we have a spec, then we need to delete some packages, otherwise, just
} let (resolve, packages) = try!(ops::fetch(manifest_path, opts.config)); let dest = if opts.release {"release"} else {"debug"}; let host_layout = Layout::new(opts.config, &root, None, dest); let target_layout = opts.target.map(|target| { Layout::new(opts.config, &root, Some(target), dest) }); let cx = try!(Context::new(&resolve, &packages, opts.config, host_layout, target_layout, BuildConfig::default(), root.manifest().profiles())); // resolve package specs and remove the corresponding packages for spec in opts.spec { // Translate the spec to a Package let pkgid = try!(resolve.query(spec)); let pkg = try!(packages.get(&pkgid)); // And finally, clean everything out! for target in pkg.targets() { for kind in [Kind::Host, Kind::Target].iter() { let layout = cx.layout(&pkg, *kind); try!(rm_rf(&layout.proxy().fingerprint(&pkg))); try!(rm_rf(&layout.build(&pkg))); let Profiles { ref release, ref dev, ref test, ref bench, ref doc, ref custom_build, } = *root.manifest().profiles(); for profile in [release, dev, test, bench, doc, custom_build].iter() { let unit = Unit { pkg: &pkg, target: target, profile: profile, kind: *kind, }; let root = cx.out_dir(&unit); for filename in try!(cx.target_filenames(&unit)).iter() { try!(rm_rf(&root.join(&filename))); } } } } } Ok(()) } fn rm_rf(path: &Path) -> CargoResult<()> { let m = fs::metadata(path); if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { try!(fs::remove_dir_all(path).chain_error(|| { human("could not remove build directory") })); } else if m.is_ok() { try!(fs::remove_file(path).chain_error(|| { human("failed to remove build artifact") })); } Ok(()) }
// remove the whole target directory and be done with it! if opts.spec.is_empty() { return rm_rf(&target_dir);
random_line_split
cargo_clean.rs
use std::default::Default; use std::fs; use std::path::Path; use core::{Package, Profiles}; use core::source::Source; use util::{CargoResult, human, ChainError, Config}; use ops::{self, Layout, Context, BuildConfig, Kind, Unit}; pub struct
<'a> { pub spec: &'a [String], pub target: Option<&'a str>, pub config: &'a Config, pub release: bool, } /// Cleans the project from build artifacts. pub fn clean(manifest_path: &Path, opts: &CleanOptions) -> CargoResult<()> { let root = try!(Package::for_path(manifest_path, opts.config)); let target_dir = opts.config.target_dir(&root); // If we have a spec, then we need to delete some packages, otherwise, just // remove the whole target directory and be done with it! if opts.spec.is_empty() { return rm_rf(&target_dir); } let (resolve, packages) = try!(ops::fetch(manifest_path, opts.config)); let dest = if opts.release {"release"} else {"debug"}; let host_layout = Layout::new(opts.config, &root, None, dest); let target_layout = opts.target.map(|target| { Layout::new(opts.config, &root, Some(target), dest) }); let cx = try!(Context::new(&resolve, &packages, opts.config, host_layout, target_layout, BuildConfig::default(), root.manifest().profiles())); // resolve package specs and remove the corresponding packages for spec in opts.spec { // Translate the spec to a Package let pkgid = try!(resolve.query(spec)); let pkg = try!(packages.get(&pkgid)); // And finally, clean everything out! for target in pkg.targets() { for kind in [Kind::Host, Kind::Target].iter() { let layout = cx.layout(&pkg, *kind); try!(rm_rf(&layout.proxy().fingerprint(&pkg))); try!(rm_rf(&layout.build(&pkg))); let Profiles { ref release, ref dev, ref test, ref bench, ref doc, ref custom_build, } = *root.manifest().profiles(); for profile in [release, dev, test, bench, doc, custom_build].iter() { let unit = Unit { pkg: &pkg, target: target, profile: profile, kind: *kind, }; let root = cx.out_dir(&unit); for filename in try!(cx.target_filenames(&unit)).iter() { try!(rm_rf(&root.join(&filename))); } } } } } Ok(()) } fn rm_rf(path: &Path) -> CargoResult<()> { let m = fs::metadata(path); if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { try!(fs::remove_dir_all(path).chain_error(|| { human("could not remove build directory") })); } else if m.is_ok() { try!(fs::remove_file(path).chain_error(|| { human("failed to remove build artifact") })); } Ok(()) }
CleanOptions
identifier_name
cargo_clean.rs
use std::default::Default; use std::fs; use std::path::Path; use core::{Package, Profiles}; use core::source::Source; use util::{CargoResult, human, ChainError, Config}; use ops::{self, Layout, Context, BuildConfig, Kind, Unit}; pub struct CleanOptions<'a> { pub spec: &'a [String], pub target: Option<&'a str>, pub config: &'a Config, pub release: bool, } /// Cleans the project from build artifacts. pub fn clean(manifest_path: &Path, opts: &CleanOptions) -> CargoResult<()>
BuildConfig::default(), root.manifest().profiles())); // resolve package specs and remove the corresponding packages for spec in opts.spec { // Translate the spec to a Package let pkgid = try!(resolve.query(spec)); let pkg = try!(packages.get(&pkgid)); // And finally, clean everything out! for target in pkg.targets() { for kind in [Kind::Host, Kind::Target].iter() { let layout = cx.layout(&pkg, *kind); try!(rm_rf(&layout.proxy().fingerprint(&pkg))); try!(rm_rf(&layout.build(&pkg))); let Profiles { ref release, ref dev, ref test, ref bench, ref doc, ref custom_build, } = *root.manifest().profiles(); for profile in [release, dev, test, bench, doc, custom_build].iter() { let unit = Unit { pkg: &pkg, target: target, profile: profile, kind: *kind, }; let root = cx.out_dir(&unit); for filename in try!(cx.target_filenames(&unit)).iter() { try!(rm_rf(&root.join(&filename))); } } } } } Ok(()) } fn rm_rf(path: &Path) -> CargoResult<()> { let m = fs::metadata(path); if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { try!(fs::remove_dir_all(path).chain_error(|| { human("could not remove build directory") })); } else if m.is_ok() { try!(fs::remove_file(path).chain_error(|| { human("failed to remove build artifact") })); } Ok(()) }
{ let root = try!(Package::for_path(manifest_path, opts.config)); let target_dir = opts.config.target_dir(&root); // If we have a spec, then we need to delete some packages, otherwise, just // remove the whole target directory and be done with it! if opts.spec.is_empty() { return rm_rf(&target_dir); } let (resolve, packages) = try!(ops::fetch(manifest_path, opts.config)); let dest = if opts.release {"release"} else {"debug"}; let host_layout = Layout::new(opts.config, &root, None, dest); let target_layout = opts.target.map(|target| { Layout::new(opts.config, &root, Some(target), dest) }); let cx = try!(Context::new(&resolve, &packages, opts.config, host_layout, target_layout,
identifier_body
serviceworkerregistration.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::ServiceWorkerBinding::ServiceWorkerState; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::ServiceWorkerUpdateViaCache; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::{ ServiceWorkerRegistrationMethods, Wrap, }; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom}; use crate::dom::bindings::str::{ByteString, USVString}; use crate::dom::eventtarget::EventTarget; use crate::dom::globalscope::GlobalScope; use crate::dom::navigationpreloadmanager::NavigationPreloadManager; use crate::dom::serviceworker::ServiceWorker; use crate::dom::workerglobalscope::prepare_workerscope_init; use devtools_traits::WorkerId; use dom_struct::dom_struct; use script_traits::{ScopeThings, WorkerScriptLoadOrigin}; use servo_url::ServoUrl; use std::cell::Cell; use uuid::Uuid; #[dom_struct] pub struct ServiceWorkerRegistration { eventtarget: EventTarget, active: Option<Dom<ServiceWorker>>, installing: Option<Dom<ServiceWorker>>, waiting: Option<Dom<ServiceWorker>>, navigation_preload: MutNullableDom<NavigationPreloadManager>, scope: ServoUrl, navigation_preload_enabled: Cell<bool>, navigation_preload_header_value: DomRefCell<Option<ByteString>>, update_via_cache: ServiceWorkerUpdateViaCache, uninstalling: Cell<bool>, } impl ServiceWorkerRegistration { fn new_inherited(active_sw: &ServiceWorker, scope: ServoUrl) -> ServiceWorkerRegistration { ServiceWorkerRegistration { eventtarget: EventTarget::new_inherited(), active: Some(Dom::from_ref(active_sw)), installing: None, waiting: None, navigation_preload: MutNullableDom::new(None), scope: scope, navigation_preload_enabled: Cell::new(false), navigation_preload_header_value: DomRefCell::new(None), update_via_cache: ServiceWorkerUpdateViaCache::Imports, uninstalling: Cell::new(false), } } #[allow(unrooted_must_root)] pub fn new( global: &GlobalScope, script_url: &ServoUrl, scope: ServoUrl, ) -> DomRoot<ServiceWorkerRegistration> { let active_worker = ServiceWorker::install_serviceworker(global, script_url.clone(), scope.clone(), true); active_worker.set_transition_state(ServiceWorkerState::Installed); reflect_dom_object( Box::new(ServiceWorkerRegistration::new_inherited( &*active_worker, scope, )), global, Wrap, ) } pub fn active(&self) -> Option<&ServiceWorker> { self.active.as_ref().map(|sw| &**sw) } pub fn get_installed(&self) -> &ServiceWorker { self.active.as_ref().unwrap() } pub fn get_navigation_preload_header_value(&self) -> Option<ByteString> { self.navigation_preload_header_value.borrow().clone() } pub fn set_navigation_preload_header_value(&self, value: ByteString) { let mut header_value = self.navigation_preload_header_value.borrow_mut(); *header_value = Some(value); } pub fn get_navigation_preload_enabled(&self) -> bool { self.navigation_preload_enabled.get() } pub fn set_navigation_preload_enabled(&self, flag: bool) { self.navigation_preload_enabled.set(flag) } pub fn get_uninstalling(&self) -> bool { self.uninstalling.get() } pub fn set_uninstalling(&self, flag: bool) { self.uninstalling.set(flag) } pub fn create_scope_things(global: &GlobalScope, script_url: ServoUrl) -> ScopeThings { let worker_load_origin = 
WorkerScriptLoadOrigin { referrer_url: None, referrer_policy: None, pipeline_id: global.pipeline_id(), }; let worker_id = WorkerId(Uuid::new_v4()); let devtools_chan = global.devtools_chan().cloned(); let init = prepare_workerscope_init(&global, None); ScopeThings { script_url: script_url, init: init, worker_load_origin: worker_load_origin, devtools_chan: devtools_chan, worker_id: worker_id, } } // https://w3c.github.io/ServiceWorker/#get-newest-worker-algorithm pub fn get_newest_worker(&self) -> Option<DomRoot<ServiceWorker>> { if self.installing.as_ref().is_some() { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else if self.waiting.as_ref().is_some() { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } } } pub fn longest_prefix_match(stored_scope: &ServoUrl, potential_match: &ServoUrl) -> bool { if stored_scope.origin()!= potential_match.origin() { return false; } let scope_chars = stored_scope.path().chars(); let matching_chars = potential_match.path().chars(); if scope_chars.count() > matching_chars.count() { return false; } stored_scope .path() .chars() .zip(potential_match.path().chars()) .all(|(scope, matched)| scope == matched) } impl ServiceWorkerRegistrationMethods for ServiceWorkerRegistration { // https://w3c.github.io/ServiceWorker/#service-worker-registration-installing-attribute fn GetInstalling(&self) -> Option<DomRoot<ServiceWorker>> { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-active-attribute fn GetActive(&self) -> Option<DomRoot<ServiceWorker>> { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-waiting-attribute fn GetWaiting(&self) -> Option<DomRoot<ServiceWorker>> { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-scope-attribute fn
(&self) -> USVString { USVString(self.scope.as_str().to_owned()) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-updateviacache fn UpdateViaCache(&self) -> ServiceWorkerUpdateViaCache { self.update_via_cache } // https://w3c.github.io/ServiceWorker/#service-worker-registration-navigationpreload fn NavigationPreload(&self) -> DomRoot<NavigationPreloadManager> { self.navigation_preload .or_init(|| NavigationPreloadManager::new(&self.global(), &self)) } }
Scope
identifier_name
serviceworkerregistration.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::ServiceWorkerBinding::ServiceWorkerState; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::ServiceWorkerUpdateViaCache; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::{ ServiceWorkerRegistrationMethods, Wrap, }; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom}; use crate::dom::bindings::str::{ByteString, USVString}; use crate::dom::eventtarget::EventTarget; use crate::dom::globalscope::GlobalScope; use crate::dom::navigationpreloadmanager::NavigationPreloadManager; use crate::dom::serviceworker::ServiceWorker; use crate::dom::workerglobalscope::prepare_workerscope_init; use devtools_traits::WorkerId; use dom_struct::dom_struct; use script_traits::{ScopeThings, WorkerScriptLoadOrigin}; use servo_url::ServoUrl; use std::cell::Cell; use uuid::Uuid; #[dom_struct] pub struct ServiceWorkerRegistration { eventtarget: EventTarget, active: Option<Dom<ServiceWorker>>, installing: Option<Dom<ServiceWorker>>, waiting: Option<Dom<ServiceWorker>>, navigation_preload: MutNullableDom<NavigationPreloadManager>, scope: ServoUrl, navigation_preload_enabled: Cell<bool>, navigation_preload_header_value: DomRefCell<Option<ByteString>>, update_via_cache: ServiceWorkerUpdateViaCache, uninstalling: Cell<bool>, } impl ServiceWorkerRegistration { fn new_inherited(active_sw: &ServiceWorker, scope: ServoUrl) -> ServiceWorkerRegistration { ServiceWorkerRegistration { eventtarget: EventTarget::new_inherited(), active: Some(Dom::from_ref(active_sw)), installing: None, waiting: None, navigation_preload: MutNullableDom::new(None), scope: scope, navigation_preload_enabled: Cell::new(false), navigation_preload_header_value: DomRefCell::new(None), update_via_cache: ServiceWorkerUpdateViaCache::Imports, uninstalling: Cell::new(false), } } #[allow(unrooted_must_root)] pub fn new( global: &GlobalScope, script_url: &ServoUrl, scope: ServoUrl, ) -> DomRoot<ServiceWorkerRegistration> { let active_worker = ServiceWorker::install_serviceworker(global, script_url.clone(), scope.clone(), true); active_worker.set_transition_state(ServiceWorkerState::Installed); reflect_dom_object( Box::new(ServiceWorkerRegistration::new_inherited( &*active_worker, scope, )), global, Wrap, ) } pub fn active(&self) -> Option<&ServiceWorker> { self.active.as_ref().map(|sw| &**sw) } pub fn get_installed(&self) -> &ServiceWorker { self.active.as_ref().unwrap() } pub fn get_navigation_preload_header_value(&self) -> Option<ByteString> { self.navigation_preload_header_value.borrow().clone() } pub fn set_navigation_preload_header_value(&self, value: ByteString) { let mut header_value = self.navigation_preload_header_value.borrow_mut(); *header_value = Some(value); } pub fn get_navigation_preload_enabled(&self) -> bool { self.navigation_preload_enabled.get() } pub fn set_navigation_preload_enabled(&self, flag: bool) { self.navigation_preload_enabled.set(flag) } pub fn get_uninstalling(&self) -> bool { self.uninstalling.get() } pub fn set_uninstalling(&self, flag: bool) { self.uninstalling.set(flag) } pub fn create_scope_things(global: &GlobalScope, script_url: ServoUrl) -> ScopeThings { let worker_load_origin = 
WorkerScriptLoadOrigin { referrer_url: None, referrer_policy: None, pipeline_id: global.pipeline_id(), }; let worker_id = WorkerId(Uuid::new_v4()); let devtools_chan = global.devtools_chan().cloned(); let init = prepare_workerscope_init(&global, None); ScopeThings { script_url: script_url, init: init, worker_load_origin: worker_load_origin, devtools_chan: devtools_chan, worker_id: worker_id, } } // https://w3c.github.io/ServiceWorker/#get-newest-worker-algorithm pub fn get_newest_worker(&self) -> Option<DomRoot<ServiceWorker>> { if self.installing.as_ref().is_some() { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else if self.waiting.as_ref().is_some()
else { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } } } pub fn longest_prefix_match(stored_scope: &ServoUrl, potential_match: &ServoUrl) -> bool { if stored_scope.origin()!= potential_match.origin() { return false; } let scope_chars = stored_scope.path().chars(); let matching_chars = potential_match.path().chars(); if scope_chars.count() > matching_chars.count() { return false; } stored_scope .path() .chars() .zip(potential_match.path().chars()) .all(|(scope, matched)| scope == matched) } impl ServiceWorkerRegistrationMethods for ServiceWorkerRegistration { // https://w3c.github.io/ServiceWorker/#service-worker-registration-installing-attribute fn GetInstalling(&self) -> Option<DomRoot<ServiceWorker>> { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-active-attribute fn GetActive(&self) -> Option<DomRoot<ServiceWorker>> { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-waiting-attribute fn GetWaiting(&self) -> Option<DomRoot<ServiceWorker>> { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-scope-attribute fn Scope(&self) -> USVString { USVString(self.scope.as_str().to_owned()) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-updateviacache fn UpdateViaCache(&self) -> ServiceWorkerUpdateViaCache { self.update_via_cache } // https://w3c.github.io/ServiceWorker/#service-worker-registration-navigationpreload fn NavigationPreload(&self) -> DomRoot<NavigationPreloadManager> { self.navigation_preload .or_init(|| NavigationPreloadManager::new(&self.global(), &self)) } }
{ self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) }
conditional_block
serviceworkerregistration.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::ServiceWorkerBinding::ServiceWorkerState; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::ServiceWorkerUpdateViaCache; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::{ ServiceWorkerRegistrationMethods, Wrap, }; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom}; use crate::dom::bindings::str::{ByteString, USVString}; use crate::dom::eventtarget::EventTarget; use crate::dom::globalscope::GlobalScope; use crate::dom::navigationpreloadmanager::NavigationPreloadManager; use crate::dom::serviceworker::ServiceWorker; use crate::dom::workerglobalscope::prepare_workerscope_init; use devtools_traits::WorkerId; use dom_struct::dom_struct; use script_traits::{ScopeThings, WorkerScriptLoadOrigin}; use servo_url::ServoUrl; use std::cell::Cell; use uuid::Uuid; #[dom_struct] pub struct ServiceWorkerRegistration { eventtarget: EventTarget, active: Option<Dom<ServiceWorker>>, installing: Option<Dom<ServiceWorker>>, waiting: Option<Dom<ServiceWorker>>, navigation_preload: MutNullableDom<NavigationPreloadManager>, scope: ServoUrl, navigation_preload_enabled: Cell<bool>, navigation_preload_header_value: DomRefCell<Option<ByteString>>, update_via_cache: ServiceWorkerUpdateViaCache, uninstalling: Cell<bool>, } impl ServiceWorkerRegistration { fn new_inherited(active_sw: &ServiceWorker, scope: ServoUrl) -> ServiceWorkerRegistration { ServiceWorkerRegistration { eventtarget: EventTarget::new_inherited(), active: Some(Dom::from_ref(active_sw)), installing: None, waiting: None, navigation_preload: MutNullableDom::new(None), scope: scope, navigation_preload_enabled: Cell::new(false), navigation_preload_header_value: DomRefCell::new(None), update_via_cache: ServiceWorkerUpdateViaCache::Imports, uninstalling: Cell::new(false), } } #[allow(unrooted_must_root)] pub fn new( global: &GlobalScope, script_url: &ServoUrl, scope: ServoUrl, ) -> DomRoot<ServiceWorkerRegistration> { let active_worker = ServiceWorker::install_serviceworker(global, script_url.clone(), scope.clone(), true); active_worker.set_transition_state(ServiceWorkerState::Installed); reflect_dom_object( Box::new(ServiceWorkerRegistration::new_inherited( &*active_worker, scope, )), global, Wrap, ) } pub fn active(&self) -> Option<&ServiceWorker> { self.active.as_ref().map(|sw| &**sw) } pub fn get_installed(&self) -> &ServiceWorker { self.active.as_ref().unwrap() } pub fn get_navigation_preload_header_value(&self) -> Option<ByteString> { self.navigation_preload_header_value.borrow().clone() } pub fn set_navigation_preload_header_value(&self, value: ByteString) { let mut header_value = self.navigation_preload_header_value.borrow_mut(); *header_value = Some(value); } pub fn get_navigation_preload_enabled(&self) -> bool { self.navigation_preload_enabled.get() } pub fn set_navigation_preload_enabled(&self, flag: bool) { self.navigation_preload_enabled.set(flag) } pub fn get_uninstalling(&self) -> bool { self.uninstalling.get() } pub fn set_uninstalling(&self, flag: bool) { self.uninstalling.set(flag) } pub fn create_scope_things(global: &GlobalScope, script_url: ServoUrl) -> ScopeThings { let worker_load_origin = 
WorkerScriptLoadOrigin { referrer_url: None, referrer_policy: None, pipeline_id: global.pipeline_id(), }; let worker_id = WorkerId(Uuid::new_v4()); let devtools_chan = global.devtools_chan().cloned(); let init = prepare_workerscope_init(&global, None); ScopeThings { script_url: script_url, init: init, worker_load_origin: worker_load_origin, devtools_chan: devtools_chan, worker_id: worker_id, } } // https://w3c.github.io/ServiceWorker/#get-newest-worker-algorithm pub fn get_newest_worker(&self) -> Option<DomRoot<ServiceWorker>> { if self.installing.as_ref().is_some() { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else if self.waiting.as_ref().is_some() { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } } } pub fn longest_prefix_match(stored_scope: &ServoUrl, potential_match: &ServoUrl) -> bool
impl ServiceWorkerRegistrationMethods for ServiceWorkerRegistration { // https://w3c.github.io/ServiceWorker/#service-worker-registration-installing-attribute fn GetInstalling(&self) -> Option<DomRoot<ServiceWorker>> { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-active-attribute fn GetActive(&self) -> Option<DomRoot<ServiceWorker>> { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-waiting-attribute fn GetWaiting(&self) -> Option<DomRoot<ServiceWorker>> { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-scope-attribute fn Scope(&self) -> USVString { USVString(self.scope.as_str().to_owned()) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-updateviacache fn UpdateViaCache(&self) -> ServiceWorkerUpdateViaCache { self.update_via_cache } // https://w3c.github.io/ServiceWorker/#service-worker-registration-navigationpreload fn NavigationPreload(&self) -> DomRoot<NavigationPreloadManager> { self.navigation_preload .or_init(|| NavigationPreloadManager::new(&self.global(), &self)) } }
{ if stored_scope.origin() != potential_match.origin() { return false; } let scope_chars = stored_scope.path().chars(); let matching_chars = potential_match.path().chars(); if scope_chars.count() > matching_chars.count() { return false; } stored_scope .path() .chars() .zip(potential_match.path().chars()) .all(|(scope, matched)| scope == matched) }
identifier_body
serviceworkerregistration.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::ServiceWorkerBinding::ServiceWorkerState; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::ServiceWorkerUpdateViaCache; use crate::dom::bindings::codegen::Bindings::ServiceWorkerRegistrationBinding::{ ServiceWorkerRegistrationMethods, Wrap, }; use crate::dom::bindings::reflector::{reflect_dom_object, DomObject}; use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom}; use crate::dom::bindings::str::{ByteString, USVString}; use crate::dom::eventtarget::EventTarget; use crate::dom::globalscope::GlobalScope; use crate::dom::navigationpreloadmanager::NavigationPreloadManager; use crate::dom::serviceworker::ServiceWorker; use crate::dom::workerglobalscope::prepare_workerscope_init; use devtools_traits::WorkerId; use dom_struct::dom_struct; use script_traits::{ScopeThings, WorkerScriptLoadOrigin}; use servo_url::ServoUrl; use std::cell::Cell; use uuid::Uuid; #[dom_struct] pub struct ServiceWorkerRegistration { eventtarget: EventTarget, active: Option<Dom<ServiceWorker>>, installing: Option<Dom<ServiceWorker>>, waiting: Option<Dom<ServiceWorker>>, navigation_preload: MutNullableDom<NavigationPreloadManager>, scope: ServoUrl, navigation_preload_enabled: Cell<bool>, navigation_preload_header_value: DomRefCell<Option<ByteString>>, update_via_cache: ServiceWorkerUpdateViaCache, uninstalling: Cell<bool>, } impl ServiceWorkerRegistration { fn new_inherited(active_sw: &ServiceWorker, scope: ServoUrl) -> ServiceWorkerRegistration { ServiceWorkerRegistration { eventtarget: EventTarget::new_inherited(), active: Some(Dom::from_ref(active_sw)), installing: None, waiting: None, navigation_preload: MutNullableDom::new(None), scope: scope, navigation_preload_enabled: Cell::new(false), navigation_preload_header_value: DomRefCell::new(None), update_via_cache: ServiceWorkerUpdateViaCache::Imports, uninstalling: Cell::new(false), } } #[allow(unrooted_must_root)] pub fn new( global: &GlobalScope, script_url: &ServoUrl, scope: ServoUrl, ) -> DomRoot<ServiceWorkerRegistration> { let active_worker = ServiceWorker::install_serviceworker(global, script_url.clone(), scope.clone(), true); active_worker.set_transition_state(ServiceWorkerState::Installed); reflect_dom_object( Box::new(ServiceWorkerRegistration::new_inherited( &*active_worker, scope, )), global, Wrap, ) } pub fn active(&self) -> Option<&ServiceWorker> { self.active.as_ref().map(|sw| &**sw) } pub fn get_installed(&self) -> &ServiceWorker { self.active.as_ref().unwrap() }
pub fn set_navigation_preload_header_value(&self, value: ByteString) { let mut header_value = self.navigation_preload_header_value.borrow_mut(); *header_value = Some(value); } pub fn get_navigation_preload_enabled(&self) -> bool { self.navigation_preload_enabled.get() } pub fn set_navigation_preload_enabled(&self, flag: bool) { self.navigation_preload_enabled.set(flag) } pub fn get_uninstalling(&self) -> bool { self.uninstalling.get() } pub fn set_uninstalling(&self, flag: bool) { self.uninstalling.set(flag) } pub fn create_scope_things(global: &GlobalScope, script_url: ServoUrl) -> ScopeThings { let worker_load_origin = WorkerScriptLoadOrigin { referrer_url: None, referrer_policy: None, pipeline_id: global.pipeline_id(), }; let worker_id = WorkerId(Uuid::new_v4()); let devtools_chan = global.devtools_chan().cloned(); let init = prepare_workerscope_init(&global, None); ScopeThings { script_url: script_url, init: init, worker_load_origin: worker_load_origin, devtools_chan: devtools_chan, worker_id: worker_id, } } // https://w3c.github.io/ServiceWorker/#get-newest-worker-algorithm pub fn get_newest_worker(&self) -> Option<DomRoot<ServiceWorker>> { if self.installing.as_ref().is_some() { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else if self.waiting.as_ref().is_some() { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } else { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } } } pub fn longest_prefix_match(stored_scope: &ServoUrl, potential_match: &ServoUrl) -> bool { if stored_scope.origin()!= potential_match.origin() { return false; } let scope_chars = stored_scope.path().chars(); let matching_chars = potential_match.path().chars(); if scope_chars.count() > matching_chars.count() { return false; } stored_scope .path() .chars() .zip(potential_match.path().chars()) .all(|(scope, matched)| scope == matched) } impl ServiceWorkerRegistrationMethods for ServiceWorkerRegistration { // https://w3c.github.io/ServiceWorker/#service-worker-registration-installing-attribute fn GetInstalling(&self) -> Option<DomRoot<ServiceWorker>> { self.installing.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-active-attribute fn GetActive(&self) -> Option<DomRoot<ServiceWorker>> { self.active.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-waiting-attribute fn GetWaiting(&self) -> Option<DomRoot<ServiceWorker>> { self.waiting.as_ref().map(|sw| DomRoot::from_ref(&**sw)) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-scope-attribute fn Scope(&self) -> USVString { USVString(self.scope.as_str().to_owned()) } // https://w3c.github.io/ServiceWorker/#service-worker-registration-updateviacache fn UpdateViaCache(&self) -> ServiceWorkerUpdateViaCache { self.update_via_cache } // https://w3c.github.io/ServiceWorker/#service-worker-registration-navigationpreload fn NavigationPreload(&self) -> DomRoot<NavigationPreloadManager> { self.navigation_preload .or_init(|| NavigationPreloadManager::new(&self.global(), &self)) } }
pub fn get_navigation_preload_header_value(&self) -> Option<ByteString> { self.navigation_preload_header_value.borrow().clone() }
random_line_split
graphics.rs
use editor::Editor; use orbital::Color; use redraw::RedrawTask; use mode::Mode; use mode::PrimitiveMode; use mode::CommandMode; impl Editor { /// Redraw the window pub fn redraw(&mut self) { // TODO: Only draw when relevant for the window let (mut pos_x, pos_y) = self.pos(); // Redraw window self.window.set(Color::rgb(25, 25, 25)); let w = self.window.width(); pos_x += self.delta(); if self.options.line_marker { self.window.rect(0, (pos_y - self.scroll_y) as isize * 16, w, 16, Color::rgb(45, 45, 45)); } self.window.rect(8 * (pos_x - self.scroll_x) as isize, 16 * (pos_y - self.scroll_y) as isize, 8, 16, Color::WHITE); let mut string = false; for (y, row) in self.text.iter().enumerate() { for (x, &c) in row.iter().enumerate() { // TODO: Move outta here let color = if self.options.highlight { match c { '\'' | '"' => { string =!string; (226, 225, 167) //(167, 222, 156) } _ if string => (226, 225, 167), //(167, 222, 156) '!' | '@' | '#' | '$' | '%' | '^' | '&' | '|' | '*' | '+' | '-' | '/' | ':' | '=' | '<' | '>' => (198, 83, 83), //(228, 190, 175), //(194, 106, 71), '.' | ',' => (241, 213, 226), '(' | ')' | '[' | ']' | '{' | '}' => (164, 212, 125), //(195, 139, 75), '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => (209, 209, 177), _ => (255, 255, 255), } } else { (255, 255, 255) }; let c = if c == '\t' { '' } else { c }; if pos_x == x && pos_y == y { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0 / 3, color.1 / 3, color.2 / 3)); } else { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0, color.1, color.2)); } } } self.redraw_task = RedrawTask::Null; self.redraw_status_bar(); self.window.sync(); } /// Redraw the status bar pub fn redraw_status_bar(&mut self) { let h = self.window.height(); let w = self.window.width(); let mode = self.cursor().mode; self.window.rect(0, h as isize - 18 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 18 } else { 0 } }, w, 18, Color::rgba(74, 74, 74, 255)); let sb_mode = self.status_bar.mode.clone(); status_bar(self, sb_mode, 0, 4); let sb_file = self.status_bar.file.clone(); status_bar(self, sb_file, 1, 4); let sb_cmd = self.status_bar.cmd.clone(); status_bar(self, sb_cmd, 2, 4); let sb_msg = self.status_bar.msg.clone(); status_bar(self, sb_msg, 3, 4); for (n, c) in self.prompt.chars().enumerate() { self.window.char(n as isize * 8, h as isize - 16 - 1, c, Color::WHITE); } self.window.sync(); } } fn
(editor: &mut Editor, text: String, a: usize, b: usize) { let h = editor.window.height(); let w = editor.window.width(); // let y = editor.y(); let mode = editor.cursor().mode; for (n, c) in (if text.len() > w / (8 * b) { text.chars().take(w / (8 * b) - 5).chain(vec!['.'; 3]).collect::<Vec<_>>() } else { text.chars().collect() }) .into_iter() .enumerate() { editor.window.char(((w * a) / b) as isize + (n as isize * 8), h as isize - 16 - 1 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 16 + 1 + 1 } else { 0 } }, c, Color::WHITE); } } /// The statubar (showing various info about the current state of the editor) pub struct StatusBar { /// The current mode pub mode: String, /// The cureent char pub file: String, /// The current command pub cmd: String, /// A message (such as an error or other info to the user) pub msg: String, } impl StatusBar { /// Create new status bar pub fn new() -> Self { StatusBar { mode: "Normal".to_string(), file: String::new(), cmd: String::new(), msg: "Welcome to Sodium!".to_string(), } } }
status_bar
identifier_name
graphics.rs
use editor::Editor; use orbital::Color; use redraw::RedrawTask; use mode::Mode; use mode::PrimitiveMode; use mode::CommandMode; impl Editor { /// Redraw the window pub fn redraw(&mut self) { // TODO: Only draw when relevant for the window let (mut pos_x, pos_y) = self.pos(); // Redraw window self.window.set(Color::rgb(25, 25, 25)); let w = self.window.width(); pos_x += self.delta(); if self.options.line_marker { self.window.rect(0, (pos_y - self.scroll_y) as isize * 16, w, 16, Color::rgb(45, 45, 45)); } self.window.rect(8 * (pos_x - self.scroll_x) as isize, 16 * (pos_y - self.scroll_y) as isize, 8, 16, Color::WHITE); let mut string = false; for (y, row) in self.text.iter().enumerate() { for (x, &c) in row.iter().enumerate() { // TODO: Move outta here let color = if self.options.highlight { match c { '\'' | '"' => { string =!string; (226, 225, 167) //(167, 222, 156) } _ if string => (226, 225, 167), //(167, 222, 156) '!' | '@' | '#' | '$' | '%' | '^' | '&' | '|' | '*' | '+' | '-' | '/' | ':' | '=' | '<' | '>' => (198, 83, 83), //(228, 190, 175), //(194, 106, 71), '.' | ',' => (241, 213, 226), '(' | ')' | '[' | ']' | '{' | '}' => (164, 212, 125), //(195, 139, 75), '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => (209, 209, 177), _ => (255, 255, 255), } } else { (255, 255, 255) }; let c = if c == '\t' { '' } else { c }; if pos_x == x && pos_y == y { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0 / 3, color.1 / 3, color.2 / 3)); } else { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0, color.1, color.2)); } } } self.redraw_task = RedrawTask::Null; self.redraw_status_bar(); self.window.sync(); } /// Redraw the status bar pub fn redraw_status_bar(&mut self)
status_bar(self, sb_file, 1, 4); let sb_cmd = self.status_bar.cmd.clone(); status_bar(self, sb_cmd, 2, 4); let sb_msg = self.status_bar.msg.clone(); status_bar(self, sb_msg, 3, 4); for (n, c) in self.prompt.chars().enumerate() { self.window.char(n as isize * 8, h as isize - 16 - 1, c, Color::WHITE); } self.window.sync(); } } fn status_bar(editor: &mut Editor, text: String, a: usize, b: usize) { let h = editor.window.height(); let w = editor.window.width(); // let y = editor.y(); let mode = editor.cursor().mode; for (n, c) in (if text.len() > w / (8 * b) { text.chars().take(w / (8 * b) - 5).chain(vec!['.'; 3]).collect::<Vec<_>>() } else { text.chars().collect() }) .into_iter() .enumerate() { editor.window.char(((w * a) / b) as isize + (n as isize * 8), h as isize - 16 - 1 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 16 + 1 + 1 } else { 0 } }, c, Color::WHITE); } } /// The statubar (showing various info about the current state of the editor) pub struct StatusBar { /// The current mode pub mode: String, /// The cureent char pub file: String, /// The current command pub cmd: String, /// A message (such as an error or other info to the user) pub msg: String, } impl StatusBar { /// Create new status bar pub fn new() -> Self { StatusBar { mode: "Normal".to_string(), file: String::new(), cmd: String::new(), msg: "Welcome to Sodium!".to_string(), } } }
{ let h = self.window.height(); let w = self.window.width(); let mode = self.cursor().mode; self.window.rect(0, h as isize - 18 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 18 } else { 0 } }, w, 18, Color::rgba(74, 74, 74, 255)); let sb_mode = self.status_bar.mode.clone(); status_bar(self, sb_mode, 0, 4); let sb_file = self.status_bar.file.clone();
identifier_body
graphics.rs
use editor::Editor; use orbital::Color; use redraw::RedrawTask; use mode::Mode; use mode::PrimitiveMode; use mode::CommandMode; impl Editor { /// Redraw the window pub fn redraw(&mut self) { // TODO: Only draw when relevant for the window let (mut pos_x, pos_y) = self.pos(); // Redraw window self.window.set(Color::rgb(25, 25, 25)); let w = self.window.width(); pos_x += self.delta(); if self.options.line_marker { self.window.rect(0, (pos_y - self.scroll_y) as isize * 16, w, 16, Color::rgb(45, 45, 45)); } self.window.rect(8 * (pos_x - self.scroll_x) as isize, 16 * (pos_y - self.scroll_y) as isize, 8, 16, Color::WHITE); let mut string = false; for (y, row) in self.text.iter().enumerate() { for (x, &c) in row.iter().enumerate() { // TODO: Move outta here let color = if self.options.highlight { match c { '\'' | '"' =>
_ if string => (226, 225, 167), //(167, 222, 156) '!' | '@' | '#' | '$' | '%' | '^' | '&' | '|' | '*' | '+' | '-' | '/' | ':' | '=' | '<' | '>' => (198, 83, 83), //(228, 190, 175), //(194, 106, 71), '.' | ',' => (241, 213, 226), '(' | ')' | '[' | ']' | '{' | '}' => (164, 212, 125), //(195, 139, 75), '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => (209, 209, 177), _ => (255, 255, 255), } } else { (255, 255, 255) }; let c = if c == '\t' { '' } else { c }; if pos_x == x && pos_y == y { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0 / 3, color.1 / 3, color.2 / 3)); } else { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0, color.1, color.2)); } } } self.redraw_task = RedrawTask::Null; self.redraw_status_bar(); self.window.sync(); } /// Redraw the status bar pub fn redraw_status_bar(&mut self) { let h = self.window.height(); let w = self.window.width(); let mode = self.cursor().mode; self.window.rect(0, h as isize - 18 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 18 } else { 0 } }, w, 18, Color::rgba(74, 74, 74, 255)); let sb_mode = self.status_bar.mode.clone(); status_bar(self, sb_mode, 0, 4); let sb_file = self.status_bar.file.clone(); status_bar(self, sb_file, 1, 4); let sb_cmd = self.status_bar.cmd.clone(); status_bar(self, sb_cmd, 2, 4); let sb_msg = self.status_bar.msg.clone(); status_bar(self, sb_msg, 3, 4); for (n, c) in self.prompt.chars().enumerate() { self.window.char(n as isize * 8, h as isize - 16 - 1, c, Color::WHITE); } self.window.sync(); } } fn status_bar(editor: &mut Editor, text: String, a: usize, b: usize) { let h = editor.window.height(); let w = editor.window.width(); // let y = editor.y(); let mode = editor.cursor().mode; for (n, c) in (if text.len() > w / (8 * b) { text.chars().take(w / (8 * b) - 5).chain(vec!['.'; 3]).collect::<Vec<_>>() } else { text.chars().collect() }) .into_iter() .enumerate() { editor.window.char(((w * a) / b) as isize + (n as isize * 8), h as isize - 16 - 1 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 16 + 1 + 1 } else { 0 } }, c, Color::WHITE); } } /// The statubar (showing various info about the current state of the editor) pub struct StatusBar { /// The current mode pub mode: String, /// The cureent char pub file: String, /// The current command pub cmd: String, /// A message (such as an error or other info to the user) pub msg: String, } impl StatusBar { /// Create new status bar pub fn new() -> Self { StatusBar { mode: "Normal".to_string(), file: String::new(), cmd: String::new(), msg: "Welcome to Sodium!".to_string(), } } }
{ string = !string; (226, 225, 167) //(167, 222, 156) }
conditional_block
graphics.rs
use editor::Editor; use orbital::Color; use redraw::RedrawTask; use mode::Mode; use mode::PrimitiveMode; use mode::CommandMode; impl Editor { /// Redraw the window pub fn redraw(&mut self) { // TODO: Only draw when relevant for the window let (mut pos_x, pos_y) = self.pos(); // Redraw window self.window.set(Color::rgb(25, 25, 25)); let w = self.window.width(); pos_x += self.delta(); if self.options.line_marker { self.window.rect(0, (pos_y - self.scroll_y) as isize * 16, w, 16, Color::rgb(45, 45, 45)); } self.window.rect(8 * (pos_x - self.scroll_x) as isize, 16 * (pos_y - self.scroll_y) as isize, 8, 16, Color::WHITE); let mut string = false; for (y, row) in self.text.iter().enumerate() { for (x, &c) in row.iter().enumerate() { // TODO: Move outta here let color = if self.options.highlight { match c { '\'' | '"' => { string =!string; (226, 225, 167) //(167, 222, 156) } _ if string => (226, 225, 167), //(167, 222, 156) '!' | '@' | '#' | '$' | '%' | '^' | '&' | '|' | '*' | '+' | '-' | '/' | ':' | '=' | '<' | '>' => (198, 83, 83), //(228, 190, 175), //(194, 106, 71), '.' | ',' => (241, 213, 226), '(' | ')' | '[' | ']' | '{' | '}' => (164, 212, 125), //(195, 139, 75), '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => (209, 209, 177), _ => (255, 255, 255), } } else { (255, 255, 255) }; let c = if c == '\t' { '' } else { c }; if pos_x == x && pos_y == y { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0 / 3, color.1 / 3, color.2 / 3)); } else { self.window.char(8 * (x - self.scroll_x) as isize, 16 * (y - self.scroll_y) as isize, c, Color::rgb(color.0, color.1, color.2)); } } } self.redraw_task = RedrawTask::Null; self.redraw_status_bar(); self.window.sync(); } /// Redraw the status bar pub fn redraw_status_bar(&mut self) { let h = self.window.height(); let w = self.window.width(); let mode = self.cursor().mode; self.window.rect(0, h as isize - 18 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 18 } else { 0 } }, w, 18, Color::rgba(74, 74, 74, 255)); let sb_mode = self.status_bar.mode.clone(); status_bar(self, sb_mode, 0, 4); let sb_file = self.status_bar.file.clone(); status_bar(self, sb_file, 1, 4);
let sb_msg = self.status_bar.msg.clone(); status_bar(self, sb_msg, 3, 4); for (n, c) in self.prompt.chars().enumerate() { self.window.char(n as isize * 8, h as isize - 16 - 1, c, Color::WHITE); } self.window.sync(); } } fn status_bar(editor: &mut Editor, text: String, a: usize, b: usize) { let h = editor.window.height(); let w = editor.window.width(); // let y = editor.y(); let mode = editor.cursor().mode; for (n, c) in (if text.len() > w / (8 * b) { text.chars().take(w / (8 * b) - 5).chain(vec!['.'; 3]).collect::<Vec<_>>() } else { text.chars().collect() }) .into_iter() .enumerate() { editor.window.char(((w * a) / b) as isize + (n as isize * 8), h as isize - 16 - 1 - { if mode == Mode::Primitive(PrimitiveMode::Prompt) { 16 + 1 + 1 } else { 0 } }, c, Color::WHITE); } } /// The statubar (showing various info about the current state of the editor) pub struct StatusBar { /// The current mode pub mode: String, /// The cureent char pub file: String, /// The current command pub cmd: String, /// A message (such as an error or other info to the user) pub msg: String, } impl StatusBar { /// Create new status bar pub fn new() -> Self { StatusBar { mode: "Normal".to_string(), file: String::new(), cmd: String::new(), msg: "Welcome to Sodium!".to_string(), } } }
let sb_cmd = self.status_bar.cmd.clone(); status_bar(self, sb_cmd, 2, 4);
random_line_split
running-with-no-runtime.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate native; use std::io::process::{Command, ProcessOutput}; use std::os; use std::str; use std::rt::unwind::try; local_data_key!(foo: int) #[start] fn start(argc: int, argv: *const *const u8) -> int
fn main() { let args = os::args(); let me = args.get(0).as_slice(); let x: &[u8] = &[1u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[2u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[3u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[4u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[5u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[6u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[7u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[8u8]; pass(Command::new(me).arg(x).output().unwrap()); } fn pass(output: ProcessOutput) { if !output.status.success() { println!("{}", str::from_utf8(output.output.as_slice())); println!("{}", str::from_utf8(output.error.as_slice())); } }
{ if argc > 1 { unsafe { match **argv.offset(1) { 1 => {} 2 => println!("foo"), 3 => assert!(try(|| {}).is_ok()), 4 => assert!(try(|| fail!()).is_err()), 5 => assert!(try(|| spawn(proc() {})).is_err()), 6 => assert!(Command::new("test").spawn().is_err()), 7 => assert!(foo.get().is_none()), 8 => assert!(try(|| { foo.replace(Some(3)); }).is_err()), _ => fail!() } } return 0 } native::start(argc, argv, main) }
identifier_body
running-with-no-runtime.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate native; use std::io::process::{Command, ProcessOutput}; use std::os; use std::str; use std::rt::unwind::try; local_data_key!(foo: int) #[start] fn start(argc: int, argv: *const *const u8) -> int { if argc > 1 { unsafe { match **argv.offset(1) { 1 => {} 2 => println!("foo"), 3 => assert!(try(|| {}).is_ok()), 4 => assert!(try(|| fail!()).is_err()), 5 => assert!(try(|| spawn(proc() {})).is_err()), 6 => assert!(Command::new("test").spawn().is_err()), 7 => assert!(foo.get().is_none()), 8 => assert!(try(|| { foo.replace(Some(3)); }).is_err()), _ => fail!() } } return 0 } native::start(argc, argv, main) } fn main() { let args = os::args(); let me = args.get(0).as_slice(); let x: &[u8] = &[1u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[2u8];
pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[4u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[5u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[6u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[7u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[8u8]; pass(Command::new(me).arg(x).output().unwrap()); } fn pass(output: ProcessOutput) { if !output.status.success() { println!("{}", str::from_utf8(output.output.as_slice())); println!("{}", str::from_utf8(output.error.as_slice())); } }
pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[3u8];
random_line_split
running-with-no-runtime.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. extern crate native; use std::io::process::{Command, ProcessOutput}; use std::os; use std::str; use std::rt::unwind::try; local_data_key!(foo: int) #[start] fn start(argc: int, argv: *const *const u8) -> int { if argc > 1 { unsafe { match **argv.offset(1) { 1 => {} 2 => println!("foo"), 3 => assert!(try(|| {}).is_ok()), 4 => assert!(try(|| fail!()).is_err()), 5 => assert!(try(|| spawn(proc() {})).is_err()), 6 => assert!(Command::new("test").spawn().is_err()), 7 => assert!(foo.get().is_none()), 8 => assert!(try(|| { foo.replace(Some(3)); }).is_err()), _ => fail!() } } return 0 } native::start(argc, argv, main) } fn main() { let args = os::args(); let me = args.get(0).as_slice(); let x: &[u8] = &[1u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[2u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[3u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[4u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[5u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[6u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[7u8]; pass(Command::new(me).arg(x).output().unwrap()); let x: &[u8] = &[8u8]; pass(Command::new(me).arg(x).output().unwrap()); } fn
(output: ProcessOutput) { if !output.status.success() { println!("{}", str::from_utf8(output.output.as_slice())); println!("{}", str::from_utf8(output.error.as_slice())); } }
pass
identifier_name
twoBitFreq.rs
// Implement twoBitFreq in Rust extern crate twobit; use twobit::TwoBit; use std::error::Error; fn main() { let mut args = std::env::args(); if args.len() != 3 { println!("Usage: {} <2bit filename> <name> ", args.next().unwrap()); } else {
let filename = args.next().unwrap(); let chrom = args.next().unwrap(); let tb = TwoBit::new(&filename); match tb { Ok(tbv) => { match tbv.base_frequencies(&chrom) { Some(freqs) => println!("{} base frequencies (ACGT): {} {} {} {}", chrom, freqs[0], freqs[1], freqs[2], freqs[3]), None => println!("Unknown sequence: {}", chrom), } }, Err(ioerr) => println!("{}: {}", ioerr.description(), filename) } } }
let mut args = args.skip(1);
random_line_split
twoBitFreq.rs
// Implement twoBitFreq in Rust extern crate twobit; use twobit::TwoBit; use std::error::Error; fn
() { let mut args = std::env::args(); if args.len() != 3 { println!("Usage: {} <2bit filename> <name> ", args.next().unwrap()); } else { let mut args = args.skip(1); let filename = args.next().unwrap(); let chrom = args.next().unwrap(); let tb = TwoBit::new(&filename); match tb { Ok(tbv) => { match tbv.base_frequencies(&chrom) { Some(freqs) => println!("{} base frequencies (ACGT): {} {} {} {}", chrom, freqs[0], freqs[1], freqs[2], freqs[3]), None => println!("Unknown sequence: {}", chrom), } }, Err(ioerr) => println!("{}: {}", ioerr.description(), filename) } } }
main
identifier_name
twoBitFreq.rs
// Implement twoBitFreq in Rust extern crate twobit; use twobit::TwoBit; use std::error::Error; fn main()
Err(ioerr) => println!("{}: {}", ioerr.description(), filename) } } }
{ let mut args = std::env::args(); if args.len() != 3 { println!("Usage: {} <2bit filename> <name> ", args.next().unwrap()); } else { let mut args = args.skip(1); let filename = args.next().unwrap(); let chrom = args.next().unwrap(); let tb = TwoBit::new(&filename); match tb { Ok(tbv) => { match tbv.base_frequencies(&chrom) { Some(freqs) => println!("{} base frequencies (ACGT): {} {} {} {}", chrom, freqs[0], freqs[1], freqs[2], freqs[3]), None => println!("Unknown sequence: {}", chrom), } },
identifier_body
html.rs
use cw::{Crosswords, Dir, Point, PrintItem}; use std::collections::HashMap; use std::io::{Result, Write}; const CSS: &'static str = r#" .solution { font: 22px monospace; text-align: center; position: absolute; left: 0px; right: 0px; bottom: 0px; } .hint { font: 8px monospace; color: Gray; position: absolute; } .row { overflow: hidden; float: left; } .row > div:nth-child(even) { width: 30px; } .row > div:nth-child(odd) { width: 2px; } .row > div { float: left; position: relative; } .low { height: 2px; } .high { height: 30px; } .dark { background-color: DarkBlue; } .light { background-color: LightGray; } .blockcol { background-color: DarkBlue; } "#; fn get_border_class(border: bool) -> &'static str { if border { "dark" } else { "light" } } fn string_for(item: &PrintItem, solution: bool) -> String { match *item { PrintItem::HorizBorder(b) | PrintItem::Cross(b) => format!(r#"<div class="low {}"></div>"#, get_border_class(b)), PrintItem::VertBorder(b) => format!(r#"<div class="high {}"></div>"#, get_border_class(b)), PrintItem::Block => r#"<div class="high blockcol"></div>"#.to_string(), PrintItem::CharHint(c, hint) => { format!(concat!(r#"<div class = "high">"#, r#"<span class="hint">{}</span>"#, r#"<span class="solution">{}</span>"#, r#"</div>"#), hint.map(|h| h.to_string()) .unwrap_or_else(|| "".to_owned()), if solution { c.to_string() } else { "&nbsp;".to_owned() }) } PrintItem::LineBreak => r#"</div><div class="row">"#.to_owned(), } } fn write_grid<T: Write, I: Iterator<Item = PrintItem>>(writer: &mut T, items: I, solution: bool) -> Result<()> { try!(writeln!(writer, r#"<div class="row">"#)); for item in items { try!(writer.write_all(string_for(&item, solution).as_bytes())) } try!(writeln!(writer, "</div>")); Ok(()) } fn write_hints<T: Write>(writer: &mut T, cw: &Crosswords, dir: Dir, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, "<p><br><b>{}:</b>&nbsp;", match dir { Dir::Right => "Horizontal", Dir::Down => "Vertical", })); let mut hint_count = 0; for y in 0..cw.get_height() { for x in 0..cw.get_width() { let p = Point::new(x as i32, y as i32); if cw.has_hint_at(p) { hint_count += 1; } if cw.has_hint_at_dir(p, dir)
} } try!(writeln!(writer, "</p>")); Ok(()) } /// Write the crosswords to the given writer as an HTML page. pub fn write_html<T: Write>(writer: &mut T, cw: &Crosswords, solution: bool, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, r#"<!doctype html>"#)); try!(writeln!(writer, r#"<head>"#)); try!(writeln!(writer, r#"<meta charset="utf-8" />"#)); try!(writeln!(writer, r#"<style type="text/css">{}</style>"#, CSS)); try!(writeln!(writer, r#"<title>Crosswords</title>"#)); try!(writeln!(writer, r#"</head><body>"#)); try!(writeln!(writer, r#"<div style="width: {}px">"#, cw.get_width() * 32 + 2)); try!(write_grid(writer, cw.print_items(), solution)); try!(writeln!(writer, r#"</div><br><div style="clear: both"></div>"#)); try!(write_hints(writer, cw, Dir::Right, hint_text)); try!(write_hints(writer, cw, Dir::Down, hint_text)); try!(writeln!(writer, "<br></body>")); Ok(()) }
{ let word: String = cw.chars_at(p, dir).collect(); let hint = hint_text .get(&word) .cloned() .unwrap_or_else(|| format!("[{}]", word)); try!(write!(writer, "<b>{}.</b> {} &nbsp;", hint_count, hint)); }
conditional_block
html.rs
use cw::{Crosswords, Dir, Point, PrintItem}; use std::collections::HashMap; use std::io::{Result, Write}; const CSS: &'static str = r#" .solution { font: 22px monospace; text-align: center; position: absolute; left: 0px; right: 0px; bottom: 0px; } .hint { font: 8px monospace; color: Gray; position: absolute; } .row { overflow: hidden; float: left; } .row > div:nth-child(even) { width: 30px; } .row > div:nth-child(odd) { width: 2px; } .row > div { float: left; position: relative; } .low { height: 2px; } .high { height: 30px; } .dark { background-color: DarkBlue; }
.light { background-color: LightGray; } .blockcol { background-color: DarkBlue; } "#; fn get_border_class(border: bool) -> &'static str { if border { "dark" } else { "light" } } fn string_for(item: &PrintItem, solution: bool) -> String { match *item { PrintItem::HorizBorder(b) | PrintItem::Cross(b) => format!(r#"<div class="low {}"></div>"#, get_border_class(b)), PrintItem::VertBorder(b) => format!(r#"<div class="high {}"></div>"#, get_border_class(b)), PrintItem::Block => r#"<div class="high blockcol"></div>"#.to_string(), PrintItem::CharHint(c, hint) => { format!(concat!(r#"<div class = "high">"#, r#"<span class="hint">{}</span>"#, r#"<span class="solution">{}</span>"#, r#"</div>"#), hint.map(|h| h.to_string()) .unwrap_or_else(|| "".to_owned()), if solution { c.to_string() } else { "&nbsp;".to_owned() }) } PrintItem::LineBreak => r#"</div><div class="row">"#.to_owned(), } } fn write_grid<T: Write, I: Iterator<Item = PrintItem>>(writer: &mut T, items: I, solution: bool) -> Result<()> { try!(writeln!(writer, r#"<div class="row">"#)); for item in items { try!(writer.write_all(string_for(&item, solution).as_bytes())) } try!(writeln!(writer, "</div>")); Ok(()) } fn write_hints<T: Write>(writer: &mut T, cw: &Crosswords, dir: Dir, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, "<p><br><b>{}:</b>&nbsp;", match dir { Dir::Right => "Horizontal", Dir::Down => "Vertical", })); let mut hint_count = 0; for y in 0..cw.get_height() { for x in 0..cw.get_width() { let p = Point::new(x as i32, y as i32); if cw.has_hint_at(p) { hint_count += 1; } if cw.has_hint_at_dir(p, dir) { let word: String = cw.chars_at(p, dir).collect(); let hint = hint_text .get(&word) .cloned() .unwrap_or_else(|| format!("[{}]", word)); try!(write!(writer, "<b>{}.</b> {} &nbsp;", hint_count, hint)); } } } try!(writeln!(writer, "</p>")); Ok(()) } /// Write the crosswords to the given writer as an HTML page. pub fn write_html<T: Write>(writer: &mut T, cw: &Crosswords, solution: bool, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, r#"<!doctype html>"#)); try!(writeln!(writer, r#"<head>"#)); try!(writeln!(writer, r#"<meta charset="utf-8" />"#)); try!(writeln!(writer, r#"<style type="text/css">{}</style>"#, CSS)); try!(writeln!(writer, r#"<title>Crosswords</title>"#)); try!(writeln!(writer, r#"</head><body>"#)); try!(writeln!(writer, r#"<div style="width: {}px">"#, cw.get_width() * 32 + 2)); try!(write_grid(writer, cw.print_items(), solution)); try!(writeln!(writer, r#"</div><br><div style="clear: both"></div>"#)); try!(write_hints(writer, cw, Dir::Right, hint_text)); try!(write_hints(writer, cw, Dir::Down, hint_text)); try!(writeln!(writer, "<br></body>")); Ok(()) }
random_line_split
html.rs
use cw::{Crosswords, Dir, Point, PrintItem}; use std::collections::HashMap; use std::io::{Result, Write}; const CSS: &'static str = r#" .solution { font: 22px monospace; text-align: center; position: absolute; left: 0px; right: 0px; bottom: 0px; } .hint { font: 8px monospace; color: Gray; position: absolute; } .row { overflow: hidden; float: left; } .row > div:nth-child(even) { width: 30px; } .row > div:nth-child(odd) { width: 2px; } .row > div { float: left; position: relative; } .low { height: 2px; } .high { height: 30px; } .dark { background-color: DarkBlue; } .light { background-color: LightGray; } .blockcol { background-color: DarkBlue; } "#; fn get_border_class(border: bool) -> &'static str { if border { "dark" } else { "light" } } fn string_for(item: &PrintItem, solution: bool) -> String { match *item { PrintItem::HorizBorder(b) | PrintItem::Cross(b) => format!(r#"<div class="low {}"></div>"#, get_border_class(b)), PrintItem::VertBorder(b) => format!(r#"<div class="high {}"></div>"#, get_border_class(b)), PrintItem::Block => r#"<div class="high blockcol"></div>"#.to_string(), PrintItem::CharHint(c, hint) => { format!(concat!(r#"<div class = "high">"#, r#"<span class="hint">{}</span>"#, r#"<span class="solution">{}</span>"#, r#"</div>"#), hint.map(|h| h.to_string()) .unwrap_or_else(|| "".to_owned()), if solution { c.to_string() } else { "&nbsp;".to_owned() }) } PrintItem::LineBreak => r#"</div><div class="row">"#.to_owned(), } } fn write_grid<T: Write, I: Iterator<Item = PrintItem>>(writer: &mut T, items: I, solution: bool) -> Result<()>
fn write_hints<T: Write>(writer: &mut T, cw: &Crosswords, dir: Dir, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, "<p><br><b>{}:</b>&nbsp;", match dir { Dir::Right => "Horizontal", Dir::Down => "Vertical", })); let mut hint_count = 0; for y in 0..cw.get_height() { for x in 0..cw.get_width() { let p = Point::new(x as i32, y as i32); if cw.has_hint_at(p) { hint_count += 1; } if cw.has_hint_at_dir(p, dir) { let word: String = cw.chars_at(p, dir).collect(); let hint = hint_text .get(&word) .cloned() .unwrap_or_else(|| format!("[{}]", word)); try!(write!(writer, "<b>{}.</b> {} &nbsp;", hint_count, hint)); } } } try!(writeln!(writer, "</p>")); Ok(()) } /// Write the crosswords to the given writer as an HTML page. pub fn write_html<T: Write>(writer: &mut T, cw: &Crosswords, solution: bool, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, r#"<!doctype html>"#)); try!(writeln!(writer, r#"<head>"#)); try!(writeln!(writer, r#"<meta charset="utf-8" />"#)); try!(writeln!(writer, r#"<style type="text/css">{}</style>"#, CSS)); try!(writeln!(writer, r#"<title>Crosswords</title>"#)); try!(writeln!(writer, r#"</head><body>"#)); try!(writeln!(writer, r#"<div style="width: {}px">"#, cw.get_width() * 32 + 2)); try!(write_grid(writer, cw.print_items(), solution)); try!(writeln!(writer, r#"</div><br><div style="clear: both"></div>"#)); try!(write_hints(writer, cw, Dir::Right, hint_text)); try!(write_hints(writer, cw, Dir::Down, hint_text)); try!(writeln!(writer, "<br></body>")); Ok(()) }
{ try!(writeln!(writer, r#"<div class="row">"#)); for item in items { try!(writer.write_all(string_for(&item, solution).as_bytes())) } try!(writeln!(writer, "</div>")); Ok(()) }
identifier_body
html.rs
use cw::{Crosswords, Dir, Point, PrintItem}; use std::collections::HashMap; use std::io::{Result, Write}; const CSS: &'static str = r#" .solution { font: 22px monospace; text-align: center; position: absolute; left: 0px; right: 0px; bottom: 0px; } .hint { font: 8px monospace; color: Gray; position: absolute; } .row { overflow: hidden; float: left; } .row > div:nth-child(even) { width: 30px; } .row > div:nth-child(odd) { width: 2px; } .row > div { float: left; position: relative; } .low { height: 2px; } .high { height: 30px; } .dark { background-color: DarkBlue; } .light { background-color: LightGray; } .blockcol { background-color: DarkBlue; } "#; fn get_border_class(border: bool) -> &'static str { if border { "dark" } else { "light" } } fn string_for(item: &PrintItem, solution: bool) -> String { match *item { PrintItem::HorizBorder(b) | PrintItem::Cross(b) => format!(r#"<div class="low {}"></div>"#, get_border_class(b)), PrintItem::VertBorder(b) => format!(r#"<div class="high {}"></div>"#, get_border_class(b)), PrintItem::Block => r#"<div class="high blockcol"></div>"#.to_string(), PrintItem::CharHint(c, hint) => { format!(concat!(r#"<div class = "high">"#, r#"<span class="hint">{}</span>"#, r#"<span class="solution">{}</span>"#, r#"</div>"#), hint.map(|h| h.to_string()) .unwrap_or_else(|| "".to_owned()), if solution { c.to_string() } else { "&nbsp;".to_owned() }) } PrintItem::LineBreak => r#"</div><div class="row">"#.to_owned(), } } fn write_grid<T: Write, I: Iterator<Item = PrintItem>>(writer: &mut T, items: I, solution: bool) -> Result<()> { try!(writeln!(writer, r#"<div class="row">"#)); for item in items { try!(writer.write_all(string_for(&item, solution).as_bytes())) } try!(writeln!(writer, "</div>")); Ok(()) } fn
<T: Write>(writer: &mut T, cw: &Crosswords, dir: Dir, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, "<p><br><b>{}:</b>&nbsp;", match dir { Dir::Right => "Horizontal", Dir::Down => "Vertical", })); let mut hint_count = 0; for y in 0..cw.get_height() { for x in 0..cw.get_width() { let p = Point::new(x as i32, y as i32); if cw.has_hint_at(p) { hint_count += 1; } if cw.has_hint_at_dir(p, dir) { let word: String = cw.chars_at(p, dir).collect(); let hint = hint_text .get(&word) .cloned() .unwrap_or_else(|| format!("[{}]", word)); try!(write!(writer, "<b>{}.</b> {} &nbsp;", hint_count, hint)); } } } try!(writeln!(writer, "</p>")); Ok(()) } /// Write the crosswords to the given writer as an HTML page. pub fn write_html<T: Write>(writer: &mut T, cw: &Crosswords, solution: bool, hint_text: &HashMap<String, String>) -> Result<()> { try!(writeln!(writer, r#"<!doctype html>"#)); try!(writeln!(writer, r#"<head>"#)); try!(writeln!(writer, r#"<meta charset="utf-8" />"#)); try!(writeln!(writer, r#"<style type="text/css">{}</style>"#, CSS)); try!(writeln!(writer, r#"<title>Crosswords</title>"#)); try!(writeln!(writer, r#"</head><body>"#)); try!(writeln!(writer, r#"<div style="width: {}px">"#, cw.get_width() * 32 + 2)); try!(write_grid(writer, cw.print_items(), solution)); try!(writeln!(writer, r#"</div><br><div style="clear: both"></div>"#)); try!(write_hints(writer, cw, Dir::Right, hint_text)); try!(write_hints(writer, cw, Dir::Down, hint_text)); try!(writeln!(writer, "<br></body>")); Ok(()) }
write_hints
identifier_name
reexported-static-methods-cross-crate.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:reexported_static_methods.rs extern crate reexported_static_methods; use reexported_static_methods::Foo; use reexported_static_methods::Baz; use reexported_static_methods::Boz; use reexported_static_methods::Bort; pub fn main()
{ assert_eq!(42, Foo::foo()); assert_eq!(84, Baz::bar()); assert!(Boz::boz(1)); assert_eq!("bort()".to_string(), Bort::bort()); }
identifier_body
reexported-static-methods-cross-crate.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:reexported_static_methods.rs extern crate reexported_static_methods; use reexported_static_methods::Foo; use reexported_static_methods::Baz; use reexported_static_methods::Boz; use reexported_static_methods::Bort; pub fn main() { assert_eq!(42, Foo::foo()); assert_eq!(84, Baz::bar());
assert!(Boz::boz(1)); assert_eq!("bort()".to_string(), Bort::bort()); }
random_line_split
reexported-static-methods-cross-crate.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:reexported_static_methods.rs extern crate reexported_static_methods; use reexported_static_methods::Foo; use reexported_static_methods::Baz; use reexported_static_methods::Boz; use reexported_static_methods::Bort; pub fn
() { assert_eq!(42, Foo::foo()); assert_eq!(84, Baz::bar()); assert!(Boz::boz(1)); assert_eq!("bort()".to_string(), Bort::bort()); }
main
identifier_name
regions-return-interior-of-option.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn get<T>(opt: &Option<T>) -> &T { match *opt { Some(ref v) => v, None => fail!("none") } } pub fn main()
{ let mut x = Some(23i); { let y = get(&x); assert_eq!(*y, 23); } x = Some(24i); { let y = get(&x); assert_eq!(*y, 24); } }
identifier_body
regions-return-interior-of-option.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn get<T>(opt: &Option<T>) -> &T { match *opt { Some(ref v) => v, None => fail!("none")
} } pub fn main() { let mut x = Some(23i); { let y = get(&x); assert_eq!(*y, 23); } x = Some(24i); { let y = get(&x); assert_eq!(*y, 24); } }
random_line_split
regions-return-interior-of-option.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn
<T>(opt: &Option<T>) -> &T { match *opt { Some(ref v) => v, None => fail!("none") } } pub fn main() { let mut x = Some(23i); { let y = get(&x); assert_eq!(*y, 23); } x = Some(24i); { let y = get(&x); assert_eq!(*y, 24); } }
get
identifier_name
build.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> extern crate gcc; extern crate pkg_config; use std::ascii::AsciiExt; use std::process::Command; use gcc::Config; use std::env; use std::path::Path; const MIN_MAJOR: u16 = 3; const MIN_MINOR: u16 = 4; const MINOR_STEP: u16 = 2; fn
() { let lib = pkg_config::find_library("gtk+-3.0") .unwrap_or_else(|e| panic!("{}", e)); let mut parts = lib.version.splitn(3, '.') .map(|s| s.parse()) .take_while(|r| r.is_ok()) .map(|r| r.unwrap()); let version: (u16, u16) = (parts.next().unwrap_or(0), parts.next().unwrap_or(0)); let mut cfgs = Vec::new(); if version.0 == MIN_MAJOR && version.1 > MIN_MINOR { let major = version.0; let mut minor = MIN_MINOR; while minor <= version.1 { cfgs.push(format!("gtk_{}_{}", major, minor)); minor += MINOR_STEP; } } for cfg in &cfgs { println!("cargo:rustc-cfg={}", cfg); } println!("cargo:cfg={}", cfgs.connect(" ")); env::set_var("PKG_CONFIG_ALLOW_CROSS", "1"); // call native pkg-config, there is no way to do this with pkg-config for now let cmd = Command::new("pkg-config").arg("--cflags").arg("gtk+-3.0") .output().unwrap(); if!cmd.status.success() { panic!("{}", String::from_utf8_lossy(&cmd.stderr)); } // make the vector of path to set to gcc::Config let output = String::from_utf8(cmd.stdout).unwrap(); // build include path let mut gcc_conf = Config::new(); for s in output.split(' ') { if s.starts_with("-I") { let path: &Path = s[2..].as_ref(); gcc_conf.include(path); } } gcc_conf.file("src/gtk_glue.c"); // pass the GTK feature flags for cfg in &cfgs { gcc_conf.flag(&format!("-D{}", cfg.to_ascii_uppercase())); } // build library gcc_conf.compile("librgtk_glue.a"); }
main
identifier_name
build.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> extern crate gcc; extern crate pkg_config; use std::ascii::AsciiExt; use std::process::Command; use gcc::Config; use std::env; use std::path::Path; const MIN_MAJOR: u16 = 3; const MIN_MINOR: u16 = 4; const MINOR_STEP: u16 = 2; fn main()
println!("cargo:cfg={}", cfgs.connect(" ")); env::set_var("PKG_CONFIG_ALLOW_CROSS", "1"); // call native pkg-config, there is no way to do this with pkg-config for now let cmd = Command::new("pkg-config").arg("--cflags").arg("gtk+-3.0") .output().unwrap(); if!cmd.status.success() { panic!("{}", String::from_utf8_lossy(&cmd.stderr)); } // make the vector of path to set to gcc::Config let output = String::from_utf8(cmd.stdout).unwrap(); // build include path let mut gcc_conf = Config::new(); for s in output.split(' ') { if s.starts_with("-I") { let path: &Path = s[2..].as_ref(); gcc_conf.include(path); } } gcc_conf.file("src/gtk_glue.c"); // pass the GTK feature flags for cfg in &cfgs { gcc_conf.flag(&format!("-D{}", cfg.to_ascii_uppercase())); } // build library gcc_conf.compile("librgtk_glue.a"); }
{ let lib = pkg_config::find_library("gtk+-3.0") .unwrap_or_else(|e| panic!("{}", e)); let mut parts = lib.version.splitn(3, '.') .map(|s| s.parse()) .take_while(|r| r.is_ok()) .map(|r| r.unwrap()); let version: (u16, u16) = (parts.next().unwrap_or(0), parts.next().unwrap_or(0)); let mut cfgs = Vec::new(); if version.0 == MIN_MAJOR && version.1 > MIN_MINOR { let major = version.0; let mut minor = MIN_MINOR; while minor <= version.1 { cfgs.push(format!("gtk_{}_{}", major, minor)); minor += MINOR_STEP; } } for cfg in &cfgs { println!("cargo:rustc-cfg={}", cfg); }
identifier_body
build.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> extern crate gcc; extern crate pkg_config; use std::ascii::AsciiExt; use std::process::Command; use gcc::Config; use std::env; use std::path::Path; const MIN_MAJOR: u16 = 3; const MIN_MINOR: u16 = 4; const MINOR_STEP: u16 = 2; fn main() { let lib = pkg_config::find_library("gtk+-3.0") .unwrap_or_else(|e| panic!("{}", e)); let mut parts = lib.version.splitn(3, '.') .map(|s| s.parse()) .take_while(|r| r.is_ok()) .map(|r| r.unwrap()); let version: (u16, u16) = (parts.next().unwrap_or(0), parts.next().unwrap_or(0)); let mut cfgs = Vec::new(); if version.0 == MIN_MAJOR && version.1 > MIN_MINOR { let major = version.0; let mut minor = MIN_MINOR; while minor <= version.1 { cfgs.push(format!("gtk_{}_{}", major, minor)); minor += MINOR_STEP; } } for cfg in &cfgs { println!("cargo:rustc-cfg={}", cfg); } println!("cargo:cfg={}", cfgs.connect(" ")); env::set_var("PKG_CONFIG_ALLOW_CROSS", "1"); // call native pkg-config, there is no way to do this with pkg-config for now let cmd = Command::new("pkg-config").arg("--cflags").arg("gtk+-3.0") .output().unwrap(); if!cmd.status.success() { panic!("{}", String::from_utf8_lossy(&cmd.stderr)); } // make the vector of path to set to gcc::Config let output = String::from_utf8(cmd.stdout).unwrap(); // build include path let mut gcc_conf = Config::new(); for s in output.split(' ') { if s.starts_with("-I")
} gcc_conf.file("src/gtk_glue.c"); // pass the GTK feature flags for cfg in &cfgs { gcc_conf.flag(&format!("-D{}", cfg.to_ascii_uppercase())); } // build library gcc_conf.compile("librgtk_glue.a"); }
{ let path: &Path = s[2..].as_ref(); gcc_conf.include(path); }
conditional_block
build.rs
// Copyright 2013-2015, The Rust-GNOME Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT> extern crate gcc; extern crate pkg_config; use std::ascii::AsciiExt; use std::process::Command; use gcc::Config; use std::env; use std::path::Path; const MIN_MAJOR: u16 = 3; const MIN_MINOR: u16 = 4; const MINOR_STEP: u16 = 2; fn main() { let lib = pkg_config::find_library("gtk+-3.0") .unwrap_or_else(|e| panic!("{}", e)); let mut parts = lib.version.splitn(3, '.') .map(|s| s.parse()) .take_while(|r| r.is_ok()) .map(|r| r.unwrap()); let version: (u16, u16) = (parts.next().unwrap_or(0), parts.next().unwrap_or(0)); let mut cfgs = Vec::new(); if version.0 == MIN_MAJOR && version.1 > MIN_MINOR { let major = version.0; let mut minor = MIN_MINOR; while minor <= version.1 { cfgs.push(format!("gtk_{}_{}", major, minor)); minor += MINOR_STEP; } } for cfg in &cfgs { println!("cargo:rustc-cfg={}", cfg); } println!("cargo:cfg={}", cfgs.connect(" ")); env::set_var("PKG_CONFIG_ALLOW_CROSS", "1"); // call native pkg-config, there is no way to do this with pkg-config for now let cmd = Command::new("pkg-config").arg("--cflags").arg("gtk+-3.0") .output().unwrap(); if!cmd.status.success() { panic!("{}", String::from_utf8_lossy(&cmd.stderr)); } // make the vector of path to set to gcc::Config let output = String::from_utf8(cmd.stdout).unwrap(); // build include path let mut gcc_conf = Config::new(); for s in output.split(' ') { if s.starts_with("-I") { let path: &Path = s[2..].as_ref(); gcc_conf.include(path); } } gcc_conf.file("src/gtk_glue.c");
// build library gcc_conf.compile("librgtk_glue.a"); }
// pass the GTK feature flags for cfg in &cfgs { gcc_conf.flag(&format!("-D{}", cfg.to_ascii_uppercase())); }
random_line_split
mod.rs
mod raster; mod shader; mod samplers; use std::ops::Add; use std::ops::Mul; use std::f32; // A vector in 4-space. pub struct Vector([f32; 4]); // A 4x4 matrix. pub struct Matrix([f32; 16]); impl Vector { pub fn new(x: f32, y: f32, z: f32) -> Vector { Vector([x, y, z, 1.]) } pub fn zero() -> Vector { Vector([0., 0., 0., 0.]) } fn nth(&self, idx: usize) -> Option<f32> { match (self, idx) { (&Vector(ref data), 0...3) => Some(data[idx]), _ => None } } fn x(&self) -> f32 { match self.nth(0) { Some(s) => s, _ => panic!() } } fn y(&self) -> f32 { match self.nth(1) { Some(s) => s, _ => panic!() } } fn z(&self) -> f32 { match self.nth(2) { Some(s) => s, _ => panic!() } } fn w(&self) -> f32 { match self.nth(3) { Some(s) => s, _ => panic!() } } fn dot(&self, vec: &Vector) -> f32 { match (self, vec) { (&Vector(a), &Vector(b)) => { a.iter().zip(b.iter()).fold(0., |sum, (i, j)| sum + (i * j)) } } } fn sub(&self, vec: &Vector) -> Vector { Vector([self.x() - vec.x(), self.y() - vec.y(), self.z() - vec.z(), self.w() - vec.w()]) } } impl Matrix { fn identity() -> Matrix { Matrix([1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.]) } fn translate(pos: &Vector) -> Matrix { Matrix([1., 0., 0., pos.x(), 0., 1., 0., pos.y(), 0., 0., 1., pos.z(), 0., 0., 0., pos.w()]) } fn scale(scale: &Vector) -> Matrix { Matrix([scale.x(), 0., 0., 0., 0., scale.y(), 0., 0., 0., 0., scale.z(), 0., 0., 0., 0., scale.w()]) } fn apply(&self, vec: &Vector) -> Vector { let mut data: [f32; 4] = [0.; 4]; for i in 0..3 { data[i] = self.row(i).dot(vec); } Vector(data) } fn row(&self, row: usize) -> Vector { match self { &Matrix(ref data) => { Vector([data[row * 4], data[1 + (row * 4)], data[2 + (row * 4)], data[3 + (row * 4)]]) } } } fn col(&self, col: usize) -> Vector { match (self) { &Matrix(ref data) => { Vector([data[col], data[col + 4], data[col + 8], data[col + 12]]) } } } } impl Mul for Matrix { type Output = Matrix; // Produces the matrix AB. fn mul(self, rhs: Matrix) -> Matrix { let mut out: [f32; 16] = [0.; 16]; for j in 0..3 { for i in 0..3 { out[i * j] = self.row(j).dot(&rhs.col(i)); } } Matrix(out) } } pub struct Rect { pub top: f32, pub bottom: f32, pub left: f32, pub right: f32, } // A primitive triangle. pub struct Triangle(Vector, Vector, Vector); impl Triangle { pub fn new(a: Vector, b: Vector, c: Vector) -> Triangle { Triangle(a, b, c) } fn vertices(&self) -> Vec<&Vector> { match self { &Triangle(ref a, ref b, ref c) => vec![a, b, c] } } // Returns a bounding box encapsulating the triangle in the XY-plane. fn bounds(&self) -> Rect { let &Triangle(ref a, ref b, ref c) = self; let mut rect = Rect { top: f32::MAX, bottom: f32::MIN, left: f32::MAX, right: f32::MIN, }; for i in [a, b, c].iter() { rect.top = rect.top.min(i.x()); rect.bottom = rect.bottom.max(i.x()); rect.left = rect.left.min(i.y()); rect.right = rect.right.max(i.y()); } rect } } pub struct Mesh(Vec<Triangle>); impl Mesh { pub fn new(tris: Vec<Triangle>) -> Mesh { Mesh(tris) } } pub struct Model { mesh: Mesh, pos: Vector, scale: Vector, rot: Vector, } impl Model { pub fn new(mesh: Mesh) -> Model { Model { mesh: mesh, pos: Vector::zero(), scale: Vector::new(1., 1., 1.), rot: Vector::zero(), } } fn rotate(&mut self, rotation: &Vector) { // TODO } fn translate(&mut self, translation: &Vector) { // TODO } fn scale(&mut self, scale: &Vector) { // TODO } fn get_transform(&self) -> Matrix { let translate: Matrix = Matrix::translate(&self.pos); // TODO(acomminos): other transforms translate } } // A perspective camera. 
pub struct Camera { pos: Vector, rot: Vector, z_near: f32, // The near z-clipping plane. z_far: f32, // The far z-clipping plane. fov: f32, // The horizontal field of view, in radians. ratio: f32, // Screen aspect ratio of width/height. } impl Camera { pub fn new(pos: Vector, rot: Vector, aspect: f32, fov: f32, near: f32, far: f32) -> Camera { Camera { pos: pos, rot: rot, ratio: aspect, fov: fov, z_near: near, z_far: far } } // Projects the vector into normalized screen coordinates. // Does not perform any clipping. // TODO: replace this with a simple function returning a matrix to be used // in a homogenous coordinate system fn project_vector(&self, v: &Vector) -> Vector { let x = v.x()/(self.ratio * (self.fov / 2.).tan() * v.z()); let y = v.y()/v.z(); let z = (v.z() - self.z_near)/(self.z_far - self.z_near); Vector([x, y, z, 1.]) } fn project_triangle(&self, tri: &Triangle) -> Triangle { match tri { &Triangle(ref a, ref b, ref c) => { Triangle(self.project_vector(a), self.project_vector(b), self.project_vector(c)) } } } fn contains_point(&self, (x, y, z): (f32, f32, f32)) -> bool { x >= -1. && x <= 1. && y >= -1. && y <= 1. && z >= -1. && z <= 1. } } pub struct Scene { camera: Camera, models: Vec<Model>, } impl Scene { pub fn new(camera: Camera) -> Scene { Scene { camera: camera, models: vec![] } } pub fn camera<'a>(&'a self) -> &'a Camera { &self.camera } pub fn add_model(&mut self, model: Model) { self.models.push(model); } pub fn render(&self, rt: &mut RenderTarget) { for m in &self.models { let model_transform = &m.get_transform(); let &Mesh(ref triangles) = &m.mesh; for t in triangles { // FIXME(acomminos): placeholder let ph_shader = shader::SolidColorShader(Color::white()); let sampler = samplers::SimpleMultiSampler(2); // TODO(acomminos): use model_transform let t_proj = self.camera.project_triangle(t); raster::rasterize_barycentric_ccw(&t_proj, rt, &self.camera, &sampler, &ph_shader); } } } } pub struct Buffer<T> { width: usize, height: usize, data: Vec<T>, } impl <T> Buffer<T> where T: Clone { pub fn new(width: usize, height: usize, initial: T) -> Buffer<T> { let mut data: Vec<T> = Vec::with_capacity(width * height); // FIXME(acomminos): find more idiomatic way to do this for i in 0..(width * height) { data.push(initial.clone()); } Buffer { width: width, height: height,
pub fn put(&mut self, (x, y): (usize, usize), val: T) { self.data[x + (y * self.width)] = val; } pub fn get(&self, x: usize, y: usize) -> &T { &self.data[x + (y * self.width)] } } // Pixel blend modes. pub enum CompositeMode { SourceOver, } // A 32-bit ARGB colour. // Use premultiplied alpha for consistency. #[derive(Copy, Clone)] pub struct Color { r: f32, g: f32, b: f32, a: f32 } impl Color { fn white() -> Color { Color::new(1., 1., 1., 1.) } fn zero() -> Color { Color::new(0., 0., 0., 0.) } // Create fn new(r: f32, g: f32, b: f32, a: f32) -> Color { Color { r: r, g: g, b: b, a: a } } fn from_rgba32(rgba: &u32) -> Color { let max = u8::max_value() as f32; Color::new((((rgba >> 24) & 0xFFu32) as f32)/max, (((rgba >> 16) & 0xFFu32) as f32)/max, (((rgba >> 8) & 0xFFu32) as f32)/max, (((rgba >> 0) & 0xFFu32) as f32)/max) } fn to_rgba32(&self) -> (u8, u8, u8, u8) { ((self.r * (u8::max_value() as f32)) as u8, (self.g * (u8::max_value() as f32)) as u8, (self.b * (u8::max_value() as f32)) as u8, (self.a * (u8::max_value() as f32)) as u8) } fn unpremultiply(&self) -> Color { Color { r: self.r / self.a, g: self.g / self.a, b: self.b / self.a, a: self.a, } } fn multiply(&self, val: f32) -> Color { Color::new(self.r * val, self.g * val, self.b * val, self.a * val) } } impl Add for Color { type Output = Color; fn add(self, rhs: Color) -> Color { Color::new(self.r + rhs.r, self.g + rhs.g, self.b + rhs.b, self.a + rhs.a) } } // A standard render target with a ARGB color buffer and floating point depth // buffer. pub struct RenderTarget { width: usize, height: usize, color: Buffer<u32>, depth: Buffer<f32>, } impl RenderTarget { pub fn new(width: usize, height: usize) -> RenderTarget { RenderTarget { width: width, height: height, color: Buffer::<u32>::new(width, height, 0u32), depth: Buffer::<f32>::new(width, height, 1.), } } // Toy painting function to paint the pixel at (x, y) with the 32-bit RGBA // colour provided. pub fn paint(&mut self, (x, y): (usize, usize), src: &Color, op: CompositeMode) { let dest = Color::from_rgba32(self.color.get(x, y)); let color = match op { // note: colors here are premultiplied SourceOver => dest.multiply(1. - src.a) + *src }; let (r, g, b, a) = color.to_rgba32(); self.color.put((x, y), ((r as u32) << 24) | ((g as u32) << 16) | ((b as u32) << 8) | a as u32) } // Checks to see if depth is less than the value stored in the depth buffer. // If so, returns true and stores the depth value. // The depth buffer stores floating-point values in the range [0, 1]. By // default, it is initialized to 1. pub fn check_depth(&mut self, (x, y): (usize, usize), depth: f32) -> bool { if depth < *self.depth.get(x, y) { self.depth.put((x, y), depth); return true; } return false; } // Returns the ratio of width:height. pub fn aspect(&self) -> f32 { (self.width as f32) / (self.height as f32) } pub fn print_ascii(&self) { print!["┌──"]; for _ in 1..(self.color.width - 1) { print!["──"]; } println!["──┐"]; for y in 0..self.color.height { print!["│"]; for x in 0..self.color.width { let color = Color::from_rgba32(self.color.get(x, y)); let a = color.a; let block = if a == 0. { " " } else if a <= 0.25 { "░░" } else if a <= 0.5 { "▒▒" } else if a <= 0.75 { "▓▓" } else { "██" }; print!["{}", block]; } println!["│"]; } print!["└──"]; for _ in 1..(self.color.width - 1) { print!["──"]; } println!["──┘"]; } }
data: data, } }
random_line_split
mod.rs
mod raster; mod shader; mod samplers; use std::ops::Add; use std::ops::Mul; use std::f32; // A vector in 4-space. pub struct Vector([f32; 4]); // A 4x4 matrix. pub struct Matrix([f32; 16]); impl Vector { pub fn new(x: f32, y: f32, z: f32) -> Vector { Vector([x, y, z, 1.]) } pub fn zero() -> Vector { Vector([0., 0., 0., 0.]) } fn nth(&self, idx: usize) -> Option<f32> { match (self, idx) { (&Vector(ref data), 0...3) => Some(data[idx]), _ => None } } fn x(&self) -> f32 { match self.nth(0) { Some(s) => s, _ => panic!() } } fn y(&self) -> f32 { match self.nth(1) { Some(s) => s, _ => panic!() } } fn z(&self) -> f32 { match self.nth(2) { Some(s) => s, _ => panic!() } } fn w(&self) -> f32 { match self.nth(3) { Some(s) => s, _ => panic!() } } fn dot(&self, vec: &Vector) -> f32 { match (self, vec) { (&Vector(a), &Vector(b)) => { a.iter().zip(b.iter()).fold(0., |sum, (i, j)| sum + (i * j)) } } } fn sub(&self, vec: &Vector) -> Vector { Vector([self.x() - vec.x(), self.y() - vec.y(), self.z() - vec.z(), self.w() - vec.w()]) } } impl Matrix { fn identity() -> Matrix { Matrix([1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.]) } fn translate(pos: &Vector) -> Matrix { Matrix([1., 0., 0., pos.x(), 0., 1., 0., pos.y(), 0., 0., 1., pos.z(), 0., 0., 0., pos.w()]) } fn scale(scale: &Vector) -> Matrix { Matrix([scale.x(), 0., 0., 0., 0., scale.y(), 0., 0., 0., 0., scale.z(), 0., 0., 0., 0., scale.w()]) } fn apply(&self, vec: &Vector) -> Vector { let mut data: [f32; 4] = [0.; 4]; for i in 0..3 { data[i] = self.row(i).dot(vec); } Vector(data) } fn row(&self, row: usize) -> Vector { match self { &Matrix(ref data) => { Vector([data[row * 4], data[1 + (row * 4)], data[2 + (row * 4)], data[3 + (row * 4)]]) } } } fn col(&self, col: usize) -> Vector { match (self) { &Matrix(ref data) => { Vector([data[col], data[col + 4], data[col + 8], data[col + 12]]) } } } } impl Mul for Matrix { type Output = Matrix; // Produces the matrix AB. fn mul(self, rhs: Matrix) -> Matrix { let mut out: [f32; 16] = [0.; 16]; for j in 0..3 { for i in 0..3 { out[i * j] = self.row(j).dot(&rhs.col(i)); } } Matrix(out) } } pub struct Rect { pub top: f32, pub bottom: f32, pub left: f32, pub right: f32, } // A primitive triangle. pub struct Triangle(Vector, Vector, Vector); impl Triangle { pub fn new(a: Vector, b: Vector, c: Vector) -> Triangle { Triangle(a, b, c) } fn vertices(&self) -> Vec<&Vector> { match self { &Triangle(ref a, ref b, ref c) => vec![a, b, c] } } // Returns a bounding box encapsulating the triangle in the XY-plane. fn bounds(&self) -> Rect { let &Triangle(ref a, ref b, ref c) = self; let mut rect = Rect { top: f32::MAX, bottom: f32::MIN, left: f32::MAX, right: f32::MIN, }; for i in [a, b, c].iter() { rect.top = rect.top.min(i.x()); rect.bottom = rect.bottom.max(i.x()); rect.left = rect.left.min(i.y()); rect.right = rect.right.max(i.y()); } rect } } pub struct Mesh(Vec<Triangle>); impl Mesh { pub fn new(tris: Vec<Triangle>) -> Mesh { Mesh(tris) } } pub struct Model { mesh: Mesh, pos: Vector, scale: Vector, rot: Vector, } impl Model { pub fn new(mesh: Mesh) -> Model { Model { mesh: mesh, pos: Vector::zero(), scale: Vector::new(1., 1., 1.), rot: Vector::zero(), } } fn rotate(&mut self, rotation: &Vector) { // TODO } fn translate(&mut self, translation: &Vector) { // TODO } fn scale(&mut self, scale: &Vector) { // TODO } fn get_transform(&self) -> Matrix { let translate: Matrix = Matrix::translate(&self.pos); // TODO(acomminos): other transforms translate } } // A perspective camera. 
pub struct Camera { pos: Vector, rot: Vector, z_near: f32, // The near z-clipping plane. z_far: f32, // The far z-clipping plane. fov: f32, // The horizontal field of view, in radians. ratio: f32, // Screen aspect ratio of width/height. } impl Camera { pub fn new(pos: Vector, rot: Vector, aspect: f32, fov: f32, near: f32, far: f32) -> Camera { Camera { pos: pos, rot: rot, ratio: aspect, fov: fov, z_near: near, z_far: far } } // Projects the vector into normalized screen coordinates. // Does not perform any clipping. // TODO: replace this with a simple function returning a matrix to be used // in a homogenous coordinate system fn project_vector(&self, v: &Vector) -> Vector { let x = v.x()/(self.ratio * (self.fov / 2.).tan() * v.z()); let y = v.y()/v.z(); let z = (v.z() - self.z_near)/(self.z_far - self.z_near); Vector([x, y, z, 1.]) } fn project_triangle(&self, tri: &Triangle) -> Triangle { match tri { &Triangle(ref a, ref b, ref c) => { Triangle(self.project_vector(a), self.project_vector(b), self.project_vector(c)) } } } fn contains_point(&self, (x, y, z): (f32, f32, f32)) -> bool { x >= -1. && x <= 1. && y >= -1. && y <= 1. && z >= -1. && z <= 1. } } pub struct Scene { camera: Camera, models: Vec<Model>, } impl Scene { pub fn new(camera: Camera) -> Scene { Scene { camera: camera, models: vec![] } } pub fn camera<'a>(&'a self) -> &'a Camera { &self.camera } pub fn add_model(&mut self, model: Model) { self.models.push(model); } pub fn render(&self, rt: &mut RenderTarget) { for m in &self.models { let model_transform = &m.get_transform(); let &Mesh(ref triangles) = &m.mesh; for t in triangles { // FIXME(acomminos): placeholder let ph_shader = shader::SolidColorShader(Color::white()); let sampler = samplers::SimpleMultiSampler(2); // TODO(acomminos): use model_transform let t_proj = self.camera.project_triangle(t); raster::rasterize_barycentric_ccw(&t_proj, rt, &self.camera, &sampler, &ph_shader); } } } } pub struct Buffer<T> { width: usize, height: usize, data: Vec<T>, } impl <T> Buffer<T> where T: Clone { pub fn new(width: usize, height: usize, initial: T) -> Buffer<T> { let mut data: Vec<T> = Vec::with_capacity(width * height); // FIXME(acomminos): find more idiomatic way to do this for i in 0..(width * height) { data.push(initial.clone()); } Buffer { width: width, height: height, data: data, } } pub fn put(&mut self, (x, y): (usize, usize), val: T) { self.data[x + (y * self.width)] = val; } pub fn get(&self, x: usize, y: usize) -> &T { &self.data[x + (y * self.width)] } } // Pixel blend modes. pub enum CompositeMode { SourceOver, } // A 32-bit ARGB colour. // Use premultiplied alpha for consistency. #[derive(Copy, Clone)] pub struct Color { r: f32, g: f32, b: f32, a: f32 } impl Color { fn white() -> Color { Color::new(1., 1., 1., 1.) } fn zero() -> Color { Color::new(0., 0., 0., 0.) } // Create fn new(r: f32, g: f32, b: f32, a: f32) -> Color { Color { r: r, g: g, b: b, a: a } } fn from_rgba32(rgba: &u32) -> Color { let max = u8::max_value() as f32; Color::new((((rgba >> 24) & 0xFFu32) as f32)/max, (((rgba >> 16) & 0xFFu32) as f32)/max, (((rgba >> 8) & 0xFFu32) as f32)/max, (((rgba >> 0) & 0xFFu32) as f32)/max) } fn
(&self) -> (u8, u8, u8, u8) { ((self.r * (u8::max_value() as f32)) as u8, (self.g * (u8::max_value() as f32)) as u8, (self.b * (u8::max_value() as f32)) as u8, (self.a * (u8::max_value() as f32)) as u8) } fn unpremultiply(&self) -> Color { Color { r: self.r / self.a, g: self.g / self.a, b: self.b / self.a, a: self.a, } } fn multiply(&self, val: f32) -> Color { Color::new(self.r * val, self.g * val, self.b * val, self.a * val) } } impl Add for Color { type Output = Color; fn add(self, rhs: Color) -> Color { Color::new(self.r + rhs.r, self.g + rhs.g, self.b + rhs.b, self.a + rhs.a) } } // A standard render target with a ARGB color buffer and floating point depth // buffer. pub struct RenderTarget { width: usize, height: usize, color: Buffer<u32>, depth: Buffer<f32>, } impl RenderTarget { pub fn new(width: usize, height: usize) -> RenderTarget { RenderTarget { width: width, height: height, color: Buffer::<u32>::new(width, height, 0u32), depth: Buffer::<f32>::new(width, height, 1.), } } // Toy painting function to paint the pixel at (x, y) with the 32-bit RGBA // colour provided. pub fn paint(&mut self, (x, y): (usize, usize), src: &Color, op: CompositeMode) { let dest = Color::from_rgba32(self.color.get(x, y)); let color = match op { // note: colors here are premultiplied SourceOver => dest.multiply(1. - src.a) + *src }; let (r, g, b, a) = color.to_rgba32(); self.color.put((x, y), ((r as u32) << 24) | ((g as u32) << 16) | ((b as u32) << 8) | a as u32) } // Checks to see if depth is less than the value stored in the depth buffer. // If so, returns true and stores the depth value. // The depth buffer stores floating-point values in the range [0, 1]. By // default, it is initialized to 1. pub fn check_depth(&mut self, (x, y): (usize, usize), depth: f32) -> bool { if depth < *self.depth.get(x, y) { self.depth.put((x, y), depth); return true; } return false; } // Returns the ratio of width:height. pub fn aspect(&self) -> f32 { (self.width as f32) / (self.height as f32) } pub fn print_ascii(&self) { print!["┌──"]; for _ in 1..(self.color.width - 1) { print!["──"]; } println!["──┐"]; for y in 0..self.color.height { print!["│"]; for x in 0..self.color.width { let color = Color::from_rgba32(self.color.get(x, y)); let a = color.a; let block = if a == 0. { " " } else if a <= 0.25 { "░░" } else if a <= 0.5 { "▒▒" } else if a <= 0.75 { "▓▓" } else { "██" }; print!["{}", block]; } println!["│"]; } print!["└──"]; for _ in 1..(self.color.width - 1) { print!["──"]; } println!["──┘"]; } }
to_rgba32
identifier_name
mod.rs
mod raster; mod shader; mod samplers; use std::ops::Add; use std::ops::Mul; use std::f32; // A vector in 4-space. pub struct Vector([f32; 4]); // A 4x4 matrix. pub struct Matrix([f32; 16]); impl Vector { pub fn new(x: f32, y: f32, z: f32) -> Vector { Vector([x, y, z, 1.]) } pub fn zero() -> Vector { Vector([0., 0., 0., 0.]) } fn nth(&self, idx: usize) -> Option<f32> { match (self, idx) { (&Vector(ref data), 0...3) => Some(data[idx]), _ => None } } fn x(&self) -> f32 { match self.nth(0) { Some(s) => s, _ => panic!() } } fn y(&self) -> f32 { match self.nth(1) { Some(s) => s, _ => panic!() } } fn z(&self) -> f32 { match self.nth(2) { Some(s) => s, _ => panic!() } } fn w(&self) -> f32 { match self.nth(3) { Some(s) => s, _ => panic!() } } fn dot(&self, vec: &Vector) -> f32 { match (self, vec) { (&Vector(a), &Vector(b)) => { a.iter().zip(b.iter()).fold(0., |sum, (i, j)| sum + (i * j)) } } } fn sub(&self, vec: &Vector) -> Vector { Vector([self.x() - vec.x(), self.y() - vec.y(), self.z() - vec.z(), self.w() - vec.w()]) } } impl Matrix { fn identity() -> Matrix { Matrix([1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.]) } fn translate(pos: &Vector) -> Matrix { Matrix([1., 0., 0., pos.x(), 0., 1., 0., pos.y(), 0., 0., 1., pos.z(), 0., 0., 0., pos.w()]) } fn scale(scale: &Vector) -> Matrix { Matrix([scale.x(), 0., 0., 0., 0., scale.y(), 0., 0., 0., 0., scale.z(), 0., 0., 0., 0., scale.w()]) } fn apply(&self, vec: &Vector) -> Vector { let mut data: [f32; 4] = [0.; 4]; for i in 0..3 { data[i] = self.row(i).dot(vec); } Vector(data) } fn row(&self, row: usize) -> Vector { match self { &Matrix(ref data) => { Vector([data[row * 4], data[1 + (row * 4)], data[2 + (row * 4)], data[3 + (row * 4)]]) } } } fn col(&self, col: usize) -> Vector { match (self) { &Matrix(ref data) => { Vector([data[col], data[col + 4], data[col + 8], data[col + 12]]) } } } } impl Mul for Matrix { type Output = Matrix; // Produces the matrix AB. fn mul(self, rhs: Matrix) -> Matrix { let mut out: [f32; 16] = [0.; 16]; for j in 0..3 { for i in 0..3 { out[i * j] = self.row(j).dot(&rhs.col(i)); } } Matrix(out) } } pub struct Rect { pub top: f32, pub bottom: f32, pub left: f32, pub right: f32, } // A primitive triangle. pub struct Triangle(Vector, Vector, Vector); impl Triangle { pub fn new(a: Vector, b: Vector, c: Vector) -> Triangle { Triangle(a, b, c) } fn vertices(&self) -> Vec<&Vector> { match self { &Triangle(ref a, ref b, ref c) => vec![a, b, c] } } // Returns a bounding box encapsulating the triangle in the XY-plane. fn bounds(&self) -> Rect { let &Triangle(ref a, ref b, ref c) = self; let mut rect = Rect { top: f32::MAX, bottom: f32::MIN, left: f32::MAX, right: f32::MIN, }; for i in [a, b, c].iter() { rect.top = rect.top.min(i.x()); rect.bottom = rect.bottom.max(i.x()); rect.left = rect.left.min(i.y()); rect.right = rect.right.max(i.y()); } rect } } pub struct Mesh(Vec<Triangle>); impl Mesh { pub fn new(tris: Vec<Triangle>) -> Mesh { Mesh(tris) } } pub struct Model { mesh: Mesh, pos: Vector, scale: Vector, rot: Vector, } impl Model { pub fn new(mesh: Mesh) -> Model { Model { mesh: mesh, pos: Vector::zero(), scale: Vector::new(1., 1., 1.), rot: Vector::zero(), } } fn rotate(&mut self, rotation: &Vector) { // TODO } fn translate(&mut self, translation: &Vector) { // TODO } fn scale(&mut self, scale: &Vector) { // TODO } fn get_transform(&self) -> Matrix { let translate: Matrix = Matrix::translate(&self.pos); // TODO(acomminos): other transforms translate } } // A perspective camera. 
pub struct Camera { pos: Vector, rot: Vector, z_near: f32, // The near z-clipping plane. z_far: f32, // The far z-clipping plane. fov: f32, // The horizontal field of view, in radians. ratio: f32, // Screen aspect ratio of width/height. } impl Camera { pub fn new(pos: Vector, rot: Vector, aspect: f32, fov: f32, near: f32, far: f32) -> Camera { Camera { pos: pos, rot: rot, ratio: aspect, fov: fov, z_near: near, z_far: far } } // Projects the vector into normalized screen coordinates. // Does not perform any clipping. // TODO: replace this with a simple function returning a matrix to be used // in a homogenous coordinate system fn project_vector(&self, v: &Vector) -> Vector { let x = v.x()/(self.ratio * (self.fov / 2.).tan() * v.z()); let y = v.y()/v.z(); let z = (v.z() - self.z_near)/(self.z_far - self.z_near); Vector([x, y, z, 1.]) } fn project_triangle(&self, tri: &Triangle) -> Triangle { match tri { &Triangle(ref a, ref b, ref c) => { Triangle(self.project_vector(a), self.project_vector(b), self.project_vector(c)) } } } fn contains_point(&self, (x, y, z): (f32, f32, f32)) -> bool { x >= -1. && x <= 1. && y >= -1. && y <= 1. && z >= -1. && z <= 1. } } pub struct Scene { camera: Camera, models: Vec<Model>, } impl Scene { pub fn new(camera: Camera) -> Scene { Scene { camera: camera, models: vec![] } } pub fn camera<'a>(&'a self) -> &'a Camera { &self.camera } pub fn add_model(&mut self, model: Model) { self.models.push(model); } pub fn render(&self, rt: &mut RenderTarget) { for m in &self.models { let model_transform = &m.get_transform(); let &Mesh(ref triangles) = &m.mesh; for t in triangles { // FIXME(acomminos): placeholder let ph_shader = shader::SolidColorShader(Color::white()); let sampler = samplers::SimpleMultiSampler(2); // TODO(acomminos): use model_transform let t_proj = self.camera.project_triangle(t); raster::rasterize_barycentric_ccw(&t_proj, rt, &self.camera, &sampler, &ph_shader); } } } } pub struct Buffer<T> { width: usize, height: usize, data: Vec<T>, } impl <T> Buffer<T> where T: Clone { pub fn new(width: usize, height: usize, initial: T) -> Buffer<T> { let mut data: Vec<T> = Vec::with_capacity(width * height); // FIXME(acomminos): find more idiomatic way to do this for i in 0..(width * height) { data.push(initial.clone()); } Buffer { width: width, height: height, data: data, } } pub fn put(&mut self, (x, y): (usize, usize), val: T) { self.data[x + (y * self.width)] = val; } pub fn get(&self, x: usize, y: usize) -> &T { &self.data[x + (y * self.width)] } } // Pixel blend modes. pub enum CompositeMode { SourceOver, } // A 32-bit ARGB colour. // Use premultiplied alpha for consistency. #[derive(Copy, Clone)] pub struct Color { r: f32, g: f32, b: f32, a: f32 } impl Color { fn white() -> Color { Color::new(1., 1., 1., 1.) } fn zero() -> Color { Color::new(0., 0., 0., 0.) } // Create fn new(r: f32, g: f32, b: f32, a: f32) -> Color { Color { r: r, g: g, b: b, a: a } } fn from_rgba32(rgba: &u32) -> Color { let max = u8::max_value() as f32; Color::new((((rgba >> 24) & 0xFFu32) as f32)/max, (((rgba >> 16) & 0xFFu32) as f32)/max, (((rgba >> 8) & 0xFFu32) as f32)/max, (((rgba >> 0) & 0xFFu32) as f32)/max) } fn to_rgba32(&self) -> (u8, u8, u8, u8) { ((self.r * (u8::max_value() as f32)) as u8, (self.g * (u8::max_value() as f32)) as u8, (self.b * (u8::max_value() as f32)) as u8, (self.a * (u8::max_value() as f32)) as u8) } fn unpremultiply(&self) -> Color { Color { r: self.r / self.a, g: self.g / self.a, b: self.b / self.a, a: self.a, } } fn multiply(&self, val: f32) -> Color
} impl Add for Color { type Output = Color; fn add(self, rhs: Color) -> Color { Color::new(self.r + rhs.r, self.g + rhs.g, self.b + rhs.b, self.a + rhs.a) } } // A standard render target with a ARGB color buffer and floating point depth // buffer. pub struct RenderTarget { width: usize, height: usize, color: Buffer<u32>, depth: Buffer<f32>, } impl RenderTarget { pub fn new(width: usize, height: usize) -> RenderTarget { RenderTarget { width: width, height: height, color: Buffer::<u32>::new(width, height, 0u32), depth: Buffer::<f32>::new(width, height, 1.), } } // Toy painting function to paint the pixel at (x, y) with the 32-bit RGBA // colour provided. pub fn paint(&mut self, (x, y): (usize, usize), src: &Color, op: CompositeMode) { let dest = Color::from_rgba32(self.color.get(x, y)); let color = match op { // note: colors here are premultiplied SourceOver => dest.multiply(1. - src.a) + *src }; let (r, g, b, a) = color.to_rgba32(); self.color.put((x, y), ((r as u32) << 24) | ((g as u32) << 16) | ((b as u32) << 8) | a as u32) } // Checks to see if depth is less than the value stored in the depth buffer. // If so, returns true and stores the depth value. // The depth buffer stores floating-point values in the range [0, 1]. By // default, it is initialized to 1. pub fn check_depth(&mut self, (x, y): (usize, usize), depth: f32) -> bool { if depth < *self.depth.get(x, y) { self.depth.put((x, y), depth); return true; } return false; } // Returns the ratio of width:height. pub fn aspect(&self) -> f32 { (self.width as f32) / (self.height as f32) } pub fn print_ascii(&self) { print!["┌──"]; for _ in 1..(self.color.width - 1) { print!["──"]; } println!["──┐"]; for y in 0..self.color.height { print!["│"]; for x in 0..self.color.width { let color = Color::from_rgba32(self.color.get(x, y)); let a = color.a; let block = if a == 0. { " " } else if a <= 0.25 { "░░" } else if a <= 0.5 { "▒▒" } else if a <= 0.75 { "▓▓" } else { "██" }; print!["{}", block]; } println!["│"]; } print!["└──"]; for _ in 1..(self.color.width - 1) { print!["──"]; } println!["──┘"]; } }
{ Color::new(self.r * val, self.g * val, self.b * val, self.a * val) }
identifier_body
cargo_common_metadata.rs
//! lint on missing cargo common metadata use clippy_utils::{diagnostics::span_lint, is_lint_allowed}; use rustc_hir::hir_id::CRATE_HIR_ID; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::source_map::DUMMY_SP; declare_clippy_lint! { /// ### What it does /// Checks to see if all common metadata is defined in /// `Cargo.toml`. See: https://rust-lang-nursery.github.io/api-guidelines/documentation.html#cargotoml-includes-all-common-metadata-c-metadata /// /// ### Why is this bad? /// It will be more difficult for users to discover the /// purpose of the crate, and key information related to it. /// /// ### Example /// ```toml /// # This `Cargo.toml` is missing a description field: /// [package] /// name = "clippy" /// version = "0.0.212" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` /// /// Should include a description field like: /// /// ```toml /// # This `Cargo.toml` includes all common metadata /// [package] /// name = "clippy" /// version = "0.0.212" /// description = "A bunch of helpful lints to avoid common pitfalls in Rust" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` pub CARGO_COMMON_METADATA, cargo, "common metadata is defined in `Cargo.toml`" } #[derive(Copy, Clone, Debug)] pub struct CargoCommonMetadata { ignore_publish: bool, } impl CargoCommonMetadata { pub fn new(ignore_publish: bool) -> Self { Self { ignore_publish } } } impl_lint_pass!(CargoCommonMetadata => [ CARGO_COMMON_METADATA ]); fn missing_warning(cx: &LateContext<'_>, package: &cargo_metadata::Package, field: &str) { let message = format!("package `{}` is missing `{}` metadata", package.name, field); span_lint(cx, CARGO_COMMON_METADATA, DUMMY_SP, &message); } fn is_empty_str<T: AsRef<std::ffi::OsStr>>(value: &Option<T>) -> bool { value.as_ref().map_or(true, |s| s.as_ref().is_empty()) } fn is_empty_vec(value: &[String]) -> bool { // This works because empty iterators return true value.iter().all(String::is_empty) } impl LateLintPass<'_> for CargoCommonMetadata { fn check_crate(&mut self, cx: &LateContext<'_>) { if is_lint_allowed(cx, CARGO_COMMON_METADATA, CRATE_HIR_ID) { return; } let metadata = unwrap_cargo_metadata!(cx, CARGO_COMMON_METADATA, false); for package in metadata.packages { // only run the lint if publish is `None` (`publish = true` or skipped entirely) // or if the vector isn't empty (`publish = ["something"]`) if package.publish.as_ref().filter(|publish| publish.is_empty()).is_none() || self.ignore_publish { if is_empty_str(&package.description) { missing_warning(cx, &package, "package.description"); } if is_empty_str(&package.license) && is_empty_str(&package.license_file) { missing_warning(cx, &package, "either package.license or package.license_file"); } if is_empty_str(&package.repository) { missing_warning(cx, &package, "package.repository"); } if is_empty_str(&package.readme) { missing_warning(cx, &package, "package.readme"); } if is_empty_vec(&package.keywords)
if is_empty_vec(&package.categories) { missing_warning(cx, &package, "package.categories"); } } } } }
{ missing_warning(cx, &package, "package.keywords"); }
conditional_block
cargo_common_metadata.rs
//! lint on missing cargo common metadata use clippy_utils::{diagnostics::span_lint, is_lint_allowed}; use rustc_hir::hir_id::CRATE_HIR_ID; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::source_map::DUMMY_SP; declare_clippy_lint! { /// ### What it does /// Checks to see if all common metadata is defined in /// `Cargo.toml`. See: https://rust-lang-nursery.github.io/api-guidelines/documentation.html#cargotoml-includes-all-common-metadata-c-metadata /// /// ### Why is this bad? /// It will be more difficult for users to discover the /// purpose of the crate, and key information related to it. /// /// ### Example /// ```toml /// # This `Cargo.toml` is missing a description field: /// [package] /// name = "clippy" /// version = "0.0.212" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` /// /// Should include a description field like: /// /// ```toml /// # This `Cargo.toml` includes all common metadata /// [package] /// name = "clippy" /// version = "0.0.212" /// description = "A bunch of helpful lints to avoid common pitfalls in Rust" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` pub CARGO_COMMON_METADATA, cargo, "common metadata is defined in `Cargo.toml`" } #[derive(Copy, Clone, Debug)] pub struct
{ ignore_publish: bool, } impl CargoCommonMetadata { pub fn new(ignore_publish: bool) -> Self { Self { ignore_publish } } } impl_lint_pass!(CargoCommonMetadata => [ CARGO_COMMON_METADATA ]); fn missing_warning(cx: &LateContext<'_>, package: &cargo_metadata::Package, field: &str) { let message = format!("package `{}` is missing `{}` metadata", package.name, field); span_lint(cx, CARGO_COMMON_METADATA, DUMMY_SP, &message); } fn is_empty_str<T: AsRef<std::ffi::OsStr>>(value: &Option<T>) -> bool { value.as_ref().map_or(true, |s| s.as_ref().is_empty()) } fn is_empty_vec(value: &[String]) -> bool { // This works because empty iterators return true value.iter().all(String::is_empty) } impl LateLintPass<'_> for CargoCommonMetadata { fn check_crate(&mut self, cx: &LateContext<'_>) { if is_lint_allowed(cx, CARGO_COMMON_METADATA, CRATE_HIR_ID) { return; } let metadata = unwrap_cargo_metadata!(cx, CARGO_COMMON_METADATA, false); for package in metadata.packages { // only run the lint if publish is `None` (`publish = true` or skipped entirely) // or if the vector isn't empty (`publish = ["something"]`) if package.publish.as_ref().filter(|publish| publish.is_empty()).is_none() || self.ignore_publish { if is_empty_str(&package.description) { missing_warning(cx, &package, "package.description"); } if is_empty_str(&package.license) && is_empty_str(&package.license_file) { missing_warning(cx, &package, "either package.license or package.license_file"); } if is_empty_str(&package.repository) { missing_warning(cx, &package, "package.repository"); } if is_empty_str(&package.readme) { missing_warning(cx, &package, "package.readme"); } if is_empty_vec(&package.keywords) { missing_warning(cx, &package, "package.keywords"); } if is_empty_vec(&package.categories) { missing_warning(cx, &package, "package.categories"); } } } } }
CargoCommonMetadata
identifier_name
cargo_common_metadata.rs
//! lint on missing cargo common metadata use clippy_utils::{diagnostics::span_lint, is_lint_allowed}; use rustc_hir::hir_id::CRATE_HIR_ID; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::source_map::DUMMY_SP; declare_clippy_lint! { /// ### What it does /// Checks to see if all common metadata is defined in /// `Cargo.toml`. See: https://rust-lang-nursery.github.io/api-guidelines/documentation.html#cargotoml-includes-all-common-metadata-c-metadata /// /// ### Why is this bad? /// It will be more difficult for users to discover the /// purpose of the crate, and key information related to it. /// /// ### Example /// ```toml /// # This `Cargo.toml` is missing a description field: /// [package] /// name = "clippy" /// version = "0.0.212" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` /// /// Should include a description field like: /// /// ```toml /// # This `Cargo.toml` includes all common metadata /// [package] /// name = "clippy" /// version = "0.0.212" /// description = "A bunch of helpful lints to avoid common pitfalls in Rust" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` pub CARGO_COMMON_METADATA, cargo, "common metadata is defined in `Cargo.toml`" } #[derive(Copy, Clone, Debug)] pub struct CargoCommonMetadata { ignore_publish: bool, } impl CargoCommonMetadata { pub fn new(ignore_publish: bool) -> Self { Self { ignore_publish } } } impl_lint_pass!(CargoCommonMetadata => [ CARGO_COMMON_METADATA ]); fn missing_warning(cx: &LateContext<'_>, package: &cargo_metadata::Package, field: &str) { let message = format!("package `{}` is missing `{}` metadata", package.name, field); span_lint(cx, CARGO_COMMON_METADATA, DUMMY_SP, &message); } fn is_empty_str<T: AsRef<std::ffi::OsStr>>(value: &Option<T>) -> bool { value.as_ref().map_or(true, |s| s.as_ref().is_empty()) } fn is_empty_vec(value: &[String]) -> bool { // This works because empty iterators return true value.iter().all(String::is_empty) } impl LateLintPass<'_> for CargoCommonMetadata { fn check_crate(&mut self, cx: &LateContext<'_>)
missing_warning(cx, &package, "package.repository"); } if is_empty_str(&package.readme) { missing_warning(cx, &package, "package.readme"); } if is_empty_vec(&package.keywords) { missing_warning(cx, &package, "package.keywords"); } if is_empty_vec(&package.categories) { missing_warning(cx, &package, "package.categories"); } } } } }
{ if is_lint_allowed(cx, CARGO_COMMON_METADATA, CRATE_HIR_ID) { return; } let metadata = unwrap_cargo_metadata!(cx, CARGO_COMMON_METADATA, false); for package in metadata.packages { // only run the lint if publish is `None` (`publish = true` or skipped entirely) // or if the vector isn't empty (`publish = ["something"]`) if package.publish.as_ref().filter(|publish| publish.is_empty()).is_none() || self.ignore_publish { if is_empty_str(&package.description) { missing_warning(cx, &package, "package.description"); } if is_empty_str(&package.license) && is_empty_str(&package.license_file) { missing_warning(cx, &package, "either package.license or package.license_file"); } if is_empty_str(&package.repository) {
identifier_body
cargo_common_metadata.rs
//! lint on missing cargo common metadata use clippy_utils::{diagnostics::span_lint, is_lint_allowed}; use rustc_hir::hir_id::CRATE_HIR_ID; use rustc_lint::{LateContext, LateLintPass}; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::source_map::DUMMY_SP;
declare_clippy_lint! { /// ### What it does /// Checks to see if all common metadata is defined in /// `Cargo.toml`. See: https://rust-lang-nursery.github.io/api-guidelines/documentation.html#cargotoml-includes-all-common-metadata-c-metadata /// /// ### Why is this bad? /// It will be more difficult for users to discover the /// purpose of the crate, and key information related to it. /// /// ### Example /// ```toml /// # This `Cargo.toml` is missing a description field: /// [package] /// name = "clippy" /// version = "0.0.212" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` /// /// Should include a description field like: /// /// ```toml /// # This `Cargo.toml` includes all common metadata /// [package] /// name = "clippy" /// version = "0.0.212" /// description = "A bunch of helpful lints to avoid common pitfalls in Rust" /// repository = "https://github.com/rust-lang/rust-clippy" /// readme = "README.md" /// license = "MIT OR Apache-2.0" /// keywords = ["clippy", "lint", "plugin"] /// categories = ["development-tools", "development-tools::cargo-plugins"] /// ``` pub CARGO_COMMON_METADATA, cargo, "common metadata is defined in `Cargo.toml`" } #[derive(Copy, Clone, Debug)] pub struct CargoCommonMetadata { ignore_publish: bool, } impl CargoCommonMetadata { pub fn new(ignore_publish: bool) -> Self { Self { ignore_publish } } } impl_lint_pass!(CargoCommonMetadata => [ CARGO_COMMON_METADATA ]); fn missing_warning(cx: &LateContext<'_>, package: &cargo_metadata::Package, field: &str) { let message = format!("package `{}` is missing `{}` metadata", package.name, field); span_lint(cx, CARGO_COMMON_METADATA, DUMMY_SP, &message); } fn is_empty_str<T: AsRef<std::ffi::OsStr>>(value: &Option<T>) -> bool { value.as_ref().map_or(true, |s| s.as_ref().is_empty()) } fn is_empty_vec(value: &[String]) -> bool { // This works because empty iterators return true value.iter().all(String::is_empty) } impl LateLintPass<'_> for CargoCommonMetadata { fn check_crate(&mut self, cx: &LateContext<'_>) { if is_lint_allowed(cx, CARGO_COMMON_METADATA, CRATE_HIR_ID) { return; } let metadata = unwrap_cargo_metadata!(cx, CARGO_COMMON_METADATA, false); for package in metadata.packages { // only run the lint if publish is `None` (`publish = true` or skipped entirely) // or if the vector isn't empty (`publish = ["something"]`) if package.publish.as_ref().filter(|publish| publish.is_empty()).is_none() || self.ignore_publish { if is_empty_str(&package.description) { missing_warning(cx, &package, "package.description"); } if is_empty_str(&package.license) && is_empty_str(&package.license_file) { missing_warning(cx, &package, "either package.license or package.license_file"); } if is_empty_str(&package.repository) { missing_warning(cx, &package, "package.repository"); } if is_empty_str(&package.readme) { missing_warning(cx, &package, "package.readme"); } if is_empty_vec(&package.keywords) { missing_warning(cx, &package, "package.keywords"); } if is_empty_vec(&package.categories) { missing_warning(cx, &package, "package.categories"); } } } } }
random_line_split