Dataset preview. Each row is a fill-in-the-middle (FIM) sample: concatenating `prefix`, `middle`, and `suffix` reproduces the original source file, and `fim_type` records how the middle span was chosen.

| file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
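To make the rows below easier to follow, here is a minimal sketch of how one such row can be reassembled and rendered as a prompt. The field names mirror the columns above; the `<fim_prefix>`/`<fim_suffix>`/`<fim_middle>` sentinel strings are an assumption for illustration only, not something this preview specifies.

```rust
// Sketch only: illustrates the column layout of this preview.
// The sentinel tokens below are placeholders and vary between FIM-trained models.

#[derive(Debug, Clone)]
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // one of 4 classes, e.g. "random_line_split"
}

impl FimRow {
    /// Reassemble the original file: prefix + middle + suffix.
    fn reconstruct(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// Render a prefix-suffix-middle (PSM) style prompt with placeholder sentinels.
    fn to_psm_prompt(&self) -> String {
        format!(
            "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
            self.prefix, self.suffix, self.middle
        )
    }
}

fn main() {
    // Toy row, loosely modeled on the first preview row below.
    let row = FimRow {
        file_name: "handler.rs".into(),
        prefix: "use Reset;\nuse epoll::*;\nuse error::*;\n".into(),
        suffix: "#[derive(Debug)]\n".into(),
        middle: "use handler::*;\n".into(),
        fim_type: "random_line_split".into(),
    };
    assert_eq!(
        row.reconstruct(),
        "use Reset;\nuse epoll::*;\nuse error::*;\nuse handler::*;\n#[derive(Debug)]\n"
    );
    println!("{} [{}]\n{}", row.file_name, row.fim_type, row.to_psm_prompt());
}
```

Reassembling `prefix + middle + suffix` is also a quick sanity check that the three cells really partition the file.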
handler.rs
|
use Reset;
use epoll::*;
use error::*;
|
#[derive(Debug)]
pub struct SyncMux<'m, H, P, R> {
epfd: EpollFd,
handlers: Slab<H, usize>,
resources: Vec<R>,
factory: P,
interests: EpollEventKind,
_marker: ::std::marker::PhantomData<&'m ()>,
}
impl<'m, H, P, R> SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + 'm,
R: Clone + 'm,
{
pub fn new(max_handlers: usize, epfd: EpollFd, factory: P) -> SyncMux<'m, H, P, R> {
SyncMux {
epfd: epfd,
handlers: Slab::with_capacity(max_handlers),
resources: vec!(factory.new_resource(); max_handlers),
factory: factory,
interests: H::interests(),
_marker: ::std::marker::PhantomData {},
}
}
}
macro_rules! some {
($cmd:expr) => {{
match $cmd {
None => {
return;
},
Some(res) => res,
}
}}
}
impl<'m, H, P, R> Handler<EpollEvent, EpollCmd> for SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + 'm,
R: Reset + Clone + 'm,
{
#[inline(always)]
fn next(&mut self) -> EpollCmd {
EpollCmd::Poll
}
fn on_next(&mut self, event: EpollEvent) {
match Action::decode(event.data) {
Action::Notify(i, clifd) => {
// ignore outstanding event from removed handler
let mut entry = some!(self.handlers.entry(i));
let resource = unsafe { &mut *(&mut self.resources[i] as *mut R) };
entry.get_mut().on_next(MuxEvent {
resource: resource,
events: event.events,
fd: clifd,
});
keep_or!(entry.get_mut().next(), {
self.resources[i].reset();
entry.remove();
if let Err(e) = self.epfd.unregister(clifd) {
report_err!(e.into());
}
if let Err(e) = syscall!(::unistd::close(clifd)) {
report_err!(e.into());
}
return;
});
}
Action::New(data) => {
let srvfd = data as i32;
// do not accept unless we have a vacant entry
// TODO grow slab, deprecate max_conn in favour of reserve slots
// or Mux::reserve to pre-allocate and then grow as it needs more
let entry = some!(self.handlers.vacant_entry());
match syscall!(accept(srvfd)) {
Ok(Some(clifd)) => {
debug!("accept: accepted new tcp client {}", &clifd);
let i = entry.index();
let h = self.factory.new_handler(self.epfd, clifd);
let event = EpollEvent {
events: self.interests,
data: Action::encode(Action::Notify(i, clifd)),
};
self.epfd.register(clifd, &event).unwrap();
entry.insert(h);
}
Ok(None) => debug!("accept4: socket not ready"),
Err(e) => report_err!(e.into()),
}
}
};
}
}
impl<'m, H, P, R> EpollHandler for SyncMux<'m, H, P, R> {
// TODO: check that linux >= 4.5 for EPOLLEXCLUSIVE
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLEXCLUSIVE
}
fn with_epfd(&mut self, epfd: EpollFd) {
self.epfd = epfd;
}
}
impl<'m, H, P, R> Clone for SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + Clone + 'm,
R: Clone + 'm,
{
fn clone(&self) -> Self {
SyncMux {
epfd: self.epfd,
handlers: Slab::with_capacity(self.handlers.capacity()),
resources: vec!(self.factory.new_resource(); self.handlers.capacity()),
factory: self.factory.clone(),
interests: self.interests,
_marker: ::std::marker::PhantomData {},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn should_grow_slab() {
// TODO assert!(false);
}
}
|
use handler::*;
use nix::sys::socket::*;
use slab::Slab;
use super::*;
use super::action::*;
|
random_line_split
|
handler.rs
|
use Reset;
use epoll::*;
use error::*;
use handler::*;
use nix::sys::socket::*;
use slab::Slab;
use super::*;
use super::action::*;
#[derive(Debug)]
pub struct SyncMux<'m, H, P, R> {
epfd: EpollFd,
handlers: Slab<H, usize>,
resources: Vec<R>,
factory: P,
interests: EpollEventKind,
_marker: ::std::marker::PhantomData<&'m ()>,
}
impl<'m, H, P, R> SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + 'm,
R: Clone + 'm,
{
pub fn new(max_handlers: usize, epfd: EpollFd, factory: P) -> SyncMux<'m, H, P, R> {
SyncMux {
epfd: epfd,
handlers: Slab::with_capacity(max_handlers),
resources: vec!(factory.new_resource(); max_handlers),
factory: factory,
interests: H::interests(),
_marker: ::std::marker::PhantomData {},
}
}
}
macro_rules! some {
($cmd:expr) => {{
match $cmd {
None => {
return;
},
Some(res) => res,
}
}}
}
impl<'m, H, P, R> Handler<EpollEvent, EpollCmd> for SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + 'm,
R: Reset + Clone + 'm,
{
#[inline(always)]
fn next(&mut self) -> EpollCmd {
EpollCmd::Poll
}
fn on_next(&mut self, event: EpollEvent) {
match Action::decode(event.data) {
Action::Notify(i, clifd) => {
// ignore outstanding event from removed handler
let mut entry = some!(self.handlers.entry(i));
let resource = unsafe { &mut *(&mut self.resources[i] as *mut R) };
entry.get_mut().on_next(MuxEvent {
resource: resource,
events: event.events,
fd: clifd,
});
keep_or!(entry.get_mut().next(), {
self.resources[i].reset();
entry.remove();
if let Err(e) = self.epfd.unregister(clifd) {
report_err!(e.into());
}
if let Err(e) = syscall!(::unistd::close(clifd)) {
report_err!(e.into());
}
return;
});
}
Action::New(data) => {
let srvfd = data as i32;
// do not accept unless we have a vacant entry
// TODO grow slab, deprecate max_conn in favour of reserve slots
// or Mux::reserve to pre-allocate and then grow as it needs more
let entry = some!(self.handlers.vacant_entry());
match syscall!(accept(srvfd)) {
Ok(Some(clifd)) => {
debug!("accept: accepted new tcp client {}", &clifd);
let i = entry.index();
let h = self.factory.new_handler(self.epfd, clifd);
let event = EpollEvent {
events: self.interests,
data: Action::encode(Action::Notify(i, clifd)),
};
self.epfd.register(clifd, &event).unwrap();
entry.insert(h);
}
Ok(None) => debug!("accept4: socket not ready"),
Err(e) => report_err!(e.into()),
}
}
};
}
}
impl<'m, H, P, R> EpollHandler for SyncMux<'m, H, P, R> {
// TODO: check that linux >= 4.5 for EPOLLEXCLUSIVE
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLEXCLUSIVE
}
fn with_epfd(&mut self, epfd: EpollFd) {
self.epfd = epfd;
}
}
impl<'m, H, P, R> Clone for SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + Clone + 'm,
R: Clone + 'm,
{
fn
|
(&self) -> Self {
SyncMux {
epfd: self.epfd,
handlers: Slab::with_capacity(self.handlers.capacity()),
resources: vec!(self.factory.new_resource(); self.handlers.capacity()),
factory: self.factory.clone(),
interests: self.interests,
_marker: ::std::marker::PhantomData {},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn should_grow_slab() {
// TODO assert!(false);
}
}
|
clone
|
identifier_name
|
handler.rs
|
use Reset;
use epoll::*;
use error::*;
use handler::*;
use nix::sys::socket::*;
use slab::Slab;
use super::*;
use super::action::*;
#[derive(Debug)]
pub struct SyncMux<'m, H, P, R> {
epfd: EpollFd,
handlers: Slab<H, usize>,
resources: Vec<R>,
factory: P,
interests: EpollEventKind,
_marker: ::std::marker::PhantomData<&'m ()>,
}
impl<'m, H, P, R> SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + 'm,
R: Clone + 'm,
{
pub fn new(max_handlers: usize, epfd: EpollFd, factory: P) -> SyncMux<'m, H, P, R>
|
}
macro_rules! some {
($cmd:expr) => {{
match $cmd {
None => {
return;
},
Some(res) => res,
}
}}
}
impl<'m, H, P, R> Handler<EpollEvent, EpollCmd> for SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + 'm,
R: Reset + Clone + 'm,
{
#[inline(always)]
fn next(&mut self) -> EpollCmd {
EpollCmd::Poll
}
fn on_next(&mut self, event: EpollEvent) {
match Action::decode(event.data) {
Action::Notify(i, clifd) => {
// ignore outstanding event from removed handler
let mut entry = some!(self.handlers.entry(i));
let resource = unsafe { &mut *(&mut self.resources[i] as *mut R) };
entry.get_mut().on_next(MuxEvent {
resource: resource,
events: event.events,
fd: clifd,
});
keep_or!(entry.get_mut().next(), {
self.resources[i].reset();
entry.remove();
if let Err(e) = self.epfd.unregister(clifd) {
report_err!(e.into());
}
if let Err(e) = syscall!(::unistd::close(clifd)) {
report_err!(e.into());
}
return;
});
}
Action::New(data) => {
let srvfd = data as i32;
// do not accept unless we have a vacant entry
// TODO grow slab, deprecate max_conn in favour of reserve slots
// or Mux::reserve to pre-allocate and then grow as it needs more
let entry = some!(self.handlers.vacant_entry());
match syscall!(accept(srvfd)) {
Ok(Some(clifd)) => {
debug!("accept: accepted new tcp client {}", &clifd);
let i = entry.index();
let h = self.factory.new_handler(self.epfd, clifd);
let event = EpollEvent {
events: self.interests,
data: Action::encode(Action::Notify(i, clifd)),
};
self.epfd.register(clifd, &event).unwrap();
entry.insert(h);
}
Ok(None) => debug!("accept4: socket not ready"),
Err(e) => report_err!(e.into()),
}
}
};
}
}
impl<'m, H, P, R> EpollHandler for SyncMux<'m, H, P, R> {
// TODO: check that linux >= 4.5 for EPOLLEXCLUSIVE
fn interests() -> EpollEventKind {
EPOLLIN | EPOLLEXCLUSIVE
}
fn with_epfd(&mut self, epfd: EpollFd) {
self.epfd = epfd;
}
}
impl<'m, H, P, R> Clone for SyncMux<'m, H, P, R>
where H: Handler<MuxEvent<'m, R>, MuxCmd> + EpollHandler,
P: HandlerFactory<'m, H, R> + Clone + 'm,
R: Clone + 'm,
{
fn clone(&self) -> Self {
SyncMux {
epfd: self.epfd,
handlers: Slab::with_capacity(self.handlers.capacity()),
resources: vec!(self.factory.new_resource(); self.handlers.capacity()),
factory: self.factory.clone(),
interests: self.interests,
_marker: ::std::marker::PhantomData {},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn should_grow_slab() {
// TODO assert!(false);
}
}
|
{
SyncMux {
epfd: epfd,
handlers: Slab::with_capacity(max_handlers),
resources: vec!(factory.new_resource(); max_handlers),
factory: factory,
interests: H::interests(),
_marker: ::std::marker::PhantomData {},
}
}
|
identifier_body
|
lib.rs
|
extern crate coinaddress;
use coinaddress::{validate_base58_hash,validate_btc_address,validate_ltc_address,ValidationError};
#[test]
fn test_validate_hash() {
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Ok(0));
assert_eq!(validate_base58_hash("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn"), Ok(111));
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYheX"), Err(ValidationError::HashMismatch));
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYh "), Err(ValidationError::InvalidEncoding));
assert_eq!(validate_base58_hash("1"), Err(ValidationError::HashMismatch));
assert_eq!(validate_base58_hash(""), Err(ValidationError::TooShort));
}
#[test]
fn test_validate_btc_address() {
assert_eq!(validate_btc_address("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Ok(0));
assert_eq!(validate_btc_address("3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX"), Ok(5));
assert_eq!(validate_btc_address("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn"), Ok(111));
assert_eq!(validate_btc_address("LRELGDJyeCPRDXz4Dh1kWorMN9hTBB7CEz"), Err(ValidationError::NotBitcoin));
}
#[test]
fn test_validate_ltc_address()
|
{
assert_eq!(validate_ltc_address("LRELGDJyeCPRDXz4Dh1kWorMN9hTBB7CEz"), Ok(48));
assert_eq!(validate_ltc_address("muen9zszN6rVwXaFw48xh6YkdUSjJcfzek"), Ok(111));
assert_eq!(validate_ltc_address("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Err(ValidationError::NotLitecoin));
}
|
identifier_body
|
|
lib.rs
|
extern crate coinaddress;
use coinaddress::{validate_base58_hash,validate_btc_address,validate_ltc_address,ValidationError};
#[test]
fn
|
() {
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Ok(0));
assert_eq!(validate_base58_hash("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn"), Ok(111));
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYheX"), Err(ValidationError::HashMismatch));
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYh "), Err(ValidationError::InvalidEncoding));
assert_eq!(validate_base58_hash("1"), Err(ValidationError::HashMismatch));
assert_eq!(validate_base58_hash(""), Err(ValidationError::TooShort));
}
#[test]
fn test_validate_btc_address() {
assert_eq!(validate_btc_address("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Ok(0));
assert_eq!(validate_btc_address("3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX"), Ok(5));
assert_eq!(validate_btc_address("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn"), Ok(111));
assert_eq!(validate_btc_address("LRELGDJyeCPRDXz4Dh1kWorMN9hTBB7CEz"), Err(ValidationError::NotBitcoin));
}
#[test]
fn test_validate_ltc_address() {
assert_eq!(validate_ltc_address("LRELGDJyeCPRDXz4Dh1kWorMN9hTBB7CEz"), Ok(48));
assert_eq!(validate_ltc_address("muen9zszN6rVwXaFw48xh6YkdUSjJcfzek"), Ok(111));
assert_eq!(validate_ltc_address("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Err(ValidationError::NotLitecoin));
}
|
test_validate_hash
|
identifier_name
|
lib.rs
|
extern crate coinaddress;
use coinaddress::{validate_base58_hash,validate_btc_address,validate_ltc_address,ValidationError};
#[test]
fn test_validate_hash() {
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Ok(0));
assert_eq!(validate_base58_hash("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn"), Ok(111));
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYheX"), Err(ValidationError::HashMismatch));
assert_eq!(validate_base58_hash("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYh "), Err(ValidationError::InvalidEncoding));
assert_eq!(validate_base58_hash("1"), Err(ValidationError::HashMismatch));
assert_eq!(validate_base58_hash(""), Err(ValidationError::TooShort));
}
|
assert_eq!(validate_btc_address("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn"), Ok(111));
assert_eq!(validate_btc_address("LRELGDJyeCPRDXz4Dh1kWorMN9hTBB7CEz"), Err(ValidationError::NotBitcoin));
}
#[test]
fn test_validate_ltc_address() {
assert_eq!(validate_ltc_address("LRELGDJyeCPRDXz4Dh1kWorMN9hTBB7CEz"), Ok(48));
assert_eq!(validate_ltc_address("muen9zszN6rVwXaFw48xh6YkdUSjJcfzek"), Ok(111));
assert_eq!(validate_ltc_address("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Err(ValidationError::NotLitecoin));
}
|
#[test]
fn test_validate_btc_address() {
assert_eq!(validate_btc_address("17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem"), Ok(0));
assert_eq!(validate_btc_address("3EktnHQD7RiAE6uzMj2ZifT9YgRrkSgzQX"), Ok(5));
|
random_line_split
|
document_state.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An invalidation processor for style changes due to document state changes.
use dom::TElement;
use element_state::DocumentState;
use invalidation::element::invalidator::{DescendantInvalidationLists, InvalidationVector};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor};
use invalidation::element::state_and_attributes;
use selectors::matching::{MatchingContext, MatchingMode, QuirksMode, VisitedHandlingMode};
use stylist::CascadeData;
/// A struct holding the members necessary to invalidate document state
/// selectors.
pub struct InvalidationMatchingData {
/// The document state that has changed, which makes it always match.
pub document_state: DocumentState,
}
impl Default for InvalidationMatchingData {
#[inline(always)]
fn default() -> Self {
Self {
document_state: DocumentState::empty(),
}
}
}
/// An invalidation processor for style changes due to state and attribute
/// changes.
pub struct DocumentStateInvalidationProcessor<'a, E: TElement, I> {
// TODO(emilio): We might want to just run everything for every possible
// binding along with the document data, or just apply the XBL stuff to the
// bound subtrees.
rules: I,
matching_context: MatchingContext<'a, E::Impl>,
document_states_changed: DocumentState,
}
impl<'a, E: TElement, I> DocumentStateInvalidationProcessor<'a, E, I> {
/// Creates a new DocumentStateInvalidationProcessor.
#[inline]
pub fn new(
rules: I,
document_states_changed: DocumentState,
quirks_mode: QuirksMode,
) -> Self {
let mut matching_context = MatchingContext::new_for_visited(
MatchingMode::Normal,
None,
None,
VisitedHandlingMode::AllLinksVisitedAndUnvisited,
quirks_mode,
);
matching_context.extra_data = InvalidationMatchingData {
document_state: document_states_changed,
};
Self { rules, document_states_changed, matching_context }
}
}
impl<'a, E, I> InvalidationProcessor<'a, E> for DocumentStateInvalidationProcessor<'a, E, I>
where
E: TElement,
I: Iterator<Item = &'a CascadeData>,
{
fn
|
(
&mut self,
_element: E,
self_invalidations: &mut InvalidationVector<'a>,
_descendant_invalidations: &mut DescendantInvalidationLists<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
for cascade_data in &mut self.rules {
let map = cascade_data.invalidation_map();
for dependency in &map.document_state_selectors {
if !dependency.state.intersects(self.document_states_changed) {
continue;
}
self_invalidations.push(Invalidation::new(&dependency.selector, 0));
}
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn recursion_limit_exceeded(&mut self, _: E) {
unreachable!("We don't run document state invalidation with stack limits")
}
fn should_process_descendants(&mut self, element: E) -> bool {
match element.borrow_data() {
Some(d) => state_and_attributes::should_process_descendants(&d),
None => false,
}
}
fn invalidated_descendants(&mut self, element: E, child: E) {
state_and_attributes::invalidated_descendants(element, child)
}
fn invalidated_self(&mut self, element: E) {
state_and_attributes::invalidated_self(element);
}
}
|
collect_invalidations
|
identifier_name
|
document_state.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An invalidation processor for style changes due to document state changes.
use dom::TElement;
use element_state::DocumentState;
use invalidation::element::invalidator::{DescendantInvalidationLists, InvalidationVector};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor};
use invalidation::element::state_and_attributes;
use selectors::matching::{MatchingContext, MatchingMode, QuirksMode, VisitedHandlingMode};
use stylist::CascadeData;
/// A struct holding the members necessary to invalidate document state
/// selectors.
pub struct InvalidationMatchingData {
/// The document state that has changed, which makes it always match.
pub document_state: DocumentState,
}
impl Default for InvalidationMatchingData {
#[inline(always)]
fn default() -> Self {
Self {
document_state: DocumentState::empty(),
}
|
}
/// An invalidation processor for style changes due to state and attribute
/// changes.
pub struct DocumentStateInvalidationProcessor<'a, E: TElement, I> {
// TODO(emilio): We might want to just run everything for every possible
// binding along with the document data, or just apply the XBL stuff to the
// bound subtrees.
rules: I,
matching_context: MatchingContext<'a, E::Impl>,
document_states_changed: DocumentState,
}
impl<'a, E: TElement, I> DocumentStateInvalidationProcessor<'a, E, I> {
/// Creates a new DocumentStateInvalidationProcessor.
#[inline]
pub fn new(
rules: I,
document_states_changed: DocumentState,
quirks_mode: QuirksMode,
) -> Self {
let mut matching_context = MatchingContext::new_for_visited(
MatchingMode::Normal,
None,
None,
VisitedHandlingMode::AllLinksVisitedAndUnvisited,
quirks_mode,
);
matching_context.extra_data = InvalidationMatchingData {
document_state: document_states_changed,
};
Self { rules, document_states_changed, matching_context }
}
}
impl<'a, E, I> InvalidationProcessor<'a, E> for DocumentStateInvalidationProcessor<'a, E, I>
where
E: TElement,
I: Iterator<Item = &'a CascadeData>,
{
fn collect_invalidations(
&mut self,
_element: E,
self_invalidations: &mut InvalidationVector<'a>,
_descendant_invalidations: &mut DescendantInvalidationLists<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
for cascade_data in &mut self.rules {
let map = cascade_data.invalidation_map();
for dependency in &map.document_state_selectors {
if !dependency.state.intersects(self.document_states_changed) {
continue;
}
self_invalidations.push(Invalidation::new(&dependency.selector, 0));
}
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn recursion_limit_exceeded(&mut self, _: E) {
unreachable!("We don't run document state invalidation with stack limits")
}
fn should_process_descendants(&mut self, element: E) -> bool {
match element.borrow_data() {
Some(d) => state_and_attributes::should_process_descendants(&d),
None => false,
}
}
fn invalidated_descendants(&mut self, element: E, child: E) {
state_and_attributes::invalidated_descendants(element, child)
}
fn invalidated_self(&mut self, element: E) {
state_and_attributes::invalidated_self(element);
}
}
|
}
|
random_line_split
|
document_state.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! An invalidation processor for style changes due to document state changes.
use dom::TElement;
use element_state::DocumentState;
use invalidation::element::invalidator::{DescendantInvalidationLists, InvalidationVector};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor};
use invalidation::element::state_and_attributes;
use selectors::matching::{MatchingContext, MatchingMode, QuirksMode, VisitedHandlingMode};
use stylist::CascadeData;
/// A struct holding the members necessary to invalidate document state
/// selectors.
pub struct InvalidationMatchingData {
/// The document state that has changed, which makes it always match.
pub document_state: DocumentState,
}
impl Default for InvalidationMatchingData {
#[inline(always)]
fn default() -> Self
|
}
/// An invalidation processor for style changes due to state and attribute
/// changes.
pub struct DocumentStateInvalidationProcessor<'a, E: TElement, I> {
// TODO(emilio): We might want to just run everything for every possible
// binding along with the document data, or just apply the XBL stuff to the
// bound subtrees.
rules: I,
matching_context: MatchingContext<'a, E::Impl>,
document_states_changed: DocumentState,
}
impl<'a, E: TElement, I> DocumentStateInvalidationProcessor<'a, E, I> {
/// Creates a new DocumentStateInvalidationProcessor.
#[inline]
pub fn new(
rules: I,
document_states_changed: DocumentState,
quirks_mode: QuirksMode,
) -> Self {
let mut matching_context = MatchingContext::new_for_visited(
MatchingMode::Normal,
None,
None,
VisitedHandlingMode::AllLinksVisitedAndUnvisited,
quirks_mode,
);
matching_context.extra_data = InvalidationMatchingData {
document_state: document_states_changed,
};
Self { rules, document_states_changed, matching_context }
}
}
impl<'a, E, I> InvalidationProcessor<'a, E> for DocumentStateInvalidationProcessor<'a, E, I>
where
E: TElement,
I: Iterator<Item = &'a CascadeData>,
{
fn collect_invalidations(
&mut self,
_element: E,
self_invalidations: &mut InvalidationVector<'a>,
_descendant_invalidations: &mut DescendantInvalidationLists<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
for cascade_data in &mut self.rules {
let map = cascade_data.invalidation_map();
for dependency in &map.document_state_selectors {
if !dependency.state.intersects(self.document_states_changed) {
continue;
}
self_invalidations.push(Invalidation::new(&dependency.selector, 0));
}
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn recursion_limit_exceeded(&mut self, _: E) {
unreachable!("We don't run document state invalidation with stack limits")
}
fn should_process_descendants(&mut self, element: E) -> bool {
match element.borrow_data() {
Some(d) => state_and_attributes::should_process_descendants(&d),
None => false,
}
}
fn invalidated_descendants(&mut self, element: E, child: E) {
state_and_attributes::invalidated_descendants(element, child)
}
fn invalidated_self(&mut self, element: E) {
state_and_attributes::invalidated_self(element);
}
}
|
{
Self {
document_state: DocumentState::empty(),
}
}
|
identifier_body
|
map-types.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate collections;
use collections::HashMap;
// Test that trait types printed in error msgs include the type arguments.
fn main() {
let x: Box<HashMap<int, int>> = box HashMap::new();
let x: Box<Map<int, int>> = x;
let y: Box<Map<uint, int>> = box x;
//~^ ERROR failed to find an implementation of trait core::container::Map<uint,int>
// for ~core::container::Map<int,int>:Send
|
}
|
random_line_split
|
|
map-types.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate collections;
use collections::HashMap;
// Test that trait types printed in error msgs include the type arguments.
fn main()
|
{
let x: Box<HashMap<int, int>> = box HashMap::new();
let x: Box<Map<int, int>> = x;
let y: Box<Map<uint, int>> = box x;
//~^ ERROR failed to find an implementation of trait core::container::Map<uint,int>
// for ~core::container::Map<int,int>:Send
}
|
identifier_body
|
|
map-types.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern crate collections;
use collections::HashMap;
// Test that trait types printed in error msgs include the type arguments.
fn
|
() {
let x: Box<HashMap<int, int>> = box HashMap::new();
let x: Box<Map<int, int>> = x;
let y: Box<Map<uint, int>> = box x;
//~^ ERROR failed to find an implementation of trait core::container::Map<uint,int>
// for ~core::container::Map<int,int>:Send
}
|
main
|
identifier_name
|
windows_base.rs
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn opts() -> TargetOptions {
TargetOptions {
// FIXME(#13846) this should be enabled for windows
function_sections: false,
linker: "gcc".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
archive_format: "gnu".to_string(),
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
//
// First a bit of background. On Windows, the file format is not ELF,
// but COFF (at least according to LLVM). COFF doesn't officially allow
// for section names over 8 characters, apparently. Our metadata
// section, ".note.rustc", you'll note is over 8 characters.
//
// On more recent versions of gcc on mingw, apparently the section name
// is *not* truncated, but rather stored elsewhere in a separate lookup
// table. On older versions of gcc, they apparently always truncated the
// section names (at least in some cases). Truncating the section name
// actually creates "invalid" objects [1] [2], but only for some
// introspection tools, not in terms of whether it can be loaded.
//
// Long story short, passing this flag forces the linker to *not*
// truncate section names (so we can find the metadata section after
// it's compiled). The real kicker is that rust compiled just fine on
// windows for quite a long time *without* this flag, so I have no idea
// why it suddenly started failing for liblibc. Regardless, we
// definitely don't want section name truncation, so we're keeping this
// flag for windows.
//
// [1] - https://sourceware.org/bugzilla/show_bug.cgi?id=13130
// [2] - https://code.google.com/p/go/issues/detail?id=2139
"-Wl,--enable-long-section-names".to_string(),
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
"-fno-use-linker-plugin".to_string(),
// Always enable DEP (NX bit) when it is available
"-Wl,--nxcompat".to_string(),
),
.. Default::default()
}
}
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
random_line_split
|
|
windows_base.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn
|
() -> TargetOptions {
TargetOptions {
// FIXME(#13846) this should be enabled for windows
function_sections: false,
linker: "gcc".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
archive_format: "gnu".to_string(),
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
//
// First a bit of background. On Windows, the file format is not ELF,
// but COFF (at least according to LLVM). COFF doesn't officially allow
// for section names over 8 characters, apparently. Our metadata
// section, ".note.rustc", you'll note is over 8 characters.
//
// On more recent versions of gcc on mingw, apparently the section name
// is *not* truncated, but rather stored elsewhere in a separate lookup
// table. On older versions of gcc, they apparently always truncated the
// section names (at least in some cases). Truncating the section name
// actually creates "invalid" objects [1] [2], but only for some
// introspection tools, not in terms of whether it can be loaded.
//
// Long story short, passing this flag forces the linker to *not*
// truncate section names (so we can find the metadata section after
// it's compiled). The real kicker is that rust compiled just fine on
// windows for quite a long time *without* this flag, so I have no idea
// why it suddenly started failing for liblibc. Regardless, we
// definitely don't want section name truncation, so we're keeping this
// flag for windows.
//
// [1] - https://sourceware.org/bugzilla/show_bug.cgi?id=13130
// [2] - https://code.google.com/p/go/issues/detail?id=2139
"-Wl,--enable-long-section-names".to_string(),
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
"-fno-use-linker-plugin".to_string(),
// Always enable DEP (NX bit) when it is available
"-Wl,--nxcompat".to_string(),
),
.. Default::default()
}
}
|
opts
|
identifier_name
|
windows_base.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn opts() -> TargetOptions
|
// but COFF (at least according to LLVM). COFF doesn't officially allow
// for section names over 8 characters, apparently. Our metadata
// section, ".note.rustc", you'll note is over 8 characters.
//
// On more recent versions of gcc on mingw, apparently the section name
// is *not* truncated, but rather stored elsewhere in a separate lookup
// table. On older versions of gcc, they apparently always truncated the
// section names (at least in some cases). Truncating the section name
// actually creates "invalid" objects [1] [2], but only for some
// introspection tools, not in terms of whether it can be loaded.
//
// Long story short, passing this flag forces the linker to *not*
// truncate section names (so we can find the metadata section after
// it's compiled). The real kicker is that rust compiled just fine on
// windows for quite a long time *without* this flag, so I have no idea
// why it suddenly started failing for liblibc. Regardless, we
// definitely don't want section name truncation, so we're keeping this
// flag for windows.
//
// [1] - https://sourceware.org/bugzilla/show_bug.cgi?id=13130
// [2] - https://code.google.com/p/go/issues/detail?id=2139
"-Wl,--enable-long-section-names".to_string(),
// Tell GCC to avoid linker plugins, because we are not bundling
// them with Windows installer, and Rust does its own LTO anyways.
"-fno-use-linker-plugin".to_string(),
// Always enable DEP (NX bit) when it is available
"-Wl,--nxcompat".to_string(),
),
.. Default::default()
}
}
|
{
TargetOptions {
// FIXME(#13846) this should be enabled for windows
function_sections: false,
linker: "gcc".to_string(),
dynamic_linking: true,
executables: true,
dll_prefix: "".to_string(),
dll_suffix: ".dll".to_string(),
exe_suffix: ".exe".to_string(),
staticlib_prefix: "".to_string(),
staticlib_suffix: ".lib".to_string(),
morestack: false,
is_like_windows: true,
archive_format: "gnu".to_string(),
pre_link_args: vec!(
// And here, we see obscure linker flags #45. On windows, it has been
// found to be necessary to have this flag to compile liblibc.
//
// First a bit of background. On Windows, the file format is not ELF,
|
identifier_body
|
mod.rs
|
//! Platform-specific extensions to `std` for Windows.
//!
//! Provides access to platform-level information for Windows, and exposes
//! Windows-specific idioms that would otherwise be inappropriate as part of
//! the core `std` library. These extensions allow developers to use
//! `std` types and idioms with Windows in a way that the normal
//! platform-agnostic idioms would not normally support.
//!
//! # Examples
//!
//! ```no_run
//! use std::fs::File;
//! use std::os::windows::prelude::*;
//!
//! fn main() -> std::io::Result<()> {
//! let f = File::create("foo.txt")?;
//! let handle = f.as_raw_handle();
//!
//! // use handle with native windows bindings
//!
//! Ok(())
//! }
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
#![doc(cfg(windows))]
pub mod ffi;
pub mod fs;
pub mod io;
|
pub mod process;
pub mod raw;
pub mod thread;
/// A prelude for conveniently writing platform-specific code.
///
/// Includes all extension traits, and some important type definitions.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod prelude {
#[doc(no_inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::ffi::{OsStrExt, OsStringExt};
#[doc(no_inline)]
#[stable(feature = "file_offset", since = "1.15.0")]
pub use super::fs::FileExt;
#[doc(no_inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::fs::{MetadataExt, OpenOptionsExt};
#[doc(no_inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::io::{
AsHandle, AsSocket, BorrowedHandle, BorrowedSocket, FromRawHandle, FromRawSocket,
HandleOrInvalid, IntoRawHandle, IntoRawSocket, OwnedHandle, OwnedSocket,
};
#[doc(no_inline)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use super::io::{AsRawHandle, AsRawSocket, RawHandle, RawSocket};
}
|
random_line_split
|
|
issue-7673-cast-generically-implemented-trait.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
#7673 Polymorphically creating traits barely works
*/
pub fn main() {}
trait A {}
impl<T:'static> A for T {}
fn owned1<T:'static>(a: T) { ~a as ~A:; } /* note `:` */
fn owned2<T:'static>(a: ~T)
|
fn owned3<T:'static>(a: ~T) { ~a as ~A:; }
fn managed1<T:'static>(a: T) { @a as @A; }
fn managed2<T:'static>(a: @T) { a as @A; }
fn managed3<T:'static>(a: @T) { @a as @A; }
|
{ a as ~A:; }
|
identifier_body
|
issue-7673-cast-generically-implemented-trait.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
#7673 Polymorphically creating traits barely works
|
*/
pub fn main() {}
trait A {}
impl<T:'static> A for T {}
fn owned1<T:'static>(a: T) { ~a as ~A:; } /* note `:` */
fn owned2<T:'static>(a: ~T) { a as ~A:; }
fn owned3<T:'static>(a: ~T) { ~a as ~A:; }
fn managed1<T:'static>(a: T) { @a as @A; }
fn managed2<T:'static>(a: @T) { a as @A; }
fn managed3<T:'static>(a: @T) { @a as @A; }
|
random_line_split
|
|
issue-7673-cast-generically-implemented-trait.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
#7673 Polymorphically creating traits barely works
*/
pub fn main() {}
trait A {}
impl<T:'static> A for T {}
fn owned1<T:'static>(a: T) { ~a as ~A:; } /* note `:` */
fn
|
<T:'static>(a: ~T) { a as ~A:; }
fn owned3<T:'static>(a: ~T) { ~a as ~A:; }
fn managed1<T:'static>(a: T) { @a as @A; }
fn managed2<T:'static>(a: @T) { a as @A; }
fn managed3<T:'static>(a: @T) { @a as @A; }
|
owned2
|
identifier_name
|
state_helper.rs
|
use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::MutexGuard;
use std::time::SystemTime;
use anyhow::{Context, Result};
use chrono::prelude::*;
use log::{debug, info};
use pueue_lib::state::{Group, GroupStatus, State, PUEUE_DEFAULT_GROUP};
use pueue_lib::task::{TaskResult, TaskStatus};
pub type LockedState<'a> = MutexGuard<'a, State>;
/// Check if a task can be deleted. \
/// We have to check all dependant tasks that haven't finished yet.
/// This is necessary to prevent deletion of tasks which are specified as a dependency.
///
/// `to_delete` A list of task ids, which should also be deleted.
/// This allows removing dependency tasks as well as their dependants.
pub fn is_task_removable(state: &LockedState, task_id: &usize, to_delete: &[usize]) -> bool {
// Get all task ids of any dependant tasks.
let dependants: Vec<usize> = state
.tasks
.iter()
.filter(|(_, task)| {
task.dependencies.contains(task_id) && !matches!(task.status, TaskStatus::Done(_))
})
.map(|(_, task)| task.id)
.collect();
if dependants.is_empty() {
return true;
}
// Check if the dependants are supposed to be deleted as well.
let should_delete_dependants = dependants.iter().all(|task_id| to_delete.contains(task_id));
if !should_delete_dependants {
return false;
}
// Lastly, do a recursive check if there are any dependants on our dependants
dependants
.iter()
.all(|task_id| is_task_removable(state, task_id, to_delete))
}
/// A small helper for handling task failures. \
/// Users can specify whether they want to pause the task's group or the
/// whole daemon on a failed task. This function wraps that logic and decides if anything should be
/// paused depending on the current settings.
///
/// `group` should be the name of the failed task.
pub fn pause_on_failure(state: &mut LockedState, group: &str) {
if state.settings.daemon.pause_group_on_failure {
if let Some(group) = state.groups.get_mut(group) {
group.status = GroupStatus::Paused;
}
} else if state.settings.daemon.pause_all_on_failure {
state.set_status_for_all_groups(GroupStatus::Paused);
}
}
/// Do a full reset of the state.
/// This doesn't reset any processes!
pub fn reset_state(state: &mut LockedState) -> Result<()> {
backup_state(state)?;
state.tasks = BTreeMap::new();
state.set_status_for_all_groups(GroupStatus::Running);
save_state(state)
}
/// Convenience wrapper around save_to_file.
pub fn save_state(state: &State) -> Result<()> {
save_state_to_file(state, false)
}
/// Save the current state in a file with a timestamp.
/// At the same time, remove old state logs from the log directory.
/// This function is called when large changes to the state are applied, e.g. clean/reset.
pub fn backup_state(state: &LockedState) -> Result<()> {
save_state_to_file(state, true)?;
rotate_state(state).context("Failed to rotate old log files")?;
Ok(())
}
/// Save the current state to disk. \
/// We do this to restore in case of a crash. \
/// If log == true, the file will be saved with a time stamp.
///
/// In comparison to the daemon -> client communication, the state is saved
/// as JSON for readability and debugging purposes.
fn save_state_to_file(state: &State, log: bool) -> Result<()> {
let serialized = serde_json::to_string(&state).context("Failed to serialize state:");
let serialized = serialized.unwrap();
let path = state.settings.shared.pueue_directory();
let (temp, real) = if log {
let path = path.join("log");
let now: DateTime<Utc> = Utc::now();
let time = now.format("%Y-%m-%d_%H-%M-%S");
(
path.join(format!("{time}_state.json.partial")),
path.join(format!("{time}_state.json")),
)
} else {
(path.join("state.json.partial"), path.join("state.json"))
};
// Write to temporary log file first, to prevent loss due to crashes.
fs::write(&temp, serialized).context("Failed to write temp file while saving state.")?;
// Overwrite the original with the temp file, if everything went fine.
fs::rename(&temp, &real).context("Failed to overwrite old state while saving state")?;
if log {
debug!("State backup created at: {real:?}");
} else {
debug!("State saved at: {real:?}");
}
Ok(())
}
/// Restore the last state from a previous session. \
/// The state is stored as json in the `pueue_directory`.
///
/// If the state cannot be deserialized, an empty default state will be used instead. \
/// All groups with queued tasks will be automatically paused to prevent unwanted execution.
pub fn restore_state(pueue_directory: &Path) -> Result<Option<State>> {
let path = pueue_directory.join("state.json");
// Ignore if the file doesn't exist. It doesn't have to.
if !path.exists() {
info!("Couldn't find state from previous session at location: {path:?}");
return Ok(None);
}
info!("Start restoring state");
// Try to load the file.
let data = fs::read_to_string(&path).context("State restore: Failed to read file:\n\n{}")?;
// Try to deserialize the state file.
let mut state: State = serde_json::from_str(&data).context("Failed to deserialize state.")?;
// Restore all tasks.
// While restoring the tasks, check for any invalid/broken statuses.
for (_, task) in state.tasks.iter_mut() {
// Handle ungraceful shutdowns while executing tasks.
if task.status == TaskStatus::Running || task.status == TaskStatus::Paused
|
// Handle crash during editing of the task command.
if task.status == TaskStatus::Locked {
task.status = TaskStatus::Stashed { enqueue_at: None };
}
// Go through all tasks and set all groups that are no longer
// listed in the configuration file to the default.
let group = match state.groups.get_mut(&task.group) {
Some(group) => group,
None => {
task.set_default_group();
state
.groups
.entry(PUEUE_DEFAULT_GROUP.into())
.or_insert(Group {
status: GroupStatus::Running,
parallel_tasks: 1,
})
}
};
// If there are any queued tasks, pause the group.
// This should prevent any unwanted execution of tasks due to a system crash.
if task.status == TaskStatus::Queued {
info!(
"Pausing group {} to prevent unwanted execution of previous tasks",
&task.group
);
group.status = GroupStatus::Paused;
}
}
Ok(Some(state))
}
/// Remove old logs that aren't needed any longer.
fn rotate_state(state: &LockedState) -> Result<()> {
let path = state.settings.shared.pueue_directory().join("log");
// Get all log files in the directory with their respective system time.
let mut entries: BTreeMap<SystemTime, PathBuf> = BTreeMap::new();
let mut directory_list = fs::read_dir(path)?;
while let Some(Ok(entry)) = directory_list.next() {
let path = entry.path();
let metadata = entry.metadata()?;
let time = metadata.modified()?;
entries.insert(time, path);
}
// Remove all files above the threshold.
// Old files are removed first (implicitly by the BTree order).
let mut number_entries = entries.len();
let mut iter = entries.iter();
while number_entries > 10 {
if let Some((_, path)) = iter.next() {
fs::remove_file(path)?;
number_entries -= 1;
}
}
Ok(())
}
|
{
info!(
"Setting task {} with previous status {:?} to new status {:?}",
task.id,
task.status,
TaskResult::Killed
);
task.status = TaskStatus::Done(TaskResult::Killed);
}
|
conditional_block
|
state_helper.rs
|
use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::MutexGuard;
use std::time::SystemTime;
use anyhow::{Context, Result};
use chrono::prelude::*;
use log::{debug, info};
use pueue_lib::state::{Group, GroupStatus, State, PUEUE_DEFAULT_GROUP};
use pueue_lib::task::{TaskResult, TaskStatus};
pub type LockedState<'a> = MutexGuard<'a, State>;
/// Check if a task can be deleted. \
/// We have to check all dependant tasks that haven't finished yet.
/// This is necessary to prevent deletion of tasks which are specified as a dependency.
///
/// `to_delete` A list of task ids, which should also be deleted.
/// This allows removing dependency tasks as well as their dependants.
pub fn is_task_removable(state: &LockedState, task_id: &usize, to_delete: &[usize]) -> bool {
// Get all task ids of any dependant tasks.
let dependants: Vec<usize> = state
.tasks
.iter()
.filter(|(_, task)| {
task.dependencies.contains(task_id) && !matches!(task.status, TaskStatus::Done(_))
})
.map(|(_, task)| task.id)
.collect();
if dependants.is_empty() {
return true;
}
// Check if the dependants are supposed to be deleted as well.
let should_delete_dependants = dependants.iter().all(|task_id| to_delete.contains(task_id));
if !should_delete_dependants {
return false;
}
// Lastly, do a recursive check if there are any dependants on our dependants
dependants
.iter()
.all(|task_id| is_task_removable(state, task_id, to_delete))
}
/// A small helper for handling task failures. \
/// Users can specify whether they want to pause the task's group or the
/// whole daemon on a failed task. This function wraps that logic and decides if anything should be
/// paused depending on the current settings.
///
/// `group` should be the name of the failed task.
pub fn pause_on_failure(state: &mut LockedState, group: &str) {
if state.settings.daemon.pause_group_on_failure {
if let Some(group) = state.groups.get_mut(group) {
group.status = GroupStatus::Paused;
}
} else if state.settings.daemon.pause_all_on_failure {
state.set_status_for_all_groups(GroupStatus::Paused);
}
}
/// Do a full reset of the state.
/// This doesn't reset any processes!
pub fn reset_state(state: &mut LockedState) -> Result<()> {
backup_state(state)?;
state.tasks = BTreeMap::new();
state.set_status_for_all_groups(GroupStatus::Running);
save_state(state)
}
/// Convenience wrapper around save_to_file.
pub fn save_state(state: &State) -> Result<()> {
save_state_to_file(state, false)
}
/// Save the current state in a file with a timestamp.
/// At the same time, remove old state logs from the log directory.
/// This function is called when large changes to the state are applied, e.g. clean/reset.
pub fn backup_state(state: &LockedState) -> Result<()> {
save_state_to_file(state, true)?;
rotate_state(state).context("Failed to rotate old log files")?;
Ok(())
}
/// Save the current state to disk. \
/// We do this to restore in case of a crash. \
/// If log == true, the file will be saved with a time stamp.
///
/// In comparison to the daemon -> client communication, the state is saved
/// as JSON for readability and debugging purposes.
fn save_state_to_file(state: &State, log: bool) -> Result<()> {
let serialized = serde_json::to_string(&state).context("Failed to serialize state:");
let serialized = serialized.unwrap();
let path = state.settings.shared.pueue_directory();
let (temp, real) = if log {
let path = path.join("log");
let now: DateTime<Utc> = Utc::now();
let time = now.format("%Y-%m-%d_%H-%M-%S");
(
path.join(format!("{time}_state.json.partial")),
path.join(format!("{time}_state.json")),
)
} else {
(path.join("state.json.partial"), path.join("state.json"))
};
// Write to temporary log file first, to prevent loss due to crashes.
fs::write(&temp, serialized).context("Failed to write temp file while saving state.")?;
// Overwrite the original with the temp file, if everything went fine.
fs::rename(&temp, &real).context("Failed to overwrite old state while saving state")?;
if log {
debug!("State backup created at: {real:?}");
} else {
debug!("State saved at: {real:?}");
}
Ok(())
}
/// Restore the last state from a previous session. \
/// The state is stored as json in the `pueue_directory`.
///
/// If the state cannot be deserialized, an empty default state will be used instead. \
/// All groups with queued tasks will be automatically paused to prevent unwanted execution.
pub fn restore_state(pueue_directory: &Path) -> Result<Option<State>> {
let path = pueue_directory.join("state.json");
// Ignore if the file doesn't exist. It doesn't have to.
if !path.exists() {
info!("Couldn't find state from previous session at location: {path:?}");
return Ok(None);
}
info!("Start restoring state");
// Try to load the file.
let data = fs::read_to_string(&path).context("State restore: Failed to read file:\n\n{}")?;
// Try to deserialize the state file.
let mut state: State = serde_json::from_str(&data).context("Failed to deserialize state.")?;
// Restore all tasks.
// While restoring the tasks, check for any invalid/broken statuses.
for (_, task) in state.tasks.iter_mut() {
// Handle ungraceful shutdowns while executing tasks.
if task.status == TaskStatus::Running || task.status == TaskStatus::Paused {
info!(
"Setting task {} with previous status {:?} to new status {:?}",
task.id,
task.status,
TaskResult::Killed
);
task.status = TaskStatus::Done(TaskResult::Killed);
}
// Handle crash during editing of the task command.
if task.status == TaskStatus::Locked {
task.status = TaskStatus::Stashed { enqueue_at: None };
}
// Go through all tasks and set all groups that are no longer
// listed in the configuration file to the default.
let group = match state.groups.get_mut(&task.group) {
Some(group) => group,
None => {
task.set_default_group();
state
.groups
.entry(PUEUE_DEFAULT_GROUP.into())
.or_insert(Group {
status: GroupStatus::Running,
parallel_tasks: 1,
})
}
};
// If there are any queued tasks, pause the group.
// This should prevent any unwanted execution of tasks due to a system crash.
if task.status == TaskStatus::Queued {
info!(
"Pausing group {} to prevent unwanted execution of previous tasks",
&task.group
);
group.status = GroupStatus::Paused;
}
}
Ok(Some(state))
}
/// Remove old logs that aren't needed any longer.
fn
|
(state: &LockedState) -> Result<()> {
let path = state.settings.shared.pueue_directory().join("log");
// Get all log files in the directory with their respective system time.
let mut entries: BTreeMap<SystemTime, PathBuf> = BTreeMap::new();
let mut directory_list = fs::read_dir(path)?;
while let Some(Ok(entry)) = directory_list.next() {
let path = entry.path();
let metadata = entry.metadata()?;
let time = metadata.modified()?;
entries.insert(time, path);
}
// Remove all files above the threshold.
// Old files are removed first (implicitly by the BTree order).
let mut number_entries = entries.len();
let mut iter = entries.iter();
while number_entries > 10 {
if let Some((_, path)) = iter.next() {
fs::remove_file(path)?;
number_entries -= 1;
}
}
Ok(())
}
|
rotate_state
|
identifier_name
|
state_helper.rs
|
use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::MutexGuard;
use std::time::SystemTime;
use anyhow::{Context, Result};
use chrono::prelude::*;
use log::{debug, info};
use pueue_lib::state::{Group, GroupStatus, State, PUEUE_DEFAULT_GROUP};
use pueue_lib::task::{TaskResult, TaskStatus};
pub type LockedState<'a> = MutexGuard<'a, State>;
/// Check if a task can be deleted. \
/// We have to check all dependant tasks, that haven't finished yet.
/// This is necessary to prevent deletion of tasks which are specified as a dependency.
///
/// `to_delete` A list of task ids, which should also be deleted.
/// This allows to remove dependency tasks as well as their dependants.
pub fn is_task_removable(state: &LockedState, task_id: &usize, to_delete: &[usize]) -> bool {
// Get all task ids of any dependant tasks.
let dependants: Vec<usize> = state
.tasks
.iter()
.filter(|(_, task)| {
task.dependencies.contains(task_id) && !matches!(task.status, TaskStatus::Done(_))
})
.map(|(_, task)| task.id)
.collect();
if dependants.is_empty() {
return true;
}
// Check if the dependants are supposed to be deleted as well.
let should_delete_dependants = dependants.iter().all(|task_id| to_delete.contains(task_id));
if !should_delete_dependants {
return false;
}
// Lastly, do a recursive check if there are any dependants on our dependants
dependants
.iter()
.all(|task_id| is_task_removable(state, task_id, to_delete))
}
/// A small helper for handling task failures. \
/// Users can specify whether they want to pause the task's group or the
/// whole daemon on a failed tasks. This function wraps that logic and decides if anything should be
/// paused depending on the current settings.
///
/// `group` should be the name of the failed task.
pub fn pause_on_failure(state: &mut LockedState, group: &str) {
if state.settings.daemon.pause_group_on_failure {
if let Some(group) = state.groups.get_mut(group) {
group.status = GroupStatus::Paused;
}
} else if state.settings.daemon.pause_all_on_failure {
state.set_status_for_all_groups(GroupStatus::Paused);
}
}
/// Do a full reset of the state.
/// This doesn't reset any processes!
pub fn reset_state(state: &mut LockedState) -> Result<()> {
backup_state(state)?;
state.tasks = BTreeMap::new();
state.set_status_for_all_groups(GroupStatus::Running);
save_state(state)
}
/// Convenience wrapper around save_to_file.
pub fn save_state(state: &State) -> Result<()> {
save_state_to_file(state, false)
}
/// Save the current state in a file with a timestamp.
/// At the same time, remove old state logs from the log directory.
/// This function is called when large changes to the state are applied, e.g. clean/reset.
pub fn backup_state(state: &LockedState) -> Result<()> {
save_state_to_file(state, true)?;
rotate_state(state).context("Failed to rotate old log files")?;
Ok(())
}
/// Save the current state to disk. \
/// We do this to restore in case of a crash. \
/// If log == true, the file will be saved with a time stamp.
///
/// In comparison to the daemon -> client communication, the state is saved
/// as JSON for readability and debugging purposes.
fn save_state_to_file(state: &State, log: bool) -> Result<()> {
let serialized = serde_json::to_string(&state).context("Failed to serialize state:").unwrap();
let path = state.settings.shared.pueue_directory();
let (temp, real) = if log {
let path = path.join("log");
let now: DateTime<Utc> = Utc::now();
let time = now.format("%Y-%m-%d_%H-%M-%S");
(
path.join(format!("{time}_state.json.partial")),
path.join(format!("{time}_state.json")),
)
} else {
(path.join("state.json.partial"), path.join("state.json"))
};
// Write to temporary log file first, to prevent loss due to crashes.
fs::write(&temp, serialized).context("Failed to write temp file while saving state.")?;
// Overwrite the original with the temp file, if everything went fine.
fs::rename(&temp, &real).context("Failed to overwrite old state while saving state")?;
if log {
debug!("State backup created at: {real:?}");
} else {
debug!("State saved at: {real:?}");
}
Ok(())
}
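// The save above relies on the classic write-then-rename pattern: a crash while
// writing can only leave a stale `*.partial` file behind, because the real state
// file is replaced in a single rename. A minimal standalone sketch of that pattern
// using only std; the helper name and paths are hypothetical:
#[allow(dead_code)]
fn write_atomically(target: &std::path::Path, contents: &str) -> std::io::Result<()> {
    // Write the full payload to a sibling temp file first.
    let temp = target.with_extension("partial");
    std::fs::write(&temp, contents)?;
    // Then swap it into place; readers never observe a half-written file.
    std::fs::rename(&temp, target)
}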
/// Restore the last state from a previous session. \
/// The state is stored as json in the `pueue_directory`.
///
/// If the state cannot be deserialized, an empty default state will be used instead. \
/// All groups with queued tasks will be automatically paused to prevent unwanted execution.
pub fn restore_state(pueue_directory: &Path) -> Result<Option<State>> {
let path = pueue_directory.join("state.json");
// Ignore if the file doesn't exist. It doesn't have to.
if !path.exists() {
info!("Couldn't find state from previous session at location: {path:?}");
return Ok(None);
}
info!("Start restoring state");
// Try to load the file.
let data = fs::read_to_string(&path).context("State restore: Failed to read state file")?;
// Try to deserialize the state file.
let mut state: State = serde_json::from_str(&data).context("Failed to deserialize state.")?;
// Restore all tasks.
// While restoring the tasks, check for any invalid/broken statuses.
for (_, task) in state.tasks.iter_mut() {
// Handle ungraceful shutdowns while executing tasks.
if task.status == TaskStatus::Running || task.status == TaskStatus::Paused {
info!(
"Setting task {} with previous status {:?} to new status {:?}",
task.id,
task.status,
TaskResult::Killed
);
task.status = TaskStatus::Done(TaskResult::Killed);
|
task.status = TaskStatus::Stashed { enqueue_at: None };
}
// Go through all tasks and set all groups that are no longer
// listed in the configuration file to the default.
let group = match state.groups.get_mut(&task.group) {
Some(group) => group,
None => {
task.set_default_group();
state
.groups
.entry(PUEUE_DEFAULT_GROUP.into())
.or_insert(Group {
status: GroupStatus::Running,
parallel_tasks: 1,
})
}
};
// If there are any queued tasks, pause the group.
// This should prevent any unwanted execution of tasks due to a system crash.
if task.status == TaskStatus::Queued {
info!(
"Pausing group {} to prevent unwanted execution of previous tasks",
&task.group
);
group.status = GroupStatus::Paused;
}
}
Ok(Some(state))
}
/// Remove old logs that aren't needed any longer.
fn rotate_state(state: &LockedState) -> Result<()> {
let path = state.settings.shared.pueue_directory().join("log");
// Get all log files in the directory with their respective system time.
let mut entries: BTreeMap<SystemTime, PathBuf> = BTreeMap::new();
let mut directory_list = fs::read_dir(path)?;
while let Some(Ok(entry)) = directory_list.next() {
let path = entry.path();
let metadata = entry.metadata()?;
let time = metadata.modified()?;
entries.insert(time, path);
}
// Remove all files above the threshold.
// Old files are removed first (implicitly by the BTree order).
let mut number_entries = entries.len();
let mut iter = entries.iter();
while number_entries > 10 {
if let Some((_, path)) = iter.next() {
fs::remove_file(path)?;
number_entries -= 1;
}
}
Ok(())
}
|
}
// Handle crash during editing of the task command.
if task.status == TaskStatus::Locked {
|
random_line_split
|
state_helper.rs
|
use std::collections::BTreeMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::MutexGuard;
use std::time::SystemTime;
use anyhow::{Context, Result};
use chrono::prelude::*;
use log::{debug, info};
use pueue_lib::state::{Group, GroupStatus, State, PUEUE_DEFAULT_GROUP};
use pueue_lib::task::{TaskResult, TaskStatus};
pub type LockedState<'a> = MutexGuard<'a, State>;
/// Check if a task can be deleted. \
/// We have to check all dependant tasks that haven't finished yet.
/// This is necessary to prevent deletion of tasks which are specified as a dependency.
///
/// `to_delete` A list of task ids, which should also be deleted.
/// This allows removing dependency tasks as well as their dependants.
pub fn is_task_removable(state: &LockedState, task_id: &usize, to_delete: &[usize]) -> bool {
// Get all task ids of any dependant tasks.
let dependants: Vec<usize> = state
.tasks
.iter()
.filter(|(_, task)| {
task.dependencies.contains(task_id) && !matches!(task.status, TaskStatus::Done(_))
})
.map(|(_, task)| task.id)
.collect();
if dependants.is_empty() {
return true;
}
// Check if the dependants are supposed to be deleted as well.
let should_delete_dependants = dependants.iter().all(|task_id| to_delete.contains(task_id));
if !should_delete_dependants {
return false;
}
// Lastly, do a recursive check if there are any dependants on our dependants
dependants
.iter()
.all(|task_id| is_task_removable(state, task_id, to_delete))
}
/// A small helper for handling task failures. \
/// Users can specify whether they want to pause the task's group or the
/// whole daemon on a failed task. This function wraps that logic and decides if anything should be
/// paused depending on the current settings.
///
/// `group` should be the group name of the failed task.
pub fn pause_on_failure(state: &mut LockedState, group: &str) {
if state.settings.daemon.pause_group_on_failure {
if let Some(group) = state.groups.get_mut(group) {
group.status = GroupStatus::Paused;
}
} else if state.settings.daemon.pause_all_on_failure {
state.set_status_for_all_groups(GroupStatus::Paused);
}
}
/// Do a full reset of the state.
/// This doesn't reset any processes!
pub fn reset_state(state: &mut LockedState) -> Result<()> {
backup_state(state)?;
state.tasks = BTreeMap::new();
state.set_status_for_all_groups(GroupStatus::Running);
save_state(state)
}
/// Convenience wrapper around save_state_to_file.
pub fn save_state(state: &State) -> Result<()> {
save_state_to_file(state, false)
}
/// Save the current state in a file with a timestamp.
/// At the same time remove old state logs from the log directory.
/// This function is called when large changes to the state are applied, e.g. clean/reset.
pub fn backup_state(state: &LockedState) -> Result<()> {
save_state_to_file(state, true)?;
rotate_state(state).context("Failed to rotate old log files")?;
Ok(())
}
/// Save the current state to disk. \
/// We do this to restore in case of a crash. \
/// If log == true, the file will be saved with a time stamp.
///
/// In comparison to the daemon -> client communication, the state is saved
/// as JSON for readability and debugging purposes.
fn save_state_to_file(state: &State, log: bool) -> Result<()> {
let serialized = serde_json::to_string(&state).context("Failed to serialize state:").unwrap();
let path = state.settings.shared.pueue_directory();
let (temp, real) = if log {
let path = path.join("log");
let now: DateTime<Utc> = Utc::now();
let time = now.format("%Y-%m-%d_%H-%M-%S");
(
path.join(format!("{time}_state.json.partial")),
path.join(format!("{time}_state.json")),
)
} else {
(path.join("state.json.partial"), path.join("state.json"))
};
// Write to temporary log file first, to prevent loss due to crashes.
fs::write(&temp, serialized).context("Failed to write temp file while saving state.")?;
// Overwrite the original with the temp file, if everything went fine.
fs::rename(&temp, &real).context("Failed to overwrite old state while saving state")?;
if log {
debug!("State backup created at: {real:?}");
} else {
debug!("State saved at: {real:?}");
}
Ok(())
}
/// Restore the last state from a previous session. \
/// The state is stored as json in the `pueue_directory`.
///
/// If the state cannot be deserialized, an empty default state will be used instead. \
/// All groups with queued tasks will be automatically paused to prevent unwanted execution.
pub fn restore_state(pueue_directory: &Path) -> Result<Option<State>> {
let path = pueue_directory.join("state.json");
// Ignore if the file doesn't exist. It doesn't have to.
if !path.exists() {
info!("Couldn't find state from previous session at location: {path:?}");
return Ok(None);
}
info!("Start restoring state");
// Try to load the file.
let data = fs::read_to_string(&path).context("State restore: Failed to read state file")?;
// Try to deserialize the state file.
let mut state: State = serde_json::from_str(&data).context("Failed to deserialize state.")?;
// Restore all tasks.
// While restoring the tasks, check for any invalid/broken stati.
for (_, task) in state.tasks.iter_mut() {
// Handle ungraceful shutdowns while executing tasks.
if task.status == TaskStatus::Running || task.status == TaskStatus::Paused {
info!(
"Setting task {} with previous status {:?} to new status {:?}",
task.id,
task.status,
TaskResult::Killed
);
task.status = TaskStatus::Done(TaskResult::Killed);
}
// Handle crash during editing of the task command.
if task.status == TaskStatus::Locked {
task.status = TaskStatus::Stashed { enqueue_at: None };
}
// Go through all tasks and set all groups that are no longer
// listed in the configuration file to the default.
let group = match state.groups.get_mut(&task.group) {
Some(group) => group,
None => {
task.set_default_group();
state
.groups
.entry(PUEUE_DEFAULT_GROUP.into())
.or_insert(Group {
status: GroupStatus::Running,
parallel_tasks: 1,
})
}
};
// If there are any queued tasks, pause the group.
// This should prevent any unwanted execution of tasks due to a system crash.
if task.status == TaskStatus::Queued {
info!(
"Pausing group {} to prevent unwanted execution of previous tasks",
&task.group
);
group.status = GroupStatus::Paused;
}
}
Ok(Some(state))
}
/// Remove old logs that aren't needed any longer.
fn rotate_state(state: &LockedState) -> Result<()>
|
fs::remove_file(path)?;
number_entries -= 1;
}
}
Ok(())
}
|
{
let path = state.settings.shared.pueue_directory().join("log");
// Get all log files in the directory with their respective system time.
let mut entries: BTreeMap<SystemTime, PathBuf> = BTreeMap::new();
let mut directory_list = fs::read_dir(path)?;
while let Some(Ok(entry)) = directory_list.next() {
let path = entry.path();
let metadata = entry.metadata()?;
let time = metadata.modified()?;
entries.insert(time, path);
}
// Remove all files above the threshold.
// Old files are removed first (implicitly by the BTree order).
let mut number_entries = entries.len();
let mut iter = entries.iter();
while number_entries > 10 {
if let Some((_, path)) = iter.next() {
|
identifier_body
|
associated-types-in-inherent-method.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Get {
type Value;
fn get(&self) -> &<Self as Get>::Value;
}
struct Struct {
x: int,
}
impl Get for Struct {
type Value = int;
fn get(&self) -> &int {
&self.x
}
}
impl Struct {
fn
|
<T:Get>(x: &T) -> &<T as Get>::Value {
x.get()
}
}
fn main() {
let s = Struct {
x: 100,
};
assert_eq!(*Struct::grab(&s), 100);
}
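// The same technique still works on modern Rust; only the integer type name has
// changed, since `int` was removed in favour of `i32`. A self-contained sketch,
// not part of the original test:
#[allow(dead_code)]
mod modern_sketch {
    trait Get {
        type Value;
        fn get(&self) -> &Self::Value;
    }

    struct Struct {
        x: i32,
    }

    impl Get for Struct {
        type Value = i32;
        fn get(&self) -> &i32 {
            &self.x
        }
    }

    impl Struct {
        // An inherent method can still refer to the associated type via the trait.
        fn grab<T: Get>(x: &T) -> &<T as Get>::Value {
            x.get()
        }
    }
}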
|
grab
|
identifier_name
|
associated-types-in-inherent-method.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
trait Get {
type Value;
fn get(&self) -> &<Self as Get>::Value;
}
struct Struct {
x: int,
}
impl Get for Struct {
type Value = int;
fn get(&self) -> &int {
&self.x
}
}
impl Struct {
fn grab<T:Get>(x: &T) -> &<T as Get>::Value {
x.get()
}
}
fn main() {
let s = Struct {
x: 100,
};
assert_eq!(*Struct::grab(&s), 100);
}
|
random_line_split
|
|
lib.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "green scheduling" library
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
//! stack-allocation strategy. This can be optionally linked in to rust
//! programs in order to provide M:N functionality inside of 1:1 programs.
//!
//! # Architecture
//!
//! An M:N scheduling library implies that there are N OS threads upon which M
//! "green threads" are multiplexed. In other words, a set of green threads are
//! all run inside a pool of OS threads.
//!
//! With this design, you can achieve _concurrency_ by spawning many green
//! threads, and you can achieve _parallelism_ by running the green threads
//! simultaneously on multiple OS threads. Each OS thread is a candidate for
//! being scheduled on a different core (the source of parallelism), and then
//! all of the green threads cooperatively schedule amongst one another (the
//! source of concurrency).
//!
//! ## Schedulers
//!
//! In order to coordinate among green threads, each OS thread is primarily
//! running something which we call a Scheduler. Whenever a reference to a
//! Scheduler is made, it is synonymous with referencing one OS thread. Each
//! scheduler is bound to one and exactly one OS thread, and the thread that it
//! is bound to never changes.
//!
//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`)
//! which is the thread pool term from above. A pool of schedulers all share the
//! work that they create. Furthermore, whenever a green thread is created (also
//! synonymously referred to as a green task), it is associated with a
//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool.
//!
//! Schedulers can have at most one green thread running on them at a time. When
//! a scheduler is asleep on its event loop, there are no green tasks running on
//! the OS thread or the scheduler. The term "context switch" is used for when
//! the running green thread is swapped out, but this simply changes the one
//! green thread which is running on the scheduler.
//!
//! ## Green Threads
//!
//! A green thread can largely be summarized by a stack and a register context.
//! Whenever a green thread is spawned, it allocates a stack, and then prepares
//! a register context for execution. The green task may be executed across
//! multiple OS threads, but it will always use the same stack and it will carry
//! its register context across OS threads.
//!
//! Each green thread is cooperatively scheduled with other green threads.
//! Primarily, this means that there is no pre-emption of a green thread. The
//! major consequence of this design is that a green thread stuck in an infinite
//! loop will prevent all other green threads from running on that particular
//! scheduler.
//!
//! Scheduling events for green threads occur on communication and I/O
//! boundaries. For example, if a green task blocks waiting for a message on a
//! channel some other green thread can now run on the scheduler. This also has
//! the consequence that until a green thread performs any form of scheduling
//! event, it will be running on the same OS thread (unconditionally).
//!
//! ## Work Stealing
//!
//! With a pool of schedulers, a new green task has a number of options when
//! deciding where to run initially. The current implementation uses a concept
//! called work stealing in order to spread out work among schedulers.
//!
//! In a work-stealing model, each scheduler maintains a local queue of tasks to
//! run, and this queue is stolen from by other schedulers. Implementation-wise,
//! work stealing has some hairy parts, but from a user-perspective, work
//! stealing simply implies that with M green threads and N schedulers where
//! M > N it is very likely that all schedulers will be busy executing work.
//!
//! # Considerations when using libgreen
//!
//! An M:N runtime has both pros and cons, and there is no one answer as to
//! whether M:N or 1:1 is appropriate to use. As always, there are many
//! advantages and disadvantages between the two. Regardless of the workload,
//! however, there are some aspects of using green threads which you should be
//! aware of:
//!
//! * The largest concern when using libgreen is interoperating with native
//! code. Care should be taken when calling native code that will block the OS
//! thread as it will prevent further green tasks from being scheduled on the
//! OS thread.
//!
//! * Native code using thread-local-storage should be approached
//! with care. Green threads may migrate among OS threads at any time, so
//! native libraries using thread-local state may not always work.
//!
//! * Native synchronization primitives (e.g. pthread mutexes) will also not
//! work for green threads. The reason for this is because native primitives
//! often operate on an _OS thread_ granularity whereas green threads are
//! operating on a more granular unit of work.
//!
//! * A green threading runtime is not fork-safe. If the process forks(), it
//! cannot expect to make reasonable progress by continuing to use green
//! threads.
//!
//! Note that these concerns do not mean that operating with native code is a
//! lost cause. These are simply just concerns which should be considered when
//! invoking native code.
//!
//! # Starting with libgreen
//!
//! ```rust
//! extern crate green;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, green::basic::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers
//! }
//! ```
//!
//! > **Note**: This `main` function in this example does *not* have I/O
//! > support. The basic event loop does not provide any I/O support.
//!
//! # Starting with I/O support in libgreen
//!
//! ```rust
//! extern crate green;
//! extern crate rustuv;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, rustuv::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers all powered by libuv
//! }
//! ```
//!
//! The above code can also be shortened with a macro from libgreen.
//!
//! ```
//! #![feature(phase)]
//! #[phase(plugin)] extern crate green;
//!
//! green_start!(main)
//!
//! fn main() {
//! // run inside of a green pool
//! }
//! ```
//!
//! # Using a scheduler pool
//!
//! This library adds a `GreenTaskBuilder` trait that extends the methods
//! available on `std::task::TaskBuilder` to allow spawning a green task,
//! possibly pinned to a particular scheduler thread:
//!
//! ```rust
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//!
//! let config = PoolConfig::new();
//! let mut pool = SchedPool::new(config);
//!
//! // Spawn tasks into the pool of schedulers
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//! // this code is running inside the pool of schedulers
//!
//! spawn(proc() {
//! // this code is also running inside the same scheduler pool
//! });
//! });
//!
//! // Dynamically add a new scheduler to the scheduler pool. This adds another
//! // OS thread that green threads can be multiplexed on to.
//! let mut handle = pool.spawn_sched();
//!
//! // Pin a task to the spawned scheduler
//! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() {
//! /*... */
//! });
//!
//! // Handles keep schedulers alive, so be sure to drop all handles before
//! // destroying the sched pool
//! drop(handle);
//!
//! // Required to shut down this scheduler pool.
//! // The task will fail if `shutdown` is not called.
//! pool.shutdown();
//! ```
#![crate_id = "green#0.11.0-pre"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
// NB this does *not* include globs, please keep it that way.
#![feature(macro_rules, phase)]
#![allow(visible_private_types)]
#![allow(deprecated)]
#![feature(default_type_params)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
#[cfg(test)] extern crate rustuv;
extern crate libc;
extern crate alloc;
use alloc::arc::Arc;
use std::mem::replace;
use std::os;
use std::rt::rtio;
use std::rt::thread::Thread;
use std::rt::task::TaskOpts;
use std::rt;
use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::{TaskBuilder, Spawner};
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, PinnedTask, NewNeighbor};
use sleeper_list::SleeperList;
use stack::StackPool;
use task::GreenTask;
mod macros;
mod simple;
mod message_queue;
pub mod basic;
pub mod context;
pub mod coroutine;
pub mod sched;
pub mod sleeper_list;
pub mod stack;
pub mod task;
/// A helper macro for booting a program with libgreen
///
/// # Example
///
/// ```
/// #![feature(phase)]
/// #[phase(plugin)] extern crate green;
///
/// green_start!(main)
///
/// fn main() {
/// // running with libgreen
/// }
/// ```
#[macro_export]
macro_rules! green_start( ($f:ident) => (
mod __start {
extern crate green;
extern crate rustuv;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, super::$f)
}
}
) )
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers have
/// exited. This function also requires a local task to be available.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
/// by os::args.
/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
/// Once this procedure exits, the scheduling pool will begin to shut
/// down. The entire pool (and this function) will only return once
/// all child tasks have finished executing.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: *const *const u8,
event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
rt::init(argc, argv);
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
ret = Some(run(event_loop_factory, main.take_unwrap()));
}).destroy();
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
ret.unwrap()
}
/// Execute the main function in a pool of M:N schedulers.
///
/// Configures the runtime according to the environment, by default using a task
/// scheduler with the same number of threads as cores. Returns a process exit
/// code.
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
let mut cfg = PoolConfig::new();
cfg.event_loop_factory = event_loop_factory;
let mut pool = SchedPool::new(cfg);
let (tx, rx) = channel();
let mut opts = TaskOpts::new();
opts.on_exit = Some(proc(r) tx.send(r));
opts.name = Some("<main>".into_maybe_owned());
pool.spawn(opts, main);
// Wait for the main task to return, and set the process error code
// appropriately.
if rx.recv().is_err() {
os::set_exit_status(rt::DEFAULT_ERROR_CODE);
}
// Now that we're sure all tasks are dead, shut down the pool of schedulers,
// waiting for them all to return.
pool.shutdown();
os::get_exit_status()
}
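// `run` coordinates shutdown with a channel handshake: the main task's `on_exit`
// callback sends its result, and this thread blocks on the receiver before the
// pool is torn down. The same handshake with plain std threads, shown as an
// illustrative sketch only (not part of libgreen's API):
#[allow(dead_code)]
fn wait_for_worker_sketch() -> bool {
    use std::sync::mpsc::channel;
    use std::thread;

    let (tx, rx) = channel();
    let worker = thread::spawn(move || {
        // Stand-in for the spawned task's outcome.
        let result: Result<(), ()> = Ok(());
        let _ = tx.send(result);
    });
    // Block until the worker reports, then turn the outcome into a pass/fail flag.
    let ok = rx.recv().map(|r| r.is_ok()).unwrap_or(false);
    let _ = worker.join();
    ok
}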
/// Configuration of how an M:N pool of schedulers is spawned.
pub struct PoolConfig {
/// The number of schedulers (OS threads) to spawn into this M:N pool.
pub threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
pub event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
}
impl PoolConfig {
/// Returns the default configuration, as determined by the environment
/// variables of this process.
pub fn new() -> PoolConfig {
PoolConfig {
threads: rt::default_sched_threads(),
event_loop_factory: basic::event_loop,
}
}
}
/// A structure representing a handle to a pool of schedulers. This handle is
/// used to keep the pool alive and also reap the status from the pool.
pub struct SchedPool {
id: uint,
threads: Vec<Thread<()>>,
handles: Vec<SchedHandle>,
stealers: Vec<deque::Stealer<Box<task::GreenTask>>>,
next_friend: uint,
stack_pool: StackPool,
deque_pool: deque::BufferPool<Box<task::GreenTask>>,
sleepers: SleeperList,
factory: fn() -> Box<rtio::EventLoop + Send>,
task_state: TaskState,
tasks_done: Receiver<()>,
}
/// This is an internal state shared among a pool of schedulers. This is used to
/// keep track of how many tasks are currently running in the pool and then
/// to send on a channel once the entire pool has been drained of all tasks.
#[deriving(Clone)]
struct TaskState {
cnt: Arc<AtomicUint>,
done: Sender<()>,
}
impl SchedPool {
/// Creates a new pool of M:N schedulers.
///
/// This will configure the pool according to the `config` parameter and spin up
/// one scheduler thread per requested scheduler; tasks can then be spawned into it.
pub fn new(config: PoolConfig) -> SchedPool
|
factory: factory,
task_state: state,
tasks_done: p,
};
// Create a work queue for each scheduler, n times. Create an extra
// for the main thread if that flag is set. We won't steal from it.
let mut workers = Vec::with_capacity(nscheds);
let mut stealers = Vec::with_capacity(nscheds);
for _ in range(0, nscheds) {
let (w, s) = pool.deque_pool.deque();
workers.push(w);
stealers.push(s);
}
pool.stealers = stealers;
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
for worker in workers.move_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = box Scheduler::new(pool.id,
(pool.factory)(),
worker,
pool.stealers.clone(),
pool.sleepers.clone(),
pool.task_state.clone());
pool.handles.push(sched.make_handle());
pool.threads.push(Thread::start(proc() { sched.bootstrap(); }));
}
return pool;
}
/// Creates a new task configured to run inside of this pool of schedulers.
/// This is useful to create a task which can then be sent to a specific
/// scheduler created by `spawn_sched` (and possibly pin it to that
/// scheduler).
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> {
GreenTask::configure(&mut self.stack_pool, opts, f)
}
/// Spawns a new task into this pool of schedulers, using the specified
/// options to configure the new task which is spawned.
///
/// New tasks are spawned in a round-robin fashion to the schedulers in this
/// pool, but tasks can certainly migrate among schedulers once they're in
/// the pool.
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn spawn(&mut self, opts: TaskOpts, f: proc():Send) {
let task = self.task(opts, f);
// Figure out someone to send this task to
let idx = self.next_friend;
self.next_friend += 1;
if self.next_friend >= self.handles.len() {
self.next_friend = 0;
}
// Jettison the task away!
self.handles.get_mut(idx).send(TaskFromFriend(task));
}
/// Spawns a new scheduler into this M:N pool. A handle is returned to the
/// scheduler for use. The scheduler will not exit as long as this handle is
/// active.
///
/// The scheduler spawned will participate in work stealing with all of the
/// other schedulers currently in the scheduler pool.
pub fn spawn_sched(&mut self) -> SchedHandle {
let (worker, stealer) = self.deque_pool.deque();
self.stealers.push(stealer.clone());
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
for handle in self.handles.mut_iter() {
handle.send(NewNeighbor(stealer.clone()));
}
// Create the new scheduler, using the same sleeper list as all the
// other schedulers as well as having a stealer handle to all other
// schedulers.
let mut sched = box Scheduler::new(self.id,
(self.factory)(),
worker,
self.stealers.clone(),
self.sleepers.clone(),
self.task_state.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
self.threads.push(Thread::start(proc() { sched.bootstrap() }));
return ret;
}
/// Consumes the pool of schedulers, waiting for all tasks to exit and all
/// schedulers to shut down.
///
/// This function is required to be called in order to drop a pool of
/// schedulers, it is considered an error to drop a pool without calling
/// this method.
///
/// This only waits for all tasks in *this pool* of schedulers to exit; any
/// native tasks or extern pools will not be waited on.
pub fn shutdown(mut self) {
self.stealers = vec![];
// Wait for everyone to exit. We may have reached a 0-task count
// multiple times in the past, meaning there could be several buffered
// messages on the `tasks_done` port. We're guaranteed that after *some*
// message the current task count will be 0, so we just receive in a
// loop until everything is totally dead.
while self.task_state.active() {
self.tasks_done.recv();
}
// Now that everyone's gone, tell everything to shut down.
for mut handle in replace(&mut self.handles, vec![]).move_iter() {
handle.send(Shutdown);
}
for thread in replace(&mut self.threads, vec![]).move_iter() {
thread.join();
}
}
}
impl TaskState {
fn new() -> (Receiver<()>, TaskState) {
let (tx, rx) = channel();
(rx, TaskState {
cnt: Arc::new(AtomicUint::new(0)),
done: tx,
})
}
fn increment(&mut self) {
self.cnt.fetch_add(1, SeqCst);
}
fn active(&self) -> bool {
self.cnt.load(SeqCst) != 0
}
fn decrement(&mut self) {
let prev = self.cnt.fetch_sub(1, SeqCst);
if prev == 1 {
self.done.send(());
}
}
}
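// TaskState above is just a shared atomic counter paired with a completion
// channel: every spawned task increments it, every finished task decrements it,
// and whoever drives the count to zero pings the waiter. The same idea in
// current std Rust, as an illustrative sketch (not libgreen's actual types):
#[allow(dead_code)]
mod task_counter_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::{channel, Receiver, Sender};
    use std::sync::Arc;

    #[derive(Clone)]
    pub struct Counter {
        cnt: Arc<AtomicUsize>,
        done: Sender<()>,
    }

    impl Counter {
        pub fn new() -> (Receiver<()>, Counter) {
            let (tx, rx) = channel();
            (rx, Counter { cnt: Arc::new(AtomicUsize::new(0)), done: tx })
        }

        pub fn increment(&self) {
            self.cnt.fetch_add(1, Ordering::SeqCst);
        }

        pub fn active(&self) -> bool {
            self.cnt.load(Ordering::SeqCst) != 0
        }

        pub fn decrement(&self) {
            // The task that drives the count from 1 to 0 notifies the waiter.
            if self.cnt.fetch_sub(1, Ordering::SeqCst) == 1 {
                let _ = self.done.send(());
            }
        }
    }
}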
impl Drop for SchedPool {
fn drop(&mut self) {
if self.threads.len() > 0 {
fail!("dropping a M:N scheduler pool that wasn't shut down");
}
}
}
/// A spawner for green tasks
pub struct GreenSpawner<'a>{
pool: &'a mut SchedPool,
handle: Option<&'a mut SchedHandle>
}
impl<'a> Spawner for GreenSpawner<'a> {
#[inline]
fn spawn(self, opts: TaskOpts, f: proc():Send) {
let GreenSpawner { pool, handle } = self;
match handle {
None => pool.spawn(opts, f),
Some(h) => h.send(PinnedTask(pool.task(opts, f)))
}
}
}
/// An extension trait adding `green` configuration methods to `TaskBuilder`.
pub trait GreenTaskBuilder {
fn green<'a>(self, &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>>;
fn green_pinned<'a>(self, &'a mut SchedPool, &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>>;
}
impl<S: Spawner> GreenTaskBuilder for TaskBuilder<S> {
fn green<'a>(self, pool: &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: None})
}
fn green_pinned<'a>(self, pool: &'a mut SchedPool, handle: &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: Some(handle)})
}
}
#[cfg(test)]
mod test {
use std::task::TaskBuilder;
use super::{SchedPool, PoolConfig, GreenTaskBuilder};
#[test]
fn test_green_builder() {
let mut pool = SchedPool::new(PoolConfig::new());
let res = TaskBuilder::new().green(&mut pool).try(proc() {
"Success!".to_string()
});
assert_eq!(res.ok().unwrap(), "Success!".to_string());
pool.shutdown();
}
}
|
{
static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
let PoolConfig {
threads: nscheds,
event_loop_factory: factory
} = config;
assert!(nscheds > 0);
// The pool of schedulers that will be returned from this function
let (p, state) = TaskState::new();
let mut pool = SchedPool {
threads: vec![],
handles: vec![],
stealers: vec![],
id: unsafe { POOL_ID.fetch_add(1, SeqCst) },
sleepers: SleeperList::new(),
stack_pool: StackPool::new(),
deque_pool: deque::BufferPool::new(),
next_friend: 0,
|
identifier_body
|
lib.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "green scheduling" library
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
//! stack-allocation strategy. This can be optionally linked in to rust
//! programs in order to provide M:N functionality inside of 1:1 programs.
//!
//! # Architecture
//!
//! An M:N scheduling library implies that there are N OS threads upon which M
//! "green threads" are multiplexed. In other words, a set of green threads are
//! all run inside a pool of OS threads.
//!
//! With this design, you can achieve _concurrency_ by spawning many green
//! threads, and you can achieve _parallelism_ by running the green threads
//! simultaneously on multiple OS threads. Each OS thread is a candidate for
//! being scheduled on a different core (the source of parallelism), and then
//! all of the green threads cooperatively schedule amongst one another (the
//! source of concurrency).
//!
//! ## Schedulers
//!
//! In order to coordinate among green threads, each OS thread is primarily
//! running something which we call a Scheduler. Whenever a reference to a
//! Scheduler is made, it is synonymous with referencing one OS thread. Each
//! scheduler is bound to one and exactly one OS thread, and the thread that it
//! is bound to never changes.
//!
//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`)
//! which is the thread pool term from above. A pool of schedulers all share the
//! work that they create. Furthermore, whenever a green thread is created (also
//! synonymously referred to as a green task), it is associated with a
//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool.
//!
//! Schedulers can have at most one green thread running on them at a time. When
//! a scheduler is asleep on its event loop, there are no green tasks running on
//! the OS thread or the scheduler. The term "context switch" is used for when
//! the running green thread is swapped out, but this simply changes the one
//! green thread which is running on the scheduler.
//!
//! ## Green Threads
//!
//! A green thread can largely be summarized by a stack and a register context.
//! Whenever a green thread is spawned, it allocates a stack, and then prepares
//! a register context for execution. The green task may be executed across
//! multiple OS threads, but it will always use the same stack and it will carry
//! its register context across OS threads.
//!
//! Each green thread is cooperatively scheduled with other green threads.
//! Primarily, this means that there is no pre-emption of a green thread. The
//! major consequence of this design is that a green thread stuck in an infinite
//! loop will prevent all other green threads from running on that particular
//! scheduler.
//!
//! Scheduling events for green threads occur on communication and I/O
//! boundaries. For example, if a green task blocks waiting for a message on a
//! channel some other green thread can now run on the scheduler. This also has
//! the consequence that until a green thread performs any form of scheduling
//! event, it will be running on the same OS thread (unconditionally).
//!
//! ## Work Stealing
//!
//! With a pool of schedulers, a new green task has a number of options when
//! deciding where to run initially. The current implementation uses a concept
//! called work stealing in order to spread out work among schedulers.
//!
//! In a work-stealing model, each scheduler maintains a local queue of tasks to
//! run, and this queue is stolen from by other schedulers. Implementation-wise,
//! work stealing has some hairy parts, but from a user-perspective, work
//! stealing simply implies that with M green threads and N schedulers where
//! M > N it is very likely that all schedulers will be busy executing work.
//!
//! # Considerations when using libgreen
//!
//! An M:N runtime has both pros and cons, and there is no one answer as to
//! whether M:N or 1:1 is appropriate to use. As always, there are many
//! advantages and disadvantages between the two. Regardless of the workload,
//! however, there are some aspects of using green threads which you should be
//! aware of:
//!
//! * The largest concern when using libgreen is interoperating with native
//! code. Care should be taken when calling native code that will block the OS
//! thread as it will prevent further green tasks from being scheduled on the
//! OS thread.
//!
//! * Native code using thread-local-storage should be approached
//! with care. Green threads may migrate among OS threads at any time, so
//! native libraries using thread-local state may not always work.
//!
//! * Native synchronization primitives (e.g. pthread mutexes) will also not
//! work for green threads. The reason for this is because native primitives
//! often operate on an _OS thread_ granularity whereas green threads are
//! operating on a more granular unit of work.
//!
//! * A green threading runtime is not fork-safe. If the process forks(), it
//! cannot expect to make reasonable progress by continuing to use green
//! threads.
//!
//! Note that these concerns do not mean that operating with native code is a
//! lost cause. These are simply just concerns which should be considered when
//! invoking native code.
//!
//! # Starting with libgreen
//!
//! ```rust
//! extern crate green;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, green::basic::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers
//! }
//! ```
//!
//! > **Note**: This `main` function in this example does *not* have I/O
//! > support. The basic event loop does not provide any I/O support.
//!
//! # Starting with I/O support in libgreen
//!
//! ```rust
//! extern crate green;
//! extern crate rustuv;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, rustuv::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers all powered by libuv
//! }
//! ```
//!
//! The above code can also be shortened with a macro from libgreen.
//!
//! ```
//! #![feature(phase)]
//! #[phase(plugin)] extern crate green;
//!
//! green_start!(main)
//!
//! fn main() {
//! // run inside of a green pool
//! }
//! ```
//!
//! # Using a scheduler pool
//!
//! This library adds a `GreenTaskBuilder` trait that extends the methods
//! available on `std::task::TaskBuilder` to allow spawning a green task,
//! possibly pinned to a particular scheduler thread:
//!
//! ```rust
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//!
//! let config = PoolConfig::new();
//! let mut pool = SchedPool::new(config);
//!
//! // Spawn tasks into the pool of schedulers
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//! // this code is running inside the pool of schedulers
//!
//! spawn(proc() {
//! // this code is also running inside the same scheduler pool
//! });
//! });
//!
//! // Dynamically add a new scheduler to the scheduler pool. This adds another
//! // OS thread that green threads can be multiplexed on to.
//! let mut handle = pool.spawn_sched();
//!
//! // Pin a task to the spawned scheduler
//! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() {
//! /*... */
//! });
//!
//! // Handles keep schedulers alive, so be sure to drop all handles before
//! // destroying the sched pool
//! drop(handle);
//!
//! // Required to shut down this scheduler pool.
//! // The task will fail if `shutdown` is not called.
//! pool.shutdown();
//! ```
#![crate_id = "green#0.11.0-pre"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
// NB this does *not* include globs, please keep it that way.
#![feature(macro_rules, phase)]
#![allow(visible_private_types)]
#![allow(deprecated)]
#![feature(default_type_params)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
#[cfg(test)] extern crate rustuv;
extern crate libc;
extern crate alloc;
use alloc::arc::Arc;
use std::mem::replace;
use std::os;
use std::rt::rtio;
use std::rt::thread::Thread;
use std::rt::task::TaskOpts;
use std::rt;
use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::{TaskBuilder, Spawner};
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, PinnedTask, NewNeighbor};
use sleeper_list::SleeperList;
use stack::StackPool;
use task::GreenTask;
mod macros;
mod simple;
mod message_queue;
pub mod basic;
pub mod context;
pub mod coroutine;
pub mod sched;
pub mod sleeper_list;
pub mod stack;
pub mod task;
/// A helper macro for booting a program with libgreen
///
/// # Example
///
/// ```
/// #![feature(phase)]
/// #[phase(plugin)] extern crate green;
///
/// green_start!(main)
///
/// fn main() {
/// // running with libgreen
/// }
/// ```
#[macro_export]
macro_rules! green_start( ($f:ident) => (
mod __start {
extern crate green;
extern crate rustuv;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, super::$f)
}
}
) )
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers have
/// exited. This function also requires a local task to be available.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
|
/// down. The entire pool (and this function) will only return once
/// all child tasks have finished executing.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: *const *const u8,
event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
rt::init(argc, argv);
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
ret = Some(run(event_loop_factory, main.take_unwrap()));
}).destroy();
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
ret.unwrap()
}
/// Execute the main function in a pool of M:N schedulers.
///
/// Configures the runtime according to the environment, by default using a task
/// scheduler with the same number of threads as cores. Returns a process exit
/// code.
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
let mut cfg = PoolConfig::new();
cfg.event_loop_factory = event_loop_factory;
let mut pool = SchedPool::new(cfg);
let (tx, rx) = channel();
let mut opts = TaskOpts::new();
opts.on_exit = Some(proc(r) tx.send(r));
opts.name = Some("<main>".into_maybe_owned());
pool.spawn(opts, main);
// Wait for the main task to return, and set the process error code
// appropriately.
if rx.recv().is_err() {
os::set_exit_status(rt::DEFAULT_ERROR_CODE);
}
// Now that we're sure all tasks are dead, shut down the pool of schedulers,
// waiting for them all to return.
pool.shutdown();
os::get_exit_status()
}
/// Configuration of how an M:N pool of schedulers is spawned.
pub struct PoolConfig {
/// The number of schedulers (OS threads) to spawn into this M:N pool.
pub threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
pub event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
}
impl PoolConfig {
/// Returns the default configuration, as determined by the environment
/// variables of this process.
pub fn new() -> PoolConfig {
PoolConfig {
threads: rt::default_sched_threads(),
event_loop_factory: basic::event_loop,
}
}
}
/// A structure representing a handle to a pool of schedulers. This handle is
/// used to keep the pool alive and also reap the status from the pool.
pub struct SchedPool {
id: uint,
threads: Vec<Thread<()>>,
handles: Vec<SchedHandle>,
stealers: Vec<deque::Stealer<Box<task::GreenTask>>>,
next_friend: uint,
stack_pool: StackPool,
deque_pool: deque::BufferPool<Box<task::GreenTask>>,
sleepers: SleeperList,
factory: fn() -> Box<rtio::EventLoop + Send>,
task_state: TaskState,
tasks_done: Receiver<()>,
}
/// This is an internal state shared among a pool of schedulers. This is used to
/// keep track of how many tasks are currently running in the pool and then
/// to send on a channel once the entire pool has been drained of all tasks.
#[deriving(Clone)]
struct TaskState {
cnt: Arc<AtomicUint>,
done: Sender<()>,
}
impl SchedPool {
/// Creates a new pool of M:N schedulers.
///
/// This will configure the pool according to the `config` parameter and spin up
/// one scheduler thread per requested scheduler; tasks can then be spawned into it.
pub fn new(config: PoolConfig) -> SchedPool {
static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
let PoolConfig {
threads: nscheds,
event_loop_factory: factory
} = config;
assert!(nscheds > 0);
// The pool of schedulers that will be returned from this function
let (p, state) = TaskState::new();
let mut pool = SchedPool {
threads: vec![],
handles: vec![],
stealers: vec![],
id: unsafe { POOL_ID.fetch_add(1, SeqCst) },
sleepers: SleeperList::new(),
stack_pool: StackPool::new(),
deque_pool: deque::BufferPool::new(),
next_friend: 0,
factory: factory,
task_state: state,
tasks_done: p,
};
// Create a work queue for each scheduler, n times. Create an extra
// for the main thread if that flag is set. We won't steal from it.
let mut workers = Vec::with_capacity(nscheds);
let mut stealers = Vec::with_capacity(nscheds);
for _ in range(0, nscheds) {
let (w, s) = pool.deque_pool.deque();
workers.push(w);
stealers.push(s);
}
pool.stealers = stealers;
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
for worker in workers.move_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = box Scheduler::new(pool.id,
(pool.factory)(),
worker,
pool.stealers.clone(),
pool.sleepers.clone(),
pool.task_state.clone());
pool.handles.push(sched.make_handle());
pool.threads.push(Thread::start(proc() { sched.bootstrap(); }));
}
return pool;
}
/// Creates a new task configured to run inside of this pool of schedulers.
/// This is useful to create a task which can then be sent to a specific
/// scheduler created by `spawn_sched` (and possibly pin it to that
/// scheduler).
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> {
GreenTask::configure(&mut self.stack_pool, opts, f)
}
/// Spawns a new task into this pool of schedulers, using the specified
/// options to configure the new task which is spawned.
///
/// New tasks are spawned in a round-robin fashion to the schedulers in this
/// pool, but tasks can certainly migrate among schedulers once they're in
/// the pool.
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn spawn(&mut self, opts: TaskOpts, f: proc():Send) {
let task = self.task(opts, f);
// Figure out someone to send this task to
let idx = self.next_friend;
self.next_friend += 1;
if self.next_friend >= self.handles.len() {
self.next_friend = 0;
}
// Jettison the task away!
self.handles.get_mut(idx).send(TaskFromFriend(task));
}
/// Spawns a new scheduler into this M:N pool. A handle is returned to the
/// scheduler for use. The scheduler will not exit as long as this handle is
/// active.
///
/// The scheduler spawned will participate in work stealing with all of the
/// other schedulers currently in the scheduler pool.
pub fn spawn_sched(&mut self) -> SchedHandle {
let (worker, stealer) = self.deque_pool.deque();
self.stealers.push(stealer.clone());
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
for handle in self.handles.mut_iter() {
handle.send(NewNeighbor(stealer.clone()));
}
// Create the new scheduler, using the same sleeper list as all the
// other schedulers as well as having a stealer handle to all other
// schedulers.
let mut sched = box Scheduler::new(self.id,
(self.factory)(),
worker,
self.stealers.clone(),
self.sleepers.clone(),
self.task_state.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
self.threads.push(Thread::start(proc() { sched.bootstrap() }));
return ret;
}
/// Consumes the pool of schedulers, waiting for all tasks to exit and all
/// schedulers to shut down.
///
/// This function is required to be called in order to drop a pool of
/// schedulers, it is considered an error to drop a pool without calling
/// this method.
///
/// This only waits for all tasks in *this pool* of schedulers to exit; any
/// native tasks or extern pools will not be waited on.
pub fn shutdown(mut self) {
self.stealers = vec![];
// Wait for everyone to exit. We may have reached a 0-task count
// multiple times in the past, meaning there could be several buffered
// messages on the `tasks_done` port. We're guaranteed that after *some*
// message the current task count will be 0, so we just receive in a
// loop until everything is totally dead.
while self.task_state.active() {
self.tasks_done.recv();
}
// Now that everyone's gone, tell everything to shut down.
for mut handle in replace(&mut self.handles, vec![]).move_iter() {
handle.send(Shutdown);
}
for thread in replace(&mut self.threads, vec![]).move_iter() {
thread.join();
}
}
}
impl TaskState {
fn new() -> (Receiver<()>, TaskState) {
let (tx, rx) = channel();
(rx, TaskState {
cnt: Arc::new(AtomicUint::new(0)),
done: tx,
})
}
fn increment(&mut self) {
self.cnt.fetch_add(1, SeqCst);
}
fn active(&self) -> bool {
self.cnt.load(SeqCst) != 0
}
fn decrement(&mut self) {
let prev = self.cnt.fetch_sub(1, SeqCst);
if prev == 1 {
self.done.send(());
}
}
}
impl Drop for SchedPool {
fn drop(&mut self) {
if self.threads.len() > 0 {
fail!("dropping a M:N scheduler pool that wasn't shut down");
}
}
}
/// A spawner for green tasks
pub struct GreenSpawner<'a>{
pool: &'a mut SchedPool,
handle: Option<&'a mut SchedHandle>
}
impl<'a> Spawner for GreenSpawner<'a> {
#[inline]
fn spawn(self, opts: TaskOpts, f: proc():Send) {
let GreenSpawner { pool, handle } = self;
match handle {
None => pool.spawn(opts, f),
Some(h) => h.send(PinnedTask(pool.task(opts, f)))
}
}
}
/// An extension trait adding `green` configuration methods to `TaskBuilder`.
pub trait GreenTaskBuilder {
fn green<'a>(self, &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>>;
fn green_pinned<'a>(self, &'a mut SchedPool, &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>>;
}
impl<S: Spawner> GreenTaskBuilder for TaskBuilder<S> {
fn green<'a>(self, pool: &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: None})
}
fn green_pinned<'a>(self, pool: &'a mut SchedPool, handle: &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: Some(handle)})
}
}
#[cfg(test)]
mod test {
use std::task::TaskBuilder;
use super::{SchedPool, PoolConfig, GreenTaskBuilder};
#[test]
fn test_green_builder() {
let mut pool = SchedPool::new(PoolConfig::new());
let res = TaskBuilder::new().green(&mut pool).try(proc() {
"Success!".to_string()
});
assert_eq!(res.ok().unwrap(), "Success!".to_string());
pool.shutdown();
}
}
|
/// by os::args.
/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
/// Once this procedure exits, the scheduling pool will begin to shut
|
random_line_split
|
lib.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "green scheduling" library
//!
//! This library provides M:N threading for rust programs. Internally this has
//! the implementation of a green scheduler along with context switching and a
//! stack-allocation strategy. This can be optionally linked in to rust
//! programs in order to provide M:N functionality inside of 1:1 programs.
//!
//! # Architecture
//!
//! An M:N scheduling library implies that there are N OS threads upon which M
//! "green threads" are multiplexed. In other words, a set of green threads are
//! all run inside a pool of OS threads.
//!
//! With this design, you can achieve _concurrency_ by spawning many green
//! threads, and you can achieve _parallelism_ by running the green threads
//! simultaneously on multiple OS threads. Each OS thread is a candidate for
//! being scheduled on a different core (the source of parallelism), and then
//! all of the green threads cooperatively schedule amongst one another (the
//! source of concurrency).
//!
//! ## Schedulers
//!
//! In order to coordinate among green threads, each OS thread is primarily
//! running something which we call a Scheduler. Whenever a reference to a
//! Scheduler is made, it is synonymous with referencing one OS thread. Each
//! scheduler is bound to one and exactly one OS thread, and the thread that it
//! is bound to never changes.
//!
//! Each scheduler is connected to a pool of other schedulers (a `SchedPool`)
//! which is the thread pool term from above. A pool of schedulers all share the
//! work that they create. Furthermore, whenever a green thread is created (also
//! synonymously referred to as a green task), it is associated with a
//! `SchedPool` forevermore. A green thread cannot leave its scheduler pool.
//!
//! Schedulers can have at most one green thread running on them at a time. When
//! a scheduler is asleep on its event loop, there are no green tasks running on
//! the OS thread or the scheduler. The term "context switch" is used for when
//! the running green thread is swapped out, but this simply changes the one
//! green thread which is running on the scheduler.
//!
//! ## Green Threads
//!
//! A green thread can largely be summarized by a stack and a register context.
//! Whenever a green thread is spawned, it allocates a stack, and then prepares
//! a register context for execution. The green task may be executed across
//! multiple OS threads, but it will always use the same stack and it will carry
//! its register context across OS threads.
//!
//! Each green thread is cooperatively scheduled with other green threads.
//! Primarily, this means that there is no pre-emption of a green thread. The
//! major consequence of this design is that a green thread stuck in an infinite
//! loop will prevent all other green threads from running on that particular
//! scheduler.
//!
//! Scheduling events for green threads occur on communication and I/O
//! boundaries. For example, if a green task blocks waiting for a message on a
//! channel, some other green thread can now run on the scheduler. This also has
//! the consequence that until a green thread performs any form of scheduling
//! event, it will be running on the same OS thread (unconditionally).
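//!
//! As a minimal sketch of such a scheduling point (using only the pool API
//! described later in this documentation), a blocking channel receive lets the
//! scheduler run another green task on the same OS thread:
//!
//! ```rust
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//!
//! let mut pool = SchedPool::new(PoolConfig::new());
//! let (tx, rx) = channel();
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//!     tx.send("ping");
//! });
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//!     // Blocking here is a scheduling event: another green task may run on
//!     // this OS thread while we wait for the message.
//!     let _msg = rx.recv();
//! });
//! pool.shutdown();
//! ```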
//!
//! ## Work Stealing
//!
//! With a pool of schedulers, a new green task has a number of options when
//! deciding where to run initially. The current implementation uses a concept
//! called work stealing in order to spread out work among schedulers.
//!
//! In a work-stealing model, each scheduler maintains a local queue of tasks to
//! run, and this queue is stolen from by other schedulers. Implementation-wise,
//! work stealing has some hairy parts, but from a user's perspective, work
//! stealing simply implies that with M green threads and N schedulers, where
//! M > N, it is very likely that all schedulers will be busy executing work.
//!
//! # Considerations when using libgreen
//!
//! An M:N runtime has both pros and cons, and there is no one answer as to
//! whether M:N or 1:1 is appropriate to use. As always, there are many
//! advantages and disadvantages between the two. Regardless of the workload,
//! however, there are some aspects of using green threads which you should be
//! aware of:
//!
//! * The largest concern when using libgreen is interoperating with native
//! code. Care should be taken when calling native code that will block the OS
//! thread as it will prevent further green tasks from being scheduled on the
//! OS thread.
//!
//! * Native code using thread-local storage should be approached
//! with care. Green threads may migrate among OS threads at any time, so
//! native libraries using thread-local state may not always work.
//!
//! * Native synchronization primitives (e.g. pthread mutexes) will also not
//!   work for green threads. This is because native primitives often operate
//!   at an _OS thread_ granularity, whereas green threads operate on a more
//!   granular unit of work.
//!
//! * A green threading runtime is not fork-safe. If the process forks(), it
//! cannot expect to make reasonable progress by continuing to use green
//! threads.
//!
//! Note that these concerns do not mean that operating with native code is a
//! lost cause. These are simply concerns which should be considered when
//! invoking native code.
//!
//! # Starting with libgreen
//!
//! ```rust
//! extern crate green;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, green::basic::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers
//! }
//! ```
//!
//! > **Note**: This `main` function in this example does *not* have I/O
//! > support. The basic event loop does not provide any I/O support.
//!
//! # Starting with I/O support in libgreen
//!
//! ```rust
//! extern crate green;
//! extern crate rustuv;
//!
//! #[start]
//! fn start(argc: int, argv: *const *const u8) -> int {
//! green::start(argc, argv, rustuv::event_loop, main)
//! }
//!
//! fn main() {
//! // this code is running in a pool of schedulers all powered by libuv
//! }
//! ```
//!
//! The above code can also be shortened with a macro from libgreen.
//!
//! ```
//! #![feature(phase)]
//! #[phase(plugin)] extern crate green;
//!
//! green_start!(main)
//!
//! fn main() {
//! // run inside of a green pool
//! }
//! ```
//!
//! # Using a scheduler pool
//!
//! This library adds a `GreenTaskBuilder` trait that extends the methods
//! available on `std::task::TaskBuilder` to allow spawning a green task,
//! possibly pinned to a particular scheduler thread:
//!
//! ```rust
//! use std::task::TaskBuilder;
//! use green::{SchedPool, PoolConfig, GreenTaskBuilder};
//!
//! let config = PoolConfig::new();
//! let mut pool = SchedPool::new(config);
//!
//! // Spawn tasks into the pool of schedulers
//! TaskBuilder::new().green(&mut pool).spawn(proc() {
//! // this code is running inside the pool of schedulers
//!
//! spawn(proc() {
//! // this code is also running inside the same scheduler pool
//! });
//! });
//!
//! // Dynamically add a new scheduler to the scheduler pool. This adds another
//! // OS thread that green threads can be multiplexed on to.
//! let mut handle = pool.spawn_sched();
//!
//! // Pin a task to the spawned scheduler
//! TaskBuilder::new().green_pinned(&mut pool, &mut handle).spawn(proc() {
//! /*... */
//! });
//!
//! // Handles keep schedulers alive, so be sure to drop all handles before
//! // destroying the sched pool
//! drop(handle);
//!
//! // Required to shut down this scheduler pool.
//! // The task will fail if `shutdown` is not called.
//! pool.shutdown();
//! ```
#![crate_id = "green#0.11.0-pre"]
#![experimental]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/",
html_playground_url = "http://play.rust-lang.org/")]
// NB this does *not* include globs, please keep it that way.
#![feature(macro_rules, phase)]
#![allow(visible_private_types)]
#![allow(deprecated)]
#![feature(default_type_params)]
#[cfg(test)] #[phase(plugin, link)] extern crate log;
#[cfg(test)] extern crate rustuv;
extern crate libc;
extern crate alloc;
use alloc::arc::Arc;
use std::mem::replace;
use std::os;
use std::rt::rtio;
use std::rt::thread::Thread;
use std::rt::task::TaskOpts;
use std::rt;
use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
use std::sync::deque;
use std::task::{TaskBuilder, Spawner};
use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, PinnedTask, NewNeighbor};
use sleeper_list::SleeperList;
use stack::StackPool;
use task::GreenTask;
mod macros;
mod simple;
mod message_queue;
pub mod basic;
pub mod context;
pub mod coroutine;
pub mod sched;
pub mod sleeper_list;
pub mod stack;
pub mod task;
/// A helper macro for booting a program with libgreen
///
/// # Example
///
/// ```
/// #![feature(phase)]
/// #[phase(plugin)] extern crate green;
///
/// green_start!(main)
///
/// fn main() {
/// // running with libgreen
/// }
/// ```
#[macro_export]
macro_rules! green_start( ($f:ident) => (
mod __start {
extern crate green;
extern crate rustuv;
#[start]
fn start(argc: int, argv: *const *const u8) -> int {
green::start(argc, argv, rustuv::event_loop, super::$f)
}
}
) )
/// Set up a default runtime configuration, given compiler-supplied arguments.
///
/// This function will block until the entire pool of M:N schedulers has
/// exited. This function also requires a local task to be available.
///
/// # Arguments
///
/// * `argc` & `argv` - The argument vector. On Unix this information is used
/// by os::args.
/// * `main` - The initial procedure to run inside of the M:N scheduling pool.
/// Once this procedure exits, the scheduling pool will begin to shut
/// down. The entire pool (and this function) will only return once
/// all child tasks have finished executing.
///
/// # Return value
///
/// The return value is used as the process return code. 0 on success, 101 on
/// error.
pub fn start(argc: int, argv: *const *const u8,
event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
rt::init(argc, argv);
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
ret = Some(run(event_loop_factory, main.take_unwrap()));
}).destroy();
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
ret.unwrap()
}
/// Execute the main function in a pool of M:N schedulers.
///
/// Configures the runtime according to the environment, by default using a task
/// scheduler with the same number of threads as cores. Returns a process exit
/// code.
///
/// This function will not return until all schedulers in the associated pool
/// have returned.
pub fn run(event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
main: proc():Send) -> int {
// Create a scheduler pool and spawn the main task into this pool. We will
// get notified over a channel when the main task exits.
let mut cfg = PoolConfig::new();
cfg.event_loop_factory = event_loop_factory;
let mut pool = SchedPool::new(cfg);
let (tx, rx) = channel();
let mut opts = TaskOpts::new();
opts.on_exit = Some(proc(r) tx.send(r));
opts.name = Some("<main>".into_maybe_owned());
pool.spawn(opts, main);
// Wait for the main task to return, and set the process error code
// appropriately.
if rx.recv().is_err() {
os::set_exit_status(rt::DEFAULT_ERROR_CODE);
}
// Now that we're sure all tasks are dead, shut down the pool of schedulers,
// waiting for them all to return.
pool.shutdown();
os::get_exit_status()
}
/// Configuration of how an M:N pool of schedulers is spawned.
pub struct PoolConfig {
/// The number of schedulers (OS threads) to spawn into this M:N pool.
pub threads: uint,
/// A factory function used to create new event loops. If this is not
/// specified then the default event loop factory is used.
pub event_loop_factory: fn() -> Box<rtio::EventLoop + Send>,
}
impl PoolConfig {
    /// Returns the default configuration, as determined by the environment
/// variables of this process.
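    ///
    /// A minimal sketch of overriding the defaults before building a pool
    /// (only the public fields declared on `PoolConfig` are used):
    ///
    /// ```rust
    /// use green::{PoolConfig, SchedPool};
    ///
    /// let mut config = PoolConfig::new();
    /// config.threads = 2; // use two OS threads regardless of core count
    /// let pool = SchedPool::new(config);
    /// pool.shutdown();
    /// ```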
pub fn new() -> PoolConfig {
PoolConfig {
threads: rt::default_sched_threads(),
event_loop_factory: basic::event_loop,
}
}
}
/// A structure representing a handle to a pool of schedulers. This handle is
/// used to keep the pool alive and also reap the status from the pool.
pub struct SchedPool {
id: uint,
threads: Vec<Thread<()>>,
handles: Vec<SchedHandle>,
stealers: Vec<deque::Stealer<Box<task::GreenTask>>>,
next_friend: uint,
stack_pool: StackPool,
deque_pool: deque::BufferPool<Box<task::GreenTask>>,
sleepers: SleeperList,
factory: fn() -> Box<rtio::EventLoop + Send>,
task_state: TaskState,
tasks_done: Receiver<()>,
}
/// This is an internal state shared among a pool of schedulers. It is used to
/// keep track of how many tasks are currently running in the pool, and to send
/// on a channel once the entire pool has been drained of all tasks.
#[deriving(Clone)]
struct
|
{
cnt: Arc<AtomicUint>,
done: Sender<()>,
}
impl SchedPool {
/// Execute the main function in a pool of M:N schedulers.
///
/// This will configure the pool according to the `config` parameter, and
/// initially run `main` inside the pool of schedulers.
pub fn new(config: PoolConfig) -> SchedPool {
static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
let PoolConfig {
threads: nscheds,
event_loop_factory: factory
} = config;
assert!(nscheds > 0);
// The pool of schedulers that will be returned from this function
let (p, state) = TaskState::new();
let mut pool = SchedPool {
threads: vec![],
handles: vec![],
stealers: vec![],
id: unsafe { POOL_ID.fetch_add(1, SeqCst) },
sleepers: SleeperList::new(),
stack_pool: StackPool::new(),
deque_pool: deque::BufferPool::new(),
next_friend: 0,
factory: factory,
task_state: state,
tasks_done: p,
};
        // Create a work queue for each scheduler, n times. Create an extra
        // one for the main thread if that flag is set. We won't steal from it.
let mut workers = Vec::with_capacity(nscheds);
let mut stealers = Vec::with_capacity(nscheds);
for _ in range(0, nscheds) {
let (w, s) = pool.deque_pool.deque();
workers.push(w);
stealers.push(s);
}
pool.stealers = stealers;
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
for worker in workers.move_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = box Scheduler::new(pool.id,
(pool.factory)(),
worker,
pool.stealers.clone(),
pool.sleepers.clone(),
pool.task_state.clone());
pool.handles.push(sched.make_handle());
pool.threads.push(Thread::start(proc() { sched.bootstrap(); }));
}
return pool;
}
/// Creates a new task configured to run inside of this pool of schedulers.
/// This is useful to create a task which can then be sent to a specific
/// scheduler created by `spawn_sched` (and possibly pin it to that
/// scheduler).
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn task(&mut self, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> {
GreenTask::configure(&mut self.stack_pool, opts, f)
}
/// Spawns a new task into this pool of schedulers, using the specified
/// options to configure the new task which is spawned.
///
/// New tasks are spawned in a round-robin fashion to the schedulers in this
/// pool, but tasks can certainly migrate among schedulers once they're in
/// the pool.
#[deprecated = "use the green and green_pinned methods of GreenTaskBuilder instead"]
pub fn spawn(&mut self, opts: TaskOpts, f: proc():Send) {
let task = self.task(opts, f);
// Figure out someone to send this task to
let idx = self.next_friend;
self.next_friend += 1;
if self.next_friend >= self.handles.len() {
self.next_friend = 0;
}
// Jettison the task away!
self.handles.get_mut(idx).send(TaskFromFriend(task));
}
    /// Spawns a new scheduler into this M:N pool. A handle to the scheduler is
    /// returned for later use. The scheduler will not exit as long as this
    /// handle is active.
///
/// The scheduler spawned will participate in work stealing with all of the
/// other schedulers currently in the scheduler pool.
pub fn spawn_sched(&mut self) -> SchedHandle {
let (worker, stealer) = self.deque_pool.deque();
self.stealers.push(stealer.clone());
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
for handle in self.handles.mut_iter() {
handle.send(NewNeighbor(stealer.clone()));
}
// Create the new scheduler, using the same sleeper list as all the
// other schedulers as well as having a stealer handle to all other
// schedulers.
let mut sched = box Scheduler::new(self.id,
(self.factory)(),
worker,
self.stealers.clone(),
self.sleepers.clone(),
self.task_state.clone());
let ret = sched.make_handle();
self.handles.push(sched.make_handle());
self.threads.push(Thread::start(proc() { sched.bootstrap() }));
return ret;
}
/// Consumes the pool of schedulers, waiting for all tasks to exit and all
/// schedulers to shut down.
///
    /// This function must be called in order to drop a pool of schedulers; it
    /// is considered an error to drop a pool without calling this method.
    ///
    /// This only waits for all tasks in *this pool* of schedulers to exit; any
    /// native tasks or external pools will not be waited on.
pub fn shutdown(mut self) {
self.stealers = vec![];
// Wait for everyone to exit. We may have reached a 0-task count
// multiple times in the past, meaning there could be several buffered
// messages on the `tasks_done` port. We're guaranteed that after *some*
// message the current task count will be 0, so we just receive in a
// loop until everything is totally dead.
while self.task_state.active() {
self.tasks_done.recv();
}
// Now that everyone's gone, tell everything to shut down.
for mut handle in replace(&mut self.handles, vec![]).move_iter() {
handle.send(Shutdown);
}
for thread in replace(&mut self.threads, vec![]).move_iter() {
thread.join();
}
}
}
impl TaskState {
fn new() -> (Receiver<()>, TaskState) {
let (tx, rx) = channel();
(rx, TaskState {
cnt: Arc::new(AtomicUint::new(0)),
done: tx,
})
}
fn increment(&mut self) {
self.cnt.fetch_add(1, SeqCst);
}
fn active(&self) -> bool {
self.cnt.load(SeqCst)!= 0
}
fn decrement(&mut self) {
let prev = self.cnt.fetch_sub(1, SeqCst);
if prev == 1 {
self.done.send(());
}
}
}
impl Drop for SchedPool {
fn drop(&mut self) {
if self.threads.len() > 0 {
fail!("dropping a M:N scheduler pool that wasn't shut down");
}
}
}
/// A spawner for green tasks
pub struct GreenSpawner<'a>{
pool: &'a mut SchedPool,
handle: Option<&'a mut SchedHandle>
}
impl<'a> Spawner for GreenSpawner<'a> {
#[inline]
fn spawn(self, opts: TaskOpts, f: proc():Send) {
let GreenSpawner { pool, handle } = self;
match handle {
None => pool.spawn(opts, f),
Some(h) => h.send(PinnedTask(pool.task(opts, f)))
}
}
}
/// An extension trait adding `green` configuration methods to `TaskBuilder`.
pub trait GreenTaskBuilder {
fn green<'a>(self, &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>>;
fn green_pinned<'a>(self, &'a mut SchedPool, &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>>;
}
impl<S: Spawner> GreenTaskBuilder for TaskBuilder<S> {
fn green<'a>(self, pool: &'a mut SchedPool) -> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: None})
}
fn green_pinned<'a>(self, pool: &'a mut SchedPool, handle: &'a mut SchedHandle)
-> TaskBuilder<GreenSpawner<'a>> {
self.spawner(GreenSpawner {pool: pool, handle: Some(handle)})
}
}
#[cfg(test)]
mod test {
use std::task::TaskBuilder;
use super::{SchedPool, PoolConfig, GreenTaskBuilder};
#[test]
fn test_green_builder() {
let mut pool = SchedPool::new(PoolConfig::new());
let res = TaskBuilder::new().green(&mut pool).try(proc() {
"Success!".to_string()
});
assert_eq!(res.ok().unwrap(), "Success!".to_string());
pool.shutdown();
}
}
|
TaskState
|
identifier_name
|
reporter.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, SourcePosition};
use log;
use style_traits::ParseErrorReporter;
#[derive(JSTraceable, HeapSizeOf)]
pub struct CSSErrorReporter;
impl ParseErrorReporter for CSSErrorReporter {
fn report_error(&self, input: &mut Parser, position: SourcePosition, message: &str) {
if log_enabled!(log::LogLevel::Info) {
|
let location = input.source_location(position);
            // TODO eventually this will go into a "web console" or something.
info!("{}:{} {}", location.line, location.column, message)
}
}
fn clone(&self) -> Box<ParseErrorReporter + Send + Sync> {
let error_reporter = box CSSErrorReporter;
return error_reporter;
}
}
|
random_line_split
|
|
reporter.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, SourcePosition};
use log;
use style_traits::ParseErrorReporter;
#[derive(JSTraceable, HeapSizeOf)]
pub struct CSSErrorReporter;
impl ParseErrorReporter for CSSErrorReporter {
fn report_error(&self, input: &mut Parser, position: SourcePosition, message: &str) {
if log_enabled!(log::LogLevel::Info)
|
}
fn clone(&self) -> Box<ParseErrorReporter + Send + Sync> {
let error_reporter = box CSSErrorReporter;
return error_reporter;
}
}
|
{
let location = input.source_location(position);
        // TODO eventually this will go into a "web console" or something.
info!("{}:{} {}", location.line, location.column, message)
}
|
conditional_block
|
reporter.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, SourcePosition};
use log;
use style_traits::ParseErrorReporter;
#[derive(JSTraceable, HeapSizeOf)]
pub struct CSSErrorReporter;
impl ParseErrorReporter for CSSErrorReporter {
fn report_error(&self, input: &mut Parser, position: SourcePosition, message: &str) {
if log_enabled!(log::LogLevel::Info) {
let location = input.source_location(position);
            // TODO eventually this will go into a "web console" or something.
info!("{}:{} {}", location.line, location.column, message)
}
}
fn
|
(&self) -> Box<ParseErrorReporter + Send + Sync> {
let error_reporter = box CSSErrorReporter;
return error_reporter;
}
}
|
clone
|
identifier_name
|
optimizer.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
use display_list::{DisplayItem, DisplayList, StackingContext};
use std::collections::linked_list::LinkedList;
use geom::rect::Rect;
use util::geometry::{self, Au};
use std::sync::Arc;
/// Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
pub struct DisplayListOptimizer {
/// The visible rect in page coordinates.
visible_rect: Rect<Au>,
}
impl DisplayListOptimizer {
/// Creates a new display list optimizer object. `visible_rect` specifies the visible rect in
/// page coordinates.
pub fn
|
(visible_rect: &Rect<f32>) -> DisplayListOptimizer {
DisplayListOptimizer {
visible_rect: geometry::f32_rect_to_au_rect(*visible_rect),
}
}
/// Optimizes the given display list, returning an equivalent, but cheaper-to-paint, one.
pub fn optimize(self, display_list: &DisplayList) -> DisplayList {
let mut result = DisplayList::new();
self.add_in_bounds_display_items(&mut result.background_and_borders,
display_list.background_and_borders.iter());
self.add_in_bounds_display_items(&mut result.block_backgrounds_and_borders,
display_list.block_backgrounds_and_borders.iter());
self.add_in_bounds_display_items(&mut result.floats, display_list.floats.iter());
self.add_in_bounds_display_items(&mut result.content, display_list.content.iter());
self.add_in_bounds_display_items(&mut result.positioned_content,
display_list.positioned_content.iter());
self.add_in_bounds_display_items(&mut result.outlines, display_list.outlines.iter());
self.add_in_bounds_stacking_contexts(&mut result.children, display_list.children.iter());
result
}
/// Adds display items that intersect the visible rect to `result_list`.
fn add_in_bounds_display_items<'a,I>(&self,
result_list: &mut LinkedList<DisplayItem>,
display_items: I)
where I: Iterator<Item=&'a DisplayItem> {
for display_item in display_items {
if self.visible_rect.intersects(&display_item.base().bounds) &&
display_item.base().clip.might_intersect_rect(&self.visible_rect) {
result_list.push_back((*display_item).clone())
}
}
}
/// Adds child stacking contexts whose boundaries intersect the visible rect to `result_list`.
fn add_in_bounds_stacking_contexts<'a,I>(&self,
result_list: &mut LinkedList<Arc<StackingContext>>,
stacking_contexts: I)
where I: Iterator<Item=&'a Arc<StackingContext>> {
for stacking_context in stacking_contexts {
let overflow = stacking_context.overflow.translate(&stacking_context.bounds.origin);
if self.visible_rect.intersects(&overflow) {
result_list.push_back((*stacking_context).clone())
}
}
}
}
|
new
|
identifier_name
|
optimizer.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
use display_list::{DisplayItem, DisplayList, StackingContext};
use std::collections::linked_list::LinkedList;
use geom::rect::Rect;
use util::geometry::{self, Au};
use std::sync::Arc;
/// Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
pub struct DisplayListOptimizer {
/// The visible rect in page coordinates.
visible_rect: Rect<Au>,
}
impl DisplayListOptimizer {
/// Creates a new display list optimizer object. `visible_rect` specifies the visible rect in
/// page coordinates.
pub fn new(visible_rect: &Rect<f32>) -> DisplayListOptimizer {
DisplayListOptimizer {
visible_rect: geometry::f32_rect_to_au_rect(*visible_rect),
}
}
/// Optimizes the given display list, returning an equivalent, but cheaper-to-paint, one.
pub fn optimize(self, display_list: &DisplayList) -> DisplayList {
let mut result = DisplayList::new();
self.add_in_bounds_display_items(&mut result.background_and_borders,
display_list.background_and_borders.iter());
self.add_in_bounds_display_items(&mut result.block_backgrounds_and_borders,
display_list.block_backgrounds_and_borders.iter());
self.add_in_bounds_display_items(&mut result.floats, display_list.floats.iter());
self.add_in_bounds_display_items(&mut result.content, display_list.content.iter());
self.add_in_bounds_display_items(&mut result.positioned_content,
display_list.positioned_content.iter());
self.add_in_bounds_display_items(&mut result.outlines, display_list.outlines.iter());
self.add_in_bounds_stacking_contexts(&mut result.children, display_list.children.iter());
result
}
/// Adds display items that intersect the visible rect to `result_list`.
fn add_in_bounds_display_items<'a,I>(&self,
result_list: &mut LinkedList<DisplayItem>,
display_items: I)
where I: Iterator<Item=&'a DisplayItem> {
for display_item in display_items {
if self.visible_rect.intersects(&display_item.base().bounds) &&
display_item.base().clip.might_intersect_rect(&self.visible_rect) {
result_list.push_back((*display_item).clone())
}
}
}
/// Adds child stacking contexts whose boundaries intersect the visible rect to `result_list`.
fn add_in_bounds_stacking_contexts<'a,I>(&self,
result_list: &mut LinkedList<Arc<StackingContext>>,
stacking_contexts: I)
where I: Iterator<Item=&'a Arc<StackingContext>> {
for stacking_context in stacking_contexts {
let overflow = stacking_context.overflow.translate(&stacking_context.bounds.origin);
if self.visible_rect.intersects(&overflow)
|
}
}
}
|
{
result_list.push_back((*stacking_context).clone())
}
|
conditional_block
|
optimizer.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
use display_list::{DisplayItem, DisplayList, StackingContext};
use std::collections::linked_list::LinkedList;
use geom::rect::Rect;
use util::geometry::{self, Au};
use std::sync::Arc;
/// Transforms a display list to produce a visually-equivalent, but cheaper-to-paint, one.
pub struct DisplayListOptimizer {
/// The visible rect in page coordinates.
visible_rect: Rect<Au>,
}
impl DisplayListOptimizer {
/// Creates a new display list optimizer object. `visible_rect` specifies the visible rect in
/// page coordinates.
pub fn new(visible_rect: &Rect<f32>) -> DisplayListOptimizer {
DisplayListOptimizer {
visible_rect: geometry::f32_rect_to_au_rect(*visible_rect),
}
}
/// Optimizes the given display list, returning an equivalent, but cheaper-to-paint, one.
pub fn optimize(self, display_list: &DisplayList) -> DisplayList {
let mut result = DisplayList::new();
self.add_in_bounds_display_items(&mut result.background_and_borders,
display_list.background_and_borders.iter());
self.add_in_bounds_display_items(&mut result.block_backgrounds_and_borders,
display_list.block_backgrounds_and_borders.iter());
self.add_in_bounds_display_items(&mut result.floats, display_list.floats.iter());
self.add_in_bounds_display_items(&mut result.content, display_list.content.iter());
self.add_in_bounds_display_items(&mut result.positioned_content,
display_list.positioned_content.iter());
self.add_in_bounds_display_items(&mut result.outlines, display_list.outlines.iter());
self.add_in_bounds_stacking_contexts(&mut result.children, display_list.children.iter());
result
|
fn add_in_bounds_display_items<'a,I>(&self,
result_list: &mut LinkedList<DisplayItem>,
display_items: I)
where I: Iterator<Item=&'a DisplayItem> {
for display_item in display_items {
if self.visible_rect.intersects(&display_item.base().bounds) &&
display_item.base().clip.might_intersect_rect(&self.visible_rect) {
result_list.push_back((*display_item).clone())
}
}
}
/// Adds child stacking contexts whose boundaries intersect the visible rect to `result_list`.
fn add_in_bounds_stacking_contexts<'a,I>(&self,
result_list: &mut LinkedList<Arc<StackingContext>>,
stacking_contexts: I)
where I: Iterator<Item=&'a Arc<StackingContext>> {
for stacking_context in stacking_contexts {
let overflow = stacking_context.overflow.translate(&stacking_context.bounds.origin);
if self.visible_rect.intersects(&overflow) {
result_list.push_back((*stacking_context).clone())
}
}
}
}
|
}
/// Adds display items that intersect the visible rect to `result_list`.
|
random_line_split
|
main.rs
|
#[macro_use]
extern crate futures;
extern crate tokio_io;
extern crate tokio_core;
extern crate bytes;
|
extern crate rpassword;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate toml;
#[macro_use]
extern crate log;
use std::{io, str};
use futures::{future, stream, Future, Stream, Sink};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpStream;
use tokio_io::AsyncRead;
pub mod args;
pub mod logs;
pub mod codec;
pub mod config;
use codec::{LineCodec, PingPong};
use codec::line::Line;
use logs::Logs;
use config::{Config, Server};
fn _server(srv_name: String, srv: Server, log_path: &str, handle: Handle) {
    // TODO: make sure someone's nick can't contain directory traversal
    // `NICK../../../../dev/sda1`
    // TODO: stop passwords from leaking into log files (don't log conn msg)
let mut logs = Logs::new(log_path);
//let conn_msg: Vec<_> = srv.conn_msg();
//info!(srv.logger, "Initiating connection: {:?}", conn_msg);
//let conn_lines: Vec<Result<Line, io::Error>> = conn_msg
// .iter().map(|s| Ok(Line::from_str(s))).collect();
let conn_lines: Vec<io::Result<Line>> = vec![];
//let addr = srv.get_addr().to_socket_addrs().unwrap().next().unwrap();
//info!(srv.logger, "Connecting to {} w/ tls={}", addr, srv.tls);
let stream = TcpStream::connect(&srv.addr, &handle);
let listen = stream.and_then(move |socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_lines))
.and_then(move |_| {
stream.for_each(move |line| {
//info!(srv.logger, "{:?}", line); // if super verbose
//info!(srv.logger, "{}", line.to_string());
if let Some((name,text)) = line.format_privmsg(&srv_name) {
logs.write(name,&text).unwrap();
}
future::ok(())
})
})
}).map_err(|_| ());
handle.spawn(listen);
}
fn main() {
info!("Parsing arguments");
let args: args::Args = docopt::Docopt::new(args::USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("ARGS: {:?}", args);
}
fn _main() {
// TODO: clap/docopt CLI args for logging verbosity|output / config file
let config_file = "config2.toml";
/*
let dec = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(dec).build().fuse();
let async_drain = slog_async::Async::new(drain).build().fuse();
let log = slog::Logger::root(async_drain, o!("cfg" => config_file));
let config = Config::from(config_file, &log).unwrap();
let mut core = Core::new().unwrap();
for (name,srv) in config.servers {
_server(name, srv, &config.logs_dir, core.handle());
}
let empty: future::Empty<(),()> = future::empty();
core.run(empty).unwrap();
*/
/*
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
let addr = "irc.mozilla.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
*/
}
/*
use std::net::SocketAddr;
use tokio_core::net::TcpListener;
fn listener(addr: SocketAddr, handle: &Handle) -> io::Result<()> {
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
let (tx, rx) = futures::sync::mpsc::unbounded();
tx.send(String::from("foo"));
let data = socket.incoming().for_each(|(stream,addr)| {
let (reader, writer) = stream.split();
futures::future::ok(())
});
Ok(())
}
fn main() {
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "irc.mozilla.org:6665".to_socket_addrs().unwrap().next().unwrap();
let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
}
*/
|
extern crate time;
|
random_line_split
|
main.rs
|
#[macro_use]
extern crate futures;
extern crate tokio_io;
extern crate tokio_core;
extern crate bytes;
extern crate time;
extern crate rpassword;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate toml;
#[macro_use]
extern crate log;
use std::{io, str};
use futures::{future, stream, Future, Stream, Sink};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpStream;
use tokio_io::AsyncRead;
pub mod args;
pub mod logs;
pub mod codec;
pub mod config;
use codec::{LineCodec, PingPong};
use codec::line::Line;
use logs::Logs;
use config::{Config, Server};
fn _server(srv_name: String, srv: Server, log_path: &str, handle: Handle) {
    // TODO: make sure someone's nick can't contain directory traversal
    // `NICK../../../../dev/sda1`
    // TODO: stop passwords from leaking into log files (don't log conn msg)
let mut logs = Logs::new(log_path);
//let conn_msg: Vec<_> = srv.conn_msg();
//info!(srv.logger, "Initiating connection: {:?}", conn_msg);
//let conn_lines: Vec<Result<Line, io::Error>> = conn_msg
// .iter().map(|s| Ok(Line::from_str(s))).collect();
let conn_lines: Vec<io::Result<Line>> = vec![];
//let addr = srv.get_addr().to_socket_addrs().unwrap().next().unwrap();
//info!(srv.logger, "Connecting to {} w/ tls={}", addr, srv.tls);
let stream = TcpStream::connect(&srv.addr, &handle);
let listen = stream.and_then(move |socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_lines))
.and_then(move |_| {
stream.for_each(move |line| {
//info!(srv.logger, "{:?}", line); // if super verbose
//info!(srv.logger, "{}", line.to_string());
if let Some((name,text)) = line.format_privmsg(&srv_name) {
logs.write(name,&text).unwrap();
}
future::ok(())
})
})
}).map_err(|_| ());
handle.spawn(listen);
}
fn main() {
info!("Parsing arguments");
let args: args::Args = docopt::Docopt::new(args::USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("ARGS: {:?}", args);
}
fn _main()
|
/*
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
let addr = "irc.mozilla.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
*/
}
/*
use std::net::SocketAddr;
use tokio_core::net::TcpListener;
fn listener(addr: SocketAddr, handle: &Handle) -> io::Result<()> {
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
let (tx, rx) = futures::sync::mpsc::unbounded();
tx.send(String::from("foo"));
let data = socket.incoming().for_each(|(stream,addr)| {
let (reader, writer) = stream.split();
futures::future::ok(())
});
Ok(())
}
fn main() {
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "irc.mozilla.org:6665".to_socket_addrs().unwrap().next().unwrap();
let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
}
*/
|
{
// TODO: clap/docopt CLI args for logging verbosity|output / config file
let config_file = "config2.toml";
/*
let dec = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(dec).build().fuse();
let async_drain = slog_async::Async::new(drain).build().fuse();
let log = slog::Logger::root(async_drain, o!("cfg" => config_file));
let config = Config::from(config_file, &log).unwrap();
let mut core = Core::new().unwrap();
for (name,srv) in config.servers {
_server(name, srv, &config.logs_dir, core.handle());
}
let empty: future::Empty<(),()> = future::empty();
core.run(empty).unwrap();
*/
|
identifier_body
|
main.rs
|
#[macro_use]
extern crate futures;
extern crate tokio_io;
extern crate tokio_core;
extern crate bytes;
extern crate time;
extern crate rpassword;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate toml;
#[macro_use]
extern crate log;
use std::{io, str};
use futures::{future, stream, Future, Stream, Sink};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpStream;
use tokio_io::AsyncRead;
pub mod args;
pub mod logs;
pub mod codec;
pub mod config;
use codec::{LineCodec, PingPong};
use codec::line::Line;
use logs::Logs;
use config::{Config, Server};
fn _server(srv_name: String, srv: Server, log_path: &str, handle: Handle) {
    // TODO: make sure someone's nick can't contain directory traversal
    // `NICK../../../../dev/sda1`
    // TODO: stop passwords from leaking into log files (don't log conn msg)
let mut logs = Logs::new(log_path);
//let conn_msg: Vec<_> = srv.conn_msg();
//info!(srv.logger, "Initiating connection: {:?}", conn_msg);
//let conn_lines: Vec<Result<Line, io::Error>> = conn_msg
// .iter().map(|s| Ok(Line::from_str(s))).collect();
let conn_lines: Vec<io::Result<Line>> = vec![];
//let addr = srv.get_addr().to_socket_addrs().unwrap().next().unwrap();
//info!(srv.logger, "Connecting to {} w/ tls={}", addr, srv.tls);
let stream = TcpStream::connect(&srv.addr, &handle);
let listen = stream.and_then(move |socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_lines))
.and_then(move |_| {
stream.for_each(move |line| {
//info!(srv.logger, "{:?}", line); // if super verbose
//info!(srv.logger, "{}", line.to_string());
if let Some((name,text)) = line.format_privmsg(&srv_name)
|
future::ok(())
})
})
}).map_err(|_| ());
handle.spawn(listen);
}
fn main() {
info!("Parsing arguments");
let args: args::Args = docopt::Docopt::new(args::USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("ARGS: {:?}", args);
}
fn _main() {
// TODO: clap/docopt CLI args for logging verbosity|output / config file
let config_file = "config2.toml";
/*
let dec = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(dec).build().fuse();
let async_drain = slog_async::Async::new(drain).build().fuse();
let log = slog::Logger::root(async_drain, o!("cfg" => config_file));
let config = Config::from(config_file, &log).unwrap();
let mut core = Core::new().unwrap();
for (name,srv) in config.servers {
_server(name, srv, &config.logs_dir, core.handle());
}
let empty: future::Empty<(),()> = future::empty();
core.run(empty).unwrap();
*/
/*
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
let addr = "irc.mozilla.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
*/
}
/*
use std::net::SocketAddr;
use tokio_core::net::TcpListener;
fn listener(addr: SocketAddr, handle: &Handle) -> io::Result<()> {
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
let (tx, rx) = futures::sync::mpsc::unbounded();
tx.send(String::from("foo"));
let data = socket.incoming().for_each(|(stream,addr)| {
let (reader, writer) = stream.split();
futures::future::ok(())
});
Ok(())
}
fn main() {
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "irc.mozilla.org:6665".to_socket_addrs().unwrap().next().unwrap();
let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
}
*/
|
{
logs.write(name,&text).unwrap();
}
|
conditional_block
|
main.rs
|
#[macro_use]
extern crate futures;
extern crate tokio_io;
extern crate tokio_core;
extern crate bytes;
extern crate time;
extern crate rpassword;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
extern crate toml;
#[macro_use]
extern crate log;
use std::{io, str};
use futures::{future, stream, Future, Stream, Sink};
use tokio_core::reactor::{Core, Handle};
use tokio_core::net::TcpStream;
use tokio_io::AsyncRead;
pub mod args;
pub mod logs;
pub mod codec;
pub mod config;
use codec::{LineCodec, PingPong};
use codec::line::Line;
use logs::Logs;
use config::{Config, Server};
fn
|
(srv_name: String, srv: Server, log_path: &str, handle: Handle) {
    // TODO: make sure someone's nick can't contain directory traversal
    // `NICK../../../../dev/sda1`
    // TODO: stop passwords from leaking into log files (don't log conn msg)
let mut logs = Logs::new(log_path);
//let conn_msg: Vec<_> = srv.conn_msg();
//info!(srv.logger, "Initiating connection: {:?}", conn_msg);
//let conn_lines: Vec<Result<Line, io::Error>> = conn_msg
// .iter().map(|s| Ok(Line::from_str(s))).collect();
let conn_lines: Vec<io::Result<Line>> = vec![];
//let addr = srv.get_addr().to_socket_addrs().unwrap().next().unwrap();
//info!(srv.logger, "Connecting to {} w/ tls={}", addr, srv.tls);
let stream = TcpStream::connect(&srv.addr, &handle);
let listen = stream.and_then(move |socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_lines))
.and_then(move |_| {
stream.for_each(move |line| {
//info!(srv.logger, "{:?}", line); // if super verbose
//info!(srv.logger, "{}", line.to_string());
if let Some((name,text)) = line.format_privmsg(&srv_name) {
logs.write(name,&text).unwrap();
}
future::ok(())
})
})
}).map_err(|_| ());
handle.spawn(listen);
}
fn main() {
info!("Parsing arguments");
let args: args::Args = docopt::Docopt::new(args::USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
println!("ARGS: {:?}", args);
}
fn _main() {
// TODO: clap/docopt CLI args for logging verbosity|output / config file
let config_file = "config2.toml";
/*
let dec = slog_term::TermDecorator::new().build();
let drain = slog_term::FullFormat::new(dec).build().fuse();
let async_drain = slog_async::Async::new(drain).build().fuse();
let log = slog::Logger::root(async_drain, o!("cfg" => config_file));
let config = Config::from(config_file, &log).unwrap();
let mut core = Core::new().unwrap();
for (name,srv) in config.servers {
_server(name, srv, &config.logs_dir, core.handle());
}
let empty: future::Empty<(),()> = future::empty();
core.run(empty).unwrap();
*/
/*
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
let addr = "irc.mozilla.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
*/
}
/*
use std::net::SocketAddr;
use tokio_core::net::TcpListener;
fn listener(addr: SocketAddr, handle: &Handle) -> io::Result<()> {
let socket = TcpListener::bind(&addr, &handle).unwrap();
println!("Listening on: {}", addr);
let (tx, rx) = futures::sync::mpsc::unbounded();
tx.send(String::from("foo"));
let data = socket.incoming().for_each(|(stream,addr)| {
let (reader, writer) = stream.split();
futures::future::ok(())
});
Ok(())
}
fn main() {
let conn_msg: Vec<Result<Line, io::Error>> = vec![
Ok(Line::from_str("USER a b c d")),
Ok(Line::from_str("NICK qjkxk")),
Ok(Line::from_str("JOIN #test")),
];
let mut logs = Logs::new("/tmp/irc_logs");
let mut core = Core::new().unwrap();
//let addr = "irc.freenode.org:6667".to_socket_addrs().unwrap().next().unwrap();
//let addr = "irc.mozilla.org:6665".to_socket_addrs().unwrap().next().unwrap();
let addr = "0.0.0.0:12345".to_socket_addrs().unwrap().next().unwrap();
let stream = TcpStream::connect(&addr, &core.handle());
let listen = stream.and_then(|socket| {
let transport = PingPong::new(socket.framed(LineCodec));
let (sink, stream) = transport.split();
sink.send_all(stream::iter(conn_msg))
.and_then(|_| {
stream.for_each(|line| {
println!("SAW: `{:?}`", line);
if let Some((name,text)) = line.format_privmsg("mozilla") {
logs.write(name,&text).unwrap();
}
futures::future::ok(())
})
})
});
core.run(listen).unwrap();
}
*/
|
_server
|
identifier_name
|
buffer.rs
|
pub struct
|
{
line: u32,
column: u32
}
pub enum Cursor {
Simple(Position),
Range(Position, Position),
Block(Position, Position)
}
pub trait StrLike {
type Bytes: Iterator;
type Chars: Iterator;
type Lines: Iterator;
/// Return number of bytes in this object
fn len(&self) -> usize;
/// Return number of unicode characters in this object
fn char_count(&self) -> usize;
/// Return number of lines in this object
fn line_count(&self) -> usize;
fn bytes(&self) -> Bytes;
fn chars(&self) -> Chars;
fn lines(&self) -> Lines;
// TODO: strchr or some such. Bytes or chars based?
// TODO: Storing styles
}
pub trait Snapshot: StrLike {
fn cursors(&self) -> &[Cursor];
/// Return a snapshot on which this one was based or None if it's not available
fn previous(&self) -> Option<Self>;
/// Return all snapshots based on this one
fn next(&self) -> &[Self];
}
pub trait Buffer {
type SnapshotType: Snapshot;
fn current_snapshot(&self) -> Snapshot;
}
|
Position
|
identifier_name
|
buffer.rs
|
pub struct Position {
line: u32,
column: u32
}
pub enum Cursor {
Simple(Position),
Range(Position, Position),
Block(Position, Position)
}
pub trait StrLike {
type Bytes: Iterator;
|
/// Return number of unicode characters in this object
fn char_count(&self) -> usize;
/// Return number of lines in this object
fn line_count(&self) -> usize;
fn bytes(&self) -> Bytes;
fn chars(&self) -> Chars;
fn lines(&self) -> Lines;
// TODO: strchr or some such. Bytes or chars based?
// TODO: Storing styles
}
pub trait Snapshot: StrLike {
fn cursors(&self) -> &[Cursor];
/// Return a snapshot on which this one was based or None if it's not available
fn previous(&self) -> Option<Self>;
/// Return all snapshots based on this one
fn next(&self) -> &[Self];
}
pub trait Buffer {
type SnapshotType: Snapshot;
fn current_snapshot(&self) -> Snapshot;
}
|
type Chars: Iterator;
type Lines: Iterator;
/// Return number of bytes in this object
fn len(&self) -> usize;
|
random_line_split
|
documenttype.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentTypeBinding::DocumentTypeMethods;
use crate::dom::bindings::codegen::UnionTypes::NodeOrString;
use crate::dom::bindings::error::ErrorResult;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::node::Node;
use dom_struct::dom_struct;
// https://dom.spec.whatwg.org/#documenttype
/// The `DOCTYPE` tag.
#[dom_struct]
pub struct DocumentType {
node: Node,
name: DOMString,
public_id: DOMString,
system_id: DOMString,
}
impl DocumentType {
fn new_inherited(
name: DOMString,
public_id: Option<DOMString>,
system_id: Option<DOMString>,
document: &Document,
) -> DocumentType {
DocumentType {
node: Node::new_inherited(document),
name: name,
public_id: public_id.unwrap_or_default(),
system_id: system_id.unwrap_or_default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(
name: DOMString,
public_id: Option<DOMString>,
system_id: Option<DOMString>,
document: &Document,
) -> DomRoot<DocumentType> {
Node::reflect_node(
Box::new(DocumentType::new_inherited(
name, public_id, system_id, document,
)),
document,
)
}
#[inline]
pub fn name(&self) -> &DOMString {
&self.name
}
#[inline]
pub fn
|
(&self) -> &DOMString {
&self.public_id
}
#[inline]
pub fn system_id(&self) -> &DOMString {
&self.system_id
}
}
impl DocumentTypeMethods for DocumentType {
// https://dom.spec.whatwg.org/#dom-documenttype-name
fn Name(&self) -> DOMString {
self.name.clone()
}
// https://dom.spec.whatwg.org/#dom-documenttype-publicid
fn PublicId(&self) -> DOMString {
self.public_id.clone()
}
// https://dom.spec.whatwg.org/#dom-documenttype-systemid
fn SystemId(&self) -> DOMString {
self.system_id.clone()
}
// https://dom.spec.whatwg.org/#dom-childnode-before
fn Before(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().before(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-after
fn After(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().after(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-replacewith
fn ReplaceWith(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().replace_with(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-remove
fn Remove(&self) {
self.upcast::<Node>().remove_self();
}
}
|
public_id
|
identifier_name
|
documenttype.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentTypeBinding::DocumentTypeMethods;
use crate::dom::bindings::codegen::UnionTypes::NodeOrString;
use crate::dom::bindings::error::ErrorResult;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::node::Node;
use dom_struct::dom_struct;
// https://dom.spec.whatwg.org/#documenttype
/// The `DOCTYPE` tag.
#[dom_struct]
pub struct DocumentType {
node: Node,
name: DOMString,
public_id: DOMString,
system_id: DOMString,
}
impl DocumentType {
fn new_inherited(
name: DOMString,
public_id: Option<DOMString>,
system_id: Option<DOMString>,
document: &Document,
) -> DocumentType {
DocumentType {
node: Node::new_inherited(document),
name: name,
public_id: public_id.unwrap_or_default(),
system_id: system_id.unwrap_or_default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(
name: DOMString,
public_id: Option<DOMString>,
system_id: Option<DOMString>,
document: &Document,
) -> DomRoot<DocumentType> {
Node::reflect_node(
Box::new(DocumentType::new_inherited(
name, public_id, system_id, document,
)),
document,
)
}
#[inline]
pub fn name(&self) -> &DOMString {
&self.name
}
#[inline]
pub fn public_id(&self) -> &DOMString {
&self.public_id
}
#[inline]
pub fn system_id(&self) -> &DOMString {
&self.system_id
}
}
impl DocumentTypeMethods for DocumentType {
// https://dom.spec.whatwg.org/#dom-documenttype-name
fn Name(&self) -> DOMString {
self.name.clone()
}
// https://dom.spec.whatwg.org/#dom-documenttype-publicid
fn PublicId(&self) -> DOMString {
self.public_id.clone()
}
// https://dom.spec.whatwg.org/#dom-documenttype-systemid
fn SystemId(&self) -> DOMString {
self.system_id.clone()
}
// https://dom.spec.whatwg.org/#dom-childnode-before
fn Before(&self, nodes: Vec<NodeOrString>) -> ErrorResult
|
// https://dom.spec.whatwg.org/#dom-childnode-after
fn After(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().after(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-replacewith
fn ReplaceWith(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().replace_with(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-remove
fn Remove(&self) {
self.upcast::<Node>().remove_self();
}
}
|
{
self.upcast::<Node>().before(nodes)
}
|
identifier_body
|
documenttype.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::DocumentTypeBinding::DocumentTypeMethods;
use crate::dom::bindings::codegen::UnionTypes::NodeOrString;
use crate::dom::bindings::error::ErrorResult;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::node::Node;
use dom_struct::dom_struct;
// https://dom.spec.whatwg.org/#documenttype
/// The `DOCTYPE` tag.
#[dom_struct]
pub struct DocumentType {
node: Node,
name: DOMString,
public_id: DOMString,
system_id: DOMString,
}
impl DocumentType {
fn new_inherited(
name: DOMString,
public_id: Option<DOMString>,
system_id: Option<DOMString>,
document: &Document,
) -> DocumentType {
DocumentType {
node: Node::new_inherited(document),
name: name,
public_id: public_id.unwrap_or_default(),
system_id: system_id.unwrap_or_default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(
name: DOMString,
public_id: Option<DOMString>,
system_id: Option<DOMString>,
document: &Document,
) -> DomRoot<DocumentType> {
Node::reflect_node(
Box::new(DocumentType::new_inherited(
name, public_id, system_id, document,
)),
document,
|
)
}
#[inline]
pub fn name(&self) -> &DOMString {
&self.name
}
#[inline]
pub fn public_id(&self) -> &DOMString {
&self.public_id
}
#[inline]
pub fn system_id(&self) -> &DOMString {
&self.system_id
}
}
impl DocumentTypeMethods for DocumentType {
// https://dom.spec.whatwg.org/#dom-documenttype-name
fn Name(&self) -> DOMString {
self.name.clone()
}
// https://dom.spec.whatwg.org/#dom-documenttype-publicid
fn PublicId(&self) -> DOMString {
self.public_id.clone()
}
// https://dom.spec.whatwg.org/#dom-documenttype-systemid
fn SystemId(&self) -> DOMString {
self.system_id.clone()
}
// https://dom.spec.whatwg.org/#dom-childnode-before
fn Before(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().before(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-after
fn After(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().after(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-replacewith
fn ReplaceWith(&self, nodes: Vec<NodeOrString>) -> ErrorResult {
self.upcast::<Node>().replace_with(nodes)
}
// https://dom.spec.whatwg.org/#dom-childnode-remove
fn Remove(&self) {
self.upcast::<Node>().remove_self();
}
}
|
random_line_split
|
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::ToCss;
/// This struct represents a combined color from a numeric color and
/// the current foreground color (currentcolor keyword).
/// Conceptually, the formula is "color * (1 - p) + currentcolor * p"
/// where p is foreground_ratio.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Color {
/// RGBA color.
pub color: RGBA,
/// The ratio of currentcolor in complex color.
pub foreground_ratio: u8,
}
fn blend_color_component(bg: u8, fg: u8, fg_alpha: u8) -> u8 {
let bg_ratio = (u8::max_value() - fg_alpha) as u32;
let fg_ratio = fg_alpha as u32;
let color = bg as u32 * bg_ratio + fg as u32 * fg_ratio;
// Divide by 255, rounding to the nearest integer
((color + 127) / 255) as u8
}
impl Color {
/// Returns a numeric color representing the given RGBA value.
pub fn rgba(rgba: RGBA) -> Color {
Color {
color: rgba,
foreground_ratio: 0,
}
}
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Returns a complex color value representing currentcolor.
pub fn currentcolor() -> Color {
Color {
color: RGBA::transparent(),
foreground_ratio: u8::max_value(),
}
}
/// Whether it is a numeric color (no currentcolor component).
pub fn is_numeric(&self) -> bool {
self.foreground_ratio == 0
}
/// Whether it is a currentcolor value (no numeric color component).
pub fn is_currentcolor(&self) -> bool {
self.foreground_ratio == u8::max_value()
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
if self.is_numeric() {
return self.color;
}
if self.is_currentcolor() {
return fg_color.clone();
|
let fg_ratio = self.foreground_ratio;
if self.color.alpha == fg_color.alpha {
let r = blend_color_component(self.color.red, fg_color.red, fg_ratio);
let g = blend_color_component(self.color.green, fg_color.green, fg_ratio);
let b = blend_color_component(self.color.blue, fg_color.blue, fg_ratio);
return RGBA::new(r, g, b, fg_color.alpha);
}
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * (1 - fg_ratio) + fg_alpha * fg_ratio
// color = (self_color * self_alpha * (1 - fg_ratio) +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = (1. / 255.) * (255 - fg_ratio) as f32;
let a1 = self.color.alpha_f32();
let r1 = a1 * self.color.red_f32();
let g1 = a1 * self.color.green_f32();
let b1 = a1 * self.color.blue_f32();
let p2 = 1. - p1;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a == 0.0 {
return RGBA::transparent();
}
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl PartialEq for Color {
fn eq(&self, other: &Color) -> bool {
self.foreground_ratio == other.foreground_ratio &&
(self.is_currentcolor() || self.color == other.color)
}
}
impl From<RGBA> for Color {
fn from(color: RGBA) -> Color {
Color {
color: color,
foreground_ratio: 0,
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
if self.is_numeric() {
self.color.to_css(dest)
} else if self.is_currentcolor() {
CSSParserColor::CurrentColor.to_css(dest)
} else {
Ok(())
}
}
}
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
|
}
// Common case that alpha channel is equal (usually both are opaque).
|
random_line_split
|
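The to_rgba comments in the color.rs rows above spell out the blend: alpha = self_alpha * (1 - fg_ratio) + fg_alpha * fg_ratio, and each channel is the alpha-weighted sum divided back by the combined alpha. The sketch below reworks that arithmetic as a standalone function over floating-point channels; the Rgba struct, its field names, and the example values in main are illustrative assumptions, not Servo code.
// Standalone sketch of the blending arithmetic described above (assumed
// types; not Servo's implementation). Channels and weights are in [0, 1].
#[derive(Clone, Copy, Debug)]
struct Rgba { r: f32, g: f32, b: f32, a: f32 }
// fg_ratio is the currentcolor weight, i.e. foreground_ratio / 255.
fn blend(numeric: Rgba, fg: Rgba, fg_ratio: f32) -> Rgba {
    let p1 = 1.0 - fg_ratio; // weight of the numeric color
    let p2 = fg_ratio;       // weight of the foreground color
    // alpha = self_alpha * (1 - fg_ratio) + fg_alpha * fg_ratio
    let a = p1 * numeric.a + p2 * fg.a;
    if a == 0.0 {
        return Rgba { r: 0.0, g: 0.0, b: 0.0, a: 0.0 };
    }
    // color = (self_color * self_alpha * (1 - fg_ratio)
    //          + fg_color * fg_alpha * fg_ratio) / alpha
    let inv = 1.0 / a;
    Rgba {
        r: (p1 * numeric.a * numeric.r + p2 * fg.a * fg.r) * inv,
        g: (p1 * numeric.a * numeric.g + p2 * fg.a * fg.g) * inv,
        b: (p1 * numeric.a * numeric.b + p2 * fg.a * fg.b) * inv,
        a: a,
    }
}
fn main() {
    // Half-transparent red blended 50/50 with opaque blue currentcolor:
    // alpha = 0.75, red = 0.25 / 0.75 = 1/3, blue = 0.5 / 0.75 = 2/3.
    let numeric = Rgba { r: 1.0, g: 0.0, b: 0.0, a: 0.5 };
    let fg = Rgba { r: 0.0, g: 0.0, b: 1.0, a: 1.0 };
    println!("{:?}", blend(numeric, fg, 0.5));
}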
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::ToCss;
/// This struct represents a combined color from a numeric color and
/// the current foreground color (currentcolor keyword).
/// Conceptually, the formula is "color * (1 - p) + currentcolor * p"
/// where p is foreground_ratio.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Color {
/// RGBA color.
pub color: RGBA,
/// The ratio of currentcolor in complex color.
pub foreground_ratio: u8,
}
fn blend_color_component(bg: u8, fg: u8, fg_alpha: u8) -> u8 {
let bg_ratio = (u8::max_value() - fg_alpha) as u32;
let fg_ratio = fg_alpha as u32;
let color = bg as u32 * bg_ratio + fg as u32 * fg_ratio;
// Divide by 255, rounding to the nearest integer
((color + 127) / 255) as u8
}
impl Color {
/// Returns a numeric color representing the given RGBA value.
pub fn rgba(rgba: RGBA) -> Color
|
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Returns a complex color value representing currentcolor.
pub fn currentcolor() -> Color {
Color {
color: RGBA::transparent(),
foreground_ratio: u8::max_value(),
}
}
/// Whether it is a numeric color (no currentcolor component).
pub fn is_numeric(&self) -> bool {
self.foreground_ratio == 0
}
/// Whether it is a currentcolor value (no numeric color component).
pub fn is_currentcolor(&self) -> bool {
self.foreground_ratio == u8::max_value()
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
if self.is_numeric() {
return self.color;
}
if self.is_currentcolor() {
return fg_color.clone();
}
// Common case that alpha channel is equal (usually both are opaque).
let fg_ratio = self.foreground_ratio;
if self.color.alpha == fg_color.alpha {
let r = blend_color_component(self.color.red, fg_color.red, fg_ratio);
let g = blend_color_component(self.color.green, fg_color.green, fg_ratio);
let b = blend_color_component(self.color.blue, fg_color.blue, fg_ratio);
return RGBA::new(r, g, b, fg_color.alpha);
}
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * (1 - fg_ratio) + fg_alpha * fg_ratio
// color = (self_color * self_alpha * (1 - fg_ratio) +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = (1. / 255.) * (255 - fg_ratio) as f32;
let a1 = self.color.alpha_f32();
let r1 = a1 * self.color.red_f32();
let g1 = a1 * self.color.green_f32();
let b1 = a1 * self.color.blue_f32();
let p2 = 1. - p1;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a == 0.0 {
return RGBA::transparent();
}
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl PartialEq for Color {
fn eq(&self, other: &Color) -> bool {
self.foreground_ratio == other.foreground_ratio &&
(self.is_currentcolor() || self.color == other.color)
}
}
impl From<RGBA> for Color {
fn from(color: RGBA) -> Color {
Color {
color: color,
foreground_ratio: 0,
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
if self.is_numeric() {
self.color.to_css(dest)
} else if self.is_currentcolor() {
CSSParserColor::CurrentColor.to_css(dest)
} else {
Ok(())
}
}
}
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
|
{
Color {
color: rgba,
foreground_ratio: 0,
}
}
|
identifier_body
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::ToCss;
/// This struct represents a combined color from a numeric color and
/// the current foreground color (currentcolor keyword).
/// Conceptually, the formula is "color * (1 - p) + currentcolor * p"
/// where p is foreground_ratio.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct
|
{
/// RGBA color.
pub color: RGBA,
/// The ratio of currentcolor in complex color.
pub foreground_ratio: u8,
}
fn blend_color_component(bg: u8, fg: u8, fg_alpha: u8) -> u8 {
let bg_ratio = (u8::max_value() - fg_alpha) as u32;
let fg_ratio = fg_alpha as u32;
let color = bg as u32 * bg_ratio + fg as u32 * fg_ratio;
// Divide by 255, rounding to the nearest integer
((color + 127) / 255) as u8
}
impl Color {
/// Returns a numeric color representing the given RGBA value.
pub fn rgba(rgba: RGBA) -> Color {
Color {
color: rgba,
foreground_ratio: 0,
}
}
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Returns a complex color value representing currentcolor.
pub fn currentcolor() -> Color {
Color {
color: RGBA::transparent(),
foreground_ratio: u8::max_value(),
}
}
/// Whether it is a numeric color (no currentcolor component).
pub fn is_numeric(&self) -> bool {
self.foreground_ratio == 0
}
/// Whether it is a currentcolor value (no numeric color component).
pub fn is_currentcolor(&self) -> bool {
self.foreground_ratio == u8::max_value()
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
if self.is_numeric() {
return self.color;
}
if self.is_currentcolor() {
return fg_color.clone();
}
// Common case that alpha channel is equal (usually both are opaque).
let fg_ratio = self.foreground_ratio;
if self.color.alpha == fg_color.alpha {
let r = blend_color_component(self.color.red, fg_color.red, fg_ratio);
let g = blend_color_component(self.color.green, fg_color.green, fg_ratio);
let b = blend_color_component(self.color.blue, fg_color.blue, fg_ratio);
return RGBA::new(r, g, b, fg_color.alpha);
}
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * (1 - fg_ratio) + fg_alpha * fg_ratio
// color = (self_color * self_alpha * (1 - fg_ratio) +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = (1. / 255.) * (255 - fg_ratio) as f32;
let a1 = self.color.alpha_f32();
let r1 = a1 * self.color.red_f32();
let g1 = a1 * self.color.green_f32();
let b1 = a1 * self.color.blue_f32();
let p2 = 1. - p1;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a == 0.0 {
return RGBA::transparent();
}
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl PartialEq for Color {
fn eq(&self, other: &Color) -> bool {
self.foreground_ratio == other.foreground_ratio &&
(self.is_currentcolor() || self.color == other.color)
}
}
impl From<RGBA> for Color {
fn from(color: RGBA) -> Color {
Color {
color: color,
foreground_ratio: 0,
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
if self.is_numeric() {
self.color.to_css(dest)
} else if self.is_currentcolor() {
CSSParserColor::CurrentColor.to_css(dest)
} else {
Ok(())
}
}
}
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
|
Color
|
identifier_name
|
color.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed color values.
use cssparser::{Color as CSSParserColor, RGBA};
use std::fmt;
use style_traits::ToCss;
/// This struct represents a combined color from a numeric color and
/// the current foreground color (currentcolor keyword).
/// Conceptually, the formula is "color * (1 - p) + currentcolor * p"
/// where p is foreground_ratio.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub struct Color {
/// RGBA color.
pub color: RGBA,
/// The ratio of currentcolor in complex color.
pub foreground_ratio: u8,
}
fn blend_color_component(bg: u8, fg: u8, fg_alpha: u8) -> u8 {
let bg_ratio = (u8::max_value() - fg_alpha) as u32;
let fg_ratio = fg_alpha as u32;
let color = bg as u32 * bg_ratio + fg as u32 * fg_ratio;
// Divide by 255, rounding to the nearest integer
((color + 127) / 255) as u8
}
impl Color {
/// Returns a numeric color representing the given RGBA value.
pub fn rgba(rgba: RGBA) -> Color {
Color {
color: rgba,
foreground_ratio: 0,
}
}
/// Returns a complex color value representing transparent.
pub fn transparent() -> Color {
Color::rgba(RGBA::transparent())
}
/// Returns a complex color value representing currentcolor.
pub fn currentcolor() -> Color {
Color {
color: RGBA::transparent(),
foreground_ratio: u8::max_value(),
}
}
/// Whether it is a numeric color (no currentcolor component).
pub fn is_numeric(&self) -> bool {
self.foreground_ratio == 0
}
/// Whether it is a currentcolor value (no numeric color component).
pub fn is_currentcolor(&self) -> bool {
self.foreground_ratio == u8::max_value()
}
/// Combine this complex color with the given foreground color into
/// a numeric RGBA color. It currently uses linear blending.
pub fn to_rgba(&self, fg_color: RGBA) -> RGBA {
// Common cases that the complex color is either pure numeric
// color or pure currentcolor.
if self.is_numeric() {
return self.color;
}
if self.is_currentcolor() {
return fg_color.clone();
}
// Common case that alpha channel is equal (usually both are opaque).
let fg_ratio = self.foreground_ratio;
if self.color.alpha == fg_color.alpha {
let r = blend_color_component(self.color.red, fg_color.red, fg_ratio);
let g = blend_color_component(self.color.green, fg_color.green, fg_ratio);
let b = blend_color_component(self.color.blue, fg_color.blue, fg_ratio);
return RGBA::new(r, g, b, fg_color.alpha);
}
// For the more complicated case that the alpha value differs,
// we use the following formula to compute the components:
// alpha = self_alpha * (1 - fg_ratio) + fg_alpha * fg_ratio
// color = (self_color * self_alpha * (1 - fg_ratio) +
// fg_color * fg_alpha * fg_ratio) / alpha
let p1 = (1. / 255.) * (255 - fg_ratio) as f32;
let a1 = self.color.alpha_f32();
let r1 = a1 * self.color.red_f32();
let g1 = a1 * self.color.green_f32();
let b1 = a1 * self.color.blue_f32();
let p2 = 1. - p1;
let a2 = fg_color.alpha_f32();
let r2 = a2 * fg_color.red_f32();
let g2 = a2 * fg_color.green_f32();
let b2 = a2 * fg_color.blue_f32();
let a = p1 * a1 + p2 * a2;
if a == 0.0 {
return RGBA::transparent();
}
let inverse_a = 1. / a;
let r = (p1 * r1 + p2 * r2) * inverse_a;
let g = (p1 * g1 + p2 * g2) * inverse_a;
let b = (p1 * b1 + p2 * b2) * inverse_a;
return RGBA::from_floats(r, g, b, a);
}
}
impl PartialEq for Color {
fn eq(&self, other: &Color) -> bool {
self.foreground_ratio == other.foreground_ratio &&
(self.is_currentcolor() || self.color == other.color)
}
}
impl From<RGBA> for Color {
fn from(color: RGBA) -> Color {
Color {
color: color,
foreground_ratio: 0,
}
}
}
impl ToCss for Color {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
if self.is_numeric() {
self.color.to_css(dest)
} else if self.is_currentcolor() {
CSSParserColor::CurrentColor.to_css(dest)
} else
|
}
}
/// Computed value type for the specified RGBAColor.
pub type RGBAColor = RGBA;
|
{
Ok(())
}
|
conditional_block
|
test_select.rs
|
use nix::sys::select::*;
use nix::unistd::{pipe, write};
use nix::sys::signal::SigSet;
use nix::sys::time::{TimeSpec, TimeValLike};
#[test]
pub fn test_pselect() {
let _mtx = ::SIGNAL_MTX
.lock()
.expect("Mutex got poisoned by another test");
let (r1, w1) = pipe().unwrap();
write(w1, b"hi!").unwrap();
let (r2, _w2) = pipe().unwrap();
let mut fd_set = FdSet::new();
fd_set.insert(r1);
fd_set.insert(r2);
let timeout = TimeSpec::seconds(10);
let sigmask = SigSet::empty();
assert_eq!(
1,
pselect(None, &mut fd_set, None, None, &timeout, &sigmask).unwrap()
);
assert!(fd_set.contains(r1));
assert!(!fd_set.contains(r2));
}
#[test]
pub fn test_pselect_nfds2() {
let (r1, w1) = pipe().unwrap();
write(w1, b"hi!").unwrap();
let (r2, _w2) = pipe().unwrap();
let mut fd_set = FdSet::new();
fd_set.insert(r1);
fd_set.insert(r2);
let timeout = TimeSpec::seconds(10);
assert_eq!(
1,
pselect(
::std::cmp::max(r1, r2) + 1,
&mut fd_set,
None,
None,
&timeout,
None
).unwrap()
);
assert!(fd_set.contains(r1));
assert!(!fd_set.contains(r2));
|
}
|
random_line_split
|
|
test_select.rs
|
use nix::sys::select::*;
use nix::unistd::{pipe, write};
use nix::sys::signal::SigSet;
use nix::sys::time::{TimeSpec, TimeValLike};
#[test]
pub fn test_pselect() {
let _mtx = ::SIGNAL_MTX
.lock()
.expect("Mutex got poisoned by another test");
let (r1, w1) = pipe().unwrap();
write(w1, b"hi!").unwrap();
let (r2, _w2) = pipe().unwrap();
let mut fd_set = FdSet::new();
fd_set.insert(r1);
fd_set.insert(r2);
let timeout = TimeSpec::seconds(10);
let sigmask = SigSet::empty();
assert_eq!(
1,
pselect(None, &mut fd_set, None, None, &timeout, &sigmask).unwrap()
);
assert!(fd_set.contains(r1));
assert!(!fd_set.contains(r2));
}
#[test]
pub fn test_pselect_nfds2()
|
);
assert!(fd_set.contains(r1));
assert!(!fd_set.contains(r2));
}
|
{
let (r1, w1) = pipe().unwrap();
write(w1, b"hi!").unwrap();
let (r2, _w2) = pipe().unwrap();
let mut fd_set = FdSet::new();
fd_set.insert(r1);
fd_set.insert(r2);
let timeout = TimeSpec::seconds(10);
assert_eq!(
1,
pselect(
::std::cmp::max(r1, r2) + 1,
&mut fd_set,
None,
None,
&timeout,
None
).unwrap()
|
identifier_body
|
test_select.rs
|
use nix::sys::select::*;
use nix::unistd::{pipe, write};
use nix::sys::signal::SigSet;
use nix::sys::time::{TimeSpec, TimeValLike};
#[test]
pub fn test_pselect() {
let _mtx = ::SIGNAL_MTX
.lock()
.expect("Mutex got poisoned by another test");
let (r1, w1) = pipe().unwrap();
write(w1, b"hi!").unwrap();
let (r2, _w2) = pipe().unwrap();
let mut fd_set = FdSet::new();
fd_set.insert(r1);
fd_set.insert(r2);
let timeout = TimeSpec::seconds(10);
let sigmask = SigSet::empty();
assert_eq!(
1,
pselect(None, &mut fd_set, None, None, &timeout, &sigmask).unwrap()
);
assert!(fd_set.contains(r1));
assert!(!fd_set.contains(r2));
}
#[test]
pub fn
|
() {
let (r1, w1) = pipe().unwrap();
write(w1, b"hi!").unwrap();
let (r2, _w2) = pipe().unwrap();
let mut fd_set = FdSet::new();
fd_set.insert(r1);
fd_set.insert(r2);
let timeout = TimeSpec::seconds(10);
assert_eq!(
1,
pselect(
::std::cmp::max(r1, r2) + 1,
&mut fd_set,
None,
None,
&timeout,
None
).unwrap()
);
assert!(fd_set.contains(r1));
assert!(!fd_set.contains(r2));
}
|
test_pselect_nfds2
|
identifier_name
|
registry.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by plugin crates to tell `rustc` about the plugins they provide.
use lint::{LintPassObject, LintId, Lint};
use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT};
use syntax::ext::base::{IdentTT, LetSyntaxTT, Decorator, Modifier};
use syntax::ext::base::{MacroExpanderFn};
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::ast;
use std::collections::HashMap;
/// Structure used to register plugins.
///
/// A plugin registrar function takes an `&mut Registry` and should call
/// methods to register its plugins.
///
/// This struct has public fields and other methods for use by `rustc`
/// itself. They are not documented here, and plugin authors should
/// not use them.
pub struct Registry {
#[doc(hidden)]
pub krate_span: Span,
#[doc(hidden)]
pub syntax_exts: Vec<NamedSyntaxExtension>,
#[doc(hidden)]
pub lint_passes: Vec<LintPassObject>,
#[doc(hidden)]
pub lint_groups: HashMap<&'static str, Vec<LintId>>,
}
impl Registry {
#[doc(hidden)]
pub fn new(krate: &ast::Crate) -> Registry {
Registry {
krate_span: krate.span,
syntax_exts: vec!(),
lint_passes: vec!(),
lint_groups: HashMap::new(),
}
}
/// Register a syntax extension of any kind.
///
/// This is the most general hook into `libsyntax`'s expansion behavior.
pub fn register_syntax_extension(&mut self, name: ast::Name, extension: SyntaxExtension) {
self.syntax_exts.push((name, match extension {
NormalTT(ext, _) => NormalTT(ext, Some(self.krate_span)),
IdentTT(ext, _) => IdentTT(ext, Some(self.krate_span)),
Decorator(ext) => Decorator(ext),
Modifier(ext) => Modifier(ext),
// there's probably a nicer way to signal this:
LetSyntaxTT(_, _) => panic!("can't register a new LetSyntax!"),
}));
}
/// Register a macro of the usual kind.
///
/// This is a convenience wrapper for `register_syntax_extension`.
/// It builds for you a `NormalTT` that calls `expander`,
/// and also takes care of interning the macro's name.
pub fn register_macro(&mut self, name: &str, expander: MacroExpanderFn) {
self.register_syntax_extension(token::intern(name), NormalTT(box expander, None));
}
/// Register a compiler lint pass.
pub fn register_lint_pass(&mut self, lint_pass: LintPassObject) {
self.lint_passes.push(lint_pass);
}
/// Register a lint group.
pub fn register_lint_group(&mut self, name: &'static str, to: Vec<&'static Lint>)
|
}
|
{
self.lint_groups.insert(name, to.into_iter().map(|x| LintId::of(x)).collect());
}
|
identifier_body
|
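For context on how the Registry API above is consumed, here is a hedged sketch of a plugin crate's registrar function. It targets the same pre-1.0 rustc era as registry.rs, so the exact module paths and the MacResult signature are era-dependent assumptions, and expand_hello is a made-up name rather than anything from the original file.
// Hypothetical plugin crate exercising Registry::register_macro from above.
#![feature(plugin_registrar)]
extern crate rustc;
extern crate syntax;
use syntax::ast::TokenTree;
use syntax::codemap::Span;
use syntax::ext::base::{DummyResult, ExtCtxt, MacResult};
use rustc::plugin::Registry;
// A do-nothing expander with the MacroExpanderFn shape expected by register_macro.
fn expand_hello(_cx: &mut ExtCtxt, sp: Span, _args: &[TokenTree]) -> Box<MacResult + 'static> {
    DummyResult::any(sp)
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
    // Convenience wrapper around register_syntax_extension (see above).
    reg.register_macro("hello", expand_hello);
}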
registry.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by plugin crates to tell `rustc` about the plugins they provide.
use lint::{LintPassObject, LintId, Lint};
use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT};
use syntax::ext::base::{IdentTT, LetSyntaxTT, Decorator, Modifier};
use syntax::ext::base::{MacroExpanderFn};
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::ast;
|
///
/// A plugin registrar function takes an `&mut Registry` and should call
/// methods to register its plugins.
///
/// This struct has public fields and other methods for use by `rustc`
/// itself. They are not documented here, and plugin authors should
/// not use them.
pub struct Registry {
#[doc(hidden)]
pub krate_span: Span,
#[doc(hidden)]
pub syntax_exts: Vec<NamedSyntaxExtension>,
#[doc(hidden)]
pub lint_passes: Vec<LintPassObject>,
#[doc(hidden)]
pub lint_groups: HashMap<&'static str, Vec<LintId>>,
}
impl Registry {
#[doc(hidden)]
pub fn new(krate: &ast::Crate) -> Registry {
Registry {
krate_span: krate.span,
syntax_exts: vec!(),
lint_passes: vec!(),
lint_groups: HashMap::new(),
}
}
/// Register a syntax extension of any kind.
///
/// This is the most general hook into `libsyntax`'s expansion behavior.
pub fn register_syntax_extension(&mut self, name: ast::Name, extension: SyntaxExtension) {
self.syntax_exts.push((name, match extension {
NormalTT(ext, _) => NormalTT(ext, Some(self.krate_span)),
IdentTT(ext, _) => IdentTT(ext, Some(self.krate_span)),
Decorator(ext) => Decorator(ext),
Modifier(ext) => Modifier(ext),
// there's probably a nicer way to signal this:
LetSyntaxTT(_, _) => panic!("can't register a new LetSyntax!"),
}));
}
/// Register a macro of the usual kind.
///
/// This is a convenience wrapper for `register_syntax_extension`.
/// It builds for you a `NormalTT` that calls `expander`,
/// and also takes care of interning the macro's name.
pub fn register_macro(&mut self, name: &str, expander: MacroExpanderFn) {
self.register_syntax_extension(token::intern(name), NormalTT(box expander, None));
}
/// Register a compiler lint pass.
pub fn register_lint_pass(&mut self, lint_pass: LintPassObject) {
self.lint_passes.push(lint_pass);
}
/// Register a lint group.
pub fn register_lint_group(&mut self, name: &'static str, to: Vec<&'static Lint>) {
self.lint_groups.insert(name, to.into_iter().map(|x| LintId::of(x)).collect());
}
}
|
use std::collections::HashMap;
/// Structure used to register plugins.
|
random_line_split
|
registry.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Used by plugin crates to tell `rustc` about the plugins they provide.
use lint::{LintPassObject, LintId, Lint};
use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT};
use syntax::ext::base::{IdentTT, LetSyntaxTT, Decorator, Modifier};
use syntax::ext::base::{MacroExpanderFn};
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::ast;
use std::collections::HashMap;
/// Structure used to register plugins.
///
/// A plugin registrar function takes an `&mut Registry` and should call
/// methods to register its plugins.
///
/// This struct has public fields and other methods for use by `rustc`
/// itself. They are not documented here, and plugin authors should
/// not use them.
pub struct Registry {
#[doc(hidden)]
pub krate_span: Span,
#[doc(hidden)]
pub syntax_exts: Vec<NamedSyntaxExtension>,
#[doc(hidden)]
pub lint_passes: Vec<LintPassObject>,
#[doc(hidden)]
pub lint_groups: HashMap<&'static str, Vec<LintId>>,
}
impl Registry {
#[doc(hidden)]
pub fn new(krate: &ast::Crate) -> Registry {
Registry {
krate_span: krate.span,
syntax_exts: vec!(),
lint_passes: vec!(),
lint_groups: HashMap::new(),
}
}
/// Register a syntax extension of any kind.
///
/// This is the most general hook into `libsyntax`'s expansion behavior.
pub fn register_syntax_extension(&mut self, name: ast::Name, extension: SyntaxExtension) {
self.syntax_exts.push((name, match extension {
NormalTT(ext, _) => NormalTT(ext, Some(self.krate_span)),
IdentTT(ext, _) => IdentTT(ext, Some(self.krate_span)),
Decorator(ext) => Decorator(ext),
Modifier(ext) => Modifier(ext),
// there's probably a nicer way to signal this:
LetSyntaxTT(_, _) => panic!("can't register a new LetSyntax!"),
}));
}
/// Register a macro of the usual kind.
///
/// This is a convenience wrapper for `register_syntax_extension`.
/// It builds for you a `NormalTT` that calls `expander`,
/// and also takes care of interning the macro's name.
pub fn register_macro(&mut self, name: &str, expander: MacroExpanderFn) {
self.register_syntax_extension(token::intern(name), NormalTT(box expander, None));
}
/// Register a compiler lint pass.
pub fn
|
(&mut self, lint_pass: LintPassObject) {
self.lint_passes.push(lint_pass);
}
/// Register a lint group.
pub fn register_lint_group(&mut self, name: &'static str, to: Vec<&'static Lint>) {
self.lint_groups.insert(name, to.into_iter().map(|x| LintId::of(x)).collect());
}
}
|
register_lint_pass
|
identifier_name
|
fs.rs
|
use io::{Read, Result, Write, Seek, SeekFrom};
use path::PathBuf;
use str;
use string::{String, ToString};
use vec::Vec;
use system::syscall::{sys_open, sys_dup, sys_close, sys_fpath, sys_ftruncate, sys_read,
sys_write, sys_lseek, sys_fsync, sys_mkdir, sys_rmdir, sys_unlink};
use system::syscall::{O_RDWR, O_CREAT, O_TRUNC, SEEK_SET, SEEK_CUR, SEEK_END};
/// A Unix-style file
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
pub fn from_fd(fd: usize) -> File {
File {
fd: fd
}
}
/// Open a new file using a path
pub fn open(path: &str) -> Result<File> {
let path_c = path.to_string() + "\0";
unsafe {
sys_open(path_c.as_ptr(), O_RDWR, 0).map(|fd| File::from_fd(fd) )
}
}
/// Create a new file using a path
pub fn create(path: &str) -> Result<File> {
let path_c = path.to_string() + "\0";
unsafe {
sys_open(path_c.as_ptr(), O_CREAT | O_RDWR | O_TRUNC, 0).map(|fd| File::from_fd(fd) )
}
}
/// Duplicate the file
pub fn dup(&self) -> Result<File> {
sys_dup(self.fd).map(|fd| File::from_fd(fd))
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match sys_fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(err),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
sys_fsync(self.fd).and(Ok(()))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
sys_fsync(self.fd).and(Ok(()))
}
/// Truncates the file
pub fn set_len(&mut self, size: u64) -> Result<()> {
sys_ftruncate(self.fd, size as usize).and(Ok(()))
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
sys_read(self.fd, buf)
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
sys_write(self.fd, buf)
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
sys_lseek(self.fd, offset, whence).map(|position| position as u64)
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = sys_close(self.fd);
}
}
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
}
pub struct DirEntry {
path: PathBuf,
dir: bool,
file: bool,
}
impl DirEntry {
pub fn file_name(&self) -> &PathBuf {
&self.path
}
pub fn file_type(&self) -> Result<FileType> {
Ok(FileType {
dir: self.dir,
file: self.file,
})
}
pub fn path(&self) -> &PathBuf {
&self.path
}
}
pub struct ReadDir {
file: File,
}
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut path = String::new();
let mut buf: [u8; 1] = [0; 1];
loop {
match self.file.read(&mut buf) {
Ok(0) => break,
Ok(count) => {
if buf[0] == 10 {
break;
} else {
path.push_str(unsafe { str::from_utf8_unchecked(&buf[..count]) });
}
}
Err(_err) => break,
}
}
if path.is_empty() {
None
} else {
let dir = path.ends_with('/');
if dir {
path.pop();
}
Some(Ok(DirEntry {
path: PathBuf::from(path),
dir: dir,
file: !dir,
}))
}
}
}
/// Find the canonical path of a file
pub fn canonicalize(path: &str) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Create a new directory, using a path
/// The default mode of the directory is 755
pub fn
|
(path: &str) -> Result<()> {
let path_c = path.to_string() + "\0";
unsafe {
sys_mkdir(path_c.as_ptr(), 755).and(Ok(()))
}
}
pub fn read_dir(path: &str) -> Result<ReadDir> {
let file_result = if path.is_empty() || path.ends_with('/') {
File::open(path)
} else {
File::open(&(path.to_string() + "/"))
};
match file_result {
Ok(file) => Ok(ReadDir { file: file }),
Err(err) => Err(err),
}
}
pub fn remove_dir(path: &str) -> Result<()> {
let path_c = path.to_string() + "\0";
unsafe {
sys_rmdir(path_c.as_ptr()).and(Ok(()))
}
}
pub fn remove_file(path: &str) -> Result<()> {
let path_c = path.to_string() + "\0";
unsafe {
sys_unlink(path_c.as_ptr()).and(Ok(()))
}
}
|
create_dir
|
identifier_name
|
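As a quick orientation for the fs.rs wrapper above, the sketch below shows a write/seek/read round trip using only the methods defined in that file. It assumes it lives in the same crate (so io, File, and the try! macro resolve the same way), and the path string is purely illustrative.
// Hypothetical round trip through the File wrapper above (illustrative path,
// same-crate imports assumed; not part of the original module).
use io::{Read, Result, Seek, SeekFrom, Write};
fn roundtrip() -> Result<()> {
    // Create (or truncate) the file and write a few bytes.
    let mut file = try!(File::create("/tmp/example.txt"));
    try!(file.write(b"hello"));
    // Seek back to the start and read the bytes we just wrote.
    try!(file.seek(SeekFrom::Start(0)));
    let mut buf = [0u8; 5];
    let count = try!(file.read(&mut buf));
    assert_eq!(&buf[..count], b"hello");
    // Dropping `file` closes the underlying descriptor (see Drop above).
    Ok(())
}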
fs.rs
|
use io::{Read, Result, Write, Seek, SeekFrom};
use path::PathBuf;
use str;
use string::{String, ToString};
use vec::Vec;
use system::syscall::{sys_open, sys_dup, sys_close, sys_fpath, sys_ftruncate, sys_read,
sys_write, sys_lseek, sys_fsync, sys_mkdir, sys_rmdir, sys_unlink};
use system::syscall::{O_RDWR, O_CREAT, O_TRUNC, SEEK_SET, SEEK_CUR, SEEK_END};
/// A Unix-style file
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
pub fn from_fd(fd: usize) -> File {
File {
fd: fd
}
}
/// Open a new file using a path
pub fn open(path: &str) -> Result<File> {
let path_c = path.to_string() + "\0";
unsafe {
sys_open(path_c.as_ptr(), O_RDWR, 0).map(|fd| File::from_fd(fd) )
}
}
/// Create a new file using a path
pub fn create(path: &str) -> Result<File> {
let path_c = path.to_string() + "\0";
unsafe {
sys_open(path_c.as_ptr(), O_CREAT | O_RDWR | O_TRUNC, 0).map(|fd| File::from_fd(fd) )
}
}
/// Duplicate the file
pub fn dup(&self) -> Result<File> {
sys_dup(self.fd).map(|fd| File::from_fd(fd))
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match sys_fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(err),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
sys_fsync(self.fd).and(Ok(()))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
sys_fsync(self.fd).and(Ok(()))
}
/// Truncates the file
pub fn set_len(&mut self, size: u64) -> Result<()> {
sys_ftruncate(self.fd, size as usize).and(Ok(()))
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
sys_read(self.fd, buf)
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
sys_write(self.fd, buf)
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
sys_lseek(self.fd, offset, whence).map(|position| position as u64)
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = sys_close(self.fd);
}
}
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
}
pub struct DirEntry {
path: PathBuf,
dir: bool,
file: bool,
}
impl DirEntry {
pub fn file_name(&self) -> &PathBuf {
&self.path
}
pub fn file_type(&self) -> Result<FileType> {
Ok(FileType {
dir: self.dir,
file: self.file,
})
}
pub fn path(&self) -> &PathBuf {
&self.path
}
}
pub struct ReadDir {
|
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut path = String::new();
let mut buf: [u8; 1] = [0; 1];
loop {
match self.file.read(&mut buf) {
Ok(0) => break,
Ok(count) => {
if buf[0] == 10 {
break;
} else {
path.push_str(unsafe { str::from_utf8_unchecked(&buf[..count]) });
}
}
Err(_err) => break,
}
}
if path.is_empty() {
None
} else {
let dir = path.ends_with('/');
if dir {
path.pop();
}
Some(Ok(DirEntry {
path: PathBuf::from(path),
dir: dir,
file: !dir,
}))
}
}
}
/// Find the canonical path of a file
pub fn canonicalize(path: &str) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Create a new directory, using a path
/// The default mode of the directory is 755
pub fn create_dir(path: &str) -> Result<()> {
let path_c = path.to_string() + "\0";
unsafe {
sys_mkdir(path_c.as_ptr(), 755).and(Ok(()))
}
}
pub fn read_dir(path: &str) -> Result<ReadDir> {
let file_result = if path.is_empty() || path.ends_with('/') {
File::open(path)
} else {
File::open(&(path.to_string() + "/"))
};
match file_result {
Ok(file) => Ok(ReadDir { file: file }),
Err(err) => Err(err),
}
}
pub fn remove_dir(path: &str) -> Result<()> {
let path_c = path.to_string() + "\0";
unsafe {
sys_rmdir(path_c.as_ptr()).and(Ok(()))
}
}
pub fn remove_file(path: &str) -> Result<()> {
let path_c = path.to_string() + "\0";
unsafe {
sys_unlink(path_c.as_ptr()).and(Ok(()))
}
}
|
file: File,
}
|
random_line_split
|
asm-out-assign-imm.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
// except according to those terms.
// ignore-fast #[feature] doesn't work with check-fast
#![feature(asm)]
fn foo(x: int) { println!("{}", x); }
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
#[cfg(target_arch = "arm")]
pub fn main() {
let x: int;
x = 1; //~ NOTE prior assignment occurs here
foo(x);
unsafe {
asm!("mov $1, $0" : "=r"(x) : "r"(5u)); //~ ERROR re-assignment of immutable variable `x`
}
foo(x);
}
#[cfg(not(target_arch = "x86"), not(target_arch = "x86_64"), not(target_arch = "arm"))]
pub fn main() {}
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
random_line_split
|
asm-out-assign-imm.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast #[feature] doesn't work with check-fast
#![feature(asm)]
fn
|
(x: int) { println!("{}", x); }
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
#[cfg(target_arch = "arm")]
pub fn main() {
let x: int;
x = 1; //~ NOTE prior assignment occurs here
foo(x);
unsafe {
asm!("mov $1, $0" : "=r"(x) : "r"(5u)); //~ ERROR re-assignment of immutable variable `x`
}
foo(x);
}
#[cfg(not(target_arch = "x86"), not(target_arch = "x86_64"), not(target_arch = "arm"))]
pub fn main() {}
|
foo
|
identifier_name
|
asm-out-assign-imm.rs
|
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-fast #[feature] doesn't work with check-fast
#![feature(asm)]
fn foo(x: int) { println!("{}", x); }
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
#[cfg(target_arch = "arm")]
pub fn main() {
let x: int;
x = 1; //~ NOTE prior assignment occurs here
foo(x);
unsafe {
asm!("mov $1, $0" : "=r"(x) : "r"(5u)); //~ ERROR re-assignment of immutable variable `x`
}
foo(x);
}
#[cfg(not(target_arch = "x86"), not(target_arch = "x86_64"), not(target_arch = "arm"))]
pub fn main()
|
{}
|
identifier_body
|
|
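The asm-out-assign-imm.rs rows above are a compile-fail test: writing the asm output into x counts as a second assignment to an immutable binding. For contrast, a sketch of the accepted form, using the same legacy asm! syntax as the test (so it only applies to that era's compiler and to x86-family targets), simply makes the binding mutable:
#![feature(asm)]
fn foo(x: int) { println!("{}", x); }
#[cfg(target_arch = "x86_64")]
pub fn main() {
    let mut x: int = 1; // mutable, so the asm output assignment is allowed
    foo(x);
    unsafe {
        asm!("mov $1, $0" : "=r"(x) : "r"(5u));
    }
    foo(x); // prints 5
}
#[cfg(not(target_arch = "x86_64"))]
pub fn main() {}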
lock.rs
|
/* Copyright 2013 Leon Sixt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::hashmap::HashSet;
use std::cast;
use extra::sync::Mutex;
pub trait LockManager<T> {
fn lock(&self, id: T);
fn unlock(&self, id: &T);
}
struct SimpleLockManager<T> {
set: HashSet<T>,
mutex: Mutex
}
impl<T: Hash + Eq + Freeze> SimpleLockManager<T> {
pub fn new() -> SimpleLockManager<T> {
SimpleLockManager {
set: HashSet::new(),
mutex: Mutex::new()
}
}
}
impl<T: Hash + Eq + Clone + Freeze + ToStr> LockManager<T> for SimpleLockManager<T> {
fn lock(&self, id: T)
|
fn unlock(&self, id: &T) {
debug!("unlocking ptr: {}", id.to_str());
/*do self.mutex.lock_cond |cond| {
unsafe {
let mut_set = cast::transmute_mut(&self.set);
mut_set.remove(id);
cond.signal();
}
}*/
}
}
|
{
debug!("locking ptr: {}", id.to_str());
/* do self.mutex.lock_cond |cond| {
while self.set.contains(&id) {
cond.wait();
}
unsafe {
let mut_set = cast::transmute_mut(&self.set);
mut_set.insert(id.clone());
}
}*/
}
|
identifier_body
|
lock.rs
|
/* Copyright 2013 Leon Sixt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::hashmap::HashSet;
use std::cast;
use extra::sync::Mutex;
pub trait LockManager<T> {
fn lock(&self, id: T);
fn unlock(&self, id: &T);
}
struct SimpleLockManager<T> {
set: HashSet<T>,
mutex: Mutex
}
impl<T: Hash + Eq + Freeze> SimpleLockManager<T> {
pub fn new() -> SimpleLockManager<T> {
SimpleLockManager {
set: HashSet::new(),
mutex: Mutex::new()
}
}
}
impl<T: Hash + Eq + Clone + Freeze + ToStr> LockManager<T> for SimpleLockManager<T> {
fn lock(&self, id: T) {
debug!("locking ptr: {}", id.to_str());
/* do self.mutex.lock_cond |cond| {
while self.set.contains(&id) {
cond.wait();
}
unsafe {
let mut_set = cast::transmute_mut(&self.set);
mut_set.insert(id.clone());
}
}*/
}
fn
|
(&self, id: &T) {
debug!("unlocking ptr: {}", id.to_str());
/*do self.mutex.lock_cond |cond| {
unsafe {
let mut_set = cast::transmute_mut(&self.set);
mut_set.remove(id);
cond.signal();
}
}*/
}
}
|
unlock
|
identifier_name
|
lock.rs
|
/* Copyright 2013 Leon Sixt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::hashmap::HashSet;
use std::cast;
use extra::sync::Mutex;
pub trait LockManager<T> {
fn lock(&self, id: T);
fn unlock(&self, id: &T);
}
struct SimpleLockManager<T> {
set: HashSet<T>,
mutex: Mutex
}
impl<T: Hash + Eq + Freeze> SimpleLockManager<T> {
pub fn new() -> SimpleLockManager<T> {
SimpleLockManager {
set: HashSet::new(),
mutex: Mutex::new()
}
}
}
|
cond.wait();
}
unsafe {
let mut_set = cast::transmute_mut(&self.set);
mut_set.insert(id.clone());
}
}*/
}
fn unlock(&self, id: &T) {
debug!("unlocking ptr: {}", id.to_str());
/*do self.mutex.lock_cond |cond| {
unsafe {
let mut_set = cast::transmute_mut(&self.set);
mut_set.remove(id);
cond.signal();
}
}*/
}
}
|
impl<T: Hash + Eq + Clone + Freeze + ToStr> LockManager<T> for SimpleLockManager<T> {
fn lock(&self, id: T) {
debug!("locking ptr: {}", id.to_str());
/* do self.mutex.lock_cond |cond| {
while self.set.contains(&id) {
|
random_line_split
|
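The lock and unlock bodies in lock.rs are commented out and written against a pre-1.0 Mutex API. As a point of reference only, here is how the same idea reads with today's std::sync primitives (a reimagined sketch, not the original code): block on a condition variable while the id is held, insert it once free, and on unlock remove it and wake the waiters.
// Modern sketch of the commented-out SimpleLockManager logic above.
use std::collections::HashSet;
use std::hash::Hash;
use std::sync::{Condvar, Mutex};
pub struct SimpleLockManager<T> {
    set: Mutex<HashSet<T>>,
    cond: Condvar,
}
impl<T: Hash + Eq + Clone> SimpleLockManager<T> {
    pub fn new() -> Self {
        SimpleLockManager { set: Mutex::new(HashSet::new()), cond: Condvar::new() }
    }
    pub fn lock(&self, id: T) {
        let mut set = self.set.lock().unwrap();
        // Block until nobody else holds this id.
        while set.contains(&id) {
            set = self.cond.wait(set).unwrap();
        }
        set.insert(id);
    }
    pub fn unlock(&self, id: &T) {
        let mut set = self.set.lock().unwrap();
        set.remove(id);
        // Wake all waiters; each re-checks the set before proceeding.
        self.cond.notify_all();
    }
}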
cloudwatch.rs
|
#![cfg(feature = "cloudwatch")]
extern crate rusoto;
use rusoto::cloudwatch::{CloudWatch, CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum};
use rusoto::{DefaultCredentialsProvider, Region};
use rusoto::default_tls_client;
#[test]
fn should_put_metric_data()
|
let response = client.put_metric_data(&request).unwrap();
println!("{:#?}", response);
}
|
{
let client = CloudWatchClient::new(default_tls_client().unwrap(),
DefaultCredentialsProvider::new().unwrap(),
Region::UsEast1);
let metric_data = vec![MetricDatum {
dimensions: Some(vec![Dimension {
name: "foo".to_string(),
value: "bar".to_string(),
}]),
metric_name: "buffers".to_string(),
statistic_values: None,
timestamp: None,
unit: Some("Bytes".to_string()),
value: Some(1.0),
}];
let request = PutMetricDataInput {
namespace: "TestNamespace".to_string(),
metric_data: metric_data,
};
|
identifier_body
|
cloudwatch.rs
|
#![cfg(feature = "cloudwatch")]
extern crate rusoto;
use rusoto::cloudwatch::{CloudWatch, CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum};
use rusoto::{DefaultCredentialsProvider, Region};
use rusoto::default_tls_client;
#[test]
fn
|
() {
let client = CloudWatchClient::new(default_tls_client().unwrap(),
DefaultCredentialsProvider::new().unwrap(),
Region::UsEast1);
let metric_data = vec![MetricDatum {
dimensions: Some(vec![Dimension {
name: "foo".to_string(),
value: "bar".to_string(),
}]),
metric_name: "buffers".to_string(),
statistic_values: None,
timestamp: None,
unit: Some("Bytes".to_string()),
value: Some(1.0),
}];
let request = PutMetricDataInput {
namespace: "TestNamespace".to_string(),
metric_data: metric_data,
};
let response = client.put_metric_data(&request).unwrap();
println!("{:#?}", response);
}
|
should_put_metric_data
|
identifier_name
|
cloudwatch.rs
|
#![cfg(feature = "cloudwatch")]
extern crate rusoto;
use rusoto::cloudwatch::{CloudWatch, CloudWatchClient, PutMetricDataInput, Dimension, MetricDatum};
use rusoto::{DefaultCredentialsProvider, Region};
use rusoto::default_tls_client;
#[test]
fn should_put_metric_data() {
let client = CloudWatchClient::new(default_tls_client().unwrap(),
DefaultCredentialsProvider::new().unwrap(),
Region::UsEast1);
let metric_data = vec![MetricDatum {
dimensions: Some(vec![Dimension {
name: "foo".to_string(),
value: "bar".to_string(),
}]),
metric_name: "buffers".to_string(),
statistic_values: None,
timestamp: None,
unit: Some("Bytes".to_string()),
|
}];
let request = PutMetricDataInput {
namespace: "TestNamespace".to_string(),
metric_data: metric_data,
};
let response = client.put_metric_data(&request).unwrap();
println!("{:#?}", response);
}
|
value: Some(1.0),
|
random_line_split
|
receiver.rs
|
use std::io::Cursor;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use rotor_redis;
use rotor_redis::Message;
use cbor::Encoder;
use inner::{MANAGER, SockId};
pub struct Receiver(SockId, usize, Arc<AtomicUsize>);
impl Receiver {
pub fn new(id: SockId) -> Receiver {
Receiver(id, 0, Arc::new(AtomicUsize::new(0)))
}
}
impl Clone for Receiver {
fn clone(&self) -> Receiver {
Receiver(self.0, self.2.load(SeqCst), self.2.clone())
}
}
fn write_message(enc: &mut Encoder<Cursor<Vec<u8>>>, msg: &Message)
|
impl Receiver {
pub fn next_id(&mut self) -> usize {
self.2.fetch_add(1, SeqCst)
}
}
impl rotor_redis::Receiver for Receiver {
fn receive(&mut self, msg: &Message) {
let mut enc = Encoder::new(Cursor::new(Vec::new()));
enc.u64(self.1 as u64).unwrap();
write_message(&mut enc, msg);
let vec = enc.into_writer().into_inner();
MANAGER.send(self.0, vec.into_boxed_slice());
self.1 += 1;
}
}
|
{
use rotor_redis::Message::*;
match *msg {
Simple(s) => enc.text(s).unwrap(),
Error(kind, text) => {
enc.object(2).unwrap();
enc.text("error_kind").unwrap();
enc.text(kind).unwrap();
enc.text("error_text").unwrap();
enc.text(text).unwrap();
}
Int(x) => enc.i64(x).unwrap(),
Bytes(x) => enc.bytes(x).unwrap(),
Null => enc.null().unwrap(),
Array(_) => unimplemented!(),
}
}
|
identifier_body
|
receiver.rs
|
use std::io::Cursor;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
|
use cbor::Encoder;
use inner::{MANAGER, SockId};
pub struct Receiver(SockId, usize, Arc<AtomicUsize>);
impl Receiver {
pub fn new(id: SockId) -> Receiver {
Receiver(id, 0, Arc::new(AtomicUsize::new(0)))
}
}
impl Clone for Receiver {
fn clone(&self) -> Receiver {
Receiver(self.0, self.2.load(SeqCst), self.2.clone())
}
}
fn write_message(enc: &mut Encoder<Cursor<Vec<u8>>>, msg: &Message) {
use rotor_redis::Message::*;
match *msg {
Simple(s) => enc.text(s).unwrap(),
Error(kind, text) => {
enc.object(2).unwrap();
enc.text("error_kind").unwrap();
enc.text(kind).unwrap();
enc.text("error_text").unwrap();
enc.text(text).unwrap();
}
Int(x) => enc.i64(x).unwrap(),
Bytes(x) => enc.bytes(x).unwrap(),
Null => enc.null().unwrap(),
Array(_) => unimplemented!(),
}
}
impl Receiver {
pub fn next_id(&mut self) -> usize {
self.2.fetch_add(1, SeqCst)
}
}
impl rotor_redis::Receiver for Receiver {
fn receive(&mut self, msg: &Message) {
let mut enc = Encoder::new(Cursor::new(Vec::new()));
enc.u64(self.1 as u64).unwrap();
write_message(&mut enc, msg);
let vec = enc.into_writer().into_inner();
MANAGER.send(self.0, vec.into_boxed_slice());
self.1 += 1;
}
}
|
use rotor_redis;
use rotor_redis::Message;
|
random_line_split
|
receiver.rs
|
use std::io::Cursor;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use rotor_redis;
use rotor_redis::Message;
use cbor::Encoder;
use inner::{MANAGER, SockId};
pub struct Receiver(SockId, usize, Arc<AtomicUsize>);
impl Receiver {
pub fn new(id: SockId) -> Receiver {
Receiver(id, 0, Arc::new(AtomicUsize::new(0)))
}
}
impl Clone for Receiver {
fn
|
(&self) -> Receiver {
Receiver(self.0, self.2.load(SeqCst), self.2.clone())
}
}
fn write_message(enc: &mut Encoder<Cursor<Vec<u8>>>, msg: &Message) {
use rotor_redis::Message::*;
match *msg {
Simple(s) => enc.text(s).unwrap(),
Error(kind, text) => {
enc.object(2).unwrap();
enc.text("error_kind").unwrap();
enc.text(kind).unwrap();
enc.text("error_text").unwrap();
enc.text(text).unwrap();
}
Int(x) => enc.i64(x).unwrap(),
Bytes(x) => enc.bytes(x).unwrap(),
Null => enc.null().unwrap(),
Array(_) => unimplemented!(),
}
}
impl Receiver {
pub fn next_id(&mut self) -> usize {
self.2.fetch_add(1, SeqCst)
}
}
impl rotor_redis::Receiver for Receiver {
fn receive(&mut self, msg: &Message) {
let mut enc = Encoder::new(Cursor::new(Vec::new()));
enc.u64(self.1 as u64).unwrap();
write_message(&mut enc, msg);
let vec = enc.into_writer().into_inner();
MANAGER.send(self.0, vec.into_boxed_slice());
self.1 += 1;
}
}
|
clone
|
identifier_name
|
properties.rs
|
//! Parse server.properties files
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, BufReader, BufWriter, Error, ErrorKind};
use std::num::ParseIntError;
use std::path::Path;
use std::str::ParseBoolError;
macro_rules! parse {
($value:ident, String) => {
$value.to_string()
};
($value:ident, bool) => {
try!($value.parse().map_err(|_: ParseBoolError| io::Error::new(io::ErrorKind::InvalidInput, "invalid bool value")))
};
($value:ident, i32) => {
try!($value.parse().map_err(|_: ParseIntError| io::Error::new(io::ErrorKind::InvalidInput, "invalid i32 value")))
};
($value:ident, u16) => {
try!($value.parse().map_err(|_: ParseIntError| io::Error::new(io::ErrorKind::InvalidInput, "invalid u16 value")))
}
}
macro_rules! server_properties_impl {
($({ $field:ident, $hyphen:expr, $fty:ident, $default:expr})+) => {
/// Vanilla server.properties
///
/// Documentation of each field is here: http://minecraft.gamepedia.com/Server.properties
#[derive(Debug, PartialEq)]
pub struct Properties {
$(pub $field: $fty),*
}
impl Properties {
pub fn default() -> Properties {
Properties{
$($field: $default),*
}
}
/// Load and parse a server.properties file from `path`,
pub fn load(path: &Path) -> io::Result<Properties> {
let mut p = Properties::default();
let file = try!(File::open(path));
let file = BufReader::new(file);
for line in file.lines().map(|l| l.unwrap()) {
// Ignore comment lines
if line.trim().starts_with("#") {
continue
}
let parts: Vec<&str> = line.trim().splitn(2, '=').collect();
let (prop, value) = (parts[0], parts[1]);
match prop {
$($hyphen => p.$field = parse!(value, $fty),)*
prop => { return Err(Error::new(ErrorKind::Other, &format!("Unknown property {}", prop)[..])); }
}
}
Ok(p)
}
/// Saves a server.properties file into `path`. It creates the
/// file if it does not exist, and will truncate it if it does.
pub fn save(&self, path: &Path) -> io::Result<()> {
let file = try!(File::create(path));
let mut file = BufWriter::new(file);
// Header
try!(write!(&mut file, "#Minecraft server properties"));
try!(write!(&mut file, "#(File modification datestamp)"));
// Body. Vanilla MC does write 37 out of 40 properties by default, it
// only writes the 3 left if they are not using default values. It
// also writes them unsorted (possibly because they are stored in a
// HashMap).
$(try!(write!(&mut file, "{}={}\n", $hyphen, self.$field));)*
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn default_save_load() {
use std::env;
use std::fs;
let mut dir = env::temp_dir();
dir.push("default.properties");
let default_props = Properties::default();
match default_props.save(&dir) {
Ok(_) => {},
Err(err) => { panic!("Failed to save server.properties file with error: {}", err); }
}
|
},
Err(err) => {
panic!("Failed to load server.properties file with error: {}", err);
}
}
fs::remove_file(&dir).unwrap();
}
#[test]
fn custom_save_load() {
use std::env;
use std::fs;
let mut dir = env::temp_dir();
dir.push("custom.properties");
let custom_props = Properties{
server_port: 25570,
.. Properties::default()
};
match custom_props.save(&dir) {
Ok(_) => {},
Err(err) => { panic!("Failed to save server.properties file with error: {}", err); }
}
match Properties::load(&dir) {
Ok(props) => { assert_eq!(props, custom_props); },
Err(err) => { panic!("Failed to load server.properties file with error: {}", err); }
}
fs::remove_file(&dir).unwrap();
}
#[test]
fn load_unknown_property() {
use std::env;
use std::error::Error;
use std::fs;
use std::io::Write;
let mut dir = env::temp_dir();
dir.push("unknown.properties");
let mut f = fs::File::create(&dir).unwrap();
f.write_all(b"foo-bar=true\n").unwrap();
match Properties::load(&dir) {
Ok(_) => { panic!("server.properties should have failed to load"); }
Err(err) => { assert_eq!(err.description(), "Unknown property foo-bar"); },
}
fs::remove_file(&dir).unwrap();
}
#[test]
fn decode_default() {
let props = Properties::default();
$(assert_eq!(props.$field, $default));*
}
}
}
}
server_properties_impl! {
{ allow_flight, "allow-flight", bool, false }
{ allow_nether, "allow-nether", bool, true }
{ announce_player_achievements, "announce-player-achievements", bool, true }
{ difficulty, "difficulty", i32, 1 }
{ enable_query, "enable-query", bool, false }
{ enable_rcon, "enable-rcon", bool, false }
{ enable_command_block, "enable-command-block", bool, false }
{ force_gamemode, "force-gamemode", bool, false }
{ gamemode, "gamemode", i32, 0 }
{ generate_structures, "generate-structures", bool, true }
{ generator_settings, "generator-settings", String, "".to_string() }
{ hardcore, "hardcore", bool, false }
{ level_name, "level-name", String, "world".to_string() }
{ level_seed, "level-seed", String, "".to_string() }
{ level_type, "level-type", String, "DEFAULT".to_string() }
{ max_build_height, "max-build-height", i32, 256 }
{ max_players, "max-players", i32, 20 }
{ max_tick_time, "max-tick-time", i32, 60000 }
{ max_world_size, "max-world-size", i32, 29999984 }
{ motd, "motd", String, "A Minecraft Server".to_string() }
{ network_compression_threshold, "network-compression-threshold", i32, 256 }
{ online_mode, "online-mode", bool, true }
{ op_permission_level, "op-permission-level", i32, 4 }
{ player_idle_timeout, "player-idle-timeout", i32, 0 }
{ pvp, "pvp", bool, true }
{ query_port, "query.port", i32, 25565 }
{ rcon_password, "rcon.password", String, "".to_string() }
{ rcon_port, "rcon.port", i32, 25575 }
{ resource_pack, "resource-pack", String, "".to_string() }
{ resource_pack_hash, "resource-pack-hash", String, "".to_string() }
{ server_ip, "server-ip", String, "".to_string() }
{ server_port, "server-port", u16, 25565 }
{ snooper_enabled, "snooper-enabled", bool, true }
{ spawn_animals, "spawn-animals", bool, true }
{ spawn_monsters, "spawn-monsters", bool, true }
{ spawn_npcs, "spawn-npcs", bool, true }
{ spawn_protection, "spawn-protection", i32, 16 }
{ use_native_transport, "use-native-transport", bool, true }
{ view_distance, "view-distance", i32, 10 }
{ white_list, "white-list", bool, false }
}
|
match Properties::load(&dir) {
Ok(props) => {
assert_eq!(props, default_props);
|
random_line_split
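
As a companion to the server.properties record above: a minimal, self-contained sketch of the same parsing strategy the macro-generated `Properties::load` uses (skip `#` comment lines, split on `=`, dispatch on the key). The `MiniProperties` type, its two fields, and the temp-file name are illustrative stand-ins, not part of the original macro invocation.

// Illustrative sketch only: a two-field stand-in for the macro-generated Properties type,
// showing the same "skip # comment lines, split on '=', dispatch on the key" strategy.
use std::fs::File;
use std::io::{self, BufRead, BufReader, Write};
use std::path::Path;

#[derive(Debug, PartialEq)]
struct MiniProperties {
    server_port: u16,
    motd: String,
}

impl MiniProperties {
    fn default() -> MiniProperties {
        MiniProperties { server_port: 25565, motd: "A Minecraft Server".to_string() }
    }

    fn load(path: &Path) -> io::Result<MiniProperties> {
        let mut p = MiniProperties::default();
        let file = BufReader::new(File::open(path)?);
        for line in file.lines() {
            let line = line?;
            let line = line.trim();
            // Comment and blank lines are ignored, as in the generated loader.
            if line.starts_with('#') || line.is_empty() {
                continue;
            }
            let mut parts = line.splitn(2, '=');
            let (prop, value) = (parts.next().unwrap_or(""), parts.next().unwrap_or(""));
            match prop {
                "server-port" => p.server_port = value.parse().unwrap_or(25565),
                "motd" => p.motd = value.to_string(),
                other => {
                    return Err(io::Error::new(
                        io::ErrorKind::Other,
                        format!("Unknown property {}", other),
                    ));
                }
            }
        }
        Ok(p)
    }
}

fn main() -> io::Result<()> {
    let mut path = std::env::temp_dir();
    path.push("mini.properties");
    // Write a small file by hand rather than implementing save(), to keep the sketch short.
    let mut f = File::create(&path)?;
    writeln!(f, "#Minecraft server properties")?;
    writeln!(f, "server-port=25570")?;
    writeln!(f, "motd=Hello")?;
    drop(f);
    let props = MiniProperties::load(&path)?;
    assert_eq!(props, MiniProperties { server_port: 25570, motd: "Hello".to_string() });
    std::fs::remove_file(&path)?;
    Ok(())
}
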
|
options.rs
|
#[feature(convert)]
use std::convert::AsRef;
use std::collections;
use std::fmt;
use syntax::ast;
use syntax::ext::base;
use super::super::utils::string;
//use super::super::utils::attrs::{Attrs,AttrError};
use super::columns::options;
use ::models::model::Model;
/*
ModelOptionsError
error
*/
#[derive(Debug)]
pub enum ModelOptionsError {
NotStruct,
MetaItemError,
NoColumns,
OptionsError,
}
#[derive(Debug)]
pub struct ModelOptions {
pub name: &'static str,
pub db_name: &'static str,
pub primary_key: &'static str,
pub columns: Vec<&'static str>,
pub column_options: collections::BTreeMap<&'static str, options::ColumnOptions>,
}
// Returns model options for given type
pub fn get_model_options<T: Model>() -> ModelOptions {
T::model_options_static()
}
// generates model options implementing ModelOptions trait
#[allow(unused_variables)]
pub fn
|
(ann:&base::Annotatable) -> Result<Vec<String>, ModelOptionsError> {
let mut results:Vec<String> = vec![];
let mut name = "".to_string();
let mut db_name = "".to_string();
let mut primary_key = "".to_string();
let mut columns:Vec<String> = vec![];
let mut column_options:Vec<String> = vec![];
let mut column_inits:Vec<String> = vec![];
if let &base::Annotatable::Item(ref item) = ann {
name = item.ident.name.to_string();
db_name = string::camel_to_snake(name.clone());
primary_key = "".to_string();
// add column names
match get_column_options(ann) {
Ok(items) => {
if items.len() == 0 {
return Err(ModelOptionsError::NoColumns);
}
for (ref key, ref value) in items {
columns.push(format!(r#""{key}""#, key=key));
column_options.push(format!(r#"column_options.insert("{key}", {value})"#, key=key, value=value));
column_inits.push(format!(r#"{name}: treasure::models::columns::column::init_column(model_options.column_options.get("{name}").unwrap())"#,
name=key));
}
let column_names = columns.iter().map(|x| format!(r#""{}""#, x)).collect::<Vec<_>>();
results.push(format!(r#"
impl treasure::Model for {name}
{{
fn model_options_static() -> treasure::ModelOptions {{
use std::collections;
let mut column_options:collections::BTreeMap<&'static str, treasure::ColumnOptions> = collections::BTreeMap::new();
{column_options};
treasure::ModelOptions{{
name:"{name}",
db_name: "{db_name}",
primary_key: "{primary_key}",
columns: vec![{columns}],
column_options: column_options,
}}
}}
fn model_options(&self) -> treasure::ModelOptions {{
Self::model_options_static()
}}
// Constructor function for given model (query will use this)
fn init_new() -> {name} {{
let model_options = Self::model_options_static();
{name} {{{column_inits}}}
}}
}}
"#, name=name, db_name=db_name, primary_key=primary_key,
columns=columns.join(", "),
column_options=column_options.join(";"),
column_inits=column_inits.join(",")
).to_string()
);
},
Err(e) => return Err(e),
};
};
Ok(results)
}
fn get_column_options(ann:&base::Annotatable) -> Result<(Vec<(String, String)>), ModelOptionsError> {
let mut result = vec![];
let nserr = Err(ModelOptionsError::NotStruct);
if let &base::Annotatable::Item(ref item) = ann {
if let ast::Item_::ItemStruct(ref sd, _) = item.node {
match options::get_columns(sd) {
Ok(cols) => {
for (k, v) in cols {
result.push((k.clone(), format!(r#"{}"#, v)))
}
},
Err(_) => return nserr,
};
} else {
return nserr
}
}
Ok(result)
}
|
generate_model_options_impls
|
identifier_name
|
options.rs
|
#[feature(convert)]
use std::convert::AsRef;
use std::collections;
use std::fmt;
use syntax::ast;
use syntax::ext::base;
use super::super::utils::string;
//use super::super::utils::attrs::{Attrs,AttrError};
use super::columns::options;
use ::models::model::Model;
/*
ModelOptionsError
error
*/
#[derive(Debug)]
pub enum ModelOptionsError {
NotStruct,
MetaItemError,
NoColumns,
OptionsError,
}
#[derive(Debug)]
pub struct ModelOptions {
pub name: &'static str,
pub db_name: &'static str,
pub primary_key: &'static str,
pub columns: Vec<&'static str>,
pub column_options: collections::BTreeMap<&'static str, options::ColumnOptions>,
}
// Returns model options for given type
pub fn get_model_options<T: Model>() -> ModelOptions {
T::model_options_static()
}
// generates model options implementing ModelOptions trait
#[allow(unused_variables)]
pub fn generate_model_options_impls(ann:&base::Annotatable) -> Result<Vec<String>, ModelOptionsError> {
let mut results:Vec<String> = vec![];
let mut name = "".to_string();
let mut db_name = "".to_string();
let mut primary_key = "".to_string();
let mut columns:Vec<String> = vec![];
let mut column_options:Vec<String> = vec![];
let mut column_inits:Vec<String> = vec![];
if let &base::Annotatable::Item(ref item) = ann {
name = item.ident.name.to_string();
db_name = string::camel_to_snake(name.clone());
primary_key = "".to_string();
// add column names
match get_column_options(ann) {
Ok(items) => {
if items.len() == 0 {
return Err(ModelOptionsError::NoColumns);
}
for (ref key, ref value) in items {
columns.push(format!(r#""{key}""#, key=key));
column_options.push(format!(r#"column_options.insert("{key}", {value})"#, key=key, value=value));
column_inits.push(format!(r#"{name}: treasure::models::columns::column::init_column(model_options.column_options.get("{name}").unwrap())"#,
name=key));
}
let column_names = columns.iter().map(|x| format!(r#""{}""#, x)).collect::<Vec<_>>();
results.push(format!(r#"
impl treasure::Model for {name}
{{
fn model_options_static() -> treasure::ModelOptions {{
use std::collections;
let mut column_options:collections::BTreeMap<&'static str, treasure::ColumnOptions> = collections::BTreeMap::new();
{column_options};
treasure::ModelOptions{{
name:"{name}",
db_name: "{db_name}",
primary_key: "{primary_key}",
columns: vec![{columns}],
column_options: column_options,
}}
}}
fn model_options(&self) -> treasure::ModelOptions {{
Self::model_options_static()
}}
// Constructor function for given model (query will use this)
fn init_new() -> {name} {{
let model_options = Self::model_options_static();
{name} {{{column_inits}}}
}}
}}
"#, name=name, db_name=db_name, primary_key=primary_key,
columns=columns.join(", "),
column_options=column_options.join(";"),
column_inits=column_inits.join(",")
).to_string()
);
},
Err(e) => return Err(e),
};
};
Ok(results)
|
fn get_column_options(ann:&base::Annotatable) -> Result<(Vec<(String, String)>), ModelOptionsError> {
let mut result = vec![];
let nserr = Err(ModelOptionsError::NotStruct);
if let &base::Annotatable::Item(ref item) = ann {
if let ast::Item_::ItemStruct(ref sd, _) = item.node {
match options::get_columns(sd) {
Ok(cols) => {
for (k, v) in cols {
result.push((k.clone(), format!(r#"{}"#, v)))
}
},
Err(_) => return nserr,
};
} else {
return nserr
}
}
Ok(result)
}
|
}
|
random_line_split
|
options.rs
|
#[feature(convert)]
use std::convert::AsRef;
use std::collections;
use std::fmt;
use syntax::ast;
use syntax::ext::base;
use super::super::utils::string;
//use super::super::utils::attrs::{Attrs,AttrError};
use super::columns::options;
use ::models::model::Model;
/*
ModelOptionsError
error
*/
#[derive(Debug)]
pub enum ModelOptionsError {
NotStruct,
MetaItemError,
NoColumns,
OptionsError,
}
#[derive(Debug)]
pub struct ModelOptions {
pub name: &'static str,
pub db_name: &'static str,
pub primary_key: &'static str,
pub columns: Vec<&'static str>,
pub column_options: collections::BTreeMap<&'static str, options::ColumnOptions>,
}
// Returns model options for given type
pub fn get_model_options<T: Model>() -> ModelOptions {
T::model_options_static()
}
// generates model options implementing ModelOptions trait
#[allow(unused_variables)]
pub fn generate_model_options_impls(ann:&base::Annotatable) -> Result<Vec<String>, ModelOptionsError> {
let mut results:Vec<String> = vec![];
let mut name = "".to_string();
let mut db_name = "".to_string();
let mut primary_key = "".to_string();
let mut columns:Vec<String> = vec![];
let mut column_options:Vec<String> = vec![];
let mut column_inits:Vec<String> = vec![];
if let &base::Annotatable::Item(ref item) = ann {
name = item.ident.name.to_string();
db_name = string::camel_to_snake(name.clone());
primary_key = "".to_string();
// add column names
match get_column_options(ann) {
Ok(items) => {
if items.len() == 0 {
return Err(ModelOptionsError::NoColumns);
}
for (ref key, ref value) in items {
columns.push(format!(r#""{key}""#, key=key));
column_options.push(format!(r#"column_options.insert("{key}", {value})"#, key=key, value=value));
column_inits.push(format!(r#"{name}: treasure::models::columns::column::init_column(model_options.column_options.get("{name}").unwrap())"#,
name=key));
}
let column_names = columns.iter().map(|x| format!(r#""{}""#, x)).collect::<Vec<_>>();
results.push(format!(r#"
impl treasure::Model for {name}
{{
fn model_options_static() -> treasure::ModelOptions {{
use std::collections;
let mut column_options:collections::BTreeMap<&'static str, treasure::ColumnOptions> = collections::BTreeMap::new();
{column_options};
treasure::ModelOptions{{
name:"{name}",
db_name: "{db_name}",
primary_key: "{primary_key}",
columns: vec![{columns}],
column_options: column_options,
}}
}}
fn model_options(&self) -> treasure::ModelOptions {{
Self::model_options_static()
}}
// Constructor function for given model (query will use this)
fn init_new() -> {name} {{
let model_options = Self::model_options_static();
{name} {{{column_inits}}}
}}
}}
"#, name=name, db_name=db_name, primary_key=primary_key,
columns=columns.join(", "),
column_options=column_options.join(";"),
column_inits=column_inits.join(",")
).to_string()
);
},
Err(e) => return Err(e),
};
};
Ok(results)
}
fn get_column_options(ann:&base::Annotatable) -> Result<(Vec<(String, String)>), ModelOptionsError>
|
{
let mut result = vec![];
let nserr = Err(ModelOptionsError::NotStruct);
if let &base::Annotatable::Item(ref item) = ann {
if let ast::Item_::ItemStruct(ref sd, _) = item.node {
match options::get_columns(sd) {
Ok(cols) => {
for (k, v) in cols {
result.push((k.clone(), format!(r#"{}"#, v)))
}
},
Err(_) => return nserr,
};
} else {
return nserr
}
}
Ok(result)
}
|
identifier_body
|
|
deriving-clone-struct.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#[derive(Clone)]
struct S {
_int: isize,
_i8: i8,
_i16: i16,
_i32: i32,
_i64: i64,
_uint: usize,
_u8: u8,
_u16: u16,
_u32: u32,
_u64: u64,
_f32: f32,
_f64: f64,
_bool: bool,
_char: char,
_nil: ()
}
pub fn main() {}
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
deriving-clone-struct.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
#[derive(Clone)]
struct S {
_int: isize,
_i8: i8,
_i16: i16,
_i32: i32,
_i64: i64,
_uint: usize,
_u8: u8,
_u16: u16,
_u32: u32,
_u64: u64,
_f32: f32,
_f64: f64,
_bool: bool,
_char: char,
_nil: ()
}
pub fn
|
() {}
|
main
|
identifier_name
|
class-impl-very-parameterized-trait.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
#[deriving(Show)]
enum cat_type { tuxedo, tabby, tortoiseshell }
impl cmp::PartialEq for cat_type {
fn eq(&self, other: &cat_type) -> bool {
((*self) as uint) == ((*other) as uint)
}
fn ne(&self, other: &cat_type) -> bool { !(*self).eq(other) }
}
// Very silly -- this just returns the value of the name field
// for any int value that's less than the meows field
// ok: T should be in scope when resolving the trait ref for map
struct cat<T> {
// Yes, you can have negative meows
meows : int,
how_hungry : int,
name : T,
}
impl<T> cat<T> {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
println!("OM NOM NOM");
self.how_hungry -= 2;
return true;
} else {
println!("Not hungry!");
return false;
}
}
}
impl<T> Collection for cat<T> {
fn len(&self) -> uint { self.meows as uint }
fn is_empty(&self) -> bool { self.meows == 0 }
}
impl<T> Mutable for cat<T> {
fn clear(&mut self) {}
}
impl<T> Map<int, T> for cat<T> {
fn
|
(&self, k: &int) -> bool { *k <= self.meows }
fn find(&self, k: &int) -> Option<&T> {
if *k <= self.meows {
Some(&self.name)
} else {
None
}
}
}
impl<T> MutableMap<int, T> for cat<T> {
fn insert(&mut self, k: int, _: T) -> bool {
self.meows += k;
true
}
fn find_mut(&mut self, _k: &int) -> Option<&mut T> { fail!() }
fn remove(&mut self, k: &int) -> bool {
if self.find(k).is_some() {
self.meows -= *k; true
} else {
false
}
}
fn pop(&mut self, _k: &int) -> Option<T> { fail!() }
fn swap(&mut self, _k: int, _v: T) -> Option<T> { fail!() }
}
impl<T> cat<T> {
pub fn get(&self, k: &int) -> &T {
match self.find(k) {
Some(v) => { v }
None => { fail!("epic fail"); }
}
}
pub fn new(in_x: int, in_y: int, in_name: T) -> cat<T> {
cat{meows: in_x, how_hungry: in_y, name: in_name }
}
}
impl<T> cat<T> {
fn meow(&mut self) {
self.meows += 1;
println!("Meow {}", self.meows);
if self.meows % 5 == 0 {
self.how_hungry += 1;
}
}
}
pub fn main() {
let mut nyan: cat<String> = cat::new(0, 2, "nyan".to_string());
for _ in range(1u, 5) { nyan.speak(); }
assert!(*nyan.find(&1).unwrap() == "nyan".to_string());
assert_eq!(nyan.find(&10), None);
let mut spotty: cat<cat_type> = cat::new(2, 57, tuxedo);
for _ in range(0u, 6) { spotty.speak(); }
assert_eq!(spotty.len(), 8);
assert!((spotty.contains_key(&2)));
assert_eq!(spotty.get(&3), &tuxedo);
}
|
contains_key
|
identifier_name
|
class-impl-very-parameterized-trait.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
#[deriving(Show)]
enum cat_type { tuxedo, tabby, tortoiseshell }
impl cmp::PartialEq for cat_type {
fn eq(&self, other: &cat_type) -> bool {
((*self) as uint) == ((*other) as uint)
}
fn ne(&self, other: &cat_type) -> bool { !(*self).eq(other) }
}
// Very silly -- this just returns the value of the name field
// for any int value that's less than the meows field
// ok: T should be in scope when resolving the trait ref for map
struct cat<T> {
// Yes, you can have negative meows
meows : int,
how_hungry : int,
name : T,
}
impl<T> cat<T> {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
println!("OM NOM NOM");
self.how_hungry -= 2;
return true;
} else {
println!("Not hungry!");
return false;
}
}
}
impl<T> Collection for cat<T> {
fn len(&self) -> uint { self.meows as uint }
fn is_empty(&self) -> bool { self.meows == 0 }
}
impl<T> Mutable for cat<T> {
fn clear(&mut self) {}
}
impl<T> Map<int, T> for cat<T> {
fn contains_key(&self, k: &int) -> bool { *k <= self.meows }
fn find(&self, k: &int) -> Option<&T> {
if *k <= self.meows {
Some(&self.name)
} else {
None
}
}
}
impl<T> MutableMap<int, T> for cat<T> {
fn insert(&mut self, k: int, _: T) -> bool {
self.meows += k;
true
}
fn find_mut(&mut self, _k: &int) -> Option<&mut T> { fail!() }
fn remove(&mut self, k: &int) -> bool {
if self.find(k).is_some() {
self.meows -= *k; true
} else {
false
}
}
fn pop(&mut self, _k: &int) -> Option<T> { fail!() }
fn swap(&mut self, _k: int, _v: T) -> Option<T> { fail!() }
}
impl<T> cat<T> {
pub fn get(&self, k: &int) -> &T {
match self.find(k) {
Some(v) => { v }
None =>
|
}
}
pub fn new(in_x: int, in_y: int, in_name: T) -> cat<T> {
cat{meows: in_x, how_hungry: in_y, name: in_name }
}
}
impl<T> cat<T> {
fn meow(&mut self) {
self.meows += 1;
println!("Meow {}", self.meows);
if self.meows % 5 == 0 {
self.how_hungry += 1;
}
}
}
pub fn main() {
let mut nyan: cat<String> = cat::new(0, 2, "nyan".to_string());
for _ in range(1u, 5) { nyan.speak(); }
assert!(*nyan.find(&1).unwrap() == "nyan".to_string());
assert_eq!(nyan.find(&10), None);
let mut spotty: cat<cat_type> = cat::new(2, 57, tuxedo);
for _ in range(0u, 6) { spotty.speak(); }
assert_eq!(spotty.len(), 8);
assert!((spotty.contains_key(&2)));
assert_eq!(spotty.get(&3), &tuxedo);
}
|
{ fail!("epic fail"); }
|
conditional_block
|
class-impl-very-parameterized-trait.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
#[deriving(Show)]
enum cat_type { tuxedo, tabby, tortoiseshell }
impl cmp::PartialEq for cat_type {
fn eq(&self, other: &cat_type) -> bool {
((*self) as uint) == ((*other) as uint)
}
fn ne(&self, other: &cat_type) -> bool { !(*self).eq(other) }
}
// Very silly -- this just returns the value of the name field
// for any int value that's less than the meows field
// ok: T should be in scope when resolving the trait ref for map
struct cat<T> {
// Yes, you can have negative meows
meows : int,
how_hungry : int,
name : T,
}
impl<T> cat<T> {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
println!("OM NOM NOM");
self.how_hungry -= 2;
return true;
} else {
println!("Not hungry!");
return false;
}
}
}
impl<T> Collection for cat<T> {
fn len(&self) -> uint { self.meows as uint }
fn is_empty(&self) -> bool { self.meows == 0 }
}
impl<T> Mutable for cat<T> {
fn clear(&mut self) {}
}
impl<T> Map<int, T> for cat<T> {
fn contains_key(&self, k: &int) -> bool { *k <= self.meows }
fn find(&self, k: &int) -> Option<&T> {
if *k <= self.meows {
Some(&self.name)
} else {
None
}
}
}
impl<T> MutableMap<int, T> for cat<T> {
fn insert(&mut self, k: int, _: T) -> bool {
self.meows += k;
true
}
fn find_mut(&mut self, _k: &int) -> Option<&mut T> { fail!() }
fn remove(&mut self, k: &int) -> bool {
if self.find(k).is_some() {
self.meows -= *k; true
} else {
false
}
}
fn pop(&mut self, _k: &int) -> Option<T> { fail!() }
fn swap(&mut self, _k: int, _v: T) -> Option<T> { fail!() }
}
impl<T> cat<T> {
pub fn get(&self, k: &int) -> &T {
match self.find(k) {
Some(v) => { v }
None => { fail!("epic fail"); }
}
}
pub fn new(in_x: int, in_y: int, in_name: T) -> cat<T> {
cat{meows: in_x, how_hungry: in_y, name: in_name }
}
}
impl<T> cat<T> {
fn meow(&mut self) {
self.meows += 1;
println!("Meow {}", self.meows);
if self.meows % 5 == 0 {
self.how_hungry += 1;
}
}
}
pub fn main()
|
{
let mut nyan: cat<String> = cat::new(0, 2, "nyan".to_string());
for _ in range(1u, 5) { nyan.speak(); }
assert!(*nyan.find(&1).unwrap() == "nyan".to_string());
assert_eq!(nyan.find(&10), None);
let mut spotty: cat<cat_type> = cat::new(2, 57, tuxedo);
for _ in range(0u, 6) { spotty.speak(); }
assert_eq!(spotty.len(), 8);
assert!((spotty.contains_key(&2)));
assert_eq!(spotty.get(&3), &tuxedo);
}
|
identifier_body
|
|
class-impl-very-parameterized-trait.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::cmp;
|
impl cmp::PartialEq for cat_type {
fn eq(&self, other: &cat_type) -> bool {
((*self) as uint) == ((*other) as uint)
}
fn ne(&self, other: &cat_type) -> bool { !(*self).eq(other) }
}
// Very silly -- this just returns the value of the name field
// for any int value that's less than the meows field
// ok: T should be in scope when resolving the trait ref for map
struct cat<T> {
// Yes, you can have negative meows
meows : int,
how_hungry : int,
name : T,
}
impl<T> cat<T> {
pub fn speak(&mut self) { self.meow(); }
pub fn eat(&mut self) -> bool {
if self.how_hungry > 0 {
println!("OM NOM NOM");
self.how_hungry -= 2;
return true;
} else {
println!("Not hungry!");
return false;
}
}
}
impl<T> Collection for cat<T> {
fn len(&self) -> uint { self.meows as uint }
fn is_empty(&self) -> bool { self.meows == 0 }
}
impl<T> Mutable for cat<T> {
fn clear(&mut self) {}
}
impl<T> Map<int, T> for cat<T> {
fn contains_key(&self, k: &int) -> bool { *k <= self.meows }
fn find(&self, k: &int) -> Option<&T> {
if *k <= self.meows {
Some(&self.name)
} else {
None
}
}
}
impl<T> MutableMap<int, T> for cat<T> {
fn insert(&mut self, k: int, _: T) -> bool {
self.meows += k;
true
}
fn find_mut(&mut self, _k: &int) -> Option<&mut T> { fail!() }
fn remove(&mut self, k: &int) -> bool {
if self.find(k).is_some() {
self.meows -= *k; true
} else {
false
}
}
fn pop(&mut self, _k: &int) -> Option<T> { fail!() }
fn swap(&mut self, _k: int, _v: T) -> Option<T> { fail!() }
}
impl<T> cat<T> {
pub fn get(&self, k: &int) -> &T {
match self.find(k) {
Some(v) => { v }
None => { fail!("epic fail"); }
}
}
pub fn new(in_x: int, in_y: int, in_name: T) -> cat<T> {
cat{meows: in_x, how_hungry: in_y, name: in_name }
}
}
impl<T> cat<T> {
fn meow(&mut self) {
self.meows += 1;
println!("Meow {}", self.meows);
if self.meows % 5 == 0 {
self.how_hungry += 1;
}
}
}
pub fn main() {
let mut nyan: cat<String> = cat::new(0, 2, "nyan".to_string());
for _ in range(1u, 5) { nyan.speak(); }
assert!(*nyan.find(&1).unwrap() == "nyan".to_string());
assert_eq!(nyan.find(&10), None);
let mut spotty: cat<cat_type> = cat::new(2, 57, tuxedo);
for _ in range(0u, 6) { spotty.speak(); }
assert_eq!(spotty.len(), 8);
assert!((spotty.contains_key(&2)));
assert_eq!(spotty.get(&3), &tuxedo);
}
|
#[deriving(Show)]
enum cat_type { tuxedo, tabby, tortoiseshell }
|
random_line_split
|
mod.rs
|
use std::io::Write;
pub mod export;
pub mod import;
pub fn print_progress(noun: &str, start_time: ::time::Timespec, done: usize, total: usize) {
let remaining_jobs = total - done;
let progress: f64 = 100f64 * done as f64 / total as f64;
let current_time = ::time::get_time().sec;
let time_per_job = (current_time - start_time.sec) as f64 / done as f64;
let remaining_time = time_per_job * remaining_jobs as f64;
print!("\r{} {}/{} complete\t{:.2}% [{}]",
noun, done, total, progress,
::util::make_progress_bar(progress / 100.0, 20)
);
if remaining_jobs == 0 {
println!(" (took {:.2} min) ", (current_time - start_time.sec) as f64 / 60.0);
} else {
print!(" ETA {:.2} min ", remaining_time / 60.0);
::std::io::stdout().flush().ok().expect("failed to flush io");
}
}
fn make_progress_bar(ratio: f64, length: usize) -> String {
let filled = (ratio * length as f64).round() as usize;
let mut bar: String = repeat('|').take(filled).collect();
for _ in 0..(length - filled) {
bar.push('-');
}
bar
}
|
use std::iter::repeat;
|
random_line_split
|
|
mod.rs
|
use std::iter::repeat;
use std::io::Write;
pub mod export;
pub mod import;
pub fn
|
(noun: &str, start_time: ::time::Timespec, done: usize, total: usize) {
let remaining_jobs = total - done;
let progress: f64 = 100f64 * done as f64 / total as f64;
let current_time = ::time::get_time().sec;
let time_per_job = (current_time - start_time.sec) as f64 / done as f64;
let remaining_time = time_per_job * remaining_jobs as f64;
print!("\r{} {}/{} complete\t{:.2}% [{}]",
noun, done, total, progress,
::util::make_progress_bar(progress / 100.0, 20)
);
if remaining_jobs == 0 {
println!(" (took {:.2} min) ", (current_time - start_time.sec) as f64 / 60.0);
} else {
print!(" ETA {:.2} min ", remaining_time / 60.0);
::std::io::stdout().flush().ok().expect("failed to flush io");
}
}
fn make_progress_bar(ratio: f64, length: usize) -> String {
let filled = (ratio * length as f64).round() as usize;
let mut bar: String = repeat('|').take(filled).collect();
for _ in 0..(length - filled) {
bar.push('-');
}
bar
}
|
print_progress
|
identifier_name
|
mod.rs
|
use std::iter::repeat;
use std::io::Write;
pub mod export;
pub mod import;
pub fn print_progress(noun: &str, start_time: ::time::Timespec, done: usize, total: usize)
|
fn make_progress_bar(ratio: f64, length: usize) -> String {
let filled = (ratio * length as f64).round() as usize;
let mut bar: String = repeat('|').take(filled).collect();
for _ in 0..(length - filled) {
bar.push('-');
}
bar
}
|
{
let remaining_jobs = total - done;
let progress: f64 = 100f64 * done as f64 / total as f64;
let current_time = ::time::get_time().sec;
let time_per_job = (current_time - start_time.sec) as f64 / done as f64;
let remaining_time = time_per_job * remaining_jobs as f64;
print!("\r{} {}/{} complete\t{:.2}% [{}]",
noun, done, total, progress,
::util::make_progress_bar(progress / 100.0, 20)
);
if remaining_jobs == 0 {
println!(" (took {:.2} min) ", (current_time - start_time.sec) as f64 / 60.0);
} else {
print!(" ETA {:.2} min ", remaining_time / 60.0);
::std::io::stdout().flush().ok().expect("failed to flush io");
}
}
|
identifier_body
|
mod.rs
|
use std::iter::repeat;
use std::io::Write;
pub mod export;
pub mod import;
pub fn print_progress(noun: &str, start_time: ::time::Timespec, done: usize, total: usize) {
let remaining_jobs = total - done;
let progress: f64 = 100f64 * done as f64 / total as f64;
let current_time = ::time::get_time().sec;
let time_per_job = (current_time - start_time.sec) as f64 / done as f64;
let remaining_time = time_per_job * remaining_jobs as f64;
print!("\r{} {}/{} complete\t{:.2}% [{}]",
noun, done, total, progress,
::util::make_progress_bar(progress / 100.0, 20)
);
if remaining_jobs == 0 {
println!(" (took {:.2} min) ", (current_time - start_time.sec) as f64 / 60.0);
} else
|
}
fn make_progress_bar(ratio: f64, length: usize) -> String {
let filled = (ratio * length as f64).round() as usize;
let mut bar: String = repeat('|').take(filled).collect();
for _ in 0..(length - filled) {
bar.push('-');
}
bar
}
|
{
print!(" ETA {:.2} min ", remaining_time / 60.0);
::std::io::stdout().flush().ok().expect("failed to flush io");
}
|
conditional_block
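
The ETA logic in the `print_progress` records above is plain rate extrapolation: seconds elapsed divided by jobs done gives seconds per job, multiplied by the jobs remaining. Below is a small standalone sketch of that arithmetic and of `make_progress_bar`'s output; it takes elapsed time as a plain `f64` of seconds instead of a `::time::Timespec`, an assumption made only to keep the sketch dependency-free.

// Standalone sketch of the ETA math and bar rendering used by print_progress above;
// timings are passed in as plain seconds instead of ::time::Timespec values.
fn eta_seconds(elapsed_secs: f64, done: usize, total: usize) -> f64 {
    let time_per_job = elapsed_secs / done as f64;
    time_per_job * (total - done) as f64
}

fn make_progress_bar(ratio: f64, length: usize) -> String {
    let filled = (ratio * length as f64).round() as usize;
    let mut bar: String = std::iter::repeat('|').take(filled).collect();
    for _ in 0..(length - filled) {
        bar.push('-');
    }
    bar
}

fn main() {
    // 25 of 100 jobs done after 50 s => 2 s per job => 150 s (2.5 min) remaining.
    assert_eq!(eta_seconds(50.0, 25, 100), 150.0);
    // A quarter-full 20-character bar: 5 pipes, 15 dashes.
    assert_eq!(make_progress_bar(0.25, 20), "|||||---------------");
}
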
|
lib.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Simple numerics.
//!
//! This crate contains arbitrary-sized integer, rational, and complex types.
|
//! approximate a square root to arbitrary precision:
//!
//! ```
//! extern crate num;
//!
//! use num::bigint::BigInt;
//! use num::rational::{Ratio, BigRational};
//!
//! fn approx_sqrt(number: u64, iterations: uint) -> BigRational {
//! let start: Ratio<BigInt> = Ratio::from_integer(FromPrimitive::from_u64(number).unwrap());
//! let mut approx = start.clone();
//!
//! for _ in range(0, iterations) {
//! approx = (approx + (start / approx)) /
//! Ratio::from_integer(FromPrimitive::from_u64(2).unwrap());
//! }
//!
//! approx
//! }
//!
//! fn main() {
//! println!("{}", approx_sqrt(10, 4)); // prints 4057691201/1283082416
//! }
//! ```
//!
//! [newt]: https://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Babylonian_method
#![feature(macro_rules)]
#![crate_name = "num"]
#![experimental]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
#![allow(deprecated)] // from_str_radix
extern crate rand;
pub use bigint::{BigInt, BigUint};
pub use rational::{Rational, BigRational};
pub use complex::Complex;
pub use integer::Integer;
pub mod bigint;
pub mod complex;
pub mod integer;
pub mod rational;
|
//!
//! ## Example
//!
//! This example uses the BigRational type and [Newton's method][newt] to
|
random_line_split
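
The `approx_sqrt` doc example above is the Babylonian (Newton) iteration x_{n+1} = (x_n + S/x_n) / 2 carried out in exact `BigRational` arithmetic. The same recurrence with plain `f64` (a substitution made here only so the sketch needs no external crate) lands on essentially the same value after four iterations.

// Babylonian (Newton) iteration for a square root, mirroring the BigRational doc
// example above but using f64 instead of arbitrary-precision rationals.
fn approx_sqrt(number: f64, iterations: u32) -> f64 {
    let mut approx = number;
    for _ in 0..iterations {
        approx = (approx + number / approx) / 2.0;
    }
    approx
}

fn main() {
    let s = approx_sqrt(10.0, 4);
    // The doc example prints 4057691201/1283082416 ≈ 3.1624556 after 4 iterations;
    // the f64 version lands on the same value, within about 2e-4 of sqrt(10).
    println!("{}", s);
    assert!((s - 10f64.sqrt()).abs() < 1e-3);
}
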
|
lib.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "rustrt"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(macro_rules, phase, globs, thread_local, asm)]
#![feature(linkage, lang_items, unsafe_destructor, default_type_params)]
#![feature(import_shadowing, slicing_syntax)]
#![no_std]
#![experimental]
#[phase(plugin, link)] extern crate core;
extern crate alloc;
extern crate libc;
extern crate collections;
#[cfg(test)] extern crate "rustrt" as realrustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
pub use self::util::{Stdio, Stdout, Stderr};
pub use self::unwind::{begin_unwind, begin_unwind_fmt};
use core::prelude::*;
use alloc::boxed::Box;
use core::any::Any;
use task::{Task, BlockedTask, TaskOpts};
mod macros;
mod at_exit_imp;
mod local_ptr;
mod thread_local_storage;
mod util;
mod libunwind;
mod stack_overflow;
pub mod args;
pub mod bookkeeping;
pub mod c_str;
pub mod exclusive;
pub mod local;
pub mod local_data;
pub mod mutex;
pub mod rtio;
pub mod stack;
pub mod task;
pub mod thread;
pub mod unwind;
/// The interface to the current runtime.
///
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
// Necessary scheduling functions, used for channels and blocking I/O
// (sometimes).
fn yield_now(self: Box<Self>, cur_task: Box<Task>);
|
fn maybe_yield(self: Box<Self>, cur_task: Box<Task>);
fn deschedule(self: Box<Self>,
times: uint,
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>);
fn reawaken(self: Box<Self>, to_wake: Box<Task>);
// Miscellaneous calls which are very different depending on what context
// you're in.
fn spawn_sibling(self: Box<Self>,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send);
/// The (low, high) edges of the current stack.
fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
/// The last writable byte of the stack next to the guard page
fn stack_guard(&self) -> Option<uint>;
fn can_block(&self) -> bool;
// FIXME: This is a serious code smell and this should not exist at all.
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
/// The default error code of the rust runtime if the main task panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: int = 101;
/// One-time runtime initialization.
///
/// Initializes global state, including frobbing the crate's logging flags,
/// and storing the process arguments.
pub fn init(argc: int, argv: *const *const u8) {
// FIXME: Derefing these pointers is not safe.
// Need to propagate the unsafety to `start`.
unsafe {
args::init(argc, argv);
local_ptr::init();
at_exit_imp::init();
thread::init();
}
// FIXME(#14344) this shouldn't be necessary
collections::fixme_14344_be_sure_to_link_to_collections();
alloc::fixme_14344_be_sure_to_link_to_collections();
libc::issue_14344_workaround();
}
/// Enqueues a procedure to run when the runtime is cleaned up
///
/// The procedure passed to this function will be executed as part of the
/// runtime cleanup phase. For normal rust programs, this means that it will run
/// after all other tasks have exited.
///
/// The procedure is *not* executed with a local `Task` available to it, so
/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
/// This is meant for "bare bones" usage to clean up runtime details, this is
/// not meant as a general-purpose "let's clean everything up" function.
///
/// It is forbidden for procedures to register more `at_exit` handlers when they
/// are running, and doing so will lead to a process abort.
pub fn at_exit(f: proc():Send) {
at_exit_imp::push(f);
}
/// One-time runtime cleanup.
///
/// This function is unsafe because it performs no checks to ensure that the
/// runtime has completely ceased running. It is the responsibility of the
/// caller to ensure that the runtime is entirely shut down and nothing will be
/// poking around at the internal components.
///
/// Invoking cleanup while portions of the runtime are still in use may cause
/// undefined behavior.
pub unsafe fn cleanup() {
bookkeeping::wait_for_other_tasks();
at_exit_imp::run();
args::cleanup();
thread::cleanup();
local_ptr::cleanup();
}
// FIXME: these probably shouldn't be public...
#[doc(hidden)]
pub mod shouldnt_be_public {
#[cfg(not(test))]
pub use super::local_ptr::native::maybe_tls_key;
#[cfg(all(not(windows), not(target_os = "android"), not(target_os = "ios")))]
pub use super::local_ptr::compiled::RT_TLS_PTR;
}
#[cfg(not(test))]
mod std {
pub use core::{fmt, option, cmp};
}
|
random_line_split
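
The `at_exit`/`cleanup` contract documented in the rustrt record above (handlers queued while the program runs, executed exactly once during shutdown, with late registration forbidden) can be sketched conceptually in modern Rust as below. This is not the rustrt implementation, which lives in `at_exit_imp` and predates Rust 1.0 closures; the `AtExit` type and its methods are invented purely for illustration.

// Conceptual sketch of an at_exit registry: push closures while the program runs,
// drain and invoke them once during cleanup. Not the actual rustrt at_exit_imp code.
use std::sync::Mutex;

type Callback = Box<dyn FnOnce() + Send>;

struct AtExit {
    // None once cleanup has started: late registrations are rejected, mirroring the
    // "registering more handlers while they run leads to an abort" rule above.
    queue: Mutex<Option<Vec<Callback>>>,
}

impl AtExit {
    fn new() -> AtExit {
        AtExit { queue: Mutex::new(Some(Vec::new())) }
    }

    fn push(&self, f: Callback) {
        match self.queue.lock().unwrap().as_mut() {
            Some(q) => q.push(f),
            None => panic!("at_exit called after cleanup started"),
        }
    }

    fn run(&self) {
        // Take the whole queue so handlers cannot register new ones while running.
        let callbacks = self.queue.lock().unwrap().take().unwrap_or_default();
        for f in callbacks {
            f();
        }
    }
}

fn main() {
    let registry = AtExit::new();
    registry.push(Box::new(|| println!("flushing bookkeeping")));
    registry.push(Box::new(|| println!("closing handles")));
    // ... program runs ...
    registry.run(); // one-time cleanup; handlers execute in registration order
}
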
|
|
lib.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "rustrt"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(macro_rules, phase, globs, thread_local, asm)]
#![feature(linkage, lang_items, unsafe_destructor, default_type_params)]
#![feature(import_shadowing, slicing_syntax)]
#![no_std]
#![experimental]
#[phase(plugin, link)] extern crate core;
extern crate alloc;
extern crate libc;
extern crate collections;
#[cfg(test)] extern crate "rustrt" as realrustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
pub use self::util::{Stdio, Stdout, Stderr};
pub use self::unwind::{begin_unwind, begin_unwind_fmt};
use core::prelude::*;
use alloc::boxed::Box;
use core::any::Any;
use task::{Task, BlockedTask, TaskOpts};
mod macros;
mod at_exit_imp;
mod local_ptr;
mod thread_local_storage;
mod util;
mod libunwind;
mod stack_overflow;
pub mod args;
pub mod bookkeeping;
pub mod c_str;
pub mod exclusive;
pub mod local;
pub mod local_data;
pub mod mutex;
pub mod rtio;
pub mod stack;
pub mod task;
pub mod thread;
pub mod unwind;
/// The interface to the current runtime.
///
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
// Necessary scheduling functions, used for channels and blocking I/O
// (sometimes).
fn yield_now(self: Box<Self>, cur_task: Box<Task>);
fn maybe_yield(self: Box<Self>, cur_task: Box<Task>);
fn deschedule(self: Box<Self>,
times: uint,
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>);
fn reawaken(self: Box<Self>, to_wake: Box<Task>);
// Miscellaneous calls which are very different depending on what context
// you're in.
fn spawn_sibling(self: Box<Self>,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send);
/// The (low, high) edges of the current stack.
fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
/// The last writable byte of the stack next to the guard page
fn stack_guard(&self) -> Option<uint>;
fn can_block(&self) -> bool;
// FIXME: This is a serious code smell and this should not exist at all.
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
/// The default error code of the rust runtime if the main task panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: int = 101;
/// One-time runtime initialization.
///
/// Initializes global state, including frobbing the crate's logging flags,
/// and storing the process arguments.
pub fn init(argc: int, argv: *const *const u8) {
// FIXME: Derefing these pointers is not safe.
// Need to propagate the unsafety to `start`.
unsafe {
args::init(argc, argv);
local_ptr::init();
at_exit_imp::init();
thread::init();
}
// FIXME(#14344) this shouldn't be necessary
collections::fixme_14344_be_sure_to_link_to_collections();
alloc::fixme_14344_be_sure_to_link_to_collections();
libc::issue_14344_workaround();
}
/// Enqueues a procedure to run when the runtime is cleaned up
///
/// The procedure passed to this function will be executed as part of the
/// runtime cleanup phase. For normal rust programs, this means that it will run
/// after all other tasks have exited.
///
/// The procedure is *not* executed with a local `Task` available to it, so
/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
/// This is meant for "bare bones" usage to clean up runtime details, this is
/// not meant as a general-purpose "let's clean everything up" function.
///
/// It is forbidden for procedures to register more `at_exit` handlers when they
/// are running, and doing so will lead to a process abort.
pub fn at_exit(f: proc():Send) {
at_exit_imp::push(f);
}
/// One-time runtime cleanup.
///
/// This function is unsafe because it performs no checks to ensure that the
/// runtime has completely ceased running. It is the responsibility of the
/// caller to ensure that the runtime is entirely shut down and nothing will be
/// poking around at the internal components.
///
/// Invoking cleanup while portions of the runtime are still in use may cause
/// undefined behavior.
pub unsafe fn cleanup()
|
// FIXME: these probably shouldn't be public...
#[doc(hidden)]
pub mod shouldnt_be_public {
#[cfg(not(test))]
pub use super::local_ptr::native::maybe_tls_key;
#[cfg(all(not(windows), not(target_os = "android"), not(target_os = "ios")))]
pub use super::local_ptr::compiled::RT_TLS_PTR;
}
#[cfg(not(test))]
mod std {
pub use core::{fmt, option, cmp};
}
|
{
bookkeeping::wait_for_other_tasks();
at_exit_imp::run();
args::cleanup();
thread::cleanup();
local_ptr::cleanup();
}
|
identifier_body
|
lib.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_name = "rustrt"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(macro_rules, phase, globs, thread_local, asm)]
#![feature(linkage, lang_items, unsafe_destructor, default_type_params)]
#![feature(import_shadowing, slicing_syntax)]
#![no_std]
#![experimental]
#[phase(plugin, link)] extern crate core;
extern crate alloc;
extern crate libc;
extern crate collections;
#[cfg(test)] extern crate "rustrt" as realrustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
pub use self::util::{Stdio, Stdout, Stderr};
pub use self::unwind::{begin_unwind, begin_unwind_fmt};
use core::prelude::*;
use alloc::boxed::Box;
use core::any::Any;
use task::{Task, BlockedTask, TaskOpts};
mod macros;
mod at_exit_imp;
mod local_ptr;
mod thread_local_storage;
mod util;
mod libunwind;
mod stack_overflow;
pub mod args;
pub mod bookkeeping;
pub mod c_str;
pub mod exclusive;
pub mod local;
pub mod local_data;
pub mod mutex;
pub mod rtio;
pub mod stack;
pub mod task;
pub mod thread;
pub mod unwind;
/// The interface to the current runtime.
///
/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
/// two independent crates, libnative and libgreen, both have objects which
/// implement this trait. The goal of this trait is to encompass all the
/// fundamental differences in functionality between the 1:1 and M:N runtime
/// modes.
pub trait Runtime {
// Necessary scheduling functions, used for channels and blocking I/O
// (sometimes).
fn yield_now(self: Box<Self>, cur_task: Box<Task>);
fn maybe_yield(self: Box<Self>, cur_task: Box<Task>);
fn deschedule(self: Box<Self>,
times: uint,
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>);
fn reawaken(self: Box<Self>, to_wake: Box<Task>);
// Miscellaneous calls which are very different depending on what context
// you're in.
fn spawn_sibling(self: Box<Self>,
cur_task: Box<Task>,
opts: TaskOpts,
f: proc():Send);
/// The (low, high) edges of the current stack.
fn stack_bounds(&self) -> (uint, uint); // (lo, hi)
/// The last writable byte of the stack next to the guard page
fn stack_guard(&self) -> Option<uint>;
fn can_block(&self) -> bool;
// FIXME: This is a serious code smell and this should not exist at all.
fn wrap(self: Box<Self>) -> Box<Any+'static>;
}
/// The default error code of the rust runtime if the main task panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: int = 101;
/// One-time runtime initialization.
///
/// Initializes global state, including frobbing the crate's logging flags,
/// and storing the process arguments.
pub fn init(argc: int, argv: *const *const u8) {
// FIXME: Derefing these pointers is not safe.
// Need to propagate the unsafety to `start`.
unsafe {
args::init(argc, argv);
local_ptr::init();
at_exit_imp::init();
thread::init();
}
// FIXME(#14344) this shouldn't be necessary
collections::fixme_14344_be_sure_to_link_to_collections();
alloc::fixme_14344_be_sure_to_link_to_collections();
libc::issue_14344_workaround();
}
/// Enqueues a procedure to run when the runtime is cleaned up
///
/// The procedure passed to this function will be executed as part of the
/// runtime cleanup phase. For normal rust programs, this means that it will run
/// after all other tasks have exited.
///
/// The procedure is *not* executed with a local `Task` available to it, so
/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
/// This is meant for "bare bones" usage to clean up runtime details, this is
/// not meant as a general-purpose "let's clean everything up" function.
///
/// It is forbidden for procedures to register more `at_exit` handlers when they
/// are running, and doing so will lead to a process abort.
pub fn at_exit(f: proc():Send) {
at_exit_imp::push(f);
}
/// One-time runtime cleanup.
///
/// This function is unsafe because it performs no checks to ensure that the
/// runtime has completely ceased running. It is the responsibility of the
/// caller to ensure that the runtime is entirely shut down and nothing will be
/// poking around at the internal components.
///
/// Invoking cleanup while portions of the runtime are still in use may cause
/// undefined behavior.
pub unsafe fn
|
() {
bookkeeping::wait_for_other_tasks();
at_exit_imp::run();
args::cleanup();
thread::cleanup();
local_ptr::cleanup();
}
// FIXME: these probably shouldn't be public...
#[doc(hidden)]
pub mod shouldnt_be_public {
#[cfg(not(test))]
pub use super::local_ptr::native::maybe_tls_key;
#[cfg(all(not(windows), not(target_os = "android"), not(target_os = "ios")))]
pub use super::local_ptr::compiled::RT_TLS_PTR;
}
#[cfg(not(test))]
mod std {
pub use core::{fmt, option, cmp};
}
|
cleanup
|
identifier_name
|
walkState.rs
|
# order is:
# Heading
# Position
# Orientation
# Velocity
# AngularVelocity
# Relative Orientation
# Relative Angular Velocity
#----------------
# Heading
0
# Root(body)
0 0.562244 0
0.999554 0.029849 -0.000000 -0.000152
0.249120 0.375816 0.866547
0.257640 0.100468 0.388803
# body_neck
0.971329 0.236049 -0.026133 0.010859
1.284002 -0.001600 -0.184769
# lHip
0.945588 -0.308475 0.102839 0.011456
3.254196 -0.322589 -1.152888
# rHip
0.991720 -0.055775 -0.068613 -0.093132
1.945090 1.340484 -0.409870
# lShoulder
0.999031 0.044015 0.000000 -0.000000
-0.846975 0.000081 -0.000029
# rShoulder
0.999919 0.012727 0.000000 -0.000000
-0.322678 0.000031 -0.000011
|
# tail_1
0.997887 0.059312 0.026492 -0.001575
-0.944952 -0.316730 0.050275
# neck_head
0.978705 -0.204796 -0.013719 0.002532
-0.816336 -0.004197 0.002574
# lKnee
0.821044 0.570865 -0.000000 0.000000
-2.636854 -0.000040 0.000011
# rKnee
0.835187 0.549966 -0.000001 -0.000001
-2.321718 -0.000044 -0.001092
# lElbow
0.808695 -0.588228 -0.000000 -0.000000
3.590452 -0.000337 0.000157
# rElbow
0.795908 -0.605418 -0.000000 -0.000000
3.686549 -0.000358 0.000140
# tail_2
0.999502 0.022834 0.021787 -0.000498
-0.247141 -0.025675 0.010785
# lAnkle
0.788617 -0.614885 -0.000000 0.000000
-0.973770 -0.000000 0.000012
# rAnkle
0.877432 -0.479702 0.000001 0.000001
1.503026 0.000674 0.000254
# tail_3
0.998683 -0.048837 0.015720 0.000769
0.106479 0.102742 -0.003331
# lToeJoint
0.943267 0.331479 0.006367 0.018119
-0.005999 -0.000237 0.072710
# rToeJoint
0.995236 -0.043699 -0.003823 0.087069
-0.070184 -0.012520 -0.498030
# tail_4
0.999432 -0.031060 0.013064 0.000406
0.296221 0.162244 -0.007705
# tail_5
0.999542 -0.027252 0.013160 0.000359
0.464454 0.226639 -0.012165
# tail_6
0.999821 -0.016443 0.009325 0.000153
0.398090 0.177272 -0.007375
|
random_line_split
|
|
main.rs
|
struct Counter {
count: u32,
}
impl Counter {
fn
|
() -> Counter {
Counter { count: 0 }
}
}
// define the iterator functionality
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
if self.count < 5 {
self.count += 1;
Some(self.count)
} else {
None
}
}
}
fn main() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
// use an iterator - this consumes the iterator
// v1_iter so v1_iter cannot be used again
for val in v1_iter {
println!("Value is: {}", val);
}
// after the sum() call, you can no longer use v2_iter
// because sum() consumes the iterator
let v2_iter = v1.iter();
let total: i32 = v2_iter.sum();
if total == 6 {
println!("Found total!");
} else {
println!("Something went wrong!");
}
// map() is lazily evaluated and doesn't do anything on its own
// we call collect() to evaluate the map() functionality/closure
let x1: Vec<i32> = vec![4, 5, 6];
let x2: Vec<_> = x1.iter().map(|z| z + 1).collect();
if x2 == vec![5, 6, 7] {
println!("collect() worked as expected!");
} else {
println!("Something went wrong with collect()!");
}
let mut counter = Counter::new();
println!("Value 1: {:?}", counter.next());
println!("Value 2: {:?}", counter.next());
println!("Value 3: {:?}", counter.next());
println!("Value 4: {:?}", counter.next());
println!("Value 5: {:?}", counter.next());
}
|
new
|
identifier_name
|
main.rs
|
struct Counter {
count: u32,
}
impl Counter {
fn new() -> Counter {
Counter { count: 0 }
}
}
// define the iterator functionality
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
if self.count < 5 {
self.count += 1;
Some(self.count)
} else
|
}
}
fn main() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
// use an iterator - this consumes the iterator
// v1_iter so v1_iter cannot be used again
for val in v1_iter {
println!("Value is: {}", val);
}
// after the sum() call, you can no longer use v2_iter
// because sum() consumes the iterator
let v2_iter = v1.iter();
let total: i32 = v2_iter.sum();
if total == 6 {
println!("Found total!");
} else {
println!("Something went wrong!");
}
// map() is lazily evaluated and doesn't do anything on its own
// we call collect() to evaluate the map() functionality/closure
let x1: Vec<i32> = vec![4, 5, 6];
let x2: Vec<_> = x1.iter().map(|z| z + 1).collect();
if x2 == vec![5, 6, 7] {
println!("collect() worked as expected!");
} else {
println!("Something went wrong with collect()!");
}
let mut counter = Counter::new();
println!("Value 1: {:?}", counter.next());
println!("Value 2: {:?}", counter.next());
println!("Value 3: {:?}", counter.next());
println!("Value 4: {:?}", counter.next());
println!("Value 5: {:?}", counter.next());
}
|
{
None
}
|
conditional_block
|
main.rs
|
struct Counter {
count: u32,
}
impl Counter {
fn new() -> Counter {
Counter { count: 0 }
}
}
// define the iterator functionality
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
if self.count < 5 {
self.count += 1;
Some(self.count)
} else {
None
}
}
}
fn main() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
// use an iterator - this consumes the iterator
// v1_iter so v1_iter cannot be used again
for val in v1_iter {
println!("Value is: {}", val);
}
|
// after the sum() call, you can no longer use v2_iter
// because sum() consumes the iterator
let v2_iter = v1.iter();
let total: i32 = v2_iter.sum();
if total == 6 {
println!("Found total!");
} else {
println!("Something went wrong!");
}
// map() is lazily evaluated and doesn't do anything on its own
// we call collect() to evaluate the map() functionality/closure
let x1: Vec<i32> = vec![4, 5, 6];
let x2: Vec<_> = x1.iter().map(|z| z + 1).collect();
if x2 == vec![5, 6, 7] {
println!("collect() worked as expected!");
} else {
println!("Something went wrong with collect()!");
}
let mut counter = Counter::new();
println!("Value 1: {:?}", counter.next());
println!("Value 2: {:?}", counter.next());
println!("Value 3: {:?}", counter.next());
println!("Value 4: {:?}", counter.next());
println!("Value 5: {:?}", counter.next());
}
|
random_line_split
|
|
main.rs
|
struct Counter {
count: u32,
}
impl Counter {
fn new() -> Counter
|
}
// define the iterator functionality
impl Iterator for Counter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
if self.count < 5 {
self.count += 1;
Some(self.count)
} else {
None
}
}
}
fn main() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
// use an iterator - this consumes the iterator
// v1_iter so v1_iter cannot be used again
for val in v1_iter {
println!("Value is: {}", val);
}
// after the sum() call, you can no longer use v2_iter
// because sum() consumes the iterator
let v2_iter = v1.iter();
let total: i32 = v2_iter.sum();
if total == 6 {
println!("Found total!");
} else {
println!("Something went wrong!");
}
// map() is lazily evaluated and doesn't do anything on its own
// we call collect() to evaluate the map() functionality/closure
let x1: Vec<i32> = vec![4, 5, 6];
let x2: Vec<_> = x1.iter().map(|z| z + 1).collect();
if x2 == vec![5, 6, 7] {
println!("collect() worked as expected!");
} else {
println!("Something went wrong with collect()!");
}
let mut counter = Counter::new();
println!("Value 1: {:?}", counter.next());
println!("Value 2: {:?}", counter.next());
println!("Value 3: {:?}", counter.next());
println!("Value 4: {:?}", counter.next());
println!("Value 5: {:?}", counter.next());
}
|
{
Counter { count: 0 }
}
|
identifier_body
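
The Counter records above drive the iterator only through direct `next()` calls. Because `Counter` implements `Iterator`, every standard adapter is available as well; the following extension reuses the same `Counter` definition and chains `zip`, `map`, `filter`, and `sum`, with the expected result of 18 worked out in the comment.

// Same Counter as in the records above, exercised through iterator adapters
// instead of manual next() calls.
struct Counter {
    count: u32,
}

impl Counter {
    fn new() -> Counter {
        Counter { count: 0 }
    }
}

impl Iterator for Counter {
    type Item = u32;

    fn next(&mut self) -> Option<Self::Item> {
        if self.count < 5 {
            self.count += 1;
            Some(self.count)
        } else {
            None
        }
    }
}

fn main() {
    // Pairs (1,2),(2,3),(3,4),(4,5) -> products 2,6,12,20 -> keep multiples of 3 -> 6 + 12.
    let sum: u32 = Counter::new()
        .zip(Counter::new().skip(1))
        .map(|(a, b)| a * b)
        .filter(|x| x % 3 == 0)
        .sum();
    assert_eq!(sum, 18);
    println!("sum of filtered products: {}", sum);
}
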
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::js::Root;
use dom::characterdata::CharacterDataHelpers;
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for &'a HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for &'a HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(*self);
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool)
|
}
|
{
let node = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
|
identifier_body
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::js::Root;
use dom::characterdata::CharacterDataHelpers;
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for &'a HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for &'a HTMLTitleElement {
fn
|
<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(*self);
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
super_type
|
identifier_name
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::js::Root;
use dom::characterdata::CharacterDataHelpers;
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for &'a HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for &'a HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type()
|
let node = NodeCast::from_ref(*self);
if node.is_in_doc() {
node.owner_doc().title_changed();
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
let node = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
{
s.children_changed(mutation);
}
|
conditional_block
|
htmltitleelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding;
use dom::bindings::codegen::Bindings::HTMLTitleElementBinding::HTMLTitleElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::codegen::InheritTypes::{HTMLElementCast, HTMLTitleElementDerived, NodeCast};
use dom::bindings::codegen::InheritTypes::{CharacterDataCast, TextCast};
use dom::bindings::js::Root;
use dom::characterdata::CharacterDataHelpers;
use dom::document::{Document, DocumentHelpers};
use dom::eventtarget::{EventTarget, EventTargetTypeId};
use dom::element::ElementTypeId;
use dom::htmlelement::{HTMLElement, HTMLElementTypeId};
use dom::node::{ChildrenMutation, Node, NodeHelpers, NodeTypeId};
use dom::text::Text;
use dom::virtualmethods::VirtualMethods;
use util::str::DOMString;
#[dom_struct]
pub struct HTMLTitleElement {
htmlelement: HTMLElement,
}
impl HTMLTitleElementDerived for EventTarget {
fn is_htmltitleelement(&self) -> bool {
*self.type_id() ==
EventTargetTypeId::Node(
NodeTypeId::Element(ElementTypeId::HTMLElement(HTMLElementTypeId::HTMLTitleElement)))
}
}
impl HTMLTitleElement {
fn new_inherited(localName: DOMString, prefix: Option<DOMString>, document: &Document) -> HTMLTitleElement {
HTMLTitleElement {
htmlelement: HTMLElement::new_inherited(HTMLElementTypeId::HTMLTitleElement, localName, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(localName: DOMString,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLTitleElement> {
let element = HTMLTitleElement::new_inherited(localName, prefix, document);
Node::reflect_node(box element, document, HTMLTitleElementBinding::Wrap)
}
}
impl<'a> HTMLTitleElementMethods for &'a HTMLTitleElement {
// https://www.whatwg.org/html/#dom-title-text
fn Text(self) -> DOMString {
let node = NodeCast::from_ref(self);
let mut content = String::new();
for child in node.children() {
let text: Option<&Text> = TextCast::to_ref(child.r());
match text {
Some(text) => content.push_str(&CharacterDataCast::from_ref(text).data()),
None => (),
}
}
content
}
// https://www.whatwg.org/html/#dom-title-text
fn SetText(self, value: DOMString) {
let node = NodeCast::from_ref(self);
node.SetTextContent(Some(value))
}
}
impl<'a> VirtualMethods for &'a HTMLTitleElement {
fn super_type<'b>(&'b self) -> Option<&'b VirtualMethods> {
let htmlelement: &&HTMLElement = HTMLElementCast::from_borrowed_ref(self);
Some(htmlelement as &VirtualMethods)
}
fn children_changed(&self, mutation: &ChildrenMutation) {
if let Some(ref s) = self.super_type() {
s.children_changed(mutation);
}
let node = NodeCast::from_ref(*self);
if node.is_in_doc() {
node.owner_doc().title_changed();
|
let node = NodeCast::from_ref(*self);
if is_in_doc {
let document = node.owner_doc();
document.r().title_changed();
}
}
}
|
}
}
fn bind_to_tree(&self, is_in_doc: bool) {
|
random_line_split
|
local.rs
|
// Copyright (c) 2013-2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use capnp::Error;
use capnp::traits::{Imbue, ImbueMut};
use capnp::capability::{self, Promise};
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp,
RequestHook, ResponseHook, ResultsHook};
use crate::attach::Attach;
use futures::Future;
use futures::sync::oneshot;
use std::cell::RefCell;
use std::rc::{Rc};
use std::mem;
pub trait ResultsDoneHook {
fn add_ref(&self) -> Box<dyn ResultsDoneHook>;
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>>;
}
impl Clone for Box<dyn ResultsDoneHook> {
fn clone(&self) -> Box<dyn ResultsDoneHook> {
self.add_ref()
}
}
pub struct Response {
results: Box<dyn ResultsDoneHook>
}
impl Response {
fn new(results: Box<dyn ResultsDoneHook>) -> Response {
Response {
results: results
}
}
}
impl ResponseHook for Response {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
self.results.get()
}
}
struct Params {
request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
impl Params {
fn new(request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>)
-> Params
{
Params {
request: request,
cap_table: cap_table,
}
}
}
impl ParamsHook for Params {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>>
|
}
struct Results {
message: Option<message::Builder<message::HeapAllocator>>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
results_done_fulfiller: Option<oneshot::Sender<Box<dyn ResultsDoneHook>>>,
}
impl Results {
fn new(fulfiller: oneshot::Sender<Box<dyn ResultsDoneHook>>) -> Results {
Results {
message: Some(::capnp::message::Builder::new_default()),
cap_table: Vec::new(),
results_done_fulfiller: Some(fulfiller),
}
}
}
impl Drop for Results {
fn drop(&mut self) {
if let (Some(message), Some(fulfiller)) = (self.message.take(), self.results_done_fulfiller.take()) {
let cap_table = mem::replace(&mut self.cap_table, Vec::new());
let _ = fulfiller.send(Box::new(ResultsDone::new(message, cap_table)));
} else {
unreachable!()
}
}
}
impl ResultsHook for Results {
fn get<'a>(&'a mut self) -> ::capnp::Result<any_pointer::Builder<'a>> {
match *self {
Results { message: Some(ref mut message), ref mut cap_table,.. } => {
let mut result: any_pointer::Builder = message.get_root()?;
result.imbue_mut(cap_table);
Ok(result)
}
_ => unreachable!(),
}
}
fn tail_call(self: Box<Self>, _request: Box<dyn RequestHook>) -> Promise<(), Error> {
unimplemented!()
}
fn direct_tail_call(self: Box<Self>, _request: Box<dyn RequestHook>)
-> (Promise<(), Error>, Box<dyn PipelineHook>)
{
unimplemented!()
}
fn allow_cancellation(&self) {
unimplemented!()
}
}
struct ResultsDoneInner {
message: ::capnp::message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
struct ResultsDone {
inner: Rc<ResultsDoneInner>,
}
impl ResultsDone {
fn new(message: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
)
-> ResultsDone
{
ResultsDone {
inner: Rc::new(ResultsDoneInner {
message: message,
cap_table: cap_table,
}),
}
}
}
impl ResultsDoneHook for ResultsDone {
fn add_ref(&self) -> Box<dyn ResultsDoneHook> {
Box::new(ResultsDone { inner: self.inner.clone() })
}
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.inner.message.get_root_as_reader()?;
result.imbue(&self.inner.cap_table);
Ok(result)
}
}
pub struct Request {
message: message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
interface_id: u64,
method_id: u16,
client: Box<dyn ClientHook>,
}
impl Request {
pub fn new(interface_id: u64, method_id: u16,
_size_hint: Option<::capnp::MessageSize>,
client: Box<dyn ClientHook>)
-> Request
{
Request {
message: message::Builder::new_default(),
cap_table: Vec::new(),
interface_id: interface_id,
method_id: method_id,
client: client,
}
}
}
impl RequestHook for Request {
fn get<'a>(&'a mut self) -> any_pointer::Builder<'a> {
let mut result: any_pointer::Builder = self.message.get_root().unwrap();
result.imbue_mut(&mut self.cap_table);
result
}
fn get_brand(&self) -> usize {
0
}
fn send<'a>(self: Box<Self>) -> capability::RemotePromise<any_pointer::Owned> {
let tmp = *self;
let Request { message, cap_table, interface_id, method_id, client } = tmp;
let params = Params::new(message, cap_table);
let (results_done_fulfiller, results_done_promise) = oneshot::channel::<Box<dyn ResultsDoneHook>>();
let results_done_promise = results_done_promise.map_err(|e| e.into());
let results = Results::new(results_done_fulfiller);
let promise = client.call(interface_id, method_id, Box::new(params), Box::new(results));
let (pipeline_sender, mut pipeline) = crate::queued::Pipeline::new();
let p = promise.join(results_done_promise).and_then(move |((), results_done_hook)| {
pipeline_sender.complete(Box::new(Pipeline::new(results_done_hook.add_ref())) as Box<dyn PipelineHook>);
Ok((capability::Response::new(Box::new(Response::new(results_done_hook))), ()))
});
let (left, right) = crate::split::split(p);
pipeline.drive(right);
let pipeline = any_pointer::Pipeline::new(Box::new(pipeline));
capability::RemotePromise {
promise: Promise::from_future(left),
pipeline: pipeline,
}
}
fn tail_send(self: Box<Self>)
-> Option<(u32, Promise<(), Error>, Box<dyn PipelineHook>)>
{
unimplemented!()
}
}
struct PipelineInner {
results: Box<dyn ResultsDoneHook>,
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new(results: Box<dyn ResultsDoneHook>) -> Pipeline {
Pipeline {
inner: Rc::new(RefCell::new(PipelineInner { results: results }))
}
}
}
impl Clone for Pipeline {
fn clone(&self) -> Pipeline {
Pipeline { inner: self.inner.clone() }
}
}
impl PipelineHook for Pipeline {
fn add_ref(&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
match self.inner.borrow_mut().results.get().unwrap().get_pipelined_cap(ops) {
Ok(v) => v,
Err(e) => Box::new(crate::broken::Client::new(e, true, 0)) as Box<dyn ClientHook>,
}
}
}
struct ClientInner {
server: Box<dyn capability::Server>,
}
pub struct Client {
inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(server: Box<dyn capability::Server>) -> Client {
Client {
inner: Rc::new(RefCell::new(ClientInner { server: server }))
}
}
}
impl Clone for Client {
fn clone(&self) -> Client {
Client { inner: self.inner.clone() }
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(self.clone())
}
fn new_call(&self, interface_id: u64, method_id: u16,
size_hint: Option<::capnp::MessageSize>)
-> capability::Request<any_pointer::Owned, any_pointer::Owned>
{
capability::Request::new(
Box::new(Request::new(interface_id, method_id, size_hint, self.add_ref())))
}
fn call(&self, interface_id: u64, method_id: u16, params: Box<dyn ParamsHook>, results: Box<dyn ResultsHook>)
-> Promise<(), Error>
{
// We don't want to actually dispatch the call synchronously, because we don't want the callee
// to have any side effects before the promise is returned to the caller. This helps avoid
// race conditions.
//
        // TODO: actually use some kind of queue here to guarantee that call order is maintained.
// This currently relies on the task scheduler being first-in-first-out.
let inner = self.inner.clone();
let promise = ::futures::future::lazy(move || {
let server = &mut inner.borrow_mut().server;
server.dispatch_call(interface_id, method_id,
::capnp::capability::Params::new(params),
::capnp::capability::Results::new(results))
}).attach(self.add_ref());
Promise::from_future(promise)
}
fn get_ptr(&self) -> usize {
        (&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
None
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
None
}
}
|
{
let mut result: any_pointer::Reader = self.request.get_root_as_reader()?;
result.imbue(&self.cap_table);
Ok(result)
}
|
identifier_body
|
local.rs
|
// Copyright (c) 2013-2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use capnp::Error;
use capnp::traits::{Imbue, ImbueMut};
use capnp::capability::{self, Promise};
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp,
RequestHook, ResponseHook, ResultsHook};
use crate::attach::Attach;
use futures::Future;
use futures::sync::oneshot;
use std::cell::RefCell;
use std::rc::{Rc};
use std::mem;
pub trait ResultsDoneHook {
fn add_ref(&self) -> Box<dyn ResultsDoneHook>;
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>>;
}
impl Clone for Box<dyn ResultsDoneHook> {
fn clone(&self) -> Box<dyn ResultsDoneHook> {
self.add_ref()
}
}
pub struct Response {
results: Box<dyn ResultsDoneHook>
}
impl Response {
fn new(results: Box<dyn ResultsDoneHook>) -> Response {
Response {
results: results
}
}
}
impl ResponseHook for Response {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
self.results.get()
}
}
struct Params {
request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
impl Params {
fn new(request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>)
-> Params
{
Params {
request: request,
cap_table: cap_table,
}
}
}
impl ParamsHook for Params {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.request.get_root_as_reader()?;
result.imbue(&self.cap_table);
Ok(result)
}
}
struct
|
{
message: Option<message::Builder<message::HeapAllocator>>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
results_done_fulfiller: Option<oneshot::Sender<Box<dyn ResultsDoneHook>>>,
}
impl Results {
fn new(fulfiller: oneshot::Sender<Box<dyn ResultsDoneHook>>) -> Results {
Results {
message: Some(::capnp::message::Builder::new_default()),
cap_table: Vec::new(),
results_done_fulfiller: Some(fulfiller),
}
}
}
impl Drop for Results {
fn drop(&mut self) {
if let (Some(message), Some(fulfiller)) = (self.message.take(), self.results_done_fulfiller.take()) {
let cap_table = mem::replace(&mut self.cap_table, Vec::new());
let _ = fulfiller.send(Box::new(ResultsDone::new(message, cap_table)));
} else {
unreachable!()
}
}
}
impl ResultsHook for Results {
fn get<'a>(&'a mut self) -> ::capnp::Result<any_pointer::Builder<'a>> {
match *self {
Results { message: Some(ref mut message), ref mut cap_table,.. } => {
let mut result: any_pointer::Builder = message.get_root()?;
result.imbue_mut(cap_table);
Ok(result)
}
_ => unreachable!(),
}
}
fn tail_call(self: Box<Self>, _request: Box<dyn RequestHook>) -> Promise<(), Error> {
unimplemented!()
}
fn direct_tail_call(self: Box<Self>, _request: Box<dyn RequestHook>)
-> (Promise<(), Error>, Box<dyn PipelineHook>)
{
unimplemented!()
}
fn allow_cancellation(&self) {
unimplemented!()
}
}
struct ResultsDoneInner {
message: ::capnp::message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
struct ResultsDone {
inner: Rc<ResultsDoneInner>,
}
impl ResultsDone {
fn new(message: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
)
-> ResultsDone
{
ResultsDone {
inner: Rc::new(ResultsDoneInner {
message: message,
cap_table: cap_table,
}),
}
}
}
impl ResultsDoneHook for ResultsDone {
fn add_ref(&self) -> Box<dyn ResultsDoneHook> {
Box::new(ResultsDone { inner: self.inner.clone() })
}
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.inner.message.get_root_as_reader()?;
result.imbue(&self.inner.cap_table);
Ok(result)
}
}
pub struct Request {
message: message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
interface_id: u64,
method_id: u16,
client: Box<dyn ClientHook>,
}
impl Request {
pub fn new(interface_id: u64, method_id: u16,
_size_hint: Option<::capnp::MessageSize>,
client: Box<dyn ClientHook>)
-> Request
{
Request {
message: message::Builder::new_default(),
cap_table: Vec::new(),
interface_id: interface_id,
method_id: method_id,
client: client,
}
}
}
impl RequestHook for Request {
fn get<'a>(&'a mut self) -> any_pointer::Builder<'a> {
let mut result: any_pointer::Builder = self.message.get_root().unwrap();
result.imbue_mut(&mut self.cap_table);
result
}
fn get_brand(&self) -> usize {
0
}
fn send<'a>(self: Box<Self>) -> capability::RemotePromise<any_pointer::Owned> {
let tmp = *self;
let Request { message, cap_table, interface_id, method_id, client } = tmp;
let params = Params::new(message, cap_table);
let (results_done_fulfiller, results_done_promise) = oneshot::channel::<Box<dyn ResultsDoneHook>>();
let results_done_promise = results_done_promise.map_err(|e| e.into());
let results = Results::new(results_done_fulfiller);
let promise = client.call(interface_id, method_id, Box::new(params), Box::new(results));
let (pipeline_sender, mut pipeline) = crate::queued::Pipeline::new();
let p = promise.join(results_done_promise).and_then(move |((), results_done_hook)| {
pipeline_sender.complete(Box::new(Pipeline::new(results_done_hook.add_ref())) as Box<dyn PipelineHook>);
Ok((capability::Response::new(Box::new(Response::new(results_done_hook))), ()))
});
let (left, right) = crate::split::split(p);
pipeline.drive(right);
let pipeline = any_pointer::Pipeline::new(Box::new(pipeline));
capability::RemotePromise {
promise: Promise::from_future(left),
pipeline: pipeline,
}
}
fn tail_send(self: Box<Self>)
-> Option<(u32, Promise<(), Error>, Box<dyn PipelineHook>)>
{
unimplemented!()
}
}
struct PipelineInner {
results: Box<dyn ResultsDoneHook>,
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new(results: Box<dyn ResultsDoneHook>) -> Pipeline {
Pipeline {
inner: Rc::new(RefCell::new(PipelineInner { results: results }))
}
}
}
impl Clone for Pipeline {
fn clone(&self) -> Pipeline {
Pipeline { inner: self.inner.clone() }
}
}
impl PipelineHook for Pipeline {
fn add_ref(&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
match self.inner.borrow_mut().results.get().unwrap().get_pipelined_cap(ops) {
Ok(v) => v,
Err(e) => Box::new(crate::broken::Client::new(e, true, 0)) as Box<dyn ClientHook>,
}
}
}
struct ClientInner {
server: Box<dyn capability::Server>,
}
pub struct Client {
inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(server: Box<dyn capability::Server>) -> Client {
Client {
inner: Rc::new(RefCell::new(ClientInner { server: server }))
}
}
}
impl Clone for Client {
fn clone(&self) -> Client {
Client { inner: self.inner.clone() }
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(self.clone())
}
fn new_call(&self, interface_id: u64, method_id: u16,
size_hint: Option<::capnp::MessageSize>)
-> capability::Request<any_pointer::Owned, any_pointer::Owned>
{
capability::Request::new(
Box::new(Request::new(interface_id, method_id, size_hint, self.add_ref())))
}
fn call(&self, interface_id: u64, method_id: u16, params: Box<dyn ParamsHook>, results: Box<dyn ResultsHook>)
-> Promise<(), Error>
{
// We don't want to actually dispatch the call synchronously, because we don't want the callee
// to have any side effects before the promise is returned to the caller. This helps avoid
// race conditions.
//
        // TODO: actually use some kind of queue here to guarantee that call order is maintained.
// This currently relies on the task scheduler being first-in-first-out.
let inner = self.inner.clone();
let promise = ::futures::future::lazy(move || {
let server = &mut inner.borrow_mut().server;
server.dispatch_call(interface_id, method_id,
::capnp::capability::Params::new(params),
::capnp::capability::Results::new(results))
}).attach(self.add_ref());
Promise::from_future(promise)
}
fn get_ptr(&self) -> usize {
        (&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
None
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
None
}
}
|
Results
|
identifier_name
|
local.rs
|
// Copyright (c) 2013-2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use capnp::Error;
use capnp::traits::{Imbue, ImbueMut};
use capnp::capability::{self, Promise};
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp,
RequestHook, ResponseHook, ResultsHook};
use crate::attach::Attach;
use futures::Future;
use futures::sync::oneshot;
use std::cell::RefCell;
use std::rc::{Rc};
use std::mem;
pub trait ResultsDoneHook {
fn add_ref(&self) -> Box<dyn ResultsDoneHook>;
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>>;
}
impl Clone for Box<dyn ResultsDoneHook> {
fn clone(&self) -> Box<dyn ResultsDoneHook> {
self.add_ref()
}
}
pub struct Response {
results: Box<dyn ResultsDoneHook>
}
impl Response {
fn new(results: Box<dyn ResultsDoneHook>) -> Response {
Response {
results: results
}
}
}
impl ResponseHook for Response {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
self.results.get()
}
}
struct Params {
request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
impl Params {
fn new(request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>)
-> Params
{
Params {
request: request,
cap_table: cap_table,
}
}
}
impl ParamsHook for Params {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.request.get_root_as_reader()?;
result.imbue(&self.cap_table);
Ok(result)
}
}
struct Results {
message: Option<message::Builder<message::HeapAllocator>>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
results_done_fulfiller: Option<oneshot::Sender<Box<dyn ResultsDoneHook>>>,
}
impl Results {
fn new(fulfiller: oneshot::Sender<Box<dyn ResultsDoneHook>>) -> Results {
Results {
message: Some(::capnp::message::Builder::new_default()),
cap_table: Vec::new(),
results_done_fulfiller: Some(fulfiller),
}
}
}
impl Drop for Results {
fn drop(&mut self) {
if let (Some(message), Some(fulfiller)) = (self.message.take(), self.results_done_fulfiller.take()) {
let cap_table = mem::replace(&mut self.cap_table, Vec::new());
let _ = fulfiller.send(Box::new(ResultsDone::new(message, cap_table)));
} else {
unreachable!()
}
}
}
impl ResultsHook for Results {
fn get<'a>(&'a mut self) -> ::capnp::Result<any_pointer::Builder<'a>> {
match *self {
Results { message: Some(ref mut message), ref mut cap_table,.. } => {
let mut result: any_pointer::Builder = message.get_root()?;
result.imbue_mut(cap_table);
Ok(result)
}
_ => unreachable!(),
}
}
fn tail_call(self: Box<Self>, _request: Box<dyn RequestHook>) -> Promise<(), Error> {
unimplemented!()
}
fn direct_tail_call(self: Box<Self>, _request: Box<dyn RequestHook>)
-> (Promise<(), Error>, Box<dyn PipelineHook>)
{
unimplemented!()
}
fn allow_cancellation(&self) {
unimplemented!()
}
}
struct ResultsDoneInner {
message: ::capnp::message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
struct ResultsDone {
inner: Rc<ResultsDoneInner>,
}
impl ResultsDone {
fn new(message: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
)
-> ResultsDone
{
ResultsDone {
inner: Rc::new(ResultsDoneInner {
message: message,
cap_table: cap_table,
}),
}
}
}
impl ResultsDoneHook for ResultsDone {
fn add_ref(&self) -> Box<dyn ResultsDoneHook> {
Box::new(ResultsDone { inner: self.inner.clone() })
}
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.inner.message.get_root_as_reader()?;
result.imbue(&self.inner.cap_table);
Ok(result)
}
}
pub struct Request {
message: message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
interface_id: u64,
method_id: u16,
client: Box<dyn ClientHook>,
}
impl Request {
pub fn new(interface_id: u64, method_id: u16,
_size_hint: Option<::capnp::MessageSize>,
client: Box<dyn ClientHook>)
-> Request
{
Request {
message: message::Builder::new_default(),
cap_table: Vec::new(),
interface_id: interface_id,
method_id: method_id,
client: client,
}
}
}
impl RequestHook for Request {
fn get<'a>(&'a mut self) -> any_pointer::Builder<'a> {
let mut result: any_pointer::Builder = self.message.get_root().unwrap();
result.imbue_mut(&mut self.cap_table);
result
}
fn get_brand(&self) -> usize {
0
}
fn send<'a>(self: Box<Self>) -> capability::RemotePromise<any_pointer::Owned> {
let tmp = *self;
let Request { message, cap_table, interface_id, method_id, client } = tmp;
let params = Params::new(message, cap_table);
let (results_done_fulfiller, results_done_promise) = oneshot::channel::<Box<dyn ResultsDoneHook>>();
let results_done_promise = results_done_promise.map_err(|e| e.into());
let results = Results::new(results_done_fulfiller);
let promise = client.call(interface_id, method_id, Box::new(params), Box::new(results));
let (pipeline_sender, mut pipeline) = crate::queued::Pipeline::new();
let p = promise.join(results_done_promise).and_then(move |((), results_done_hook)| {
pipeline_sender.complete(Box::new(Pipeline::new(results_done_hook.add_ref())) as Box<dyn PipelineHook>);
Ok((capability::Response::new(Box::new(Response::new(results_done_hook))), ()))
});
let (left, right) = crate::split::split(p);
pipeline.drive(right);
let pipeline = any_pointer::Pipeline::new(Box::new(pipeline));
capability::RemotePromise {
promise: Promise::from_future(left),
pipeline: pipeline,
}
}
fn tail_send(self: Box<Self>)
-> Option<(u32, Promise<(), Error>, Box<dyn PipelineHook>)>
{
unimplemented!()
}
}
struct PipelineInner {
results: Box<dyn ResultsDoneHook>,
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new(results: Box<dyn ResultsDoneHook>) -> Pipeline {
Pipeline {
inner: Rc::new(RefCell::new(PipelineInner { results: results }))
|
}
}
}
impl Clone for Pipeline {
fn clone(&self) -> Pipeline {
Pipeline { inner: self.inner.clone() }
}
}
impl PipelineHook for Pipeline {
fn add_ref(&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
match self.inner.borrow_mut().results.get().unwrap().get_pipelined_cap(ops) {
Ok(v) => v,
Err(e) => Box::new(crate::broken::Client::new(e, true, 0)) as Box<dyn ClientHook>,
}
}
}
struct ClientInner {
server: Box<dyn capability::Server>,
}
pub struct Client {
inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(server: Box<dyn capability::Server>) -> Client {
Client {
inner: Rc::new(RefCell::new(ClientInner { server: server }))
}
}
}
impl Clone for Client {
fn clone(&self) -> Client {
Client { inner: self.inner.clone() }
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(self.clone())
}
fn new_call(&self, interface_id: u64, method_id: u16,
size_hint: Option<::capnp::MessageSize>)
-> capability::Request<any_pointer::Owned, any_pointer::Owned>
{
capability::Request::new(
Box::new(Request::new(interface_id, method_id, size_hint, self.add_ref())))
}
fn call(&self, interface_id: u64, method_id: u16, params: Box<dyn ParamsHook>, results: Box<dyn ResultsHook>)
-> Promise<(), Error>
{
// We don't want to actually dispatch the call synchronously, because we don't want the callee
// to have any side effects before the promise is returned to the caller. This helps avoid
// race conditions.
//
        // TODO: actually use some kind of queue here to guarantee that call order is maintained.
// This currently relies on the task scheduler being first-in-first-out.
let inner = self.inner.clone();
let promise = ::futures::future::lazy(move || {
let server = &mut inner.borrow_mut().server;
server.dispatch_call(interface_id, method_id,
::capnp::capability::Params::new(params),
::capnp::capability::Results::new(results))
}).attach(self.add_ref());
Promise::from_future(promise)
}
fn get_ptr(&self) -> usize {
        (&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
None
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
None
}
}
|
random_line_split
|
|
local.rs
|
// Copyright (c) 2013-2017 Sandstorm Development Group, Inc. and contributors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::{any_pointer, message};
use capnp::Error;
use capnp::traits::{Imbue, ImbueMut};
use capnp::capability::{self, Promise};
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp,
RequestHook, ResponseHook, ResultsHook};
use crate::attach::Attach;
use futures::Future;
use futures::sync::oneshot;
use std::cell::RefCell;
use std::rc::{Rc};
use std::mem;
pub trait ResultsDoneHook {
fn add_ref(&self) -> Box<dyn ResultsDoneHook>;
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>>;
}
impl Clone for Box<dyn ResultsDoneHook> {
fn clone(&self) -> Box<dyn ResultsDoneHook> {
self.add_ref()
}
}
pub struct Response {
results: Box<dyn ResultsDoneHook>
}
impl Response {
fn new(results: Box<dyn ResultsDoneHook>) -> Response {
Response {
results: results
}
}
}
impl ResponseHook for Response {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
self.results.get()
}
}
struct Params {
request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
impl Params {
fn new(request: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>)
-> Params
{
Params {
request: request,
cap_table: cap_table,
}
}
}
impl ParamsHook for Params {
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.request.get_root_as_reader()?;
result.imbue(&self.cap_table);
Ok(result)
}
}
struct Results {
message: Option<message::Builder<message::HeapAllocator>>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
results_done_fulfiller: Option<oneshot::Sender<Box<dyn ResultsDoneHook>>>,
}
impl Results {
fn new(fulfiller: oneshot::Sender<Box<dyn ResultsDoneHook>>) -> Results {
Results {
message: Some(::capnp::message::Builder::new_default()),
cap_table: Vec::new(),
results_done_fulfiller: Some(fulfiller),
}
}
}
impl Drop for Results {
fn drop(&mut self) {
if let (Some(message), Some(fulfiller)) = (self.message.take(), self.results_done_fulfiller.take())
|
else {
unreachable!()
}
}
}
impl ResultsHook for Results {
fn get<'a>(&'a mut self) -> ::capnp::Result<any_pointer::Builder<'a>> {
match *self {
Results { message: Some(ref mut message), ref mut cap_table,.. } => {
let mut result: any_pointer::Builder = message.get_root()?;
result.imbue_mut(cap_table);
Ok(result)
}
_ => unreachable!(),
}
}
fn tail_call(self: Box<Self>, _request: Box<dyn RequestHook>) -> Promise<(), Error> {
unimplemented!()
}
fn direct_tail_call(self: Box<Self>, _request: Box<dyn RequestHook>)
-> (Promise<(), Error>, Box<dyn PipelineHook>)
{
unimplemented!()
}
fn allow_cancellation(&self) {
unimplemented!()
}
}
struct ResultsDoneInner {
message: ::capnp::message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
}
struct ResultsDone {
inner: Rc<ResultsDoneInner>,
}
impl ResultsDone {
fn new(message: message::Builder<message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
)
-> ResultsDone
{
ResultsDone {
inner: Rc::new(ResultsDoneInner {
message: message,
cap_table: cap_table,
}),
}
}
}
impl ResultsDoneHook for ResultsDone {
fn add_ref(&self) -> Box<dyn ResultsDoneHook> {
Box::new(ResultsDone { inner: self.inner.clone() })
}
fn get<'a>(&'a self) -> ::capnp::Result<any_pointer::Reader<'a>> {
let mut result: any_pointer::Reader = self.inner.message.get_root_as_reader()?;
result.imbue(&self.inner.cap_table);
Ok(result)
}
}
pub struct Request {
message: message::Builder<::capnp::message::HeapAllocator>,
cap_table: Vec<Option<Box<dyn ClientHook>>>,
interface_id: u64,
method_id: u16,
client: Box<dyn ClientHook>,
}
impl Request {
pub fn new(interface_id: u64, method_id: u16,
_size_hint: Option<::capnp::MessageSize>,
client: Box<dyn ClientHook>)
-> Request
{
Request {
message: message::Builder::new_default(),
cap_table: Vec::new(),
interface_id: interface_id,
method_id: method_id,
client: client,
}
}
}
impl RequestHook for Request {
fn get<'a>(&'a mut self) -> any_pointer::Builder<'a> {
let mut result: any_pointer::Builder = self.message.get_root().unwrap();
result.imbue_mut(&mut self.cap_table);
result
}
fn get_brand(&self) -> usize {
0
}
fn send<'a>(self: Box<Self>) -> capability::RemotePromise<any_pointer::Owned> {
let tmp = *self;
let Request { message, cap_table, interface_id, method_id, client } = tmp;
let params = Params::new(message, cap_table);
let (results_done_fulfiller, results_done_promise) = oneshot::channel::<Box<dyn ResultsDoneHook>>();
let results_done_promise = results_done_promise.map_err(|e| e.into());
let results = Results::new(results_done_fulfiller);
let promise = client.call(interface_id, method_id, Box::new(params), Box::new(results));
let (pipeline_sender, mut pipeline) = crate::queued::Pipeline::new();
let p = promise.join(results_done_promise).and_then(move |((), results_done_hook)| {
pipeline_sender.complete(Box::new(Pipeline::new(results_done_hook.add_ref())) as Box<dyn PipelineHook>);
Ok((capability::Response::new(Box::new(Response::new(results_done_hook))), ()))
});
let (left, right) = crate::split::split(p);
pipeline.drive(right);
let pipeline = any_pointer::Pipeline::new(Box::new(pipeline));
capability::RemotePromise {
promise: Promise::from_future(left),
pipeline: pipeline,
}
}
fn tail_send(self: Box<Self>)
-> Option<(u32, Promise<(), Error>, Box<dyn PipelineHook>)>
{
unimplemented!()
}
}
struct PipelineInner {
results: Box<dyn ResultsDoneHook>,
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new(results: Box<dyn ResultsDoneHook>) -> Pipeline {
Pipeline {
inner: Rc::new(RefCell::new(PipelineInner { results: results }))
}
}
}
impl Clone for Pipeline {
fn clone(&self) -> Pipeline {
Pipeline { inner: self.inner.clone() }
}
}
impl PipelineHook for Pipeline {
fn add_ref(&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
match self.inner.borrow_mut().results.get().unwrap().get_pipelined_cap(ops) {
Ok(v) => v,
Err(e) => Box::new(crate::broken::Client::new(e, true, 0)) as Box<dyn ClientHook>,
}
}
}
struct ClientInner {
server: Box<dyn capability::Server>,
}
pub struct Client {
inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(server: Box<dyn capability::Server>) -> Client {
Client {
inner: Rc::new(RefCell::new(ClientInner { server: server }))
}
}
}
impl Clone for Client {
fn clone(&self) -> Client {
Client { inner: self.inner.clone() }
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(self.clone())
}
fn new_call(&self, interface_id: u64, method_id: u16,
size_hint: Option<::capnp::MessageSize>)
-> capability::Request<any_pointer::Owned, any_pointer::Owned>
{
capability::Request::new(
Box::new(Request::new(interface_id, method_id, size_hint, self.add_ref())))
}
fn call(&self, interface_id: u64, method_id: u16, params: Box<dyn ParamsHook>, results: Box<dyn ResultsHook>)
-> Promise<(), Error>
{
// We don't want to actually dispatch the call synchronously, because we don't want the callee
// to have any side effects before the promise is returned to the caller. This helps avoid
// race conditions.
//
        // TODO: actually use some kind of queue here to guarantee that call order is maintained.
// This currently relies on the task scheduler being first-in-first-out.
let inner = self.inner.clone();
let promise = ::futures::future::lazy(move || {
let server = &mut inner.borrow_mut().server;
server.dispatch_call(interface_id, method_id,
::capnp::capability::Params::new(params),
::capnp::capability::Results::new(results))
}).attach(self.add_ref());
Promise::from_future(promise)
}
fn get_ptr(&self) -> usize {
        (&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
None
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
None
}
}
|
{
let cap_table = mem::replace(&mut self.cap_table, Vec::new());
let _ = fulfiller.send(Box::new(ResultsDone::new(message, cap_table)));
}
|
conditional_block
|
cargo_clean.rs
|
use crate::core::InternedString;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use crate::core::compiler::unit_dependencies;
use crate::core::compiler::{BuildConfig, BuildContext, CompileKind, CompileMode, Context};
use crate::core::compiler::{RustcTargetData, UnitInterner};
use crate::core::profiles::{Profiles, UnitFor};
use crate::core::resolver::features::HasDevUnits;
use crate::core::resolver::ResolveOpts;
use crate::core::{PackageIdSpec, Workspace};
use crate::ops;
use crate::ops::resolve::WorkspaceResolve;
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::paths;
use crate::util::Config;
pub struct CleanOptions<'a> {
pub config: &'a Config,
/// A list of packages to clean. If empty, everything is cleaned.
pub spec: Vec<String>,
/// The target arch triple to clean, or None for the host arch
pub target: Option<String>,
/// Whether to clean the release directory
pub profile_specified: bool,
/// Whether to clean the directory of a certain build profile
pub requested_profile: InternedString,
/// Whether to just clean the doc directory
pub doc: bool,
}
/// Cleans the package's build artifacts.
pub fn clean(ws: &Workspace<'_>, opts: &CleanOptions<'_>) -> CargoResult<()> {
let mut target_dir = ws.target_dir();
let config = ws.config();
// If the doc option is set, we just want to delete the doc directory.
if opts.doc {
target_dir = target_dir.join("doc");
return rm_rf(&target_dir.into_path_unlocked(), config);
}
let profiles = Profiles::new(ws.profiles(), config, opts.requested_profile, ws.features())?;
if opts.profile_specified
|
// If we have a spec, then we need to delete some packages, otherwise, just
// remove the whole target directory and be done with it!
//
// Note that we don't bother grabbing a lock here as we're just going to
// blow it all away anyway.
if opts.spec.is_empty() {
return rm_rf(&target_dir.into_path_unlocked(), config);
}
let mut build_config = BuildConfig::new(config, Some(1), &opts.target, CompileMode::Build)?;
build_config.requested_profile = opts.requested_profile;
let target_data = RustcTargetData::new(ws, build_config.requested_kind)?;
let resolve_opts = ResolveOpts::everything();
let specs = opts
.spec
.iter()
.map(|spec| PackageIdSpec::parse(spec))
.collect::<CargoResult<Vec<_>>>()?;
let ws_resolve = ops::resolve_ws_with_opts(
ws,
&target_data,
build_config.requested_kind,
&resolve_opts,
&specs,
HasDevUnits::Yes,
)?;
let WorkspaceResolve {
pkg_set,
targeted_resolve: resolve,
resolved_features: features,
..
} = ws_resolve;
let interner = UnitInterner::new();
let bcx = BuildContext::new(
ws,
&pkg_set,
opts.config,
&build_config,
profiles,
&interner,
HashMap::new(),
target_data,
)?;
let mut units = Vec::new();
for spec in opts.spec.iter() {
// Translate the spec to a Package
let pkgid = resolve.query(spec)?;
let pkg = pkg_set.get_one(pkgid)?;
// Generate all relevant `Unit` targets for this package
for target in pkg.targets() {
for kind in [CompileKind::Host, build_config.requested_kind].iter() {
for mode in CompileMode::all_modes() {
for unit_for in UnitFor::all_values() {
let profile = if mode.is_run_custom_build() {
bcx.profiles
.get_profile_run_custom_build(&bcx.profiles.get_profile(
pkg.package_id(),
ws.is_member(pkg),
*unit_for,
CompileMode::Build,
))
} else {
bcx.profiles.get_profile(
pkg.package_id(),
ws.is_member(pkg),
*unit_for,
*mode,
)
};
// Use unverified here since this is being more
// exhaustive than what is actually needed.
let features_for = unit_for.map_to_features_for();
let features =
features.activated_features_unverified(pkg.package_id(), features_for);
units.push(bcx.units.intern(
pkg, target, profile, *kind, *mode, features, /*is_std*/ false,
));
}
}
}
}
}
let unit_dependencies =
unit_dependencies::build_unit_dependencies(&bcx, &resolve, &features, None, &units, &[])?;
let mut cx = Context::new(config, &bcx, unit_dependencies, build_config.requested_kind)?;
cx.prepare_units(None, &units)?;
for unit in units.iter() {
if unit.mode.is_doc() || unit.mode.is_doc_test() {
// Cleaning individual rustdoc crates is currently not supported.
// For example, the search index would need to be rebuilt to fully
// remove it (otherwise you're left with lots of broken links).
// Doc tests produce no output.
continue;
}
rm_rf(&cx.files().fingerprint_dir(unit), config)?;
if unit.target.is_custom_build() {
if unit.mode.is_run_custom_build() {
rm_rf(&cx.files().build_script_out_dir(unit), config)?;
} else {
rm_rf(&cx.files().build_script_dir(unit), config)?;
}
continue;
}
for output in cx.outputs(unit)?.iter() {
rm_rf(&output.path, config)?;
if let Some(ref dst) = output.hardlink {
rm_rf(dst, config)?;
}
}
}
Ok(())
}
fn rm_rf(path: &Path, config: &Config) -> CargoResult<()> {
let m = fs::metadata(path);
if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) {
config
.shell()
.verbose(|shell| shell.status("Removing", path.display()))?;
paths::remove_dir_all(path)
.chain_err(|| anyhow::format_err!("could not remove build directory"))?;
} else if m.is_ok() {
config
.shell()
.verbose(|shell| shell.status("Removing", path.display()))?;
paths::remove_file(path)
.chain_err(|| anyhow::format_err!("failed to remove build artifact"))?;
}
Ok(())
}
|
{
// After parsing profiles we know the dir-name of the profile, if a profile
// was passed from the command line. If so, delete only the directory of
// that profile.
let dir_name = profiles.get_dir_name();
target_dir = target_dir.join(dir_name);
}
|
conditional_block
|
cargo_clean.rs
|
use crate::core::InternedString;
use std::collections::HashMap;
use std::fs;
use std::path::Path;
use crate::core::compiler::unit_dependencies;
use crate::core::compiler::{BuildConfig, BuildContext, CompileKind, CompileMode, Context};
use crate::core::compiler::{RustcTargetData, UnitInterner};
use crate::core::profiles::{Profiles, UnitFor};
use crate::core::resolver::features::HasDevUnits;
use crate::core::resolver::ResolveOpts;
use crate::core::{PackageIdSpec, Workspace};
use crate::ops;
use crate::ops::resolve::WorkspaceResolve;
use crate::util::errors::{CargoResult, CargoResultExt};
use crate::util::paths;
use crate::util::Config;
pub struct CleanOptions<'a> {
pub config: &'a Config,
/// A list of packages to clean. If empty, everything is cleaned.
pub spec: Vec<String>,
/// The target arch triple to clean, or None for the host arch
pub target: Option<String>,
/// Whether to clean the release directory
pub profile_specified: bool,
/// Whether to clean the directory of a certain build profile
pub requested_profile: InternedString,
/// Whether to just clean the doc directory
pub doc: bool,
}
/// Cleans the package's build artifacts.
pub fn clean(ws: &Workspace<'_>, opts: &CleanOptions<'_>) -> CargoResult<()> {
let mut target_dir = ws.target_dir();
let config = ws.config();
// If the doc option is set, we just want to delete the doc directory.
if opts.doc {
target_dir = target_dir.join("doc");
return rm_rf(&target_dir.into_path_unlocked(), config);
}
let profiles = Profiles::new(ws.profiles(), config, opts.requested_profile, ws.features())?;
if opts.profile_specified {
// After parsing profiles we know the dir-name of the profile, if a profile
// was passed from the command line. If so, delete only the directory of
// that profile.
let dir_name = profiles.get_dir_name();
target_dir = target_dir.join(dir_name);
}
// If we have a spec, then we need to delete some packages, otherwise, just
// remove the whole target directory and be done with it!
//
// Note that we don't bother grabbing a lock here as we're just going to
// blow it all away anyway.
if opts.spec.is_empty() {
return rm_rf(&target_dir.into_path_unlocked(), config);
}
let mut build_config = BuildConfig::new(config, Some(1), &opts.target, CompileMode::Build)?;
build_config.requested_profile = opts.requested_profile;
let target_data = RustcTargetData::new(ws, build_config.requested_kind)?;
let resolve_opts = ResolveOpts::everything();
let specs = opts
.spec
.iter()
.map(|spec| PackageIdSpec::parse(spec))
.collect::<CargoResult<Vec<_>>>()?;
let ws_resolve = ops::resolve_ws_with_opts(
ws,
&target_data,
build_config.requested_kind,
&resolve_opts,
&specs,
HasDevUnits::Yes,
)?;
let WorkspaceResolve {
pkg_set,
targeted_resolve: resolve,
resolved_features: features,
..
} = ws_resolve;
let interner = UnitInterner::new();
let bcx = BuildContext::new(
ws,
&pkg_set,
opts.config,
&build_config,
profiles,
&interner,
HashMap::new(),
target_data,
)?;
let mut units = Vec::new();
for spec in opts.spec.iter() {
// Translate the spec to a Package
let pkgid = resolve.query(spec)?;
let pkg = pkg_set.get_one(pkgid)?;
// Generate all relevant `Unit` targets for this package
for target in pkg.targets() {
for kind in [CompileKind::Host, build_config.requested_kind].iter() {
for mode in CompileMode::all_modes() {
for unit_for in UnitFor::all_values() {
let profile = if mode.is_run_custom_build() {
bcx.profiles
.get_profile_run_custom_build(&bcx.profiles.get_profile(
pkg.package_id(),
ws.is_member(pkg),
*unit_for,
CompileMode::Build,
))
} else {
bcx.profiles.get_profile(
pkg.package_id(),
ws.is_member(pkg),
*unit_for,
*mode,
)
};
// Use unverified here since this is being more
// exhaustive than what is actually needed.
let features_for = unit_for.map_to_features_for();
let features =
features.activated_features_unverified(pkg.package_id(), features_for);
units.push(bcx.units.intern(
pkg, target, profile, *kind, *mode, features, /*is_std*/ false,
));
}
}
}
}
}
let unit_dependencies =
unit_dependencies::build_unit_dependencies(&bcx, &resolve, &features, None, &units, &[])?;
let mut cx = Context::new(config, &bcx, unit_dependencies, build_config.requested_kind)?;
cx.prepare_units(None, &units)?;
for unit in units.iter() {
if unit.mode.is_doc() || unit.mode.is_doc_test() {
// Cleaning individual rustdoc crates is currently not supported.
// For example, the search index would need to be rebuilt to fully
// remove it (otherwise you're left with lots of broken links).
// Doc tests produce no output.
continue;
}
rm_rf(&cx.files().fingerprint_dir(unit), config)?;
if unit.target.is_custom_build() {
if unit.mode.is_run_custom_build() {
rm_rf(&cx.files().build_script_out_dir(unit), config)?;
} else {
rm_rf(&cx.files().build_script_dir(unit), config)?;
}
continue;
}
for output in cx.outputs(unit)?.iter() {
rm_rf(&output.path, config)?;
if let Some(ref dst) = output.hardlink {
rm_rf(dst, config)?;
}
}
}
Ok(())
}
fn rm_rf(path: &Path, config: &Config) -> CargoResult<()>
|
{
let m = fs::metadata(path);
if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) {
config
.shell()
.verbose(|shell| shell.status("Removing", path.display()))?;
paths::remove_dir_all(path)
.chain_err(|| anyhow::format_err!("could not remove build directory"))?;
} else if m.is_ok() {
config
.shell()
.verbose(|shell| shell.status("Removing", path.display()))?;
paths::remove_file(path)
.chain_err(|| anyhow::format_err!("failed to remove build artifact"))?;
}
Ok(())
}
|
identifier_body
|