file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
data.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use construct::ConstructionResult;
use script_layout_interface::restyle_damage::RestyleDamage;
use style::data::PersistentStyleData;
/// Data that layout associates with a node.
pub struct
|
{
/// Data that the style system associates with a node. When the
/// style system is being used standalone, this is all that hangs
/// off the node. This must be first to permit the various
/// transmutations between PersistentStyleData and PersistentLayoutData.
pub style_data: PersistentStyleData,
/// Description of how to account for recent style changes.
pub restyle_damage: RestyleDamage,
/// The current results of flow construction for this node. This is either a
/// flow or a `ConstructionItem`. See comments in `construct.rs` for more
/// details.
pub flow_construction_result: ConstructionResult,
pub before_flow_construction_result: ConstructionResult,
pub after_flow_construction_result: ConstructionResult,
pub details_summary_flow_construction_result: ConstructionResult,
pub details_content_flow_construction_result: ConstructionResult,
/// Various flags.
pub flags: LayoutDataFlags,
}
impl PersistentLayoutData {
/// Creates new layout data.
pub fn new() -> PersistentLayoutData {
PersistentLayoutData {
style_data: PersistentStyleData::new(),
restyle_damage: RestyleDamage::empty(),
flow_construction_result: ConstructionResult::None,
before_flow_construction_result: ConstructionResult::None,
after_flow_construction_result: ConstructionResult::None,
details_summary_flow_construction_result: ConstructionResult::None,
details_content_flow_construction_result: ConstructionResult::None,
flags: LayoutDataFlags::empty(),
}
}
}
bitflags! {
pub flags LayoutDataFlags: u8 {
#[doc = "Whether a flow has been newly constructed."]
const HAS_NEWLY_CONSTRUCTED_FLOW = 0x01
}
}
|
PersistentLayoutData
|
identifier_name
|
ktestq.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn
|
() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTQ, operand1: Some(Direct(K7)), operand2: Some(Direct(K6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 225, 248, 153, 254], OperandSize::Dword)
}
fn ktestq_2() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTQ, operand1: Some(Direct(K5)), operand2: Some(Direct(K6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 225, 248, 153, 238], OperandSize::Qword)
}
|
ktestq_1
|
identifier_name
|
ktestq.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn ktestq_1() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTQ, operand1: Some(Direct(K7)), operand2: Some(Direct(K6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 225, 248, 153, 254], OperandSize::Dword)
|
}
|
}
fn ktestq_2() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTQ, operand1: Some(Direct(K5)), operand2: Some(Direct(K6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 225, 248, 153, 238], OperandSize::Qword)
|
random_line_split
|
ktestq.rs
|
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
fn ktestq_1()
|
fn ktestq_2() {
run_test(&Instruction { mnemonic: Mnemonic::KTESTQ, operand1: Some(Direct(K5)), operand2: Some(Direct(K6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 225, 248, 153, 238], OperandSize::Qword)
}
|
{
run_test(&Instruction { mnemonic: Mnemonic::KTESTQ, operand1: Some(Direct(K7)), operand2: Some(Direct(K6)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[196, 225, 248, 153, 254], OperandSize::Dword)
}
|
identifier_body
|
rustup-init.rs
|
//! The main rustup commandline application
//!
//! The rustup binary is a chimera, changing its behavior based on the
//! name of the binary. This is used most prominently to enable
//! rustup's tool 'proxies' - that is, rustup itself and the rustup
//! proxies are the same binary; when the binary is called 'rustup' or
//! 'rustup.exe' rustup behaves like the rustup commandline
//! application; when it is called 'rustc' it behaves as a proxy to
//! 'rustc'.
//!
//! This scheme is further used to distinguish the rustup installer,
//! called 'rustup-init' which is again just the rustup binary under a
//! different name.
#![recursion_limit = "1024"]
use std::path::PathBuf;
use rs_tracing::*;
use rustup::cli::common;
use rustup::cli::errors::*;
use rustup::cli::proxy_mode;
use rustup::cli::rustup_mode;
use rustup::cli::self_update;
use rustup::cli::setup_mode;
use rustup::currentprocess::{process, with, OSProcess};
use rustup::env_var::RUST_RECURSION_COUNT_MAX;
use rustup::utils::utils;
fn main() {
let process = OSProcess::default();
with(Box::new(process), || match run_rustup() {
Err(ref e) => {
common::report_error(e);
std::process::exit(1);
}
Ok(utils::ExitCode(c)) => std::process::exit(c),
});
}
fn run_rustup() -> Result<utils::ExitCode> {
if let Ok(dir) = process().var("RUSTUP_TRACE_DIR") {
open_trace_file!(dir)?;
}
let result = run_rustup_inner();
if process().var("RUSTUP_TRACE_DIR").is_ok() {
close_trace_file!();
}
result
}
fn run_rustup_inner() -> Result<utils::ExitCode> {
// Guard against infinite proxy recursion. This mostly happens due to
// bugs in rustup.
do_recursion_guard()?;
// Before we do anything else, ensure we know where we are and who we
// are because otherwise we cannot proceed usefully.
process().current_dir()?;
utils::current_exe()?;
// The name of arg0 determines how the program is going to behave
let arg0 = match process().var("RUSTUP_FORCE_ARG0") {
Ok(v) => Some(v),
Err(_) => process().args().next(),
}
.map(PathBuf::from);
let name = arg0
.as_ref()
.and_then(|a| a.file_stem())
.and_then(std::ffi::OsStr::to_str);
match name {
Some("rustup") => rustup_mode::main(),
Some(n) if n.starts_with("rustup-setup") || n.starts_with("rustup-init") =>
|
Some(n) if n.starts_with("rustup-gc-") => {
// This is the final uninstallation stage on windows where
// rustup deletes its own exe
self_update::complete_windows_uninstall()
}
Some(_) => proxy_mode::main(),
None => {
// Weird case. No arg0, or it's unparsable.
Err(ErrorKind::NoExeName.into())
}
}
}
fn do_recursion_guard() -> Result<()> {
let recursion_count = process()
.var("RUST_RECURSION_COUNT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(0);
if recursion_count > RUST_RECURSION_COUNT_MAX {
return Err(ErrorKind::InfiniteRecursion.into());
}
Ok(())
}
|
{
// NB: The above check is only for the prefix of the file
// name. Browsers rename duplicates to
// e.g. rustup-setup(2), and this allows all variations
// to work.
setup_mode::main()
}
|
conditional_block
|
rustup-init.rs
|
//! The main rustup commandline application
//!
//! The rustup binary is a chimera, changing its behavior based on the
//! name of the binary. This is used most prominently to enable
//! rustup's tool 'proxies' - that is, rustup itself and the rustup
//! proxies are the same binary; when the binary is called 'rustup' or
//! 'rustup.exe' rustup behaves like the rustup commandline
//! application; when it is called 'rustc' it behaves as a proxy to
//! 'rustc'.
//!
//! This scheme is further used to distinguish the rustup installer,
//! called 'rustup-init' which is again just the rustup binary under a
//! different name.
#![recursion_limit = "1024"]
use std::path::PathBuf;
use rs_tracing::*;
use rustup::cli::common;
use rustup::cli::errors::*;
use rustup::cli::proxy_mode;
use rustup::cli::rustup_mode;
use rustup::cli::self_update;
use rustup::cli::setup_mode;
use rustup::currentprocess::{process, with, OSProcess};
use rustup::env_var::RUST_RECURSION_COUNT_MAX;
use rustup::utils::utils;
fn main() {
let process = OSProcess::default();
with(Box::new(process), || match run_rustup() {
Err(ref e) => {
common::report_error(e);
std::process::exit(1);
}
Ok(utils::ExitCode(c)) => std::process::exit(c),
});
}
fn
|
() -> Result<utils::ExitCode> {
if let Ok(dir) = process().var("RUSTUP_TRACE_DIR") {
open_trace_file!(dir)?;
}
let result = run_rustup_inner();
if process().var("RUSTUP_TRACE_DIR").is_ok() {
close_trace_file!();
}
result
}
fn run_rustup_inner() -> Result<utils::ExitCode> {
// Guard against infinite proxy recursion. This mostly happens due to
// bugs in rustup.
do_recursion_guard()?;
// Before we do anything else, ensure we know where we are and who we
// are because otherwise we cannot proceed usefully.
process().current_dir()?;
utils::current_exe()?;
// The name of arg0 determines how the program is going to behave
let arg0 = match process().var("RUSTUP_FORCE_ARG0") {
Ok(v) => Some(v),
Err(_) => process().args().next(),
}
.map(PathBuf::from);
let name = arg0
.as_ref()
.and_then(|a| a.file_stem())
.and_then(std::ffi::OsStr::to_str);
match name {
Some("rustup") => rustup_mode::main(),
Some(n) if n.starts_with("rustup-setup") || n.starts_with("rustup-init") => {
// NB: The above check is only for the prefix of the file
// name. Browsers rename duplicates to
// e.g. rustup-setup(2), and this allows all variations
// to work.
setup_mode::main()
}
Some(n) if n.starts_with("rustup-gc-") => {
// This is the final uninstallation stage on windows where
// rustup deletes its own exe
self_update::complete_windows_uninstall()
}
Some(_) => proxy_mode::main(),
None => {
// Weird case. No arg0, or it's unparsable.
Err(ErrorKind::NoExeName.into())
}
}
}
fn do_recursion_guard() -> Result<()> {
let recursion_count = process()
.var("RUST_RECURSION_COUNT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(0);
if recursion_count > RUST_RECURSION_COUNT_MAX {
return Err(ErrorKind::InfiniteRecursion.into());
}
Ok(())
}
|
run_rustup
|
identifier_name
|
rustup-init.rs
|
//! The main rustup commandline application
//!
//! The rustup binary is a chimera, changing its behavior based on the
//! name of the binary. This is used most prominently to enable
//! rustup's tool 'proxies' - that is, rustup itself and the rustup
//! proxies are the same binary; when the binary is called 'rustup' or
//! 'rustup.exe' rustup behaves like the rustup commandline
//! application; when it is called 'rustc' it behaves as a proxy to
//! 'rustc'.
//!
//! This scheme is further used to distinguish the rustup installer,
//! called 'rustup-init' which is again just the rustup binary under a
//! different name.
#![recursion_limit = "1024"]
use std::path::PathBuf;
use rs_tracing::*;
use rustup::cli::common;
use rustup::cli::errors::*;
use rustup::cli::proxy_mode;
use rustup::cli::rustup_mode;
use rustup::cli::self_update;
use rustup::cli::setup_mode;
use rustup::currentprocess::{process, with, OSProcess};
use rustup::env_var::RUST_RECURSION_COUNT_MAX;
use rustup::utils::utils;
fn main() {
let process = OSProcess::default();
with(Box::new(process), || match run_rustup() {
Err(ref e) => {
common::report_error(e);
std::process::exit(1);
}
Ok(utils::ExitCode(c)) => std::process::exit(c),
});
}
fn run_rustup() -> Result<utils::ExitCode> {
if let Ok(dir) = process().var("RUSTUP_TRACE_DIR") {
open_trace_file!(dir)?;
}
let result = run_rustup_inner();
if process().var("RUSTUP_TRACE_DIR").is_ok() {
close_trace_file!();
}
result
}
fn run_rustup_inner() -> Result<utils::ExitCode> {
// Guard against infinite proxy recursion. This mostly happens due to
// bugs in rustup.
do_recursion_guard()?;
// Before we do anything else, ensure we know where we are and who we
// are because otherwise we cannot proceed usefully.
|
utils::current_exe()?;
// The name of arg0 determines how the program is going to behave
let arg0 = match process().var("RUSTUP_FORCE_ARG0") {
Ok(v) => Some(v),
Err(_) => process().args().next(),
}
.map(PathBuf::from);
let name = arg0
.as_ref()
.and_then(|a| a.file_stem())
.and_then(std::ffi::OsStr::to_str);
match name {
Some("rustup") => rustup_mode::main(),
Some(n) if n.starts_with("rustup-setup") || n.starts_with("rustup-init") => {
// NB: The above check is only for the prefix of the file
// name. Browsers rename duplicates to
// e.g. rustup-setup(2), and this allows all variations
// to work.
setup_mode::main()
}
Some(n) if n.starts_with("rustup-gc-") => {
// This is the final uninstallation stage on windows where
// rustup deletes its own exe
self_update::complete_windows_uninstall()
}
Some(_) => proxy_mode::main(),
None => {
// Weird case. No arg0, or it's unparsable.
Err(ErrorKind::NoExeName.into())
}
}
}
fn do_recursion_guard() -> Result<()> {
let recursion_count = process()
.var("RUST_RECURSION_COUNT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(0);
if recursion_count > RUST_RECURSION_COUNT_MAX {
return Err(ErrorKind::InfiniteRecursion.into());
}
Ok(())
}
|
process().current_dir()?;
|
random_line_split
|
rustup-init.rs
|
//! The main rustup commandline application
//!
//! The rustup binary is a chimera, changing its behavior based on the
//! name of the binary. This is used most prominently to enable
//! rustup's tool 'proxies' - that is, rustup itself and the rustup
//! proxies are the same binary; when the binary is called 'rustup' or
//! 'rustup.exe' rustup behaves like the rustup commandline
//! application; when it is called 'rustc' it behaves as a proxy to
//! 'rustc'.
//!
//! This scheme is further used to distinguish the rustup installer,
//! called 'rustup-init' which is again just the rustup binary under a
//! different name.
#![recursion_limit = "1024"]
use std::path::PathBuf;
use rs_tracing::*;
use rustup::cli::common;
use rustup::cli::errors::*;
use rustup::cli::proxy_mode;
use rustup::cli::rustup_mode;
use rustup::cli::self_update;
use rustup::cli::setup_mode;
use rustup::currentprocess::{process, with, OSProcess};
use rustup::env_var::RUST_RECURSION_COUNT_MAX;
use rustup::utils::utils;
fn main() {
let process = OSProcess::default();
with(Box::new(process), || match run_rustup() {
Err(ref e) => {
common::report_error(e);
std::process::exit(1);
}
Ok(utils::ExitCode(c)) => std::process::exit(c),
});
}
fn run_rustup() -> Result<utils::ExitCode> {
if let Ok(dir) = process().var("RUSTUP_TRACE_DIR") {
open_trace_file!(dir)?;
}
let result = run_rustup_inner();
if process().var("RUSTUP_TRACE_DIR").is_ok() {
close_trace_file!();
}
result
}
fn run_rustup_inner() -> Result<utils::ExitCode>
|
match name {
Some("rustup") => rustup_mode::main(),
Some(n) if n.starts_with("rustup-setup") || n.starts_with("rustup-init") => {
// NB: The above check is only for the prefix of the file
// name. Browsers rename duplicates to
// e.g. rustup-setup(2), and this allows all variations
// to work.
setup_mode::main()
}
Some(n) if n.starts_with("rustup-gc-") => {
// This is the final uninstallation stage on windows where
// rustup deletes its own exe
self_update::complete_windows_uninstall()
}
Some(_) => proxy_mode::main(),
None => {
// Weird case. No arg0, or it's unparsable.
Err(ErrorKind::NoExeName.into())
}
}
}
fn do_recursion_guard() -> Result<()> {
let recursion_count = process()
.var("RUST_RECURSION_COUNT")
.ok()
.and_then(|s| s.parse().ok())
.unwrap_or(0);
if recursion_count > RUST_RECURSION_COUNT_MAX {
return Err(ErrorKind::InfiniteRecursion.into());
}
Ok(())
}
|
{
// Guard against infinite proxy recursion. This mostly happens due to
// bugs in rustup.
do_recursion_guard()?;
// Before we do anything else, ensure we know where we are and who we
// are because otherwise we cannot proceed usefully.
process().current_dir()?;
utils::current_exe()?;
// The name of arg0 determines how the program is going to behave
let arg0 = match process().var("RUSTUP_FORCE_ARG0") {
Ok(v) => Some(v),
Err(_) => process().args().next(),
}
.map(PathBuf::from);
let name = arg0
.as_ref()
.and_then(|a| a.file_stem())
.and_then(std::ffi::OsStr::to_str);
|
identifier_body
|
panicking.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Panic support for libcore
//!
//! The core library cannot define panicking, but it does *declare* panicking. This
//! means that the functions inside of libcore are allowed to panic, but to be
//! useful an upstream crate must define panicking for libcore to use. The current
//! interface for panicking is:
//!
//! ```ignore
//! fn panic_impl(fmt: fmt::Arguments, &(&'static str, u32)) ->!;
//! ```
//!
//! This definition allows for panicking with any general message, but it does not
//! allow for failing with a `Box<Any>` value. The reason for this is that libcore
//! is not allowed to allocate.
//!
//! This module contains a few other panicking functions, but these are just the
//! necessary lang items for the compiler. All panics are funneled through this
//! one function. Currently, the actual symbol is declared in the standard
//! library, but the location of this may change over time.
#![allow(dead_code, missing_docs)]
use fmt;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="panic"]
pub fn
|
(expr_file_line: &(&'static str, &'static str, u32)) ->! {
// Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_v1 may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
let (expr, file, line) = *expr_file_line;
panic_fmt(fmt::Arguments::new_v1(&[expr], &[]), &(file, line))
}
#[cold] #[inline(never)]
#[lang="panic_bounds_check"]
fn panic_bounds_check(file_line: &(&'static str, u32),
index: usize, len: usize) ->! {
panic_fmt(format_args!("index out of bounds: the len is {} but the index is {}",
len, index), file_line)
}
#[cold] #[inline(never)]
pub fn panic_fmt(fmt: fmt::Arguments, file_line: &(&'static str, u32)) ->! {
#[allow(improper_ctypes)]
extern {
#[lang = "panic_fmt"]
fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: u32) ->!;
}
let (file, line) = *file_line;
unsafe { panic_impl(fmt, file, line) }
}
|
panic
|
identifier_name
|
panicking.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Panic support for libcore
//!
//! The core library cannot define panicking, but it does *declare* panicking. This
//! means that the functions inside of libcore are allowed to panic, but to be
//! useful an upstream crate must define panicking for libcore to use. The current
//! interface for panicking is:
//!
//! ```ignore
//! fn panic_impl(fmt: fmt::Arguments, &(&'static str, u32)) ->!;
//! ```
//!
//! This definition allows for panicking with any general message, but it does not
//! allow for failing with a `Box<Any>` value. The reason for this is that libcore
//! is not allowed to allocate.
//!
//! This module contains a few other panicking functions, but these are just the
//! necessary lang items for the compiler. All panics are funneled through this
//! one function. Currently, the actual symbol is declared in the standard
//! library, but the location of this may change over time.
|
#![allow(dead_code, missing_docs)]
use fmt;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="panic"]
pub fn panic(expr_file_line: &(&'static str, &'static str, u32)) ->! {
// Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_v1 may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
let (expr, file, line) = *expr_file_line;
panic_fmt(fmt::Arguments::new_v1(&[expr], &[]), &(file, line))
}
#[cold] #[inline(never)]
#[lang="panic_bounds_check"]
fn panic_bounds_check(file_line: &(&'static str, u32),
index: usize, len: usize) ->! {
panic_fmt(format_args!("index out of bounds: the len is {} but the index is {}",
len, index), file_line)
}
#[cold] #[inline(never)]
pub fn panic_fmt(fmt: fmt::Arguments, file_line: &(&'static str, u32)) ->! {
#[allow(improper_ctypes)]
extern {
#[lang = "panic_fmt"]
fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: u32) ->!;
}
let (file, line) = *file_line;
unsafe { panic_impl(fmt, file, line) }
}
|
random_line_split
|
|
panicking.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Panic support for libcore
//!
//! The core library cannot define panicking, but it does *declare* panicking. This
//! means that the functions inside of libcore are allowed to panic, but to be
//! useful an upstream crate must define panicking for libcore to use. The current
//! interface for panicking is:
//!
//! ```ignore
//! fn panic_impl(fmt: fmt::Arguments, &(&'static str, u32)) ->!;
//! ```
//!
//! This definition allows for panicking with any general message, but it does not
//! allow for failing with a `Box<Any>` value. The reason for this is that libcore
//! is not allowed to allocate.
//!
//! This module contains a few other panicking functions, but these are just the
//! necessary lang items for the compiler. All panics are funneled through this
//! one function. Currently, the actual symbol is declared in the standard
//! library, but the location of this may change over time.
#![allow(dead_code, missing_docs)]
use fmt;
#[cold] #[inline(never)] // this is the slow path, always
#[lang="panic"]
pub fn panic(expr_file_line: &(&'static str, &'static str, u32)) ->! {
// Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_v1 may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
let (expr, file, line) = *expr_file_line;
panic_fmt(fmt::Arguments::new_v1(&[expr], &[]), &(file, line))
}
#[cold] #[inline(never)]
#[lang="panic_bounds_check"]
fn panic_bounds_check(file_line: &(&'static str, u32),
index: usize, len: usize) ->! {
panic_fmt(format_args!("index out of bounds: the len is {} but the index is {}",
len, index), file_line)
}
#[cold] #[inline(never)]
pub fn panic_fmt(fmt: fmt::Arguments, file_line: &(&'static str, u32)) ->!
|
{
#[allow(improper_ctypes)]
extern {
#[lang = "panic_fmt"]
fn panic_impl(fmt: fmt::Arguments, file: &'static str, line: u32) -> !;
}
let (file, line) = *file_line;
unsafe { panic_impl(fmt, file, line) }
}
|
identifier_body
|
|
swap-overlapping.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #5041 - avoid overlapping memcpy when src and dest of a swap are the same
use std::ptr;
pub fn main() {
let mut test = TestDescAndFn {
desc: TestDesc {
name: DynTestName("test".to_strbuf()),
should_fail: false
},
testfn: DynTestFn(proc() ()),
};
do_swap(&mut test);
}
fn do_swap(test: &mut TestDescAndFn) {
unsafe {
ptr::swap(test, test);
}
}
pub enum TestName {
DynTestName(StrBuf)
}
pub enum TestFn {
DynTestFn(proc()),
DynBenchFn(proc(&mut int))
}
pub struct TestDesc {
name: TestName,
should_fail: bool
}
pub struct
|
{
desc: TestDesc,
testfn: TestFn,
}
|
TestDescAndFn
|
identifier_name
|
swap-overlapping.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Issue #5041 - avoid overlapping memcpy when src and dest of a swap are the same
use std::ptr;
pub fn main() {
let mut test = TestDescAndFn {
desc: TestDesc {
name: DynTestName("test".to_strbuf()),
should_fail: false
},
testfn: DynTestFn(proc() ()),
};
do_swap(&mut test);
}
fn do_swap(test: &mut TestDescAndFn) {
unsafe {
ptr::swap(test, test);
}
}
pub enum TestName {
DynTestName(StrBuf)
}
pub enum TestFn {
DynTestFn(proc()),
DynBenchFn(proc(&mut int))
}
pub struct TestDesc {
name: TestName,
should_fail: bool
}
pub struct TestDescAndFn {
desc: TestDesc,
testfn: TestFn,
}
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
random_line_split
|
input.rs
|
use engine::components::{Movable, Move};
use engine::resources::{ReplayMode, Skip};
use engine::ActionInput;
use specs::prelude::*;
use specs::world::Index;
use scaii_defs::protos::Action as ScaiiAction;
#[derive(SystemData)]
pub struct InputSystemData<'a> {
movable: ReadStorage<'a, Movable>,
input: FetchMut<'a, ActionInput>,
ids: Entities<'a>,
is_replay: Fetch<'a, ReplayMode>,
skip: FetchMut<'a, Skip>,
moves: WriteStorage<'a, Move>,
}
#[derive(Default)]
pub struct InputSystem {}
impl InputSystem {
pub fn
|
() -> Self {
InputSystem {}
}
}
impl<'a> System<'a> for InputSystem {
type SystemData = InputSystemData<'a>;
fn run(&mut self, mut sys_data: Self::SystemData) {
use engine::components::{MoveBehavior, MoveTarget};
use std::mem;
let actions = mem::replace(&mut sys_data.input.0, None);
let actions = if actions.is_some() {
let (actions, skip, skip_lua) = to_action_list(actions.unwrap());
// ignore skipping for replays
if!sys_data.is_replay.0 {
*sys_data.skip = Skip(skip, skip_lua);
}
actions
} else {
return;
};
for action in actions {
let entity = sys_data.ids.entity(action.unit_id);
// Maybe set an error state later?
if!sys_data.ids.is_alive(entity) {
continue;
}
if!sys_data.movable.get(entity).is_some() {
continue;
}
let move_order = match action.action {
ActionTarget::Attack(tar_id) => {
let target = sys_data.ids.entity(tar_id);
if!sys_data.ids.is_alive(target) {
continue;
}
Move {
behavior: MoveBehavior::Straight,
target: MoveTarget::AttackUnit(target),
}
}
};
sys_data.moves.insert(entity, move_order);
}
// for (pos, moves, id) in (&mut sys_data.positions, &sys_data.moves, &*sys_data.ids).join() {}
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct Action {
unit_id: Index,
action: ActionTarget,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum ActionTarget {
Attack(Index),
}
fn to_action_list(raw: ScaiiAction) -> (Vec<Action>, bool, Option<String>) {
use prost::Message;
use protos::unit_action::Action as RtsAction;
use protos::{ActionList, AttackUnit};
if raw.alternate_actions.is_none() {
return Default::default();
}
let action: ActionList =
ActionList::decode(raw.alternate_actions.unwrap()).expect("Could parse inner message");
let actions = action
.actions
.into_iter()
.map(|a| Action {
unit_id: a.unit_id as Index,
action: match a.action.expect("Expected an action descriptor") {
RtsAction::AttackUnit(AttackUnit { target_id }) => {
ActionTarget::Attack(target_id as Index)
}
_ => unimplemented!(), // whats this line do
},
})
.collect();
(actions, action.skip.unwrap_or_default(), action.skip_lua)
}
#[cfg(test)]
mod tests {
use engine::components::{Movable, Move};
use engine::ActionInput;
use specs::prelude::*;
use scaii_defs::protos::Action as ScaiiAction;
use super::*;
#[test]
fn input() {
use engine::components::{MoveBehavior, MoveTarget};
use engine::{components, resources};
use prost::Message;
use protos::unit_action::Action;
use protos::{ActionList, AttackUnit, UnitAction};
let mut world = World::new();
components::register_world_components(&mut world);
resources::register_world_resources(&mut world);
let test_player = world.create_entity().with(Movable(0)).build();
let test_target = world.create_entity().build();
let actions = ActionList {
actions: vec![UnitAction {
unit_id: test_player.id().into(),
action: Some(Action::AttackUnit(AttackUnit {
target_id: test_target.id(),
})),
}],
..Default::default()
};
let mut buf = Vec::new();
actions.encode(&mut buf).unwrap();
world.write_resource::<ActionInput>().0 = Some(
ScaiiAction {
alternate_actions: Some(buf),
..Default::default()
}
.clone(),
);
let mut sys: Dispatcher = DispatcherBuilder::new()
.add(InputSystem::new(), "input", &[])
.build();
sys.dispatch(&mut world.res);
let moves = world.read::<Move>();
assert!(moves.get(test_player).unwrap().target == MoveTarget::AttackUnit(test_target)); // Verifies that test_player's target is test target
assert!(moves.get(test_player).unwrap().behavior == MoveBehavior::Straight); // Verifies that test_player's move behavior is straight
}
}
|
new
|
identifier_name
|
input.rs
|
use engine::components::{Movable, Move};
use engine::resources::{ReplayMode, Skip};
use engine::ActionInput;
use specs::prelude::*;
use specs::world::Index;
use scaii_defs::protos::Action as ScaiiAction;
#[derive(SystemData)]
pub struct InputSystemData<'a> {
movable: ReadStorage<'a, Movable>,
input: FetchMut<'a, ActionInput>,
ids: Entities<'a>,
is_replay: Fetch<'a, ReplayMode>,
skip: FetchMut<'a, Skip>,
moves: WriteStorage<'a, Move>,
}
#[derive(Default)]
pub struct InputSystem {}
impl InputSystem {
pub fn new() -> Self
|
}
impl<'a> System<'a> for InputSystem {
type SystemData = InputSystemData<'a>;
fn run(&mut self, mut sys_data: Self::SystemData) {
use engine::components::{MoveBehavior, MoveTarget};
use std::mem;
let actions = mem::replace(&mut sys_data.input.0, None);
let actions = if actions.is_some() {
let (actions, skip, skip_lua) = to_action_list(actions.unwrap());
// ignore skipping for replays
if!sys_data.is_replay.0 {
*sys_data.skip = Skip(skip, skip_lua);
}
actions
} else {
return;
};
for action in actions {
let entity = sys_data.ids.entity(action.unit_id);
// Maybe set an error state later?
if!sys_data.ids.is_alive(entity) {
continue;
}
if!sys_data.movable.get(entity).is_some() {
continue;
}
let move_order = match action.action {
ActionTarget::Attack(tar_id) => {
let target = sys_data.ids.entity(tar_id);
if!sys_data.ids.is_alive(target) {
continue;
}
Move {
behavior: MoveBehavior::Straight,
target: MoveTarget::AttackUnit(target),
}
}
};
sys_data.moves.insert(entity, move_order);
}
// for (pos, moves, id) in (&mut sys_data.positions, &sys_data.moves, &*sys_data.ids).join() {}
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct Action {
unit_id: Index,
action: ActionTarget,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum ActionTarget {
Attack(Index),
}
fn to_action_list(raw: ScaiiAction) -> (Vec<Action>, bool, Option<String>) {
use prost::Message;
use protos::unit_action::Action as RtsAction;
use protos::{ActionList, AttackUnit};
if raw.alternate_actions.is_none() {
return Default::default();
}
let action: ActionList =
ActionList::decode(raw.alternate_actions.unwrap()).expect("Could parse inner message");
let actions = action
.actions
.into_iter()
.map(|a| Action {
unit_id: a.unit_id as Index,
action: match a.action.expect("Expected an action descriptor") {
RtsAction::AttackUnit(AttackUnit { target_id }) => {
ActionTarget::Attack(target_id as Index)
}
_ => unimplemented!(), // whats this line do
},
})
.collect();
(actions, action.skip.unwrap_or_default(), action.skip_lua)
}
#[cfg(test)]
mod tests {
use engine::components::{Movable, Move};
use engine::ActionInput;
use specs::prelude::*;
use scaii_defs::protos::Action as ScaiiAction;
use super::*;
#[test]
fn input() {
use engine::components::{MoveBehavior, MoveTarget};
use engine::{components, resources};
use prost::Message;
use protos::unit_action::Action;
use protos::{ActionList, AttackUnit, UnitAction};
let mut world = World::new();
components::register_world_components(&mut world);
resources::register_world_resources(&mut world);
let test_player = world.create_entity().with(Movable(0)).build();
let test_target = world.create_entity().build();
let actions = ActionList {
actions: vec![UnitAction {
unit_id: test_player.id().into(),
action: Some(Action::AttackUnit(AttackUnit {
target_id: test_target.id(),
})),
}],
..Default::default()
};
let mut buf = Vec::new();
actions.encode(&mut buf).unwrap();
world.write_resource::<ActionInput>().0 = Some(
ScaiiAction {
alternate_actions: Some(buf),
..Default::default()
}
.clone(),
);
let mut sys: Dispatcher = DispatcherBuilder::new()
.add(InputSystem::new(), "input", &[])
.build();
sys.dispatch(&mut world.res);
let moves = world.read::<Move>();
assert!(moves.get(test_player).unwrap().target == MoveTarget::AttackUnit(test_target)); // Verifies that test_player's target is test target
assert!(moves.get(test_player).unwrap().behavior == MoveBehavior::Straight); // Verifies that test_player's move behavior is straight
}
}
|
{
InputSystem {}
}
|
identifier_body
|
input.rs
|
use engine::components::{Movable, Move};
use engine::resources::{ReplayMode, Skip};
use engine::ActionInput;
use specs::prelude::*;
use specs::world::Index;
use scaii_defs::protos::Action as ScaiiAction;
#[derive(SystemData)]
pub struct InputSystemData<'a> {
movable: ReadStorage<'a, Movable>,
input: FetchMut<'a, ActionInput>,
ids: Entities<'a>,
is_replay: Fetch<'a, ReplayMode>,
skip: FetchMut<'a, Skip>,
moves: WriteStorage<'a, Move>,
}
#[derive(Default)]
pub struct InputSystem {}
impl InputSystem {
|
InputSystem {}
}
}
impl<'a> System<'a> for InputSystem {
type SystemData = InputSystemData<'a>;
fn run(&mut self, mut sys_data: Self::SystemData) {
use engine::components::{MoveBehavior, MoveTarget};
use std::mem;
let actions = mem::replace(&mut sys_data.input.0, None);
let actions = if actions.is_some() {
let (actions, skip, skip_lua) = to_action_list(actions.unwrap());
// ignore skipping for replays
if!sys_data.is_replay.0 {
*sys_data.skip = Skip(skip, skip_lua);
}
actions
} else {
return;
};
for action in actions {
let entity = sys_data.ids.entity(action.unit_id);
// Maybe set an error state later?
if!sys_data.ids.is_alive(entity) {
continue;
}
if!sys_data.movable.get(entity).is_some() {
continue;
}
let move_order = match action.action {
ActionTarget::Attack(tar_id) => {
let target = sys_data.ids.entity(tar_id);
if!sys_data.ids.is_alive(target) {
continue;
}
Move {
behavior: MoveBehavior::Straight,
target: MoveTarget::AttackUnit(target),
}
}
};
sys_data.moves.insert(entity, move_order);
}
// for (pos, moves, id) in (&mut sys_data.positions, &sys_data.moves, &*sys_data.ids).join() {}
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct Action {
unit_id: Index,
action: ActionTarget,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum ActionTarget {
Attack(Index),
}
fn to_action_list(raw: ScaiiAction) -> (Vec<Action>, bool, Option<String>) {
use prost::Message;
use protos::unit_action::Action as RtsAction;
use protos::{ActionList, AttackUnit};
if raw.alternate_actions.is_none() {
return Default::default();
}
let action: ActionList =
ActionList::decode(raw.alternate_actions.unwrap()).expect("Could parse inner message");
let actions = action
.actions
.into_iter()
.map(|a| Action {
unit_id: a.unit_id as Index,
action: match a.action.expect("Expected an action descriptor") {
RtsAction::AttackUnit(AttackUnit { target_id }) => {
ActionTarget::Attack(target_id as Index)
}
_ => unimplemented!(), // whats this line do
},
})
.collect();
(actions, action.skip.unwrap_or_default(), action.skip_lua)
}
#[cfg(test)]
mod tests {
use engine::components::{Movable, Move};
use engine::ActionInput;
use specs::prelude::*;
use scaii_defs::protos::Action as ScaiiAction;
use super::*;
#[test]
fn input() {
use engine::components::{MoveBehavior, MoveTarget};
use engine::{components, resources};
use prost::Message;
use protos::unit_action::Action;
use protos::{ActionList, AttackUnit, UnitAction};
let mut world = World::new();
components::register_world_components(&mut world);
resources::register_world_resources(&mut world);
let test_player = world.create_entity().with(Movable(0)).build();
let test_target = world.create_entity().build();
let actions = ActionList {
actions: vec![UnitAction {
unit_id: test_player.id().into(),
action: Some(Action::AttackUnit(AttackUnit {
target_id: test_target.id(),
})),
}],
..Default::default()
};
let mut buf = Vec::new();
actions.encode(&mut buf).unwrap();
world.write_resource::<ActionInput>().0 = Some(
ScaiiAction {
alternate_actions: Some(buf),
..Default::default()
}
.clone(),
);
let mut sys: Dispatcher = DispatcherBuilder::new()
.add(InputSystem::new(), "input", &[])
.build();
sys.dispatch(&mut world.res);
let moves = world.read::<Move>();
assert!(moves.get(test_player).unwrap().target == MoveTarget::AttackUnit(test_target)); // Verifies that test_player's target is test target
assert!(moves.get(test_player).unwrap().behavior == MoveBehavior::Straight); // Verifies that test_player's move behavior is straight
}
}
|
pub fn new() -> Self {
|
random_line_split
|
input.rs
|
use engine::components::{Movable, Move};
use engine::resources::{ReplayMode, Skip};
use engine::ActionInput;
use specs::prelude::*;
use specs::world::Index;
use scaii_defs::protos::Action as ScaiiAction;
#[derive(SystemData)]
pub struct InputSystemData<'a> {
movable: ReadStorage<'a, Movable>,
input: FetchMut<'a, ActionInput>,
ids: Entities<'a>,
is_replay: Fetch<'a, ReplayMode>,
skip: FetchMut<'a, Skip>,
moves: WriteStorage<'a, Move>,
}
#[derive(Default)]
pub struct InputSystem {}
impl InputSystem {
pub fn new() -> Self {
InputSystem {}
}
}
impl<'a> System<'a> for InputSystem {
type SystemData = InputSystemData<'a>;
fn run(&mut self, mut sys_data: Self::SystemData) {
use engine::components::{MoveBehavior, MoveTarget};
use std::mem;
let actions = mem::replace(&mut sys_data.input.0, None);
let actions = if actions.is_some() {
let (actions, skip, skip_lua) = to_action_list(actions.unwrap());
// ignore skipping for replays
if!sys_data.is_replay.0 {
*sys_data.skip = Skip(skip, skip_lua);
}
actions
} else
|
;
for action in actions {
let entity = sys_data.ids.entity(action.unit_id);
// Maybe set an error state later?
if!sys_data.ids.is_alive(entity) {
continue;
}
if!sys_data.movable.get(entity).is_some() {
continue;
}
let move_order = match action.action {
ActionTarget::Attack(tar_id) => {
let target = sys_data.ids.entity(tar_id);
if!sys_data.ids.is_alive(target) {
continue;
}
Move {
behavior: MoveBehavior::Straight,
target: MoveTarget::AttackUnit(target),
}
}
};
sys_data.moves.insert(entity, move_order);
}
// for (pos, moves, id) in (&mut sys_data.positions, &sys_data.moves, &*sys_data.ids).join() {}
}
}
#[derive(Debug, Copy, Clone, PartialEq)]
struct Action {
unit_id: Index,
action: ActionTarget,
}
#[derive(Debug, Copy, Clone, PartialEq)]
enum ActionTarget {
Attack(Index),
}
fn to_action_list(raw: ScaiiAction) -> (Vec<Action>, bool, Option<String>) {
use prost::Message;
use protos::unit_action::Action as RtsAction;
use protos::{ActionList, AttackUnit};
if raw.alternate_actions.is_none() {
return Default::default();
}
let action: ActionList =
ActionList::decode(raw.alternate_actions.unwrap()).expect("Could parse inner message");
let actions = action
.actions
.into_iter()
.map(|a| Action {
unit_id: a.unit_id as Index,
action: match a.action.expect("Expected an action descriptor") {
RtsAction::AttackUnit(AttackUnit { target_id }) => {
ActionTarget::Attack(target_id as Index)
}
_ => unimplemented!(), // whats this line do
},
})
.collect();
(actions, action.skip.unwrap_or_default(), action.skip_lua)
}
#[cfg(test)]
mod tests {
use engine::components::{Movable, Move};
use engine::ActionInput;
use specs::prelude::*;
use scaii_defs::protos::Action as ScaiiAction;
use super::*;
#[test]
fn input() {
use engine::components::{MoveBehavior, MoveTarget};
use engine::{components, resources};
use prost::Message;
use protos::unit_action::Action;
use protos::{ActionList, AttackUnit, UnitAction};
let mut world = World::new();
components::register_world_components(&mut world);
resources::register_world_resources(&mut world);
let test_player = world.create_entity().with(Movable(0)).build();
let test_target = world.create_entity().build();
let actions = ActionList {
actions: vec![UnitAction {
unit_id: test_player.id().into(),
action: Some(Action::AttackUnit(AttackUnit {
target_id: test_target.id(),
})),
}],
..Default::default()
};
let mut buf = Vec::new();
actions.encode(&mut buf).unwrap();
world.write_resource::<ActionInput>().0 = Some(
ScaiiAction {
alternate_actions: Some(buf),
..Default::default()
}
.clone(),
);
let mut sys: Dispatcher = DispatcherBuilder::new()
.add(InputSystem::new(), "input", &[])
.build();
sys.dispatch(&mut world.res);
let moves = world.read::<Move>();
assert!(moves.get(test_player).unwrap().target == MoveTarget::AttackUnit(test_target)); // Verifies that test_player's target is test target
assert!(moves.get(test_player).unwrap().behavior == MoveBehavior::Straight); // Verifies that test_player's move behavior is straight
}
}
|
{
return;
}
|
conditional_block
|
scanner.rs
|
use std::io;
use std::io::prelude::*;
use std::sync::{ Arc, Mutex };
use std::sync::mpsc::{ channel, Sender, Receiver };
use std::thread;
use std::error;
// State Function
pub struct
|
<E: error::Error, T>(pub fn(l: &mut Scanner<E, T>)->Result<Option<StateFn<E, T>>, E>);
pub trait StateMachine<E: error::Error, T> {
fn start_state(self)->Result<Option<StateFn<E, T>>, E>;
}
#[derive(Clone, Debug)]
pub struct Pos {
pos: u64,
line: u64,
col: u64
}
// Position
impl Pos {
fn new()->Pos {
Pos {
pos: 0,
line: 0,
col: 0
}
}
fn next(&mut self, c: char) {
self.pos += 1;
match c {
'\n' => {
self.line += 1;
self.col = 0;
},
_ => self.col += 1
}
}
fn back(&mut self, c: char) {
self.pos -= 1;
match c {
// Don't back over newlines.
'\n' => {},
_ => self.col -= 1
}
}
}
#[derive(Debug)]
pub struct Token<T> {
pos: Pos,
typ: T,
val: String
}
pub trait Scanner<E: error::Error, T> {
fn next(&mut self)->Result<char, E>;
fn back(&mut self);
fn emit(&mut self, typ: T);
}
// State Machine
pub struct ReadScanner<R: io::Read+Send, T: Send> {
// Current position
pos: Pos,
// Input character stream
input: Arc<Mutex<io::Chars<R>>>,
// Back buffer
backbuf: Vec<char>,
// Unemitted buffer
buf: String,
// State machine position
state: StateFn<io::CharsError, T>,
// Sending handle to token channel
send: Option<Sender<Token<T>>>,
// Sending state machine thread handle
handle: Option<thread::JoinHandle<Option<io::CharsError>>>
}
impl<R: io::Read+Send+'static, T: Send+'static> ReadScanner<R, T> {
pub fn new<S: StateMachine<io::CharsError, T>>(reader: R, sstate: S)->ReadScanner<R, T> {
let l = ReadScanner {
pos: Pos::new(),
input: Arc::new(Mutex::new(reader.chars())),
buf: String::new(),
backbuf: Vec::<char>::new(),
state: sstate.start_state().unwrap().unwrap(),
send: None,
handle: None,
};
l
}
pub fn spawn(mut self)->Receiver<Token<T>> {
let (tx, rx) = channel();
self.send = Some(tx);
self.handle = Some(thread::spawn(move || {
loop {
match (self.state.0)(&mut self) {
Ok(s) => match s {
Some(s) => self.state = s,
None => return None,
},
Err(e) => return Some(e)
}
}
}));
rx
}
}
impl<R: io::Read+Send, T: Send> Scanner<io::CharsError, T> for ReadScanner<R, T> {
fn next(&mut self)->Result<char, io::CharsError> {
let c = if self.backbuf.len() == 0 {
try!(self.input.lock().unwrap().next().unwrap())
} else {
self.backbuf.pop().unwrap()
};
self.buf.push(c); // Push to unemitted buffer
self.pos.next(c); // Update position
Ok(c)
}
fn back(&mut self) {
let c = self.buf.pop().unwrap();
self.backbuf.push(c);
self.pos.back(c);
}
fn emit(&mut self, typ: T) {
let t = self.send.as_ref().unwrap().send(Token{
pos: self.pos.clone(),
typ: typ,
val: self.buf.clone()
}).unwrap();
self.buf = String::new();
t
}
}
|
StateFn
|
identifier_name
|
scanner.rs
|
use std::io;
use std::io::prelude::*;
use std::sync::{ Arc, Mutex };
use std::sync::mpsc::{ channel, Sender, Receiver };
use std::thread;
use std::error;
// State Function
pub struct StateFn<E: error::Error, T>(pub fn(l: &mut Scanner<E, T>)->Result<Option<StateFn<E, T>>, E>);
pub trait StateMachine<E: error::Error, T> {
fn start_state(self)->Result<Option<StateFn<E, T>>, E>;
}
#[derive(Clone, Debug)]
pub struct Pos {
pos: u64,
line: u64,
col: u64
}
// Position
impl Pos {
fn new()->Pos {
Pos {
pos: 0,
line: 0,
col: 0
}
}
fn next(&mut self, c: char) {
self.pos += 1;
match c {
'\n' => {
self.line += 1;
self.col = 0;
},
_ => self.col += 1
}
}
fn back(&mut self, c: char) {
self.pos -= 1;
match c {
// Don't back over newlines.
'\n' => {},
_ => self.col -= 1
}
}
}
#[derive(Debug)]
pub struct Token<T> {
pos: Pos,
typ: T,
val: String
}
pub trait Scanner<E: error::Error, T> {
fn next(&mut self)->Result<char, E>;
fn back(&mut self);
fn emit(&mut self, typ: T);
}
// State Machine
pub struct ReadScanner<R: io::Read+Send, T: Send> {
// Current position
pos: Pos,
// Input character stream
input: Arc<Mutex<io::Chars<R>>>,
// Back buffer
backbuf: Vec<char>,
// Unemitted buffer
buf: String,
// State machine position
state: StateFn<io::CharsError, T>,
// Sending handle to token channel
send: Option<Sender<Token<T>>>,
// Sending state machine thread handle
handle: Option<thread::JoinHandle<Option<io::CharsError>>>
}
impl<R: io::Read+Send+'static, T: Send+'static> ReadScanner<R, T> {
pub fn new<S: StateMachine<io::CharsError, T>>(reader: R, sstate: S)->ReadScanner<R, T> {
let l = ReadScanner {
pos: Pos::new(),
input: Arc::new(Mutex::new(reader.chars())),
buf: String::new(),
backbuf: Vec::<char>::new(),
state: sstate.start_state().unwrap().unwrap(),
send: None,
handle: None,
};
l
}
pub fn spawn(mut self)->Receiver<Token<T>> {
let (tx, rx) = channel();
self.send = Some(tx);
self.handle = Some(thread::spawn(move || {
loop {
match (self.state.0)(&mut self) {
Ok(s) => match s {
Some(s) => self.state = s,
None => return None,
},
Err(e) => return Some(e)
}
}
}));
rx
}
}
impl<R: io::Read+Send, T: Send> Scanner<io::CharsError, T> for ReadScanner<R, T> {
fn next(&mut self)->Result<char, io::CharsError> {
let c = if self.backbuf.len() == 0 {
try!(self.input.lock().unwrap().next().unwrap())
} else
|
;
self.buf.push(c); // Push to unemitted buffer
self.pos.next(c); // Update position
Ok(c)
}
fn back(&mut self) {
let c = self.buf.pop().unwrap();
self.backbuf.push(c);
self.pos.back(c);
}
fn emit(&mut self, typ: T) {
let t = self.send.as_ref().unwrap().send(Token{
pos: self.pos.clone(),
typ: typ,
val: self.buf.clone()
}).unwrap();
self.buf = String::new();
t
}
}
|
{
self.backbuf.pop().unwrap()
}
|
conditional_block
|
scanner.rs
|
use std::io;
use std::io::prelude::*;
use std::sync::{ Arc, Mutex };
use std::sync::mpsc::{ channel, Sender, Receiver };
use std::thread;
use std::error;
// State Function
pub struct StateFn<E: error::Error, T>(pub fn(l: &mut Scanner<E, T>)->Result<Option<StateFn<E, T>>, E>);
pub trait StateMachine<E: error::Error, T> {
fn start_state(self)->Result<Option<StateFn<E, T>>, E>;
}
#[derive(Clone, Debug)]
pub struct Pos {
pos: u64,
line: u64,
col: u64
}
// Position
impl Pos {
fn new()->Pos {
Pos {
pos: 0,
line: 0,
col: 0
}
}
fn next(&mut self, c: char) {
self.pos += 1;
match c {
'\n' => {
self.line += 1;
self.col = 0;
},
_ => self.col += 1
}
}
fn back(&mut self, c: char) {
self.pos -= 1;
match c {
// Don't back over newlines.
'\n' => {},
_ => self.col -= 1
}
}
}
#[derive(Debug)]
pub struct Token<T> {
pos: Pos,
typ: T,
val: String
}
pub trait Scanner<E: error::Error, T> {
|
fn emit(&mut self, typ: T);
}
// State Machine
pub struct ReadScanner<R: io::Read+Send, T: Send> {
// Current position
pos: Pos,
// Input character stream
input: Arc<Mutex<io::Chars<R>>>,
// Back buffer
backbuf: Vec<char>,
// Unemitted buffer
buf: String,
// State machine position
state: StateFn<io::CharsError, T>,
// Sending handle to token channel
send: Option<Sender<Token<T>>>,
// Sending state machine thread handle
handle: Option<thread::JoinHandle<Option<io::CharsError>>>
}
impl<R: io::Read+Send+'static, T: Send+'static> ReadScanner<R, T> {
pub fn new<S: StateMachine<io::CharsError, T>>(reader: R, sstate: S)->ReadScanner<R, T> {
let l = ReadScanner {
pos: Pos::new(),
input: Arc::new(Mutex::new(reader.chars())),
buf: String::new(),
backbuf: Vec::<char>::new(),
state: sstate.start_state().unwrap().unwrap(),
send: None,
handle: None,
};
l
}
pub fn spawn(mut self)->Receiver<Token<T>> {
let (tx, rx) = channel();
self.send = Some(tx);
self.handle = Some(thread::spawn(move || {
loop {
match (self.state.0)(&mut self) {
Ok(s) => match s {
Some(s) => self.state = s,
None => return None,
},
Err(e) => return Some(e)
}
}
}));
rx
}
}
impl<R: io::Read+Send, T: Send> Scanner<io::CharsError, T> for ReadScanner<R, T> {
fn next(&mut self)->Result<char, io::CharsError> {
let c = if self.backbuf.len() == 0 {
try!(self.input.lock().unwrap().next().unwrap())
} else {
self.backbuf.pop().unwrap()
};
self.buf.push(c); // Push to unemitted buffer
self.pos.next(c); // Update position
Ok(c)
}
fn back(&mut self) {
let c = self.buf.pop().unwrap();
self.backbuf.push(c);
self.pos.back(c);
}
fn emit(&mut self, typ: T) {
let t = self.send.as_ref().unwrap().send(Token{
pos: self.pos.clone(),
typ: typ,
val: self.buf.clone()
}).unwrap();
self.buf = String::new();
t
}
}
|
fn next(&mut self)->Result<char, E>;
fn back(&mut self);
|
random_line_split
|
scanner.rs
|
use std::io;
use std::io::prelude::*;
use std::sync::{ Arc, Mutex };
use std::sync::mpsc::{ channel, Sender, Receiver };
use std::thread;
use std::error;
// State Function
pub struct StateFn<E: error::Error, T>(pub fn(l: &mut Scanner<E, T>)->Result<Option<StateFn<E, T>>, E>);
pub trait StateMachine<E: error::Error, T> {
fn start_state(self)->Result<Option<StateFn<E, T>>, E>;
}
#[derive(Clone, Debug)]
pub struct Pos {
pos: u64,
line: u64,
col: u64
}
// Position
impl Pos {
fn new()->Pos
|
fn next(&mut self, c: char) {
self.pos += 1;
match c {
'\n' => {
self.line += 1;
self.col = 0;
},
_ => self.col += 1
}
}
fn back(&mut self, c: char) {
self.pos -= 1;
match c {
// Don't back over newlines.
'\n' => {},
_ => self.col -= 1
}
}
}
#[derive(Debug)]
pub struct Token<T> {
pos: Pos,
typ: T,
val: String
}
pub trait Scanner<E: error::Error, T> {
fn next(&mut self)->Result<char, E>;
fn back(&mut self);
fn emit(&mut self, typ: T);
}
// State Machine
pub struct ReadScanner<R: io::Read+Send, T: Send> {
// Current position
pos: Pos,
// Input character stream
input: Arc<Mutex<io::Chars<R>>>,
// Back buffer
backbuf: Vec<char>,
// Unemitted buffer
buf: String,
// State machine position
state: StateFn<io::CharsError, T>,
// Sending handle to token channel
send: Option<Sender<Token<T>>>,
// Sending state machine thread handle
handle: Option<thread::JoinHandle<Option<io::CharsError>>>
}
impl<R: io::Read+Send+'static, T: Send+'static> ReadScanner<R, T> {
pub fn new<S: StateMachine<io::CharsError, T>>(reader: R, sstate: S)->ReadScanner<R, T> {
let l = ReadScanner {
pos: Pos::new(),
input: Arc::new(Mutex::new(reader.chars())),
buf: String::new(),
backbuf: Vec::<char>::new(),
state: sstate.start_state().unwrap().unwrap(),
send: None,
handle: None,
};
l
}
pub fn spawn(mut self)->Receiver<Token<T>> {
let (tx, rx) = channel();
self.send = Some(tx);
self.handle = Some(thread::spawn(move || {
loop {
match (self.state.0)(&mut self) {
Ok(s) => match s {
Some(s) => self.state = s,
None => return None,
},
Err(e) => return Some(e)
}
}
}));
rx
}
}
impl<R: io::Read+Send, T: Send> Scanner<io::CharsError, T> for ReadScanner<R, T> {
fn next(&mut self)->Result<char, io::CharsError> {
let c = if self.backbuf.len() == 0 {
try!(self.input.lock().unwrap().next().unwrap())
} else {
self.backbuf.pop().unwrap()
};
self.buf.push(c); // Push to unemitted buffer
self.pos.next(c); // Update position
Ok(c)
}
fn back(&mut self) {
let c = self.buf.pop().unwrap();
self.backbuf.push(c);
self.pos.back(c);
}
fn emit(&mut self, typ: T) {
let t = self.send.as_ref().unwrap().send(Token{
pos: self.pos.clone(),
typ: typ,
val: self.buf.clone()
}).unwrap();
self.buf = String::new();
t
}
}
|
{
Pos {
pos: 0,
line: 0,
col: 0
}
}
|
identifier_body
|
ipc.rs
|
//! IPC module will hold all massive (that is why IPC) internal messages
//! which occur due to data collection, its start and its end.
use super::{
audio_info::{AudioInfo, AudioInfoKey},
IFInternalCollectionOutputData,
};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct IFCollectionOutputData {
pub nr_searched_files: u32,
pub nr_found_songs: u32,
pub nr_internal_duplicates: u32,
pub size_of_data_in_kb: usize,
}
impl IFCollectionOutputData {
pub fn from(internal: &IFInternalCollectionOutputData) -> Self {
Self {
nr_searched_files: internal.nr_searched_files,
nr_found_songs: internal.nr_found_songs,
nr_internal_duplicates: internal.nr_internal_duplicates,
size_of_data_in_kb: 0,
}
|
#[derive(Serialize, Deserialize, Debug)]
pub enum IPC {
DoneSearching(IFCollectionOutputData),
PublishSingleAudioDataRecord(AudioInfoKey, AudioInfo),
}
|
}
}
|
random_line_split
|
ipc.rs
|
//! IPC module will hold all massive (that is why IPC) internal messages
//! which occur due to data collection, its start and its end.
use super::{
audio_info::{AudioInfo, AudioInfoKey},
IFInternalCollectionOutputData,
};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct IFCollectionOutputData {
pub nr_searched_files: u32,
pub nr_found_songs: u32,
pub nr_internal_duplicates: u32,
pub size_of_data_in_kb: usize,
}
impl IFCollectionOutputData {
pub fn from(internal: &IFInternalCollectionOutputData) -> Self {
Self {
nr_searched_files: internal.nr_searched_files,
nr_found_songs: internal.nr_found_songs,
nr_internal_duplicates: internal.nr_internal_duplicates,
size_of_data_in_kb: 0,
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub enum
|
{
DoneSearching(IFCollectionOutputData),
PublishSingleAudioDataRecord(AudioInfoKey, AudioInfo),
}
|
IPC
|
identifier_name
|
ipc.rs
|
//! IPC module will hold all massive (that is why IPC) internal messages
//! which occur due to data collection, its start and its end.
use super::{
audio_info::{AudioInfo, AudioInfoKey},
IFInternalCollectionOutputData,
};
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct IFCollectionOutputData {
pub nr_searched_files: u32,
pub nr_found_songs: u32,
pub nr_internal_duplicates: u32,
pub size_of_data_in_kb: usize,
}
impl IFCollectionOutputData {
pub fn from(internal: &IFInternalCollectionOutputData) -> Self
|
}
#[derive(Serialize, Deserialize, Debug)]
pub enum IPC {
DoneSearching(IFCollectionOutputData),
PublishSingleAudioDataRecord(AudioInfoKey, AudioInfo),
}
|
{
Self {
nr_searched_files: internal.nr_searched_files,
nr_found_songs: internal.nr_found_songs,
nr_internal_duplicates: internal.nr_internal_duplicates,
size_of_data_in_kb: 0,
}
}
|
identifier_body
|
weighted_random_bools.rs
|
fn weighted_random_bools_helper(
p_numerator: u64,
p_denominator: u64,
expected_values: &[bool],
expected_common_values: &[(bool, usize)],
expected_median: (bool, Option<bool>),
) {
let xs = weighted_random_bools(EXAMPLE_SEED, p_numerator, p_denominator);
let values = xs.clone().take(20).collect_vec();
let common_values = common_values_map(1000000, 10, xs.clone());
let median = median(xs.take(1000000));
assert_eq!(
(values.as_slice(), common_values.as_slice(), median),
(expected_values, expected_common_values, expected_median)
);
}
#[test]
fn test_weighted_random_bools() {
// p = 0
weighted_random_bools_helper(0, 1, &[false; 20], &[(false, 1000000)], (false, None));
// p = 1
weighted_random_bools_helper(1, 1, &[true; 20], &[(true, 1000000)], (true, None));
// p = 1/2
weighted_random_bools_helper(
1,
2,
&[
false, true, true, true, false, false, false, true, false, false, false, false, true,
false, false, false, false, true, false, true,
],
&[(false, 500473), (true, 499527)],
(false, None),
);
// p = 1/51
weighted_random_bools_helper(
1,
51,
&[
false, false, false, false, false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
],
&[(false, 980406), (true, 19594)],
(false, None),
);
// w = 50/51
weighted_random_bools_helper(
50,
51,
&[
true, true, true, true, true, true, true, true, true, true, true, true, true, true,
true, true, false, true, true, true,
],
&[(true, 980602), (false, 19398)],
(true, None),
);
}
#[test]
#[should_panic]
fn weighted_random_bools_fail_1() {
weighted_random_bools(EXAMPLE_SEED, 1, 0);
}
#[test]
#[should_panic]
fn weighted_random_bools_fail_2() {
weighted_random_bools(EXAMPLE_SEED, 2, 1);
}
|
use itertools::Itertools;
use malachite_base::bools::random::weighted_random_bools;
use malachite_base::random::EXAMPLE_SEED;
use malachite_base_test_util::stats::common_values_map::common_values_map;
use malachite_base_test_util::stats::median;
|
random_line_split
|
|
weighted_random_bools.rs
|
use itertools::Itertools;
use malachite_base::bools::random::weighted_random_bools;
use malachite_base::random::EXAMPLE_SEED;
use malachite_base_test_util::stats::common_values_map::common_values_map;
use malachite_base_test_util::stats::median;
fn
|
(
p_numerator: u64,
p_denominator: u64,
expected_values: &[bool],
expected_common_values: &[(bool, usize)],
expected_median: (bool, Option<bool>),
) {
let xs = weighted_random_bools(EXAMPLE_SEED, p_numerator, p_denominator);
let values = xs.clone().take(20).collect_vec();
let common_values = common_values_map(1000000, 10, xs.clone());
let median = median(xs.take(1000000));
assert_eq!(
(values.as_slice(), common_values.as_slice(), median),
(expected_values, expected_common_values, expected_median)
);
}
#[test]
fn test_weighted_random_bools() {
// p = 0
weighted_random_bools_helper(0, 1, &[false; 20], &[(false, 1000000)], (false, None));
// p = 1
weighted_random_bools_helper(1, 1, &[true; 20], &[(true, 1000000)], (true, None));
// p = 1/2
weighted_random_bools_helper(
1,
2,
&[
false, true, true, true, false, false, false, true, false, false, false, false, true,
false, false, false, false, true, false, true,
],
&[(false, 500473), (true, 499527)],
(false, None),
);
// p = 1/51
weighted_random_bools_helper(
1,
51,
&[
false, false, false, false, false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
],
&[(false, 980406), (true, 19594)],
(false, None),
);
// w = 50/51
weighted_random_bools_helper(
50,
51,
&[
true, true, true, true, true, true, true, true, true, true, true, true, true, true,
true, true, false, true, true, true,
],
&[(true, 980602), (false, 19398)],
(true, None),
);
}
#[test]
#[should_panic]
fn weighted_random_bools_fail_1() {
weighted_random_bools(EXAMPLE_SEED, 1, 0);
}
#[test]
#[should_panic]
fn weighted_random_bools_fail_2() {
weighted_random_bools(EXAMPLE_SEED, 2, 1);
}
|
weighted_random_bools_helper
|
identifier_name
|
weighted_random_bools.rs
|
use itertools::Itertools;
use malachite_base::bools::random::weighted_random_bools;
use malachite_base::random::EXAMPLE_SEED;
use malachite_base_test_util::stats::common_values_map::common_values_map;
use malachite_base_test_util::stats::median;
fn weighted_random_bools_helper(
p_numerator: u64,
p_denominator: u64,
expected_values: &[bool],
expected_common_values: &[(bool, usize)],
expected_median: (bool, Option<bool>),
)
|
#[test]
fn test_weighted_random_bools() {
// p = 0
weighted_random_bools_helper(0, 1, &[false; 20], &[(false, 1000000)], (false, None));
// p = 1
weighted_random_bools_helper(1, 1, &[true; 20], &[(true, 1000000)], (true, None));
// p = 1/2
weighted_random_bools_helper(
1,
2,
&[
false, true, true, true, false, false, false, true, false, false, false, false, true,
false, false, false, false, true, false, true,
],
&[(false, 500473), (true, 499527)],
(false, None),
);
// p = 1/51
weighted_random_bools_helper(
1,
51,
&[
false, false, false, false, false, false, false, false, false, false, false, false,
false, false, false, false, false, false, false, false,
],
&[(false, 980406), (true, 19594)],
(false, None),
);
// w = 50/51
weighted_random_bools_helper(
50,
51,
&[
true, true, true, true, true, true, true, true, true, true, true, true, true, true,
true, true, false, true, true, true,
],
&[(true, 980602), (false, 19398)],
(true, None),
);
}
#[test]
#[should_panic]
fn weighted_random_bools_fail_1() {
weighted_random_bools(EXAMPLE_SEED, 1, 0);
}
#[test]
#[should_panic]
fn weighted_random_bools_fail_2() {
weighted_random_bools(EXAMPLE_SEED, 2, 1);
}
|
{
let xs = weighted_random_bools(EXAMPLE_SEED, p_numerator, p_denominator);
let values = xs.clone().take(20).collect_vec();
let common_values = common_values_map(1000000, 10, xs.clone());
let median = median(xs.take(1000000));
assert_eq!(
(values.as_slice(), common_values.as_slice(), median),
(expected_values, expected_common_values, expected_median)
);
}
|
identifier_body
|
wav.rs
|
use std::char;
use std::error::Error;
use std::fmt::{Debug, Formatter};
use std::fs::File;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::slice;
const RIFF: [u8; 4] = ['R' as u8, 'I' as u8, 'F' as u8, 'F' as u8];
const WAVE: [u8; 4] = ['W' as u8, 'A' as u8, 'V' as u8, 'E' as u8];
const FMT: [u8; 4] = ['f' as u8,'m' as u8, 't' as u8,'' as u8];
const DATA: [u8; 4] = ['d' as u8, 'a' as u8, 't' as u8, 'a' as u8];
#[repr(C)]
pub struct ChunkHeader {
pub id: [u8; 4], //Four bytes: "fmt ", "data", "fact", etc.
pub chunk_size: u32, //Length of header in bytes
}
impl ChunkHeader {
pub fn from_stream(file: &mut File) -> Result<ChunkHeader, ::std::io::Error> {
let mut chunk_header: ChunkHeader = ChunkHeader {
id: [0, 0, 0, 0],
chunk_size: 0,
};
let mut buffer = unsafe {
::std::slice::from_raw_parts_mut::<u8>(
mem::transmute(&mut chunk_header),
mem::size_of::<Self>())
};
try!(file.read(buffer));
Ok(chunk_header)
}
}
|
"ChunkHeader {{ id: \"{}{}{}{}\", chunk_size: {} }}",
char::from_u32(self.id[0] as u32).unwrap(),
char::from_u32(self.id[1] as u32).unwrap(),
char::from_u32(self.id[2] as u32).unwrap(),
char::from_u32(self.id[3] as u32).unwrap(),
self.chunk_size)
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct FormatChunk {
pub format: u16, //1 if uncompressed Microsoft PCM audio
pub channels: u16, //Number of channels
pub frames_per_second: u32, //Frequency of the audio in Hz
pub avg_bytes_per_second: u32, //For estimating RAM allocation
pub bytes_per_frame: u16, //Sample frame size in bytes
// pub bits_per_sample: u32, //Bits per sample // TODO: Handle cases that do have this field.
}
impl FormatChunk {
pub fn new() -> FormatChunk {
FormatChunk {
format: 0,
channels: 0,
frames_per_second: 0,
avg_bytes_per_second: 0,
bytes_per_frame: 0,
}
}
pub fn from_stream(file: &mut File, header: ChunkHeader) -> Result<FormatChunk, ::std::io::Error> {
assert_eq!(header.chunk_size as usize, mem::size_of::<Self>());
let mut chunk: Self = unsafe { mem::uninitialized() };
let mut buffer = unsafe {
let base_ptr: *mut u8 = mem::transmute(&mut chunk);
::std::slice::from_raw_parts_mut::<u8>(
base_ptr,
mem::size_of::<Self>())
};
try!(file.read(buffer));
Ok(chunk)
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct DataChunk {
pub samples: Vec<u16>, //16 bit signed data
}
impl DataChunk {
pub fn new() -> DataChunk {
DataChunk {
samples: Vec::new(),
}
}
pub fn from_stream(file: &mut File, header: ChunkHeader) -> Result<DataChunk, ::std::io::Error> {
const INPUT_BUFFER_SIZE_SAMPLES: usize = 2048;
const INPUT_BUFFER_SIZE_BYTES: usize = INPUT_BUFFER_SIZE_SAMPLES * 2;
// TODO: The header specifies how many bytes are in the data chunk, so we should
// pre-allocate a buffer large enough and read the entire data chunk at once.
let mut samples: Vec<u16> = Vec::new();
let mut byte_buffer: [u8; INPUT_BUFFER_SIZE_BYTES] = [0; INPUT_BUFFER_SIZE_BYTES];
loop {
let bytes_read = file.read(&mut byte_buffer)?;
// Convert the data read from the file from bytes to samples.
let samples_read = bytes_read / 2;
let sample_buffer = unsafe {
slice::from_raw_parts(byte_buffer.as_ptr() as *const u16, INPUT_BUFFER_SIZE_SAMPLES)
};
// Copy data from the buffer to `samples`.
samples.extend(&sample_buffer[0..samples_read]);
// If there are no more bytes to read then we're done.
if bytes_read < INPUT_BUFFER_SIZE_BYTES {
break;
}
}
assert_eq!(samples.len() * 2, header.chunk_size as usize);
Ok(DataChunk {
samples: samples,
})
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct Wave {
pub format: FormatChunk,
pub data: DataChunk,
}
impl Wave {
pub fn new() -> Wave {
Wave {
format: FormatChunk::new(),
data: DataChunk::new(),
}
}
pub fn from_file(path: &str) -> Result<Wave, ::std::io::Error> {
let mut wave = Wave::new();
let file_path = Path::new(path);
let mut file = match File::open(&file_path) {
// The `desc` field of `IoError` is a string that describes the error
Err(why) => panic!("couldn't open {}: {}", file_path.display(), Error::description(&why)),
Ok(file) => file,
};
let file_header = try!(ChunkHeader::from_stream(&mut file));
assert_eq!(file_header.id, RIFF);
let mut riff_type: [u8; 4] = [0, 0, 0, 0];
try!(file.read(&mut riff_type));
assert_eq!(riff_type, WAVE);
try!(wave.fill_chunk(&mut file));
try!(wave.fill_chunk(&mut file));
Ok(wave)
}
fn fill_chunk(&mut self, file: &mut File) -> Result<(), ::std::io::Error> {
let header = try!(ChunkHeader::from_stream(file));
match header.id {
FMT => {
let chunk = try!(FormatChunk::from_stream(file, header));
self.format = chunk;
},
DATA => {
self.data = try!(DataChunk::from_stream(file, header));
},
_ => panic!("unknow chunk header: {:?}", header),
}
Ok(())
}
}
|
impl Debug for ChunkHeader {
fn fmt(&self, f: &mut Formatter) -> Result<(), ::std::fmt::Error> {
write!(f,
|
random_line_split
|
wav.rs
|
use std::char;
use std::error::Error;
use std::fmt::{Debug, Formatter};
use std::fs::File;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::slice;
// Four-byte chunk/container identifiers from the RIFF/WAVE file format.
// Byte-string-style literals replace the previous `'x' as u8` casts; the
// FMT id previously contained an invalid empty char literal where the
// trailing space of "fmt " belongs.
const RIFF: [u8; 4] = [b'R', b'I', b'F', b'F'];
const WAVE: [u8; 4] = [b'W', b'A', b'V', b'E'];
const FMT: [u8; 4] = [b'f', b'm', b't', b' '];
const DATA: [u8; 4] = [b'd', b'a', b't', b'a'];
#[repr(C)]
pub struct ChunkHeader {
    /// Chunk identifier, four ASCII bytes: "fmt ", "data", "fact", etc.
    pub id: [u8; 4],
    /// Length in bytes of the chunk's *payload*, not of this header — see
    /// the size assertions in `FormatChunk::from_stream` / `DataChunk::from_stream`.
    pub chunk_size: u32,
}
impl ChunkHeader {
    /// Reads an 8-byte chunk header (4-byte id + u32 size) from `file`.
    ///
    /// # Errors
    ///
    /// Returns an I/O error if the stream ends before the full header is read
    /// (`read_exact` replaces a plain `read`, which could legally return fewer
    /// bytes and leave the header partially filled).
    pub fn from_stream(file: &mut File) -> Result<ChunkHeader, ::std::io::Error> {
        let mut chunk_header: ChunkHeader = ChunkHeader {
            id: [0, 0, 0, 0],
            chunk_size: 0,
        };
        // View the header struct as raw bytes so it can be filled directly
        // from the stream. A plain pointer cast replaces the previous
        // `mem::transmute` — same bytes, clearer intent.
        // NOTE(review): the u32 is read in host byte order; RIFF sizes are
        // little-endian, so this assumes a little-endian host — TODO confirm.
        let buffer = unsafe {
            ::std::slice::from_raw_parts_mut::<u8>(
                &mut chunk_header as *mut ChunkHeader as *mut u8,
                mem::size_of::<Self>())
        };
        try!(file.read_exact(buffer));
        Ok(chunk_header)
    }
}
impl Debug for ChunkHeader {
    /// Formats the header as `ChunkHeader { id: "XXXX", chunk_size: N }`.
    ///
    /// `u8 as char` replaces `char::from_u32(..).unwrap()`: every u8 is a
    /// valid Unicode scalar, so the cast is infallible and yields the same
    /// character without a panic path.
    fn fmt(&self, f: &mut Formatter) -> Result<(), ::std::fmt::Error> {
        write!(f,
               "ChunkHeader {{ id: \"{}{}{}{}\", chunk_size: {} }}",
               self.id[0] as char,
               self.id[1] as char,
               self.id[2] as char,
               self.id[3] as char,
               self.chunk_size)
    }
}
#[repr(C)]
#[derive(Debug, Clone)]
/// In-memory mirror of a WAVE "fmt " chunk body. It is filled by reading raw
/// bytes over the struct, so field order and types must match the on-disk
/// layout exactly (hence `#[repr(C)]`).
pub struct FormatChunk {
    pub format: u16, //1 if uncompressed Microsoft PCM audio
    pub channels: u16, //Number of channels
    pub frames_per_second: u32, //Frequency of the audio in Hz
    pub avg_bytes_per_second: u32, //For estimating RAM allocation
    pub bytes_per_frame: u16, //Sample frame size in bytes
    // pub bits_per_sample: u32, //Bits per sample // TODO: Handle cases that do have this field.
}
impl FormatChunk {
pub fn new() -> FormatChunk {
FormatChunk {
format: 0,
channels: 0,
frames_per_second: 0,
avg_bytes_per_second: 0,
bytes_per_frame: 0,
}
}
pub fn
|
(file: &mut File, header: ChunkHeader) -> Result<FormatChunk, ::std::io::Error> {
assert_eq!(header.chunk_size as usize, mem::size_of::<Self>());
let mut chunk: Self = unsafe { mem::uninitialized() };
let mut buffer = unsafe {
let base_ptr: *mut u8 = mem::transmute(&mut chunk);
::std::slice::from_raw_parts_mut::<u8>(
base_ptr,
mem::size_of::<Self>())
};
try!(file.read(buffer));
Ok(chunk)
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct DataChunk {
    // NOTE(review): declared `u16` but the trailing comment says "signed";
    // 16-bit WAVE PCM is conventionally i16 — confirm whether consumers
    // reinterpret these bits before arithmetic.
    pub samples: Vec<u16>, //16 bit signed data
}
impl DataChunk {
    /// Creates an empty data chunk with no samples.
    pub fn new() -> DataChunk {
        DataChunk {
            samples: Vec::new(),
        }
    }

    /// Reads the "data" chunk body described by `header` from `file`.
    ///
    /// # Panics
    ///
    /// Panics if the number of bytes actually read does not match
    /// `header.chunk_size`.
    pub fn from_stream(file: &mut File, header: ChunkHeader) -> Result<DataChunk, ::std::io::Error> {
        const INPUT_BUFFER_SIZE_SAMPLES: usize = 2048;
        const INPUT_BUFFER_SIZE_BYTES: usize = INPUT_BUFFER_SIZE_SAMPLES * 2;
        // Preallocate from the header's byte count so the sample vector does
        // not repeatedly grow-and-copy while streaming.
        let mut samples: Vec<u16> = Vec::with_capacity(header.chunk_size as usize / 2);
        let mut byte_buffer: [u8; INPUT_BUFFER_SIZE_BYTES] = [0; INPUT_BUFFER_SIZE_BYTES];
        loop {
            let bytes_read = file.read(&mut byte_buffer)?;
            // Decode little-endian u16 samples pairwise. This replaces an
            // unsafe cast of the byte buffer to `&[u16]`, which was unsound:
            // a `[u8; N]` has no 2-byte alignment guarantee, and the old code
            // built the slice over the whole buffer regardless of how many
            // bytes were actually read.
            for pair in byte_buffer[..bytes_read / 2 * 2].chunks(2) {
                samples.push(pair[0] as u16 | (pair[1] as u16) << 8);
            }
            // A short read means we've reached the end of the stream.
            if bytes_read < INPUT_BUFFER_SIZE_BYTES {
                break;
            }
        }
        assert_eq!(samples.len() * 2, header.chunk_size as usize);
        Ok(DataChunk {
            samples: samples,
        })
    }
}
#[repr(C)]
#[derive(Debug, Clone)]
/// A parsed WAVE file: the format description plus the raw sample data.
pub struct Wave {
    pub format: FormatChunk, // contents of the "fmt " chunk
    pub data: DataChunk,     // contents of the "data" chunk
}
impl Wave {
    /// Creates an empty wave: zeroed format, no samples.
    pub fn new() -> Wave {
        Wave {
            format: FormatChunk::new(),
            data: DataChunk::new(),
        }
    }

    /// Loads a RIFF/WAVE file from `path`, reading its "fmt " and "data" chunks.
    ///
    /// # Errors
    ///
    /// Returns any I/O error encountered. (Previously a failed `File::open`
    /// panicked even though the function already returns `io::Error`; it is
    /// now propagated to the caller.)
    ///
    /// # Panics
    ///
    /// Panics if the file is not a RIFF/WAVE container or contains an
    /// unrecognized chunk id.
    pub fn from_file(path: &str) -> Result<Wave, ::std::io::Error> {
        let mut wave = Wave::new();
        let file_path = Path::new(path);
        let mut file = try!(File::open(&file_path));
        let file_header = try!(ChunkHeader::from_stream(&mut file));
        assert_eq!(file_header.id, RIFF);
        let mut riff_type: [u8; 4] = [0, 0, 0, 0];
        // read_exact: a plain `read` may legally return fewer than 4 bytes.
        try!(file.read_exact(&mut riff_type));
        assert_eq!(riff_type, WAVE);
        // The files handled here carry exactly two chunks: "fmt " then "data".
        try!(wave.fill_chunk(&mut file));
        try!(wave.fill_chunk(&mut file));
        Ok(wave)
    }

    /// Reads one chunk header and dispatches to the matching chunk parser.
    fn fill_chunk(&mut self, file: &mut File) -> Result<(), ::std::io::Error> {
        let header = try!(ChunkHeader::from_stream(file));
        match header.id {
            FMT => {
                let chunk = try!(FormatChunk::from_stream(file, header));
                self.format = chunk;
            },
            DATA => {
                self.data = try!(DataChunk::from_stream(file, header));
            },
            // Typo fix: "unknow" -> "unknown".
            _ => panic!("unknown chunk header: {:?}", header),
        }
        Ok(())
    }
}
|
from_stream
|
identifier_name
|
wav.rs
|
use std::char;
use std::error::Error;
use std::fmt::{Debug, Formatter};
use std::fs::File;
use std::io::prelude::*;
use std::mem;
use std::path::Path;
use std::slice;
const RIFF: [u8; 4] = ['R' as u8, 'I' as u8, 'F' as u8, 'F' as u8];
const WAVE: [u8; 4] = ['W' as u8, 'A' as u8, 'V' as u8, 'E' as u8];
const FMT: [u8; 4] = ['f' as u8,'m' as u8, 't' as u8,'' as u8];
const DATA: [u8; 4] = ['d' as u8, 'a' as u8, 't' as u8, 'a' as u8];
#[repr(C)]
pub struct ChunkHeader {
pub id: [u8; 4], //Four bytes: "fmt ", "data", "fact", etc.
pub chunk_size: u32, //Length of header in bytes
}
impl ChunkHeader {
pub fn from_stream(file: &mut File) -> Result<ChunkHeader, ::std::io::Error> {
let mut chunk_header: ChunkHeader = ChunkHeader {
id: [0, 0, 0, 0],
chunk_size: 0,
};
let mut buffer = unsafe {
::std::slice::from_raw_parts_mut::<u8>(
mem::transmute(&mut chunk_header),
mem::size_of::<Self>())
};
try!(file.read(buffer));
Ok(chunk_header)
}
}
impl Debug for ChunkHeader {
fn fmt(&self, f: &mut Formatter) -> Result<(), ::std::fmt::Error> {
write!(f,
"ChunkHeader {{ id: \"{}{}{}{}\", chunk_size: {} }}",
char::from_u32(self.id[0] as u32).unwrap(),
char::from_u32(self.id[1] as u32).unwrap(),
char::from_u32(self.id[2] as u32).unwrap(),
char::from_u32(self.id[3] as u32).unwrap(),
self.chunk_size)
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct FormatChunk {
pub format: u16, //1 if uncompressed Microsoft PCM audio
pub channels: u16, //Number of channels
pub frames_per_second: u32, //Frequency of the audio in Hz
pub avg_bytes_per_second: u32, //For estimating RAM allocation
pub bytes_per_frame: u16, //Sample frame size in bytes
// pub bits_per_sample: u32, //Bits per sample // TODO: Handle cases that do have this field.
}
impl FormatChunk {
pub fn new() -> FormatChunk {
FormatChunk {
format: 0,
channels: 0,
frames_per_second: 0,
avg_bytes_per_second: 0,
bytes_per_frame: 0,
}
}
pub fn from_stream(file: &mut File, header: ChunkHeader) -> Result<FormatChunk, ::std::io::Error> {
assert_eq!(header.chunk_size as usize, mem::size_of::<Self>());
let mut chunk: Self = unsafe { mem::uninitialized() };
let mut buffer = unsafe {
let base_ptr: *mut u8 = mem::transmute(&mut chunk);
::std::slice::from_raw_parts_mut::<u8>(
base_ptr,
mem::size_of::<Self>())
};
try!(file.read(buffer));
Ok(chunk)
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct DataChunk {
pub samples: Vec<u16>, //16 bit signed data
}
impl DataChunk {
pub fn new() -> DataChunk {
DataChunk {
samples: Vec::new(),
}
}
pub fn from_stream(file: &mut File, header: ChunkHeader) -> Result<DataChunk, ::std::io::Error> {
const INPUT_BUFFER_SIZE_SAMPLES: usize = 2048;
const INPUT_BUFFER_SIZE_BYTES: usize = INPUT_BUFFER_SIZE_SAMPLES * 2;
// TODO: The header specifies how many bytes are in the data chunk, so we should
// pre-allocate a buffer large enough and read the entire data chunk at once.
let mut samples: Vec<u16> = Vec::new();
let mut byte_buffer: [u8; INPUT_BUFFER_SIZE_BYTES] = [0; INPUT_BUFFER_SIZE_BYTES];
loop {
let bytes_read = file.read(&mut byte_buffer)?;
// Convert the data read from the file from bytes to samples.
let samples_read = bytes_read / 2;
let sample_buffer = unsafe {
slice::from_raw_parts(byte_buffer.as_ptr() as *const u16, INPUT_BUFFER_SIZE_SAMPLES)
};
// Copy data from the buffer to `samples`.
samples.extend(&sample_buffer[0..samples_read]);
// If there are no more bytes to read then we're done.
if bytes_read < INPUT_BUFFER_SIZE_BYTES {
break;
}
}
assert_eq!(samples.len() * 2, header.chunk_size as usize);
Ok(DataChunk {
samples: samples,
})
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct Wave {
pub format: FormatChunk,
pub data: DataChunk,
}
impl Wave {
pub fn new() -> Wave {
Wave {
format: FormatChunk::new(),
data: DataChunk::new(),
}
}
pub fn from_file(path: &str) -> Result<Wave, ::std::io::Error> {
let mut wave = Wave::new();
let file_path = Path::new(path);
let mut file = match File::open(&file_path) {
// The `desc` field of `IoError` is a string that describes the error
Err(why) => panic!("couldn't open {}: {}", file_path.display(), Error::description(&why)),
Ok(file) => file,
};
let file_header = try!(ChunkHeader::from_stream(&mut file));
assert_eq!(file_header.id, RIFF);
let mut riff_type: [u8; 4] = [0, 0, 0, 0];
try!(file.read(&mut riff_type));
assert_eq!(riff_type, WAVE);
try!(wave.fill_chunk(&mut file));
try!(wave.fill_chunk(&mut file));
Ok(wave)
}
fn fill_chunk(&mut self, file: &mut File) -> Result<(), ::std::io::Error> {
let header = try!(ChunkHeader::from_stream(file));
match header.id {
FMT =>
|
,
DATA => {
self.data = try!(DataChunk::from_stream(file, header));
},
_ => panic!("unknow chunk header: {:?}", header),
}
Ok(())
}
}
|
{
let chunk = try!(FormatChunk::from_stream(file, header));
self.format = chunk;
}
|
conditional_block
|
rustc.rs
|
use std::path::Path;
use util::{self, CargoResult, internal, ChainError};
pub struct Rustc {
    /// Full stdout of `rustc -vV`.
    pub verbose_version: String,
    /// Host target triple, parsed from the `host:` line of `verbose_version`.
    pub host: String,
    /// Whether this compiler accepted the `--cap-lints` flag when probed.
    pub cap_lints: bool,
}
impl Rustc {
/// Run the compiler at `path` to learn varioues pieces of information about
/// it.
///
/// If successful this function returns a description of the compiler along
/// with a list of its capabilities.
pub fn new<P: AsRef<Path>>(path: P) -> CargoResult<Rustc>
|
}));
triple.to_string()
};
Ok(ret)
}
pub fn blank() -> Rustc {
Rustc {
verbose_version: String::new(),
host: String::new(),
cap_lints: false,
}
}
}
|
{
let mut cmd = try!(util::process(path.as_ref()));
cmd.arg("-vV");
let mut ret = Rustc::blank();
let mut first = cmd.clone();
first.arg("--cap-lints").arg("allow");
let output = match first.exec_with_output() {
Ok(output) => { ret.cap_lints = true; output }
Err(..) => try!(cmd.exec_with_output()),
};
ret.verbose_version = try!(String::from_utf8(output.stdout).map_err(|_| {
internal("rustc -v didn't return utf8 output")
}));
ret.host = {
let triple = ret.verbose_version.lines().filter(|l| {
l.starts_with("host: ")
}).map(|l| &l[6..]).next();
let triple = try!(triple.chain_error(|| {
internal("rustc -v didn't have a line for `host:`")
|
identifier_body
|
rustc.rs
|
use std::path::Path;
use util::{self, CargoResult, internal, ChainError};
pub struct Rustc {
pub verbose_version: String,
pub host: String,
pub cap_lints: bool,
}
impl Rustc {
    /// Run the compiler at `path` to learn various pieces of information about
    /// it.
    ///
    /// If successful this function returns a description of the compiler along
    /// with a list of its capabilities.
    pub fn new<P: AsRef<Path>>(path: P) -> CargoResult<Rustc> {
        let mut cmd = try!(util::process(path.as_ref()));
        cmd.arg("-vV");
        let mut ret = Rustc::blank();
        // Probe for `--cap-lints` support: try `-vV --cap-lints allow` first;
        // if the compiler rejects the flag (an older rustc), fall back to the
        // plain `-vV` invocation and leave `cap_lints` false.
        let mut first = cmd.clone();
        first.arg("--cap-lints").arg("allow");
        let output = match first.exec_with_output() {
            Ok(output) => { ret.cap_lints = true; output }
            Err(..) => try!(cmd.exec_with_output()),
        };
        ret.verbose_version = try!(String::from_utf8(output.stdout).map_err(|_| {
            internal("rustc -v didn't return utf8 output")
        }));
        // Extract the host triple from the `host: <triple>` line of the
        // verbose version output; error out if no such line exists.
        ret.host = {
            let triple = ret.verbose_version.lines().filter(|l| {
                l.starts_with("host: ")
            }).map(|l| &l[6..]).next();
            let triple = try!(triple.chain_error(|| {
                internal("rustc -v didn't have a line for `host:`")
            }));
            triple.to_string()
        };
        Ok(ret)
    }
pub fn
|
() -> Rustc {
Rustc {
verbose_version: String::new(),
host: String::new(),
cap_lints: false,
}
}
}
|
blank
|
identifier_name
|
rustc.rs
|
use std::path::Path;
use util::{self, CargoResult, internal, ChainError};
pub struct Rustc {
pub verbose_version: String,
pub host: String,
pub cap_lints: bool,
}
impl Rustc {
/// Run the compiler at `path` to learn varioues pieces of information about
/// it.
///
/// If successful this function returns a description of the compiler along
/// with a list of its capabilities.
pub fn new<P: AsRef<Path>>(path: P) -> CargoResult<Rustc> {
let mut cmd = try!(util::process(path.as_ref()));
cmd.arg("-vV");
let mut ret = Rustc::blank();
let mut first = cmd.clone();
first.arg("--cap-lints").arg("allow");
let output = match first.exec_with_output() {
Ok(output) => { ret.cap_lints = true; output }
Err(..) => try!(cmd.exec_with_output()),
};
ret.verbose_version = try!(String::from_utf8(output.stdout).map_err(|_| {
|
}).map(|l| &l[6..]).next();
let triple = try!(triple.chain_error(|| {
internal("rustc -v didn't have a line for `host:`")
}));
triple.to_string()
};
Ok(ret)
}
pub fn blank() -> Rustc {
Rustc {
verbose_version: String::new(),
host: String::new(),
cap_lints: false,
}
}
}
|
internal("rustc -v didn't return utf8 output")
}));
ret.host = {
let triple = ret.verbose_version.lines().filter(|l| {
l.starts_with("host: ")
|
random_line_split
|
scheduler.rs
|
//! The main scheduler logic.
//!
//! The scheduler is implemented as a singleton in order to make it easy for code anywhere in the
//! project to make use of async functionality. The actual scheduler instance is not publicly
//! accessible, instead we use various standalone functions like `start()` and `wait_for()` to
//! safely manage access to the scheduler.
//!
//! # Scheduling Work
//!
//! Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
//! result of the work. Use `Async::await()` to suspend the current fiber until the work completes
//! and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
//! the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
//!
//! # Sharing Data Between Work
//!
//! Unlike with `std::thread::spawn()`, it's possible for work started with `scheduler::start()`
//! to borrow data from the caller:
//!
//! ```
//!
//! ```
use fiber::{self, Fiber, FiberId};
use cell_extras::AtomicInitCell;
use std::boxed::FnBox;
use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt::{self, Debug, Formatter};
use std::marker::PhantomData;
use std::mem;
use std::sync::{Condvar, Mutex, Once, ONCE_INIT};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Receiver};
use stopwatch;
const DEFAULT_STACK_SIZE: usize = 64 * 1024;
static CONDVAR: AtomicInitCell<Condvar> = AtomicInitCell::new();
static INSTANCE: AtomicInitCell<Mutex<Scheduler>> = AtomicInitCell::new();
static INSTANCE_INIT: Once = ONCE_INIT;
static WORK_COUNTER: AtomicUsize = AtomicUsize::new(1);
/// Represents the result of a computation that may finish at some point in the future.
///
/// Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
/// result of the work. Use `Async::await()` to suspend the current fiber until the work completes
/// and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
/// the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
///
/// # Sharing Data Across Work
///
/// It's possible to share borrowed data with a work unit: the `'a` lifetime ties this handle to
/// the borrow, and the blocking `Drop` impl keeps the work from outliving the borrowed data.
#[derive(Debug)]
pub struct Async<'a, T> {
    work: WorkId,                       // identifies the scheduled work unit
    receiver: Receiver<T>,              // carries the work's result back to `await()`
    _phantom: PhantomData<&'a FnMut()>, // anchors `'a` without storing the closure itself
}
impl<'a, T> Async<'a, T> {
    /// Suspend the current fiber until the async operation finishes.
    ///
    /// Panics if the work completed without sending a result on the channel.
    pub fn await(self) -> T {
        // Bind the result in an inner scope before `self` is dropped at the
        // end of this function: `Drop for Async` awaits the work again, which
        // must happen only after the result has been received.
        let result = {
            let Async { work, ref receiver,.. } = self;
            work.await();
            receiver.try_recv().expect("Failed to receive result of async computation")
        };
        result
    }

    /// Returns the identifier of the underlying work unit.
    pub fn work_id(&self) -> WorkId {
        self.work
    }
}
impl<T> Async<'static, T> {
    /// Discards this handle without blocking on the work.
    ///
    /// `mem::forget` skips the `Drop` impl (which would await the work). This
    /// is only offered for `'static` work, which borrows no caller data that
    /// could be invalidated while the work keeps running.
    pub fn forget(self) {
        mem::forget(self);
    }
}
impl<'a, T> Drop for Async<'a, T> {
    // Block (suspend the current fiber) until the work completes, so data
    // borrowed for lifetime 'a is never freed while the work still uses it.
    fn drop(&mut self) {
        self.work.await();
    }
}
/// A shareable reference to a work unit, counterpart to `Async<T>`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WorkId(usize);
impl WorkId {
    /// Suspends the current fiber until this work unit has completed.
    ///
    /// If the work unit has already finished then `await()` will return immediately.
    pub fn await(self) {
        // Register the current fiber as waiting on this work; the scheduler
        // reports whether the work is still in flight.
        let must_wait = Scheduler::with(|scheduler| scheduler.add_dependency(self));
        if must_wait {
            suspend();
        }
    }
}
/// Initializes a newly-spawned worker thread.
///
/// Prepares the worker thread by initializing it for Fiber usage. Call once
/// per worker thread before scheduling or awaiting work on it.
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn init_thread() {
    // Make sure the scheduler is initialized before first use.
    Scheduler::with(|_| {});
    // Manually convert current thread into a fiber because reasons.
    fiber::init();
}
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
/// Turns the current thread into a worker: initializes fiber support and then
/// runs the scheduler's work loop. Does not return (`fiber_routine` loops forever).
pub fn run_wait_fiber() {
    // Make sure the scheduler is initialized before first use.
    Scheduler::with(|_| {});
    // Setup this thread for running fibers and create an initial fiber for it. This will become
    // the wait fiber for this thread.
    fiber::init();
    fiber_routine();
}
pub fn start<'a, F, T>(func: F) -> Async<'a, T>
where
F: FnOnce() -> T,
F: 'a + Send,
T: 'a + Send,
{
// Normally we can't box a closure with a non'static lifetime because it could outlive its
// borrowed data. In this case the lifetime parameter on the returned `Async` ensures that
// the closure can't outlive the borrowed data, so we use this evil magic to convince the
// compiler to allow us to box the closure.
unsafe fn erase_lifetime<'a, F>(func: F) -> Box<FnBox()>
where
F: FnOnce(),
F: 'a + Send,
{
let boxed_proc = Box::new(func);
let proc_ptr = Box::into_raw(boxed_proc) as *mut FnBox();
Box::from_raw(::std::mem::transmute(proc_ptr))
}
// Create the channel that'll be used to send the result of the operation to the `Async` object.
let (sender, receiver) = mpsc::sync_channel(1);
let work_id = WorkId(WORK_COUNTER.fetch_add(1, Ordering::Relaxed));
let work_proc = unsafe {
|
Scheduler::with(move |scheduler| scheduler.schedule_work(Work {
func: work_proc,
id: work_id,
}));
Async {
work: work_id,
receiver: receiver,
_phantom: PhantomData,
}
}
/// Suspends the current fiber and makes the wait fiber active.
///
/// Generally you shouldn't need to call this directly, but if you have one piece of code that
/// runs synchronously for a long time you can use `suspend()` to yield time to other work.
pub fn suspend() {
    // Pick (or create) the fiber to switch to, *outside* of `Scheduler::with`
    // so the scheduler mutex is not held across the fiber switch.
    let next_fiber = Scheduler::with(|scheduler| scheduler.next_fiber());
    let suspended = unsafe { next_fiber.resume() };
    // Execution reaches here only once some other fiber resumes this one;
    // `suspended` is then the fiber we came back from.
    stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
    Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
}
/// Worker loop run by every scheduler fiber: repeatedly pull the next unit of
/// work (or a ready fiber to resume), and block on the condvar when idle.
/// Never returns.
fn fiber_routine() ->! {
    loop {
        match Scheduler::with(|scheduler| scheduler.next()) {
            // Fresh work: record it as started, run the closure, then mark it
            // finished so dependents can be woken.
            Some(NextWork::Work(Work { func, id })) => {
                Scheduler::with(|scheduler| scheduler.start_work(id));
                func();
                Scheduler::with(|scheduler| scheduler.finish_work(id));
            },
            // A previously-suspended fiber is ready: switch to it. Control
            // returns here when some fiber later resumes this one.
            Some(NextWork::Fiber(fiber)) => {
                let suspended = unsafe { fiber.resume() };
                stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
                Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
            },
            None => {
                // If there's no new work and no fibers ready to run then we want to block the
                // thread until some becomes available.
                let mutex = INSTANCE.borrow();
                let condvar = CONDVAR.borrow();
                let _ = condvar
                    .wait(mutex.lock().expect("Scheduler mutex was poisoned"))
                    .expect("Scheduler mutex was poisoned");
            },
        }
    }
}
/// A unit of scheduled work: the boxed closure to run plus its public id.
struct Work {
    func: Box<FnBox()>, // the work itself; FnBox allows calling a boxed FnOnce
    id: WorkId,         // handle that `Async`/`WorkId::await` wait on
}
impl Debug for Work {
    // Manual impl: the boxed closure in `func` has no Debug representation,
    // so only the work id is shown.
    fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
        write!(formatter, "Work {{ id: {:?} }}", self.id)
    }
}
/// What a worker fiber should do next: start a fresh unit of work, or resume
/// a previously-suspended fiber.
enum NextWork {
    Work(Work),
    Fiber(Fiber),
}
struct Scheduler {
    /// Work units that are currently pending or in progress.
    current_work: HashSet<WorkId>,
    /// Maps each fiber that is actively executing work to that work's id.
    work_map: HashMap<FiberId, WorkId>,
    /// New units of work that haven't been started on a fiber yet.
    ///
    /// These are ready to be made active at any time.
    new_work: VecDeque<Work>,
    /// Fibers that have no pending dependencies.
    ///
    /// These are ready to be made active at any time.
    ready_fibers: VecDeque<Fiber>,
    /// A map specifying which pending fibers depend on which others.
    ///
    /// Once all of a fiber's dependencies complete it should be moved to `new_work`.
    /// The `Option<Fiber>` is filled in by `handle_suspended` once the fiber
    /// has actually been switched away from.
    dependencies: HashMap<FiberId, (Option<Fiber>, HashSet<WorkId>)>,
    // TODO: Should we distinguise between "finished" and "ready" fibers? My intuition is that we'd
    // want to give fibers that actively have work CPU time before we resume fibers that would be
    // pulling new work, but maybe not? If we threw them all into one queue I guess the worst case
    // scenario would be there's no work left, a bunch of empty fibers, and only a few fibers with
    // active work. In which case we might have to cycle through a bunch of fibers before we can
    // start doing actual work.
    finished: VecDeque<Fiber>,
}
unsafe impl Send for Scheduler {}
impl Scheduler {
    /// Provides safe access to the scheduler instance.
    ///
    /// # Fiber Switches
    ///
    /// Note that it is an error to call `Fiber::make_active()` within `func`. Doing so will cause
    /// the `Mutex` guard on the instance to never unlock, making the scheduler instance
    /// inaccessible. All standalone functions that access the scheduler and wish to switch fibers
    /// should use `Scheduler::next()` to return the next fiber from `with()` and then call
    /// `make_active()` *after* `with()` has returned.
    fn with<F, T>(func: F) -> T
        where F: FnOnce(&mut Scheduler) -> T
    {
        // First caller initializes the singleton scheduler and its condvar,
        // exactly once across all threads.
        INSTANCE_INIT.call_once(|| {
            let scheduler = Scheduler {
                current_work: HashSet::new(),
                work_map: HashMap::new(),
                new_work: VecDeque::new(),
                ready_fibers: VecDeque::new(),
                dependencies: HashMap::new(),
                finished: VecDeque::new(),
            };
            INSTANCE.init(Mutex::new(scheduler));
            CONDVAR.init(Condvar::new());
        });
        let instance = INSTANCE.borrow();
        let mut guard = instance.lock().expect("Scheduler mutex was poisoned");
        func(&mut *guard)
    }

    /// Add a new unit of work to the pending queue.
    fn schedule_work(&mut self, work: Work) {
        assert!(self.current_work.insert(work.id), "Work's ID was already present in current work set");
        self.new_work.push_back(work);
        // Wake one worker that may be blocked in `fiber_routine`'s idle wait.
        CONDVAR.borrow().notify_one();
    }

    /// Adds `dependency` as a dependency of the currently running fiber.
    ///
    /// Returns `true` if work is still in progress and was added as a dependency, false otherwise
    /// (i.e. the work already finished, so the caller need not suspend).
    fn add_dependency(&mut self, dependency: WorkId) -> bool {
        let pending = fiber::current().unwrap();
        if self.current_work.contains(&dependency) {
            debug_assert!(
                !self.dependencies.contains_key(&pending),
                "Marking a fiber as pending but it is already pending: {:?}",
                pending,
            );
            // Add `pending` to set of pending fibers and list `dependencies` as dependencies.
            // The `Fiber` slot starts as `None`; `handle_suspended` fills it
            // in once the fiber has actually been switched away from.
            let &mut (_, ref mut dependencies_set) =
                self.dependencies
                .entry(pending)
                .or_insert((None, HashSet::new()));
            // Add `fibers` to the list of ready fibers.
            dependencies_set.insert(dependency);
            true
        } else {
            false
        }
    }

    /// Records that the current fiber has begun executing `new_work`.
    fn start_work(&mut self, new_work: WorkId) {
        debug_assert!(self.current_work.contains(&new_work), "Work ID was not in current work set");
        let current = fiber::current().unwrap();
        self.work_map.insert(current, new_work);
    }

    /// Removes the specified unit of work from the scheduler, updating any dependent work.
    fn finish_work(&mut self, finished_work: WorkId) {
        // Iterate over all suspended work units, removing `finished_work` as a dependency where
        // necessary. If any of the work units no longer have dependencies then they are moved to
        // the ready queue below.
        let mut ready = Vec::new();
        for (&pending_fiber, &mut (_, ref mut dependencies)) in &mut self.dependencies {
            dependencies.remove(&finished_work);
            if dependencies.len() == 0 {
                ready.push(pending_fiber);
            }
        }
        for ready_work in ready {
            let (maybe_fiber, _) = self.dependencies.remove(&ready_work).unwrap();
            // `None` here means the dependent fiber hasn't been suspended yet;
            // `handle_suspended` will route it to the ready queue instead.
            if let Some(ready_fiber) = maybe_fiber {
                self.ready_fibers.push_back(ready_fiber);
                CONDVAR.borrow().notify_one();
            }
        }
        let fiber = fiber::current().unwrap();
        assert!(self.current_work.remove(&finished_work), "{:?} wasn't in current work set when it finished", finished_work);
        assert!(self.work_map.remove(&fiber).is_some(), "{:?} didn't have {:?} associated in the work map", fiber, finished_work);
    }

    /// Performs the necessary bookkeeping when a fiber becomes active.
    fn handle_suspended(&mut self, suspended: Fiber) {
        // If the suspended fiber has dependencies then update the dependencies map with the
        // actual fiber, that way when its dependencies complete it can be resumed. Otherwise, the
        // fiber is done and ready to take on more work. This means that we need to make sure that
        // we always call `add_dependencies()` before suspending a fiber, otherwise a fiber could
        // be marked as done before it's ready.
        if let Some(&mut (ref mut none_fiber, _)) = self.dependencies.get_mut(&suspended.id()) {
            debug_assert!(none_fiber.is_none(), "Dependencies map already had a fiber assicated with fiber ID");
            mem::replace(none_fiber, Some(suspended));
        } else if self.work_map.contains_key(&suspended.id()) {
            // Fiber still has in-progress work: keep it in the ready queue.
            self.ready_fibers.push_back(suspended);
        } else {
            // No dependencies and no work: the fiber is idle and reusable.
            self.finished.push_back(suspended);
        }
    }

    /// Gets the next ready fiber, or creates a new one if necessary.
    fn next_fiber(&mut self) -> Fiber {
        self.ready_fibers.pop_front()
            .or_else(|| self.finished.pop_front())
            .unwrap_or_else(|| {
                // No existing fiber available: spin up a brand new worker
                // fiber that will enter the scheduler loop when first resumed.
                fn fiber_proc(suspended: Fiber) ->! {
                    stopwatch::switch_context(suspended.id(), fiber::current().unwrap())
;
                    // The current fiber has been resumed. Let the scheduler know that the previous fiber is no
                    // longer active.
                    Scheduler::with(|scheduler| scheduler.handle_suspended(suspended));
                    fiber_routine();
                }
                Fiber::new(DEFAULT_STACK_SIZE, fiber_proc)
            })
    }

    /// Gets the next available work for a thread, either a new unit of work or a ready fiber.
    ///
    /// Prioritizes new work over pending fibers, and will only return ready fibers that already
    /// have work. To get *any* next fiber, including ones without active work or a new one if no
    /// existing fibers are available, use `next_fiber()`.
    fn next(&mut self) -> Option<NextWork> {
        if let Some(work) = self.new_work.pop_front() {
            Some(NextWork::Work(work))
        } else {
            self.ready_fibers.pop_front().map(|fiber| NextWork::Fiber(fiber))
        }
    }
}
|
erase_lifetime(move || {
let result = func();
sender.try_send(result).expect("Failed to send async result");
})
};
|
random_line_split
|
scheduler.rs
|
//! The main scheduler logic.
//!
//! The scheduler is implemented as a singleton in order to make it easy for code anywhere in the
//! project to make use of async functionality. The actual scheduler instance is not publicly
//! accessible, instead we use various standalone functions like `start()` and `wait_for()` to
//! safely manage access to the scheduler.
//!
//! # Scheduling Work
//!
//! Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
//! result of the work. Use `Async::await()` to suspend the current fiber until the work completes
//! and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
//! the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
//!
//! # Sharing Data Between Work
//!
//! Unlike with `std::thread::spawn()`, it's possible for work started with `scheduler::start()`
//! to borrow data from the caller:
//!
//! ```
//!
//! ```
use fiber::{self, Fiber, FiberId};
use cell_extras::AtomicInitCell;
use std::boxed::FnBox;
use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt::{self, Debug, Formatter};
use std::marker::PhantomData;
use std::mem;
use std::sync::{Condvar, Mutex, Once, ONCE_INIT};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Receiver};
use stopwatch;
const DEFAULT_STACK_SIZE: usize = 64 * 1024;
static CONDVAR: AtomicInitCell<Condvar> = AtomicInitCell::new();
static INSTANCE: AtomicInitCell<Mutex<Scheduler>> = AtomicInitCell::new();
static INSTANCE_INIT: Once = ONCE_INIT;
static WORK_COUNTER: AtomicUsize = AtomicUsize::new(1);
/// Represents the result of a computation that may finish at some point in the future.
///
/// Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
/// result of the work. Use `Async::await()` to suspend the current fiber until the work completes
/// and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
/// the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
///
/// # Sharing Data Across Work
///
/// It's possible to share
#[derive(Debug)]
pub struct Async<'a, T> {
work: WorkId,
receiver: Receiver<T>,
_phantom: PhantomData<&'a FnMut()>,
}
impl<'a, T> Async<'a, T> {
/// Suspend the current fiber until the async operation finishes.
pub fn await(self) -> T {
let result = {
let Async { work, ref receiver,.. } = self;
work.await();
receiver.try_recv().expect("Failed to receive result of async computation")
};
result
}
pub fn work_id(&self) -> WorkId {
self.work
}
}
impl<T> Async<'static, T> {
pub fn forget(self) {
mem::forget(self);
}
}
impl<'a, T> Drop for Async<'a, T> {
fn drop(&mut self) {
self.work.await();
}
}
/// A shareable reference to a work unit, counterpart to `Async<T>`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WorkId(usize);
impl WorkId {
/// Suspends the current fiber until this work unit has completed.
///
/// If the work unit has already finished then `await()` will return immediately.
pub fn await(self) {
if Scheduler::with(|scheduler| scheduler.add_dependency(self)) {
suspend();
}
}
}
/// Initializes a newly-spawned worker thread.
///
/// Prepares the worker thread by initializing it for Fiber usage.
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn init_thread() {
// Make sure the scheduler is initialized before first use.
Scheduler::with(|_| {});
// Manually convert current thread into a fiber because reasons.
fiber::init();
}
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn run_wait_fiber() {
// Make sure the scheduler is initialized before first use.
Scheduler::with(|_| {});
// Setup this thread for running fibers and create an initial fiber for it. This will become
// the wait fiber for this thread.
fiber::init();
fiber_routine();
}
pub fn start<'a, F, T>(func: F) -> Async<'a, T>
where
F: FnOnce() -> T,
F: 'a + Send,
T: 'a + Send,
{
// Normally we can't box a closure with a non'static lifetime because it could outlive its
// borrowed data. In this case the lifetime parameter on the returned `Async` ensures that
// the closure can't outlive the borrowed data, so we use this evil magic to convince the
// compiler to allow us to box the closure.
unsafe fn erase_lifetime<'a, F>(func: F) -> Box<FnBox()>
where
F: FnOnce(),
F: 'a + Send,
{
let boxed_proc = Box::new(func);
let proc_ptr = Box::into_raw(boxed_proc) as *mut FnBox();
Box::from_raw(::std::mem::transmute(proc_ptr))
}
// Create the channel that'll be used to send the result of the operation to the `Async` object.
let (sender, receiver) = mpsc::sync_channel(1);
let work_id = WorkId(WORK_COUNTER.fetch_add(1, Ordering::Relaxed));
let work_proc = unsafe {
erase_lifetime(move || {
let result = func();
sender.try_send(result).expect("Failed to send async result");
})
};
Scheduler::with(move |scheduler| scheduler.schedule_work(Work {
func: work_proc,
id: work_id,
}));
Async {
work: work_id,
receiver: receiver,
_phantom: PhantomData,
}
}
/// Suspends the current fiber and makes the wait fiber active.
///
/// Generally you shouldn't need to call this directly, but if you have one piece of code that
/// runs synchronously for a long time you can use `suspend()` to yield time to other work.
pub fn suspend() {
let next_fiber = Scheduler::with(|scheduler| scheduler.next_fiber());
let suspended = unsafe { next_fiber.resume() };
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
}
fn fiber_routine() ->! {
loop {
match Scheduler::with(|scheduler| scheduler.next()) {
Some(NextWork::Work(Work { func, id })) => {
Scheduler::with(|scheduler| scheduler.start_work(id));
func();
Scheduler::with(|scheduler| scheduler.finish_work(id));
},
Some(NextWork::Fiber(fiber)) => {
let suspended = unsafe { fiber.resume() };
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
},
None => {
// If there's no new work and no fibers ready to run then we want to block the
// thread until some becomes available.
let mutex = INSTANCE.borrow();
let condvar = CONDVAR.borrow();
let _ = condvar
.wait(mutex.lock().expect("Scheduler mutex was poisoned"))
.expect("Scheduler mutex was poisoned");
},
}
}
}
struct Work {
func: Box<FnBox()>,
id: WorkId,
}
impl Debug for Work {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error>
|
}
enum NextWork {
Work(Work),
Fiber(Fiber),
}
struct Scheduler {
/// Work units that are currently pending or in progress.
current_work: HashSet<WorkId>,
work_map: HashMap<FiberId, WorkId>,
/// New units of work that haven't been started on a fiber yet.
///
/// These are ready to be made active at any time.
new_work: VecDeque<Work>,
/// Fibers that have no pending dependencies.
///
/// These are ready to be made active at any time.
ready_fibers: VecDeque<Fiber>,
/// A map specifying which pending fibers depend on which others.
///
/// Once all of a fiber's dependencies complete it should be moved to `new_work`.
dependencies: HashMap<FiberId, (Option<Fiber>, HashSet<WorkId>)>,
// TODO: Should we distinguise between "finished" and "ready" fibers? My intuition is that we'd
// want to give fibers that actively have work CPU time before we resume fibers that would be
// pulling new work, but maybe not? If we threw them all into one queue I guess the worst case
// scenario would be there's no work left, a bunch of empty fibers, and only a few fibers with
// active work. In which case we might have to cycle through a bunch of fibers before we can
// start doing actual work.
finished: VecDeque<Fiber>,
}
unsafe impl Send for Scheduler {}
impl Scheduler {
/// Provides safe access to the scheduler instance.
///
/// # Fiber Switches
///
/// Note that it is an error to call `Fiber::make_active()` within `func`. Doing so will cause
/// the `Mutex` guard on the instance to never unlock, making the scheduler instance
/// inaccessible. All standalone functions that access the scheduler and wish to switch fibers
/// should use `Scheduler::next()` to return the next fiber from `with()` and then call
/// `make_active()` *after* `with()` has returned.
fn with<F, T>(func: F) -> T
where F: FnOnce(&mut Scheduler) -> T
{
INSTANCE_INIT.call_once(|| {
let scheduler = Scheduler {
current_work: HashSet::new(),
work_map: HashMap::new(),
new_work: VecDeque::new(),
ready_fibers: VecDeque::new(),
dependencies: HashMap::new(),
finished: VecDeque::new(),
};
INSTANCE.init(Mutex::new(scheduler));
CONDVAR.init(Condvar::new());
});
let instance = INSTANCE.borrow();
let mut guard = instance.lock().expect("Scheduler mutex was poisoned");
func(&mut *guard)
}
/// Add a new unit of work to the pending queue.
fn schedule_work(&mut self, work: Work) {
assert!(self.current_work.insert(work.id), "Work's ID was already present in current work set");
self.new_work.push_back(work);
CONDVAR.borrow().notify_one();
}
/// Adds `dependency` as a dependency of the currently running fiber.
///
/// Returns `true` if work is still in progress and was added as a dependency, false otherwise.
fn add_dependency(&mut self, dependency: WorkId) -> bool {
let pending = fiber::current().unwrap();
if self.current_work.contains(&dependency) {
debug_assert!(
!self.dependencies.contains_key(&pending),
"Marking a fiber as pending but it is already pending: {:?}",
pending,
);
// Add `pending` to set of pending fibers and list `dependencies` as dependencies.
let &mut (_, ref mut dependencies_set) =
self.dependencies
.entry(pending)
.or_insert((None, HashSet::new()));
// Add `fibers` to the list of ready fibers.
dependencies_set.insert(dependency);
true
} else {
false
}
}
fn start_work(&mut self, new_work: WorkId) {
debug_assert!(self.current_work.contains(&new_work), "Work ID was not in current work set");
let current = fiber::current().unwrap();
self.work_map.insert(current, new_work);
}
/// Removes the specified unit of work from the scheduler, updating any dependent work.
fn finish_work(&mut self, finished_work: WorkId) {
// Iterate over all suspended work units, removing `finished_work` as a dependency where
// necessary. If any of the work units no longer have dependencies then
let mut ready = Vec::new();
for (&pending_fiber, &mut (_, ref mut dependencies)) in &mut self.dependencies {
dependencies.remove(&finished_work);
if dependencies.len() == 0 {
ready.push(pending_fiber);
}
}
for ready_work in ready {
let (maybe_fiber, _) = self.dependencies.remove(&ready_work).unwrap();
if let Some(ready_fiber) = maybe_fiber {
self.ready_fibers.push_back(ready_fiber);
CONDVAR.borrow().notify_one();
}
}
let fiber = fiber::current().unwrap();
assert!(self.current_work.remove(&finished_work), "{:?} wasn't in current work set when it finished", finished_work);
assert!(self.work_map.remove(&fiber).is_some(), "{:?} didn't have {:?} associated in the work map", fiber, finished_work);
}
/// Performs the necessary bookkeeping when a fiber becomes active.
fn handle_suspended(&mut self, suspended: Fiber) {
// If the suspended fiber has dependencies then update the dependencies map with the
// actual fiber, that way when its dependencies complete it can be resumed. Otherwise, the
// fiber is done and ready to take on more work. This means that we need to make sure that
// we always call `add_dependencies()` before suspending a fiber, otherwise a fiber could
// be marked as done before it's ready.
if let Some(&mut (ref mut none_fiber, _)) = self.dependencies.get_mut(&suspended.id()) {
debug_assert!(none_fiber.is_none(), "Dependencies map already had a fiber assicated with fiber ID");
mem::replace(none_fiber, Some(suspended));
} else if self.work_map.contains_key(&suspended.id()) {
self.ready_fibers.push_back(suspended);
} else {
self.finished.push_back(suspended);
}
}
/// Gets the next ready fiber, or creates a new one if necessary.
fn next_fiber(&mut self) -> Fiber {
self.ready_fibers.pop_front()
.or_else(|| self.finished.pop_front())
.unwrap_or_else(|| {
fn fiber_proc(suspended: Fiber) ->! {
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
// The current fiber has been resumed. Let the scheduler know that the previous fiber is no
// longer active.
Scheduler::with(|scheduler| scheduler.handle_suspended(suspended));
fiber_routine();
}
Fiber::new(DEFAULT_STACK_SIZE, fiber_proc)
})
}
/// Gets the next available work for a thread, either a new unit of work or a ready fiber.
///
/// Prioritizes new work over pending fibers, and will only return ready fibers that already
/// have work. To get *any* next fiber, including ones without active work or a new one if no
/// existing fibers are available, use `next_fiber()`.
fn next(&mut self) -> Option<NextWork> {
if let Some(work) = self.new_work.pop_front() {
Some(NextWork::Work(work))
} else {
self.ready_fibers.pop_front().map(|fiber| NextWork::Fiber(fiber))
}
}
}
|
{
write!(formatter, "Work {{ id: {:?} }}", self.id)
}
|
identifier_body
|
scheduler.rs
|
//! The main scheduler logic.
//!
//! The scheduler is implemented as a singleton in order to make it easy for code anywhere in the
//! project to make use of async functionality. The actual scheduler instance is not publicly
//! accessible, instead we use various standalone functions like `start()` and `wait_for()` to
//! safely manage access to the scheduler.
//!
//! # Scheduling Work
//!
//! Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
//! result of the work. Use `Async::await()` to suspend the current fiber until the work completes
//! and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
//! the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
//!
//! # Sharing Data Between Work
//!
//! Unlike with `std::thread::spawn()`, it's possible for work started with `scheduler::start()`
//! to borrow data from the caller:
//!
//! ```
//!
//! ```
use fiber::{self, Fiber, FiberId};
use cell_extras::AtomicInitCell;
use std::boxed::FnBox;
use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt::{self, Debug, Formatter};
use std::marker::PhantomData;
use std::mem;
use std::sync::{Condvar, Mutex, Once, ONCE_INIT};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Receiver};
use stopwatch;
const DEFAULT_STACK_SIZE: usize = 64 * 1024;
static CONDVAR: AtomicInitCell<Condvar> = AtomicInitCell::new();
static INSTANCE: AtomicInitCell<Mutex<Scheduler>> = AtomicInitCell::new();
static INSTANCE_INIT: Once = ONCE_INIT;
static WORK_COUNTER: AtomicUsize = AtomicUsize::new(1);
/// Represents the result of a computation that may finish at some point in the future.
///
/// Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
/// result of the work. Use `Async::await()` to suspend the current fiber until the work completes
/// and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
/// the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
///
/// # Sharing Data Across Work
///
/// It's possible to share
#[derive(Debug)]
pub struct Async<'a, T> {
work: WorkId,
receiver: Receiver<T>,
_phantom: PhantomData<&'a FnMut()>,
}
impl<'a, T> Async<'a, T> {
/// Suspend the current fiber until the async operation finishes.
pub fn await(self) -> T {
let result = {
let Async { work, ref receiver,.. } = self;
work.await();
receiver.try_recv().expect("Failed to receive result of async computation")
};
result
}
pub fn work_id(&self) -> WorkId {
self.work
}
}
impl<T> Async<'static, T> {
pub fn forget(self) {
mem::forget(self);
}
}
impl<'a, T> Drop for Async<'a, T> {
fn drop(&mut self) {
self.work.await();
}
}
/// A shareable reference to a work unit, counterpart to `Async<T>`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WorkId(usize);
impl WorkId {
/// Suspends the current fiber until this work unit has completed.
///
/// If the work unit has already finished then `await()` will return immediately.
pub fn await(self) {
if Scheduler::with(|scheduler| scheduler.add_dependency(self)) {
suspend();
}
}
}
/// Initializes a newly-spawned worker thread.
///
/// Prepares the worker thread by initializing it for Fiber usage.
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn init_thread() {
// Make sure the scheduler is initialized before first use.
Scheduler::with(|_| {});
// Manually convert current thread into a fiber because reasons.
fiber::init();
}
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn run_wait_fiber() {
// Make sure the scheduler is initialized before first use.
Scheduler::with(|_| {});
// Setup this thread for running fibers and create an initial fiber for it. This will become
// the wait fiber for this thread.
fiber::init();
fiber_routine();
}
pub fn start<'a, F, T>(func: F) -> Async<'a, T>
where
F: FnOnce() -> T,
F: 'a + Send,
T: 'a + Send,
{
// Normally we can't box a closure with a non'static lifetime because it could outlive its
// borrowed data. In this case the lifetime parameter on the returned `Async` ensures that
// the closure can't outlive the borrowed data, so we use this evil magic to convince the
// compiler to allow us to box the closure.
unsafe fn erase_lifetime<'a, F>(func: F) -> Box<FnBox()>
where
F: FnOnce(),
F: 'a + Send,
{
let boxed_proc = Box::new(func);
let proc_ptr = Box::into_raw(boxed_proc) as *mut FnBox();
Box::from_raw(::std::mem::transmute(proc_ptr))
}
// Create the channel that'll be used to send the result of the operation to the `Async` object.
let (sender, receiver) = mpsc::sync_channel(1);
let work_id = WorkId(WORK_COUNTER.fetch_add(1, Ordering::Relaxed));
let work_proc = unsafe {
erase_lifetime(move || {
let result = func();
sender.try_send(result).expect("Failed to send async result");
})
};
Scheduler::with(move |scheduler| scheduler.schedule_work(Work {
func: work_proc,
id: work_id,
}));
Async {
work: work_id,
receiver: receiver,
_phantom: PhantomData,
}
}
/// Suspends the current fiber and makes the wait fiber active.
///
/// Generally you shouldn't need to call this directly, but if you have one piece of code that
/// runs synchronously for a long time you can use `suspend()` to yield time to other work.
pub fn suspend() {
let next_fiber = Scheduler::with(|scheduler| scheduler.next_fiber());
let suspended = unsafe { next_fiber.resume() };
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
}
fn fiber_routine() ->! {
loop {
match Scheduler::with(|scheduler| scheduler.next()) {
Some(NextWork::Work(Work { func, id })) => {
Scheduler::with(|scheduler| scheduler.start_work(id));
func();
Scheduler::with(|scheduler| scheduler.finish_work(id));
},
Some(NextWork::Fiber(fiber)) => {
let suspended = unsafe { fiber.resume() };
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
},
None => {
// If there's no new work and no fibers ready to run then we want to block the
// thread until some becomes available.
let mutex = INSTANCE.borrow();
let condvar = CONDVAR.borrow();
let _ = condvar
.wait(mutex.lock().expect("Scheduler mutex was poisoned"))
.expect("Scheduler mutex was poisoned");
},
}
}
}
struct Work {
func: Box<FnBox()>,
id: WorkId,
}
impl Debug for Work {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Work {{ id: {:?} }}", self.id)
}
}
enum NextWork {
Work(Work),
Fiber(Fiber),
}
struct Scheduler {
/// Work units that are currently pending or in progress.
current_work: HashSet<WorkId>,
work_map: HashMap<FiberId, WorkId>,
/// New units of work that haven't been started on a fiber yet.
///
/// These are ready to be made active at any time.
new_work: VecDeque<Work>,
/// Fibers that have no pending dependencies.
///
/// These are ready to be made active at any time.
ready_fibers: VecDeque<Fiber>,
/// A map specifying which pending fibers depend on which others.
///
/// Once all of a fiber's dependencies complete it should be moved to `new_work`.
dependencies: HashMap<FiberId, (Option<Fiber>, HashSet<WorkId>)>,
// TODO: Should we distinguise between "finished" and "ready" fibers? My intuition is that we'd
// want to give fibers that actively have work CPU time before we resume fibers that would be
// pulling new work, but maybe not? If we threw them all into one queue I guess the worst case
// scenario would be there's no work left, a bunch of empty fibers, and only a few fibers with
// active work. In which case we might have to cycle through a bunch of fibers before we can
// start doing actual work.
finished: VecDeque<Fiber>,
}
unsafe impl Send for Scheduler {}
impl Scheduler {
/// Provides safe access to the scheduler instance.
///
/// # Fiber Switches
///
/// Note that it is an error to call `Fiber::make_active()` within `func`. Doing so will cause
/// the `Mutex` guard on the instance to never unlock, making the scheduler instance
/// inaccessible. All standalone functions that access the scheduler and wish to switch fibers
/// should use `Scheduler::next()` to return the next fiber from `with()` and then call
/// `make_active()` *after* `with()` has returned.
fn with<F, T>(func: F) -> T
where F: FnOnce(&mut Scheduler) -> T
{
INSTANCE_INIT.call_once(|| {
let scheduler = Scheduler {
current_work: HashSet::new(),
work_map: HashMap::new(),
new_work: VecDeque::new(),
ready_fibers: VecDeque::new(),
dependencies: HashMap::new(),
finished: VecDeque::new(),
};
INSTANCE.init(Mutex::new(scheduler));
CONDVAR.init(Condvar::new());
});
let instance = INSTANCE.borrow();
let mut guard = instance.lock().expect("Scheduler mutex was poisoned");
func(&mut *guard)
}
/// Add a new unit of work to the pending queue.
fn schedule_work(&mut self, work: Work) {
assert!(self.current_work.insert(work.id), "Work's ID was already present in current work set");
self.new_work.push_back(work);
CONDVAR.borrow().notify_one();
}
/// Adds `dependency` as a dependency of the currently running fiber.
///
/// Returns `true` if work is still in progress and was added as a dependency, false otherwise.
fn add_dependency(&mut self, dependency: WorkId) -> bool {
let pending = fiber::current().unwrap();
if self.current_work.contains(&dependency) {
debug_assert!(
!self.dependencies.contains_key(&pending),
"Marking a fiber as pending but it is already pending: {:?}",
pending,
);
// Add `pending` to set of pending fibers and list `dependencies` as dependencies.
let &mut (_, ref mut dependencies_set) =
self.dependencies
.entry(pending)
.or_insert((None, HashSet::new()));
// Add `fibers` to the list of ready fibers.
dependencies_set.insert(dependency);
true
} else {
false
}
}
fn start_work(&mut self, new_work: WorkId) {
debug_assert!(self.current_work.contains(&new_work), "Work ID was not in current work set");
let current = fiber::current().unwrap();
self.work_map.insert(current, new_work);
}
/// Removes the specified unit of work from the scheduler, updating any dependent work.
fn finish_work(&mut self, finished_work: WorkId) {
// Iterate over all suspended work units, removing `finished_work` as a dependency where
// necessary. If any of the work units no longer have dependencies then
let mut ready = Vec::new();
for (&pending_fiber, &mut (_, ref mut dependencies)) in &mut self.dependencies {
dependencies.remove(&finished_work);
if dependencies.len() == 0 {
ready.push(pending_fiber);
}
}
for ready_work in ready {
let (maybe_fiber, _) = self.dependencies.remove(&ready_work).unwrap();
if let Some(ready_fiber) = maybe_fiber {
self.ready_fibers.push_back(ready_fiber);
CONDVAR.borrow().notify_one();
}
}
let fiber = fiber::current().unwrap();
assert!(self.current_work.remove(&finished_work), "{:?} wasn't in current work set when it finished", finished_work);
assert!(self.work_map.remove(&fiber).is_some(), "{:?} didn't have {:?} associated in the work map", fiber, finished_work);
}
/// Performs the necessary bookkeeping when a fiber becomes active.
fn handle_suspended(&mut self, suspended: Fiber) {
// If the suspended fiber has dependencies then update the dependencies map with the
// actual fiber, that way when its dependencies complete it can be resumed. Otherwise, the
// fiber is done and ready to take on more work. This means that we need to make sure that
// we always call `add_dependencies()` before suspending a fiber, otherwise a fiber could
// be marked as done before it's ready.
if let Some(&mut (ref mut none_fiber, _)) = self.dependencies.get_mut(&suspended.id()) {
debug_assert!(none_fiber.is_none(), "Dependencies map already had a fiber assicated with fiber ID");
mem::replace(none_fiber, Some(suspended));
} else if self.work_map.contains_key(&suspended.id()) {
self.ready_fibers.push_back(suspended);
} else {
self.finished.push_back(suspended);
}
}
/// Gets the next ready fiber, or creates a new one if necessary.
fn next_fiber(&mut self) -> Fiber {
self.ready_fibers.pop_front()
.or_else(|| self.finished.pop_front())
.unwrap_or_else(|| {
fn
|
(suspended: Fiber) ->! {
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
// The current fiber has been resumed. Let the scheduler know that the previous fiber is no
// longer active.
Scheduler::with(|scheduler| scheduler.handle_suspended(suspended));
fiber_routine();
}
Fiber::new(DEFAULT_STACK_SIZE, fiber_proc)
})
}
/// Gets the next available work for a thread, either a new unit of work or a ready fiber.
///
/// Prioritizes new work over pending fibers, and will only return ready fibers that already
/// have work. To get *any* next fiber, including ones without active work or a new one if no
/// existing fibers are available, use `next_fiber()`.
fn next(&mut self) -> Option<NextWork> {
if let Some(work) = self.new_work.pop_front() {
Some(NextWork::Work(work))
} else {
self.ready_fibers.pop_front().map(|fiber| NextWork::Fiber(fiber))
}
}
}
|
fiber_proc
|
identifier_name
|
scheduler.rs
|
//! The main scheduler logic.
//!
//! The scheduler is implemented as a singleton in order to make it easy for code anywhere in the
//! project to make use of async functionality. The actual scheduler instance is not publicly
//! accessible, instead we use various standalone functions like `start()` and `wait_for()` to
//! safely manage access to the scheduler.
//!
//! # Scheduling Work
//!
//! Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
//! result of the work. Use `Async::await()` to suspend the current fiber until the work completes
//! and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
//! the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
//!
//! # Sharing Data Between Work
//!
//! Unlike with `std::thread::spawn()`, it's possible for work started with `scheduler::start()`
//! to borrow data from the caller:
//!
//! ```
//!
//! ```
use fiber::{self, Fiber, FiberId};
use cell_extras::AtomicInitCell;
use std::boxed::FnBox;
use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt::{self, Debug, Formatter};
use std::marker::PhantomData;
use std::mem;
use std::sync::{Condvar, Mutex, Once, ONCE_INIT};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Receiver};
use stopwatch;
const DEFAULT_STACK_SIZE: usize = 64 * 1024;
static CONDVAR: AtomicInitCell<Condvar> = AtomicInitCell::new();
static INSTANCE: AtomicInitCell<Mutex<Scheduler>> = AtomicInitCell::new();
static INSTANCE_INIT: Once = ONCE_INIT;
static WORK_COUNTER: AtomicUsize = AtomicUsize::new(1);
/// Represents the result of a computation that may finish at some point in the future.
///
/// Use `scheduler::start()` to run some work asynchronously, getting an `Async<T>` representing the
/// result of the work. Use `Async::await()` to suspend the current fiber until the work completes
/// and get the result. By default, dropping an `Async<T>` will suspend the current fiber until
/// the work finishes, but you can use `Async::forget()` to ignore the result without blocking.
///
/// # Sharing Data Across Work
///
/// It's possible to share
#[derive(Debug)]
pub struct Async<'a, T> {
work: WorkId,
receiver: Receiver<T>,
_phantom: PhantomData<&'a FnMut()>,
}
impl<'a, T> Async<'a, T> {
/// Suspend the current fiber until the async operation finishes.
pub fn await(self) -> T {
let result = {
let Async { work, ref receiver,.. } = self;
work.await();
receiver.try_recv().expect("Failed to receive result of async computation")
};
result
}
pub fn work_id(&self) -> WorkId {
self.work
}
}
impl<T> Async<'static, T> {
pub fn forget(self) {
mem::forget(self);
}
}
impl<'a, T> Drop for Async<'a, T> {
fn drop(&mut self) {
self.work.await();
}
}
/// A shareable reference to a work unit, counterpart to `Async<T>`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct WorkId(usize);
impl WorkId {
/// Suspends the current fiber until this work unit has completed.
///
/// If the work unit has already finished then `await()` will return immediately.
pub fn await(self) {
if Scheduler::with(|scheduler| scheduler.add_dependency(self)) {
suspend();
}
}
}
/// Initializes a newly-spawned worker thread.
///
/// Prepares the worker thread by initializing it for Fiber usage.
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn init_thread() {
// Make sure the scheduler is initialized before first use.
Scheduler::with(|_| {});
// Manually convert current thread into a fiber because reasons.
fiber::init();
}
// TODO: This should probably only be public within the crate. Only the engine should be using this,
// and only at startup, we probably don't want user code to be spawning threads anyway.
pub fn run_wait_fiber() {
// Make sure the scheduler is initialized before first use.
Scheduler::with(|_| {});
// Setup this thread for running fibers and create an initial fiber for it. This will become
// the wait fiber for this thread.
fiber::init();
fiber_routine();
}
pub fn start<'a, F, T>(func: F) -> Async<'a, T>
where
F: FnOnce() -> T,
F: 'a + Send,
T: 'a + Send,
{
// Normally we can't box a closure with a non'static lifetime because it could outlive its
// borrowed data. In this case the lifetime parameter on the returned `Async` ensures that
// the closure can't outlive the borrowed data, so we use this evil magic to convince the
// compiler to allow us to box the closure.
unsafe fn erase_lifetime<'a, F>(func: F) -> Box<FnBox()>
where
F: FnOnce(),
F: 'a + Send,
{
let boxed_proc = Box::new(func);
let proc_ptr = Box::into_raw(boxed_proc) as *mut FnBox();
Box::from_raw(::std::mem::transmute(proc_ptr))
}
// Create the channel that'll be used to send the result of the operation to the `Async` object.
let (sender, receiver) = mpsc::sync_channel(1);
let work_id = WorkId(WORK_COUNTER.fetch_add(1, Ordering::Relaxed));
let work_proc = unsafe {
erase_lifetime(move || {
let result = func();
sender.try_send(result).expect("Failed to send async result");
})
};
Scheduler::with(move |scheduler| scheduler.schedule_work(Work {
func: work_proc,
id: work_id,
}));
Async {
work: work_id,
receiver: receiver,
_phantom: PhantomData,
}
}
/// Suspends the current fiber and makes the wait fiber active.
///
/// Generally you shouldn't need to call this directly, but if you have one piece of code that
/// runs synchronously for a long time you can use `suspend()` to yield time to other work.
pub fn suspend() {
let next_fiber = Scheduler::with(|scheduler| scheduler.next_fiber());
let suspended = unsafe { next_fiber.resume() };
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
}
fn fiber_routine() ->! {
loop {
match Scheduler::with(|scheduler| scheduler.next()) {
Some(NextWork::Work(Work { func, id })) => {
Scheduler::with(|scheduler| scheduler.start_work(id));
func();
Scheduler::with(|scheduler| scheduler.finish_work(id));
},
Some(NextWork::Fiber(fiber)) => {
let suspended = unsafe { fiber.resume() };
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
Scheduler::with(move |scheduler| scheduler.handle_suspended(suspended));
},
None => {
// If there's no new work and no fibers ready to run then we want to block the
// thread until some becomes available.
let mutex = INSTANCE.borrow();
let condvar = CONDVAR.borrow();
let _ = condvar
.wait(mutex.lock().expect("Scheduler mutex was poisoned"))
.expect("Scheduler mutex was poisoned");
},
}
}
}
struct Work {
func: Box<FnBox()>,
id: WorkId,
}
impl Debug for Work {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Work {{ id: {:?} }}", self.id)
}
}
enum NextWork {
Work(Work),
Fiber(Fiber),
}
struct Scheduler {
/// Work units that are currently pending or in progress.
current_work: HashSet<WorkId>,
work_map: HashMap<FiberId, WorkId>,
/// New units of work that haven't been started on a fiber yet.
///
/// These are ready to be made active at any time.
new_work: VecDeque<Work>,
/// Fibers that have no pending dependencies.
///
/// These are ready to be made active at any time.
ready_fibers: VecDeque<Fiber>,
/// A map specifying which pending fibers depend on which others.
///
/// Once all of a fiber's dependencies complete it should be moved to `new_work`.
dependencies: HashMap<FiberId, (Option<Fiber>, HashSet<WorkId>)>,
// TODO: Should we distinguise between "finished" and "ready" fibers? My intuition is that we'd
// want to give fibers that actively have work CPU time before we resume fibers that would be
// pulling new work, but maybe not? If we threw them all into one queue I guess the worst case
// scenario would be there's no work left, a bunch of empty fibers, and only a few fibers with
// active work. In which case we might have to cycle through a bunch of fibers before we can
// start doing actual work.
finished: VecDeque<Fiber>,
}
unsafe impl Send for Scheduler {}
impl Scheduler {
/// Provides safe access to the scheduler instance.
///
/// # Fiber Switches
///
/// Note that it is an error to call `Fiber::make_active()` within `func`. Doing so will cause
/// the `Mutex` guard on the instance to never unlock, making the scheduler instance
/// inaccessible. All standalone functions that access the scheduler and wish to switch fibers
/// should use `Scheduler::next()` to return the next fiber from `with()` and then call
/// `make_active()` *after* `with()` has returned.
fn with<F, T>(func: F) -> T
where F: FnOnce(&mut Scheduler) -> T
{
INSTANCE_INIT.call_once(|| {
let scheduler = Scheduler {
current_work: HashSet::new(),
work_map: HashMap::new(),
new_work: VecDeque::new(),
ready_fibers: VecDeque::new(),
dependencies: HashMap::new(),
finished: VecDeque::new(),
};
INSTANCE.init(Mutex::new(scheduler));
CONDVAR.init(Condvar::new());
});
let instance = INSTANCE.borrow();
let mut guard = instance.lock().expect("Scheduler mutex was poisoned");
func(&mut *guard)
}
/// Add a new unit of work to the pending queue.
fn schedule_work(&mut self, work: Work) {
assert!(self.current_work.insert(work.id), "Work's ID was already present in current work set");
self.new_work.push_back(work);
CONDVAR.borrow().notify_one();
}
/// Adds `dependency` as a dependency of the currently running fiber.
///
/// Returns `true` if work is still in progress and was added as a dependency, false otherwise.
fn add_dependency(&mut self, dependency: WorkId) -> bool {
let pending = fiber::current().unwrap();
if self.current_work.contains(&dependency) {
debug_assert!(
!self.dependencies.contains_key(&pending),
"Marking a fiber as pending but it is already pending: {:?}",
pending,
);
// Add `pending` to set of pending fibers and list `dependencies` as dependencies.
let &mut (_, ref mut dependencies_set) =
self.dependencies
.entry(pending)
.or_insert((None, HashSet::new()));
// Add `fibers` to the list of ready fibers.
dependencies_set.insert(dependency);
true
} else {
false
}
}
fn start_work(&mut self, new_work: WorkId) {
debug_assert!(self.current_work.contains(&new_work), "Work ID was not in current work set");
let current = fiber::current().unwrap();
self.work_map.insert(current, new_work);
}
/// Removes the specified unit of work from the scheduler, updating any dependent work.
fn finish_work(&mut self, finished_work: WorkId) {
// Iterate over all suspended work units, removing `finished_work` as a dependency where
// necessary. If any of the work units no longer have dependencies then
let mut ready = Vec::new();
for (&pending_fiber, &mut (_, ref mut dependencies)) in &mut self.dependencies {
dependencies.remove(&finished_work);
if dependencies.len() == 0
|
}
for ready_work in ready {
let (maybe_fiber, _) = self.dependencies.remove(&ready_work).unwrap();
if let Some(ready_fiber) = maybe_fiber {
self.ready_fibers.push_back(ready_fiber);
CONDVAR.borrow().notify_one();
}
}
let fiber = fiber::current().unwrap();
assert!(self.current_work.remove(&finished_work), "{:?} wasn't in current work set when it finished", finished_work);
assert!(self.work_map.remove(&fiber).is_some(), "{:?} didn't have {:?} associated in the work map", fiber, finished_work);
}
/// Performs the necessary bookkeeping when a fiber becomes active.
fn handle_suspended(&mut self, suspended: Fiber) {
// If the suspended fiber has dependencies then update the dependencies map with the
// actual fiber, that way when its dependencies complete it can be resumed. Otherwise, the
// fiber is done and ready to take on more work. This means that we need to make sure that
// we always call `add_dependencies()` before suspending a fiber, otherwise a fiber could
// be marked as done before it's ready.
if let Some(&mut (ref mut none_fiber, _)) = self.dependencies.get_mut(&suspended.id()) {
debug_assert!(none_fiber.is_none(), "Dependencies map already had a fiber assicated with fiber ID");
mem::replace(none_fiber, Some(suspended));
} else if self.work_map.contains_key(&suspended.id()) {
self.ready_fibers.push_back(suspended);
} else {
self.finished.push_back(suspended);
}
}
/// Gets the next ready fiber, or creates a new one if necessary.
fn next_fiber(&mut self) -> Fiber {
self.ready_fibers.pop_front()
.or_else(|| self.finished.pop_front())
.unwrap_or_else(|| {
fn fiber_proc(suspended: Fiber) ->! {
stopwatch::switch_context(suspended.id(), fiber::current().unwrap());
// The current fiber has been resumed. Let the scheduler know that the previous fiber is no
// longer active.
Scheduler::with(|scheduler| scheduler.handle_suspended(suspended));
fiber_routine();
}
Fiber::new(DEFAULT_STACK_SIZE, fiber_proc)
})
}
/// Gets the next available work for a thread, either a new unit of work or a ready fiber.
///
/// Prioritizes new work over pending fibers, and will only return ready fibers that already
/// have work. To get *any* next fiber, including ones without active work or a new one if no
/// existing fibers are available, use `next_fiber()`.
fn next(&mut self) -> Option<NextWork> {
if let Some(work) = self.new_work.pop_front() {
Some(NextWork::Work(work))
} else {
self.ready_fibers.pop_front().map(|fiber| NextWork::Fiber(fiber))
}
}
}
|
{
ready.push(pending_fiber);
}
|
conditional_block
|
saturating_neg.rs
|
use malachite_base::num::basic::signeds::PrimitiveSigned;
|
let test = |n: T, out| {
assert_eq!(n.saturating_neg(), out);
let mut n = n;
n.saturating_neg_assign();
assert_eq!(n, out);
};
test(T::ZERO, T::ZERO);
test(T::ONE, T::NEGATIVE_ONE);
test(T::exact_from(100), T::exact_from(-100));
test(T::MAX, T::MIN + T::ONE);
test(T::NEGATIVE_ONE, T::ONE);
test(T::exact_from(-100), T::exact_from(100));
test(T::MIN, T::MAX);
}
#[test]
fn test_saturating_neg_assign() {
apply_fn_to_signeds!(saturating_neg_assign_helper);
}
fn saturating_neg_properties_helper<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|n| {
let mut neg = n;
neg.saturating_neg_assign();
assert_eq!(neg, n.saturating_neg());
if n!= T::MIN {
assert_eq!(neg.saturating_neg(), n);
}
assert_eq!(neg == n, n == T::ZERO);
});
}
#[test]
fn saturating_neg_properties() {
apply_fn_to_signeds!(saturating_neg_properties_helper);
}
|
use malachite_base_test_util::generators::signed_gen;
fn saturating_neg_assign_helper<T: PrimitiveSigned>() {
|
random_line_split
|
saturating_neg.rs
|
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base_test_util::generators::signed_gen;
fn saturating_neg_assign_helper<T: PrimitiveSigned>() {
let test = |n: T, out| {
assert_eq!(n.saturating_neg(), out);
let mut n = n;
n.saturating_neg_assign();
assert_eq!(n, out);
};
test(T::ZERO, T::ZERO);
test(T::ONE, T::NEGATIVE_ONE);
test(T::exact_from(100), T::exact_from(-100));
test(T::MAX, T::MIN + T::ONE);
test(T::NEGATIVE_ONE, T::ONE);
test(T::exact_from(-100), T::exact_from(100));
test(T::MIN, T::MAX);
}
#[test]
fn test_saturating_neg_assign() {
apply_fn_to_signeds!(saturating_neg_assign_helper);
}
fn saturating_neg_properties_helper<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|n| {
let mut neg = n;
neg.saturating_neg_assign();
assert_eq!(neg, n.saturating_neg());
if n!= T::MIN {
assert_eq!(neg.saturating_neg(), n);
}
assert_eq!(neg == n, n == T::ZERO);
});
}
#[test]
fn
|
() {
apply_fn_to_signeds!(saturating_neg_properties_helper);
}
|
saturating_neg_properties
|
identifier_name
|
saturating_neg.rs
|
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base_test_util::generators::signed_gen;
fn saturating_neg_assign_helper<T: PrimitiveSigned>()
|
#[test]
fn test_saturating_neg_assign() {
apply_fn_to_signeds!(saturating_neg_assign_helper);
}
fn saturating_neg_properties_helper<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|n| {
let mut neg = n;
neg.saturating_neg_assign();
assert_eq!(neg, n.saturating_neg());
if n!= T::MIN {
assert_eq!(neg.saturating_neg(), n);
}
assert_eq!(neg == n, n == T::ZERO);
});
}
#[test]
fn saturating_neg_properties() {
apply_fn_to_signeds!(saturating_neg_properties_helper);
}
|
{
let test = |n: T, out| {
assert_eq!(n.saturating_neg(), out);
let mut n = n;
n.saturating_neg_assign();
assert_eq!(n, out);
};
test(T::ZERO, T::ZERO);
test(T::ONE, T::NEGATIVE_ONE);
test(T::exact_from(100), T::exact_from(-100));
test(T::MAX, T::MIN + T::ONE);
test(T::NEGATIVE_ONE, T::ONE);
test(T::exact_from(-100), T::exact_from(100));
test(T::MIN, T::MAX);
}
|
identifier_body
|
saturating_neg.rs
|
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base_test_util::generators::signed_gen;
fn saturating_neg_assign_helper<T: PrimitiveSigned>() {
let test = |n: T, out| {
assert_eq!(n.saturating_neg(), out);
let mut n = n;
n.saturating_neg_assign();
assert_eq!(n, out);
};
test(T::ZERO, T::ZERO);
test(T::ONE, T::NEGATIVE_ONE);
test(T::exact_from(100), T::exact_from(-100));
test(T::MAX, T::MIN + T::ONE);
test(T::NEGATIVE_ONE, T::ONE);
test(T::exact_from(-100), T::exact_from(100));
test(T::MIN, T::MAX);
}
#[test]
fn test_saturating_neg_assign() {
apply_fn_to_signeds!(saturating_neg_assign_helper);
}
fn saturating_neg_properties_helper<T: PrimitiveSigned>() {
signed_gen::<T>().test_properties(|n| {
let mut neg = n;
neg.saturating_neg_assign();
assert_eq!(neg, n.saturating_neg());
if n!= T::MIN
|
assert_eq!(neg == n, n == T::ZERO);
});
}
#[test]
fn saturating_neg_properties() {
apply_fn_to_signeds!(saturating_neg_properties_helper);
}
|
{
assert_eq!(neg.saturating_neg(), n);
}
|
conditional_block
|
store.rs
|
use libc::{c_ulong, c_ulonglong, c_void};
use super::super::error_type::ErrorType;
use super::super::operation::Operation;
use super::super::instance::Instance;
use super::format_error;
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct StoreInternal {
pub cookie: *mut c_void,
pub key: *const c_void,
pub nkey: c_ulong,
pub cas: c_ulonglong,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16,
pub operation: Operation
}
impl StoreInternal {
pub fn key(&self) -> Option<String> {
unsafe {
match self.rc {
ErrorType::Success => {
let bytes = ::std::slice::from_raw_parts(self.key as *mut u8, self.nkey as usize);
let text = ::std::str::from_utf8(bytes).unwrap();
Some(text.to_owned())
},
_ => {
None
}
}
}
}
pub fn error(&self, instance: Instance) -> &'static str {
format_error(instance, &self.rc)
}
}
#[derive(Debug)]
pub struct
|
{
pub key: Option<String>,
pub cas: u64,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16,
pub operation: Operation
}
impl Store {
pub fn new(internal: &StoreInternal) -> Store {
Store {
key: internal.key(),
cas: internal.cas,
rc: internal.rc,
version: internal.version,
rflags: internal.rflags,
operation: internal.operation
}
}
}
|
Store
|
identifier_name
|
store.rs
|
use libc::{c_ulong, c_ulonglong, c_void};
use super::super::error_type::ErrorType;
use super::super::operation::Operation;
use super::super::instance::Instance;
use super::format_error;
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct StoreInternal {
pub cookie: *mut c_void,
pub key: *const c_void,
pub nkey: c_ulong,
pub cas: c_ulonglong,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16,
pub operation: Operation
}
impl StoreInternal {
pub fn key(&self) -> Option<String> {
unsafe {
match self.rc {
ErrorType::Success => {
let bytes = ::std::slice::from_raw_parts(self.key as *mut u8, self.nkey as usize);
let text = ::std::str::from_utf8(bytes).unwrap();
Some(text.to_owned())
},
_ => {
None
}
}
}
}
pub fn error(&self, instance: Instance) -> &'static str {
format_error(instance, &self.rc)
}
}
#[derive(Debug)]
pub struct Store {
pub key: Option<String>,
pub cas: u64,
pub rc: ErrorType,
pub version: u16,
pub rflags: u16,
pub operation: Operation
}
|
impl Store {
pub fn new(internal: &StoreInternal) -> Store {
Store {
key: internal.key(),
cas: internal.cas,
rc: internal.rc,
version: internal.version,
rflags: internal.rflags,
operation: internal.operation
}
}
}
|
random_line_split
|
|
closest_nodes_iter.rs
|
use std::sync::{Arc, Mutex, Condvar};
use std::sync::mpsc::Receiver;
use std::thread::spawn;
use node::{Node, NodeId};
#[cfg(test)]
use node::NODEID_BYTELEN;
#[derive(Clone)]
pub struct ClosestNodesIter {
key: Arc<NodeId>,
count: usize, // ask at least <count> nodes
processed_nodes: Arc<Mutex<Vec<Node>>>,
unprocessed_nodes: Arc<(Mutex<(Vec<Node>, usize)>, Condvar)>,
}
impl ClosestNodesIter {
pub fn new(key: NodeId, count: usize, node_list: Vec<Node>) -> ClosestNodesIter {
let this = ClosestNodesIter {
key: Arc::new(key),
count: count,
processed_nodes: Arc::new(Mutex::new(vec![])),
unprocessed_nodes: Arc::new((Mutex::new((vec![], 0)), Condvar::new())),
};
this.add_nodes(node_list);
this
}
#[allow(dead_code)]
pub fn get_closest_nodes(&self, n: usize) -> Vec<Node> {
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, _) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
let mut nodes = vec![];
for n in unprocessed_nodes.iter().chain(processed_nodes.iter()) {
nodes.push(n.clone())
}
let key = &self.key;
nodes.sort_by(asc_dist_order!(key));
nodes.truncate(n);
nodes
}
pub fn add_nodes(&self, node_list: Vec<Node>) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
let iter = node_list.iter().filter(|n|!processed_nodes.contains(n));
for n in iter {
unprocessed_nodes.push(n.clone());
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
pub fn add_node(&self, node: Node) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
if!processed_nodes.contains(&node) {
unprocessed_nodes.push(node);
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
#[allow(dead_code)]
pub fn recv_nodes(&self, rx: Receiver<Vec<Node>>) {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// increment receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers += 1;
cvar.notify_all();
let this = self.clone();
spawn(move || {
for addr_list in rx {
this.add_nodes(addr_list);
}
// wait for lock
let &(ref lock, ref cvar) = &*this.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// decrement receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers -= 1;
cvar.notify_all();
});
}
}
impl Iterator for ClosestNodesIter {
type Item = Node;
fn
|
(&mut self) -> Option<Self::Item> {
let key = &*self.key;
loop {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let mut unprocessed_nodes = pair.0.len();
let mut pending_receivers = pair.1;
// either we have unprocessed_nodes or we wait for pending_receviers
while unprocessed_nodes == 0 && pending_receivers > 0 {
pair = cvar.wait(pair).unwrap();
unprocessed_nodes = pair.0.len();
pending_receivers = pair.1;
}
let mut processed_nodes = self.processed_nodes.lock().unwrap();
processed_nodes.sort_by(asc_dist_order!(key));
processed_nodes.dedup();
let &mut (ref mut unprocessed_nodes, _) = &mut *pair;
unprocessed_nodes.sort_by(desc_dist_order!(key));
unprocessed_nodes.dedup();
let closest_dist = processed_nodes.get(self.count-1).map(|n| n.dist(key));
debug!("Processed: {}/{}", processed_nodes.len(), processed_nodes.len() + unprocessed_nodes.len());
match unprocessed_nodes.pop() {
None => return None,
Some(node) => {
processed_nodes.push(node.clone());
if closest_dist.map(|dist| node.dist(key) >= dist).unwrap_or(false)
{
/*
* The node is not closer than the <count>th most distant
* node we already asked.
* Let's see if we will receive another node that is closer.
*/
debug!("looking for a closer node");
continue
}
return Some(node)
}
}
}
}
}
#[test]
fn empty() {
let key = [0; NODEID_BYTELEN];
let mut iter = ClosestNodesIter::new(key, 10, vec![]);
assert_eq!(iter.next(), None);
}
#[test]
fn clone() {
let key = [0; NODEID_BYTELEN];
let node = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
let mut iter1 = ClosestNodesIter::new(key, 10, vec![node.clone()]);
let mut iter2 = iter1.clone();
assert_eq!(iter2.next(), Some(node));
assert_eq!(iter1.next(), None);
assert_eq!(iter2.next(), None);
}
#[test]
fn order() {
for count in 2..4 {
let key = [0; NODEID_BYTELEN];
let node0xff = Node::new("127.0.0.1:2134", [0xff; NODEID_BYTELEN]).unwrap();
let mut iter = ClosestNodesIter::new(key, count, vec![node0xff.clone()]);
let node0x77 = Node::new("127.0.0.1:2134", [0x77; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x77.clone()]);
assert_eq!(iter.next(), Some(node0x77));
let node0x00 = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x00.clone()]);
assert_eq!(iter.next(), Some(node0x00));
if count == 3 {
assert_eq!(iter.next(), Some(node0xff));
}
assert_eq!(iter.next(), None);
}
}
|
next
|
identifier_name
|
closest_nodes_iter.rs
|
use std::sync::{Arc, Mutex, Condvar};
use std::sync::mpsc::Receiver;
use std::thread::spawn;
use node::{Node, NodeId};
#[cfg(test)]
use node::NODEID_BYTELEN;
#[derive(Clone)]
pub struct ClosestNodesIter {
key: Arc<NodeId>,
count: usize, // ask at least <count> nodes
processed_nodes: Arc<Mutex<Vec<Node>>>,
unprocessed_nodes: Arc<(Mutex<(Vec<Node>, usize)>, Condvar)>,
}
impl ClosestNodesIter {
pub fn new(key: NodeId, count: usize, node_list: Vec<Node>) -> ClosestNodesIter {
let this = ClosestNodesIter {
key: Arc::new(key),
count: count,
processed_nodes: Arc::new(Mutex::new(vec![])),
unprocessed_nodes: Arc::new((Mutex::new((vec![], 0)), Condvar::new())),
};
this.add_nodes(node_list);
this
}
#[allow(dead_code)]
pub fn get_closest_nodes(&self, n: usize) -> Vec<Node> {
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, _) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
let mut nodes = vec![];
for n in unprocessed_nodes.iter().chain(processed_nodes.iter()) {
nodes.push(n.clone())
}
let key = &self.key;
nodes.sort_by(asc_dist_order!(key));
nodes.truncate(n);
nodes
}
pub fn add_nodes(&self, node_list: Vec<Node>) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
let iter = node_list.iter().filter(|n|!processed_nodes.contains(n));
for n in iter {
unprocessed_nodes.push(n.clone());
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
pub fn add_node(&self, node: Node) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
if!processed_nodes.contains(&node) {
unprocessed_nodes.push(node);
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
#[allow(dead_code)]
pub fn recv_nodes(&self, rx: Receiver<Vec<Node>>) {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// increment receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers += 1;
cvar.notify_all();
let this = self.clone();
spawn(move || {
for addr_list in rx {
this.add_nodes(addr_list);
}
// wait for lock
let &(ref lock, ref cvar) = &*this.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// decrement receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers -= 1;
cvar.notify_all();
});
|
}
}
impl Iterator for ClosestNodesIter {
type Item = Node;
fn next(&mut self) -> Option<Self::Item> {
let key = &*self.key;
loop {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let mut unprocessed_nodes = pair.0.len();
let mut pending_receivers = pair.1;
// either we have unprocessed_nodes or we wait for pending_receviers
while unprocessed_nodes == 0 && pending_receivers > 0 {
pair = cvar.wait(pair).unwrap();
unprocessed_nodes = pair.0.len();
pending_receivers = pair.1;
}
let mut processed_nodes = self.processed_nodes.lock().unwrap();
processed_nodes.sort_by(asc_dist_order!(key));
processed_nodes.dedup();
let &mut (ref mut unprocessed_nodes, _) = &mut *pair;
unprocessed_nodes.sort_by(desc_dist_order!(key));
unprocessed_nodes.dedup();
let closest_dist = processed_nodes.get(self.count-1).map(|n| n.dist(key));
debug!("Processed: {}/{}", processed_nodes.len(), processed_nodes.len() + unprocessed_nodes.len());
match unprocessed_nodes.pop() {
None => return None,
Some(node) => {
processed_nodes.push(node.clone());
if closest_dist.map(|dist| node.dist(key) >= dist).unwrap_or(false)
{
/*
* The node is not closer than the <count>th most distant
* node we already asked.
* Let's see if we will receive another node that is closer.
*/
debug!("looking for a closer node");
continue
}
return Some(node)
}
}
}
}
}
#[test]
fn empty() {
let key = [0; NODEID_BYTELEN];
let mut iter = ClosestNodesIter::new(key, 10, vec![]);
assert_eq!(iter.next(), None);
}
#[test]
fn clone() {
let key = [0; NODEID_BYTELEN];
let node = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
let mut iter1 = ClosestNodesIter::new(key, 10, vec![node.clone()]);
let mut iter2 = iter1.clone();
assert_eq!(iter2.next(), Some(node));
assert_eq!(iter1.next(), None);
assert_eq!(iter2.next(), None);
}
#[test]
fn order() {
for count in 2..4 {
let key = [0; NODEID_BYTELEN];
let node0xff = Node::new("127.0.0.1:2134", [0xff; NODEID_BYTELEN]).unwrap();
let mut iter = ClosestNodesIter::new(key, count, vec![node0xff.clone()]);
let node0x77 = Node::new("127.0.0.1:2134", [0x77; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x77.clone()]);
assert_eq!(iter.next(), Some(node0x77));
let node0x00 = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x00.clone()]);
assert_eq!(iter.next(), Some(node0x00));
if count == 3 {
assert_eq!(iter.next(), Some(node0xff));
}
assert_eq!(iter.next(), None);
}
}
|
random_line_split
|
|
closest_nodes_iter.rs
|
use std::sync::{Arc, Mutex, Condvar};
use std::sync::mpsc::Receiver;
use std::thread::spawn;
use node::{Node, NodeId};
#[cfg(test)]
use node::NODEID_BYTELEN;
#[derive(Clone)]
pub struct ClosestNodesIter {
key: Arc<NodeId>,
count: usize, // ask at least <count> nodes
processed_nodes: Arc<Mutex<Vec<Node>>>,
unprocessed_nodes: Arc<(Mutex<(Vec<Node>, usize)>, Condvar)>,
}
impl ClosestNodesIter {
pub fn new(key: NodeId, count: usize, node_list: Vec<Node>) -> ClosestNodesIter {
let this = ClosestNodesIter {
key: Arc::new(key),
count: count,
processed_nodes: Arc::new(Mutex::new(vec![])),
unprocessed_nodes: Arc::new((Mutex::new((vec![], 0)), Condvar::new())),
};
this.add_nodes(node_list);
this
}
#[allow(dead_code)]
pub fn get_closest_nodes(&self, n: usize) -> Vec<Node> {
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, _) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
let mut nodes = vec![];
for n in unprocessed_nodes.iter().chain(processed_nodes.iter()) {
nodes.push(n.clone())
}
let key = &self.key;
nodes.sort_by(asc_dist_order!(key));
nodes.truncate(n);
nodes
}
pub fn add_nodes(&self, node_list: Vec<Node>) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
let iter = node_list.iter().filter(|n|!processed_nodes.contains(n));
for n in iter {
unprocessed_nodes.push(n.clone());
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
pub fn add_node(&self, node: Node) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
if!processed_nodes.contains(&node) {
unprocessed_nodes.push(node);
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
#[allow(dead_code)]
pub fn recv_nodes(&self, rx: Receiver<Vec<Node>>) {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// increment receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers += 1;
cvar.notify_all();
let this = self.clone();
spawn(move || {
for addr_list in rx {
this.add_nodes(addr_list);
}
// wait for lock
let &(ref lock, ref cvar) = &*this.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// decrement receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers -= 1;
cvar.notify_all();
});
}
}
impl Iterator for ClosestNodesIter {
type Item = Node;
fn next(&mut self) -> Option<Self::Item> {
let key = &*self.key;
loop {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let mut unprocessed_nodes = pair.0.len();
let mut pending_receivers = pair.1;
// either we have unprocessed_nodes or we wait for pending_receviers
while unprocessed_nodes == 0 && pending_receivers > 0 {
pair = cvar.wait(pair).unwrap();
unprocessed_nodes = pair.0.len();
pending_receivers = pair.1;
}
let mut processed_nodes = self.processed_nodes.lock().unwrap();
processed_nodes.sort_by(asc_dist_order!(key));
processed_nodes.dedup();
let &mut (ref mut unprocessed_nodes, _) = &mut *pair;
unprocessed_nodes.sort_by(desc_dist_order!(key));
unprocessed_nodes.dedup();
let closest_dist = processed_nodes.get(self.count-1).map(|n| n.dist(key));
debug!("Processed: {}/{}", processed_nodes.len(), processed_nodes.len() + unprocessed_nodes.len());
match unprocessed_nodes.pop() {
None => return None,
Some(node) => {
processed_nodes.push(node.clone());
if closest_dist.map(|dist| node.dist(key) >= dist).unwrap_or(false)
{
/*
* The node is not closer than the <count>th most distant
* node we already asked.
* Let's see if we will receive another node that is closer.
*/
debug!("looking for a closer node");
continue
}
return Some(node)
}
}
}
}
}
#[test]
fn empty() {
let key = [0; NODEID_BYTELEN];
let mut iter = ClosestNodesIter::new(key, 10, vec![]);
assert_eq!(iter.next(), None);
}
#[test]
fn clone() {
let key = [0; NODEID_BYTELEN];
let node = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
let mut iter1 = ClosestNodesIter::new(key, 10, vec![node.clone()]);
let mut iter2 = iter1.clone();
assert_eq!(iter2.next(), Some(node));
assert_eq!(iter1.next(), None);
assert_eq!(iter2.next(), None);
}
#[test]
fn order() {
for count in 2..4 {
let key = [0; NODEID_BYTELEN];
let node0xff = Node::new("127.0.0.1:2134", [0xff; NODEID_BYTELEN]).unwrap();
let mut iter = ClosestNodesIter::new(key, count, vec![node0xff.clone()]);
let node0x77 = Node::new("127.0.0.1:2134", [0x77; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x77.clone()]);
assert_eq!(iter.next(), Some(node0x77));
let node0x00 = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x00.clone()]);
assert_eq!(iter.next(), Some(node0x00));
if count == 3
|
assert_eq!(iter.next(), None);
}
}
|
{
assert_eq!(iter.next(), Some(node0xff));
}
|
conditional_block
|
closest_nodes_iter.rs
|
use std::sync::{Arc, Mutex, Condvar};
use std::sync::mpsc::Receiver;
use std::thread::spawn;
use node::{Node, NodeId};
#[cfg(test)]
use node::NODEID_BYTELEN;
#[derive(Clone)]
pub struct ClosestNodesIter {
key: Arc<NodeId>,
count: usize, // ask at least <count> nodes
processed_nodes: Arc<Mutex<Vec<Node>>>,
unprocessed_nodes: Arc<(Mutex<(Vec<Node>, usize)>, Condvar)>,
}
impl ClosestNodesIter {
pub fn new(key: NodeId, count: usize, node_list: Vec<Node>) -> ClosestNodesIter {
let this = ClosestNodesIter {
key: Arc::new(key),
count: count,
processed_nodes: Arc::new(Mutex::new(vec![])),
unprocessed_nodes: Arc::new((Mutex::new((vec![], 0)), Condvar::new())),
};
this.add_nodes(node_list);
this
}
#[allow(dead_code)]
pub fn get_closest_nodes(&self, n: usize) -> Vec<Node> {
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, _) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
let mut nodes = vec![];
for n in unprocessed_nodes.iter().chain(processed_nodes.iter()) {
nodes.push(n.clone())
}
let key = &self.key;
nodes.sort_by(asc_dist_order!(key));
nodes.truncate(n);
nodes
}
pub fn add_nodes(&self, node_list: Vec<Node>) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
let iter = node_list.iter().filter(|n|!processed_nodes.contains(n));
for n in iter {
unprocessed_nodes.push(n.clone());
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
pub fn add_node(&self, node: Node) {
// wait for locks
let processed_nodes = self.processed_nodes.lock().unwrap();
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let &mut(ref mut unprocessed_nodes, _) = &mut *pair;
// add nodes
if!processed_nodes.contains(&node) {
unprocessed_nodes.push(node);
}
// sort nodes
let key = &*self.key;
unprocessed_nodes.sort_by(asc_dist_order!(key));
unprocessed_nodes.dedup();
unprocessed_nodes.truncate(self.count);
// done
cvar.notify_all();
}
#[allow(dead_code)]
pub fn recv_nodes(&self, rx: Receiver<Vec<Node>>) {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// increment receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers += 1;
cvar.notify_all();
let this = self.clone();
spawn(move || {
for addr_list in rx {
this.add_nodes(addr_list);
}
// wait for lock
let &(ref lock, ref cvar) = &*this.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
// decrement receiver count
let &mut (_, ref mut pending_receivers) = &mut *pair;
*pending_receivers -= 1;
cvar.notify_all();
});
}
}
impl Iterator for ClosestNodesIter {
type Item = Node;
fn next(&mut self) -> Option<Self::Item> {
let key = &*self.key;
loop {
// wait for lock
let &(ref lock, ref cvar) = &*self.unprocessed_nodes;
let mut pair = lock.lock().unwrap();
let mut unprocessed_nodes = pair.0.len();
let mut pending_receivers = pair.1;
// either we have unprocessed_nodes or we wait for pending_receviers
while unprocessed_nodes == 0 && pending_receivers > 0 {
pair = cvar.wait(pair).unwrap();
unprocessed_nodes = pair.0.len();
pending_receivers = pair.1;
}
let mut processed_nodes = self.processed_nodes.lock().unwrap();
processed_nodes.sort_by(asc_dist_order!(key));
processed_nodes.dedup();
let &mut (ref mut unprocessed_nodes, _) = &mut *pair;
unprocessed_nodes.sort_by(desc_dist_order!(key));
unprocessed_nodes.dedup();
let closest_dist = processed_nodes.get(self.count-1).map(|n| n.dist(key));
debug!("Processed: {}/{}", processed_nodes.len(), processed_nodes.len() + unprocessed_nodes.len());
match unprocessed_nodes.pop() {
None => return None,
Some(node) => {
processed_nodes.push(node.clone());
if closest_dist.map(|dist| node.dist(key) >= dist).unwrap_or(false)
{
/*
* The node is not closer than the <count>th most distant
* node we already asked.
* Let's see if we will receive another node that is closer.
*/
debug!("looking for a closer node");
continue
}
return Some(node)
}
}
}
}
}
#[test]
fn empty()
|
#[test]
fn clone() {
let key = [0; NODEID_BYTELEN];
let node = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
let mut iter1 = ClosestNodesIter::new(key, 10, vec![node.clone()]);
let mut iter2 = iter1.clone();
assert_eq!(iter2.next(), Some(node));
assert_eq!(iter1.next(), None);
assert_eq!(iter2.next(), None);
}
#[test]
fn order() {
for count in 2..4 {
let key = [0; NODEID_BYTELEN];
let node0xff = Node::new("127.0.0.1:2134", [0xff; NODEID_BYTELEN]).unwrap();
let mut iter = ClosestNodesIter::new(key, count, vec![node0xff.clone()]);
let node0x77 = Node::new("127.0.0.1:2134", [0x77; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x77.clone()]);
assert_eq!(iter.next(), Some(node0x77));
let node0x00 = Node::new("127.0.0.1:2134", [0x00; NODEID_BYTELEN]).unwrap();
iter.clone().add_nodes(vec![node0x00.clone()]);
assert_eq!(iter.next(), Some(node0x00));
if count == 3 {
assert_eq!(iter.next(), Some(node0xff));
}
assert_eq!(iter.next(), None);
}
}
|
{
let key = [0; NODEID_BYTELEN];
let mut iter = ClosestNodesIter::new(key, 10, vec![]);
assert_eq!(iter.next(), None);
}
|
identifier_body
|
rtl8139.rs
|
use alloc::boxed::Box;
use arch::memory;
use collections::slice;
use collections::string::ToString;
use collections::vec::Vec;
use collections::vec_deque::VecDeque;
use core::ptr;
use common::debug;
use drivers::pci::config::PciConfig;
use drivers::io::{Io, Pio};
use network::common::*;
use network::scheme::*;
use fs::{KScheme, Resource, Url};
use syscall::Result;
use sync::Intex;
const RTL8139_TSR_OWN: u32 = 1 << 13;
const RTL8139_CR_RST: u8 = 1 << 4;
const RTL8139_CR_RE: u8 = 1 << 3;
const RTL8139_CR_TE: u8 = 1 << 2;
const RTL8139_CR_BUFE: u8 = 1 << 0;
const RTL8139_ISR_SERR: u16 = 1 << 15;
const RTL8139_ISR_TIMEOUT: u16 = 1 << 14;
const RTL8139_ISR_LENCHG: u16 = 1 << 13;
const RTL8139_ISR_FOVW: u16 = 1 << 6;
const RTL8139_ISR_PUN_LINKCHG: u16 = 1 << 5;
const RTL8139_ISR_RXOVW: u16 = 1 << 4;
const RTL8139_ISR_TER: u16 = 1 << 3;
const RTL8139_ISR_TOK: u16 = 1 << 2;
const RTL8139_ISR_RER: u16 = 1 << 1;
const RTL8139_ISR_ROK: u16 = 1 << 0;
const RTL8139_TCR_IFG: u32 = 0b11 << 24;
const RTL8139_RCR_WRAP: u32 = 1 << 7;
const RTL8139_RCR_AR: u32 = 1 << 4;
const RTL8139_RCR_AB: u32 = 1 << 3;
const RTL8139_RCR_AM: u32 = 1 << 2;
const RTL8139_RCR_APM: u32 = 1 << 1;
#[repr(packed)]
struct Txd {
pub address_port: Pio<u32>,
pub status_port: Pio<u32>,
pub buffer: usize,
}
pub struct Rtl8139Port {
pub idr: [Pio<u8>; 6],
pub rbstart: Pio<u32>,
pub cr: Pio<u8>,
pub capr: Pio<u16>,
pub cbr: Pio<u16>,
pub imr: Pio<u16>,
pub isr: Pio<u16>,
pub tcr: Pio<u32>,
pub rcr: Pio<u32>,
pub config1: Pio<u8>,
}
impl Rtl8139Port {
pub fn new(base: u16) -> Self {
return Rtl8139Port {
idr: [Pio::<u8>::new(base + 0x00),
Pio::<u8>::new(base + 0x01),
Pio::<u8>::new(base + 0x02),
Pio::<u8>::new(base + 0x03),
Pio::<u8>::new(base + 0x04),
Pio::<u8>::new(base + 0x05)],
rbstart: Pio::<u32>::new(base + 0x30),
cr: Pio::<u8>::new(base + 0x37),
capr: Pio::<u16>::new(base + 0x38),
cbr: Pio::<u16>::new(base + 0x3A),
imr: Pio::<u16>::new(base + 0x3C),
isr: Pio::<u16>::new(base + 0x3E),
tcr: Pio::<u32>::new(base + 0x40),
rcr: Pio::<u32>::new(base + 0x44),
config1: Pio::<u8>::new(base + 0x52),
};
}
|
pub struct Rtl8139 {
pci: PciConfig,
base: usize,
memory_mapped: bool,
irq: u8,
resources: Intex<Vec<*mut NetworkResource>>,
inbound: VecDeque<Vec<u8>>,
outbound: VecDeque<Vec<u8>>,
txds: Vec<Txd>,
txd_i: usize,
port: Rtl8139Port,
}
impl Rtl8139 {
pub fn new(mut pci: PciConfig) -> Box<Self> {
let pci_id = unsafe { pci.read(0x00) };
let revision = (unsafe { pci.read(0x08) } & 0xFF) as u8;
if pci_id == 0x813910EC && revision < 0x20 {
debugln!("Not an 8139C+ compatible chip")
}
let base = unsafe { pci.read(0x10) as usize };
let irq = unsafe { pci.read(0x3C) as u8 & 0xF };
let mut module = box Rtl8139 {
pci: pci,
base: base & 0xFFFFFFF0,
memory_mapped: base & 1 == 0,
irq: irq,
resources: Intex::new(Vec::new()),
inbound: VecDeque::new(),
outbound: VecDeque::new(),
txds: Vec::new(),
txd_i: 0,
port: Rtl8139Port::new((base & 0xFFFFFFF0) as u16),
};
unsafe { module.init() };
module
}
unsafe fn init(&mut self) {
debug::d("RTL8139 on: ");
debug::dh(self.base);
if self.memory_mapped {
debug::d(" memory mapped");
} else {
debug::d(" port mapped");
}
debug::d(" IRQ: ");
debug::dbh(self.irq);
self.pci.flag(4, 4, true); // Bus mastering
let base = self.base as u16;
self.port.config1.write(0);
self.port.cr.write(RTL8139_CR_RST);
while self.port.cr.read() & RTL8139_CR_RST!= 0 {}
debug::d(" MAC: ");
MAC_ADDR = MacAddr {
bytes: [self.port.idr[0].read(),
self.port.idr[1].read(),
self.port.idr[2].read(),
self.port.idr[3].read(),
self.port.idr[4].read(),
self.port.idr[5].read()],
};
debug::d(&MAC_ADDR.to_string());
let receive_buffer = memory::alloc(10240);
self.port.rbstart.write(receive_buffer as u32);
for i in 0..4 {
self.txds.push(Txd {
address_port: Pio::<u32>::new(base + 0x20 + (i as u16) * 4),
status_port: Pio::<u32>::new(base + 0x10 + (i as u16) * 4),
buffer: memory::alloc(4096),
});
}
self.port.imr.write(RTL8139_ISR_TOK | RTL8139_ISR_ROK);
debug::d(" IMR: ");
debug::dh(self.port.imr.read() as usize);
self.port.cr.write(RTL8139_CR_RE | RTL8139_CR_TE);
debug::d(" CMD: ");
debug::dbh(self.port.cr.read());
self.port.rcr.write(RTL8139_RCR_WRAP | RTL8139_RCR_AR | RTL8139_RCR_AB | RTL8139_RCR_AM |
RTL8139_RCR_APM);
debug::d(" RCR: ");
debug::dh(self.port.rcr.read() as usize);
self.port.tcr.writef(RTL8139_TCR_IFG, true);
debug::d(" TCR: ");
debug::dh(self.port.tcr.read() as usize);
debug::dl();
}
unsafe fn receive_inbound(&mut self) {
let receive_buffer = self.port.rbstart.read() as usize;
let mut capr = (self.port.capr.read() + 16) as usize;
let cbr = self.port.cbr.read() as usize;
while capr!= cbr {
let frame_addr = receive_buffer + capr + 4;
let frame_status = ptr::read((receive_buffer + capr) as *const u16) as usize;
let frame_len = ptr::read((receive_buffer + capr + 2) as *const u16) as usize;
debug::d("Recv ");
debug::dh(capr as usize);
debug::d(" ");
debug::dh(frame_status);
debug::d(" ");
debug::dh(frame_addr);
debug::d(" ");
debug::dh(frame_len);
debug::dl();
self.inbound.push_back(Vec::from(slice::from_raw_parts(frame_addr as *const u8, frame_len - 4)));
capr = capr + frame_len + 4;
capr = (capr + 3) & (0xFFFFFFFF - 3);
if capr >= 8192 {
capr -= 8192
}
self.port.capr.write((capr as u16) - 16);
}
}
unsafe fn send_outbound(&mut self) {
while let Some(bytes) = self.outbound.pop_front() {
if let Some(ref mut txd) = self.txds.get_mut(self.txd_i) {
if bytes.len() < 4096 {
while!txd.status_port.readf(RTL8139_TSR_OWN) {}
debug::d("Send ");
debug::dh(self.txd_i as usize);
debug::d(" ");
debug::dh(txd.status_port.read() as usize);
debug::d(" ");
debug::dh(txd.buffer);
debug::d(" ");
debug::dh(bytes.len() & 0xFFF);
debug::dl();
::memcpy(txd.buffer as *mut u8, bytes.as_ptr(), bytes.len());
txd.address_port.write(txd.buffer as u32);
txd.status_port.write(bytes.len() as u32 & 0xFFF);
self.txd_i = (self.txd_i + 1) % 4;
} else {
debug::dl();
debug::d("RTL8139: Frame too long for transmit: ");
debug::dd(bytes.len());
debug::dl();
}
} else {
debug::d("RTL8139: TXD Overflow!\n");
self.txd_i = 0;
}
}
}
}
impl KScheme for Rtl8139 {
fn scheme(&self) -> &str {
"network"
}
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
Ok(NetworkResource::new(self))
}
fn on_irq(&mut self, irq: u8) {
if irq == self.irq {
let isr = self.port.isr.read();
self.port.isr.write(isr);
// dh(isr as usize);
// dl();
self.sync();
}
}
}
impl NetworkScheme for Rtl8139 {
fn add(&mut self, resource: *mut NetworkResource) {
self.resources.lock().push(resource);
}
fn remove(&mut self, resource: *mut NetworkResource) {
let mut resources = self.resources.lock();
let mut i = 0;
while i < resources.len() {
let mut remove = false;
match resources.get(i) {
Some(ptr) => if *ptr == resource {
remove = true;
} else {
i += 1;
},
None => break,
}
if remove {
resources.remove(i);
}
}
}
fn sync(&mut self) {
unsafe {
{
let resources = self.resources.lock();
for resource in resources.iter() {
while let Some(bytes) = (**resource).outbound.lock().pop_front() {
self.outbound.push_back(bytes);
}
}
}
self.send_outbound();
self.receive_inbound();
{
let resources = self.resources.lock();
while let Some(bytes) = self.inbound.pop_front() {
for resource in resources.iter() {
(**resource).inbound.lock().push_back(bytes.clone());
}
}
}
}
}
}
|
}
|
random_line_split
|
rtl8139.rs
|
use alloc::boxed::Box;
use arch::memory;
use collections::slice;
use collections::string::ToString;
use collections::vec::Vec;
use collections::vec_deque::VecDeque;
use core::ptr;
use common::debug;
use drivers::pci::config::PciConfig;
use drivers::io::{Io, Pio};
use network::common::*;
use network::scheme::*;
use fs::{KScheme, Resource, Url};
use syscall::Result;
use sync::Intex;
const RTL8139_TSR_OWN: u32 = 1 << 13;
const RTL8139_CR_RST: u8 = 1 << 4;
const RTL8139_CR_RE: u8 = 1 << 3;
const RTL8139_CR_TE: u8 = 1 << 2;
const RTL8139_CR_BUFE: u8 = 1 << 0;
const RTL8139_ISR_SERR: u16 = 1 << 15;
const RTL8139_ISR_TIMEOUT: u16 = 1 << 14;
const RTL8139_ISR_LENCHG: u16 = 1 << 13;
const RTL8139_ISR_FOVW: u16 = 1 << 6;
const RTL8139_ISR_PUN_LINKCHG: u16 = 1 << 5;
const RTL8139_ISR_RXOVW: u16 = 1 << 4;
const RTL8139_ISR_TER: u16 = 1 << 3;
const RTL8139_ISR_TOK: u16 = 1 << 2;
const RTL8139_ISR_RER: u16 = 1 << 1;
const RTL8139_ISR_ROK: u16 = 1 << 0;
const RTL8139_TCR_IFG: u32 = 0b11 << 24;
const RTL8139_RCR_WRAP: u32 = 1 << 7;
const RTL8139_RCR_AR: u32 = 1 << 4;
const RTL8139_RCR_AB: u32 = 1 << 3;
const RTL8139_RCR_AM: u32 = 1 << 2;
const RTL8139_RCR_APM: u32 = 1 << 1;
#[repr(packed)]
struct Txd {
pub address_port: Pio<u32>,
pub status_port: Pio<u32>,
pub buffer: usize,
}
pub struct Rtl8139Port {
pub idr: [Pio<u8>; 6],
pub rbstart: Pio<u32>,
pub cr: Pio<u8>,
pub capr: Pio<u16>,
pub cbr: Pio<u16>,
pub imr: Pio<u16>,
pub isr: Pio<u16>,
pub tcr: Pio<u32>,
pub rcr: Pio<u32>,
pub config1: Pio<u8>,
}
impl Rtl8139Port {
pub fn new(base: u16) -> Self {
return Rtl8139Port {
idr: [Pio::<u8>::new(base + 0x00),
Pio::<u8>::new(base + 0x01),
Pio::<u8>::new(base + 0x02),
Pio::<u8>::new(base + 0x03),
Pio::<u8>::new(base + 0x04),
Pio::<u8>::new(base + 0x05)],
rbstart: Pio::<u32>::new(base + 0x30),
cr: Pio::<u8>::new(base + 0x37),
capr: Pio::<u16>::new(base + 0x38),
cbr: Pio::<u16>::new(base + 0x3A),
imr: Pio::<u16>::new(base + 0x3C),
isr: Pio::<u16>::new(base + 0x3E),
tcr: Pio::<u32>::new(base + 0x40),
rcr: Pio::<u32>::new(base + 0x44),
config1: Pio::<u8>::new(base + 0x52),
};
}
}
pub struct Rtl8139 {
pci: PciConfig,
base: usize,
memory_mapped: bool,
irq: u8,
resources: Intex<Vec<*mut NetworkResource>>,
inbound: VecDeque<Vec<u8>>,
outbound: VecDeque<Vec<u8>>,
txds: Vec<Txd>,
txd_i: usize,
port: Rtl8139Port,
}
impl Rtl8139 {
pub fn new(mut pci: PciConfig) -> Box<Self> {
let pci_id = unsafe { pci.read(0x00) };
let revision = (unsafe { pci.read(0x08) } & 0xFF) as u8;
if pci_id == 0x813910EC && revision < 0x20 {
debugln!("Not an 8139C+ compatible chip")
}
let base = unsafe { pci.read(0x10) as usize };
let irq = unsafe { pci.read(0x3C) as u8 & 0xF };
let mut module = box Rtl8139 {
pci: pci,
base: base & 0xFFFFFFF0,
memory_mapped: base & 1 == 0,
irq: irq,
resources: Intex::new(Vec::new()),
inbound: VecDeque::new(),
outbound: VecDeque::new(),
txds: Vec::new(),
txd_i: 0,
port: Rtl8139Port::new((base & 0xFFFFFFF0) as u16),
};
unsafe { module.init() };
module
}
unsafe fn init(&mut self) {
debug::d("RTL8139 on: ");
debug::dh(self.base);
if self.memory_mapped {
debug::d(" memory mapped");
} else {
debug::d(" port mapped");
}
debug::d(" IRQ: ");
debug::dbh(self.irq);
self.pci.flag(4, 4, true); // Bus mastering
let base = self.base as u16;
self.port.config1.write(0);
self.port.cr.write(RTL8139_CR_RST);
while self.port.cr.read() & RTL8139_CR_RST!= 0 {}
debug::d(" MAC: ");
MAC_ADDR = MacAddr {
bytes: [self.port.idr[0].read(),
self.port.idr[1].read(),
self.port.idr[2].read(),
self.port.idr[3].read(),
self.port.idr[4].read(),
self.port.idr[5].read()],
};
debug::d(&MAC_ADDR.to_string());
let receive_buffer = memory::alloc(10240);
self.port.rbstart.write(receive_buffer as u32);
for i in 0..4 {
self.txds.push(Txd {
address_port: Pio::<u32>::new(base + 0x20 + (i as u16) * 4),
status_port: Pio::<u32>::new(base + 0x10 + (i as u16) * 4),
buffer: memory::alloc(4096),
});
}
self.port.imr.write(RTL8139_ISR_TOK | RTL8139_ISR_ROK);
debug::d(" IMR: ");
debug::dh(self.port.imr.read() as usize);
self.port.cr.write(RTL8139_CR_RE | RTL8139_CR_TE);
debug::d(" CMD: ");
debug::dbh(self.port.cr.read());
self.port.rcr.write(RTL8139_RCR_WRAP | RTL8139_RCR_AR | RTL8139_RCR_AB | RTL8139_RCR_AM |
RTL8139_RCR_APM);
debug::d(" RCR: ");
debug::dh(self.port.rcr.read() as usize);
self.port.tcr.writef(RTL8139_TCR_IFG, true);
debug::d(" TCR: ");
debug::dh(self.port.tcr.read() as usize);
debug::dl();
}
unsafe fn receive_inbound(&mut self) {
let receive_buffer = self.port.rbstart.read() as usize;
let mut capr = (self.port.capr.read() + 16) as usize;
let cbr = self.port.cbr.read() as usize;
while capr!= cbr {
let frame_addr = receive_buffer + capr + 4;
let frame_status = ptr::read((receive_buffer + capr) as *const u16) as usize;
let frame_len = ptr::read((receive_buffer + capr + 2) as *const u16) as usize;
debug::d("Recv ");
debug::dh(capr as usize);
debug::d(" ");
debug::dh(frame_status);
debug::d(" ");
debug::dh(frame_addr);
debug::d(" ");
debug::dh(frame_len);
debug::dl();
self.inbound.push_back(Vec::from(slice::from_raw_parts(frame_addr as *const u8, frame_len - 4)));
capr = capr + frame_len + 4;
capr = (capr + 3) & (0xFFFFFFFF - 3);
if capr >= 8192 {
capr -= 8192
}
self.port.capr.write((capr as u16) - 16);
}
}
unsafe fn send_outbound(&mut self) {
while let Some(bytes) = self.outbound.pop_front() {
if let Some(ref mut txd) = self.txds.get_mut(self.txd_i) {
if bytes.len() < 4096 {
while!txd.status_port.readf(RTL8139_TSR_OWN) {}
debug::d("Send ");
debug::dh(self.txd_i as usize);
debug::d(" ");
debug::dh(txd.status_port.read() as usize);
debug::d(" ");
debug::dh(txd.buffer);
debug::d(" ");
debug::dh(bytes.len() & 0xFFF);
debug::dl();
::memcpy(txd.buffer as *mut u8, bytes.as_ptr(), bytes.len());
txd.address_port.write(txd.buffer as u32);
txd.status_port.write(bytes.len() as u32 & 0xFFF);
self.txd_i = (self.txd_i + 1) % 4;
} else {
debug::dl();
debug::d("RTL8139: Frame too long for transmit: ");
debug::dd(bytes.len());
debug::dl();
}
} else {
debug::d("RTL8139: TXD Overflow!\n");
self.txd_i = 0;
}
}
}
}
impl KScheme for Rtl8139 {
fn scheme(&self) -> &str {
"network"
}
fn
|
(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
Ok(NetworkResource::new(self))
}
fn on_irq(&mut self, irq: u8) {
if irq == self.irq {
let isr = self.port.isr.read();
self.port.isr.write(isr);
// dh(isr as usize);
// dl();
self.sync();
}
}
}
impl NetworkScheme for Rtl8139 {
fn add(&mut self, resource: *mut NetworkResource) {
self.resources.lock().push(resource);
}
fn remove(&mut self, resource: *mut NetworkResource) {
let mut resources = self.resources.lock();
let mut i = 0;
while i < resources.len() {
let mut remove = false;
match resources.get(i) {
Some(ptr) => if *ptr == resource {
remove = true;
} else {
i += 1;
},
None => break,
}
if remove {
resources.remove(i);
}
}
}
fn sync(&mut self) {
unsafe {
{
let resources = self.resources.lock();
for resource in resources.iter() {
while let Some(bytes) = (**resource).outbound.lock().pop_front() {
self.outbound.push_back(bytes);
}
}
}
self.send_outbound();
self.receive_inbound();
{
let resources = self.resources.lock();
while let Some(bytes) = self.inbound.pop_front() {
for resource in resources.iter() {
(**resource).inbound.lock().push_back(bytes.clone());
}
}
}
}
}
}
|
open
|
identifier_name
|
rtl8139.rs
|
use alloc::boxed::Box;
use arch::memory;
use collections::slice;
use collections::string::ToString;
use collections::vec::Vec;
use collections::vec_deque::VecDeque;
use core::ptr;
use common::debug;
use drivers::pci::config::PciConfig;
use drivers::io::{Io, Pio};
use network::common::*;
use network::scheme::*;
use fs::{KScheme, Resource, Url};
use syscall::Result;
use sync::Intex;
const RTL8139_TSR_OWN: u32 = 1 << 13;
const RTL8139_CR_RST: u8 = 1 << 4;
const RTL8139_CR_RE: u8 = 1 << 3;
const RTL8139_CR_TE: u8 = 1 << 2;
const RTL8139_CR_BUFE: u8 = 1 << 0;
const RTL8139_ISR_SERR: u16 = 1 << 15;
const RTL8139_ISR_TIMEOUT: u16 = 1 << 14;
const RTL8139_ISR_LENCHG: u16 = 1 << 13;
const RTL8139_ISR_FOVW: u16 = 1 << 6;
const RTL8139_ISR_PUN_LINKCHG: u16 = 1 << 5;
const RTL8139_ISR_RXOVW: u16 = 1 << 4;
const RTL8139_ISR_TER: u16 = 1 << 3;
const RTL8139_ISR_TOK: u16 = 1 << 2;
const RTL8139_ISR_RER: u16 = 1 << 1;
const RTL8139_ISR_ROK: u16 = 1 << 0;
const RTL8139_TCR_IFG: u32 = 0b11 << 24;
const RTL8139_RCR_WRAP: u32 = 1 << 7;
const RTL8139_RCR_AR: u32 = 1 << 4;
const RTL8139_RCR_AB: u32 = 1 << 3;
const RTL8139_RCR_AM: u32 = 1 << 2;
const RTL8139_RCR_APM: u32 = 1 << 1;
#[repr(packed)]
struct Txd {
pub address_port: Pio<u32>,
pub status_port: Pio<u32>,
pub buffer: usize,
}
pub struct Rtl8139Port {
pub idr: [Pio<u8>; 6],
pub rbstart: Pio<u32>,
pub cr: Pio<u8>,
pub capr: Pio<u16>,
pub cbr: Pio<u16>,
pub imr: Pio<u16>,
pub isr: Pio<u16>,
pub tcr: Pio<u32>,
pub rcr: Pio<u32>,
pub config1: Pio<u8>,
}
impl Rtl8139Port {
pub fn new(base: u16) -> Self {
return Rtl8139Port {
idr: [Pio::<u8>::new(base + 0x00),
Pio::<u8>::new(base + 0x01),
Pio::<u8>::new(base + 0x02),
Pio::<u8>::new(base + 0x03),
Pio::<u8>::new(base + 0x04),
Pio::<u8>::new(base + 0x05)],
rbstart: Pio::<u32>::new(base + 0x30),
cr: Pio::<u8>::new(base + 0x37),
capr: Pio::<u16>::new(base + 0x38),
cbr: Pio::<u16>::new(base + 0x3A),
imr: Pio::<u16>::new(base + 0x3C),
isr: Pio::<u16>::new(base + 0x3E),
tcr: Pio::<u32>::new(base + 0x40),
rcr: Pio::<u32>::new(base + 0x44),
config1: Pio::<u8>::new(base + 0x52),
};
}
}
pub struct Rtl8139 {
pci: PciConfig,
base: usize,
memory_mapped: bool,
irq: u8,
resources: Intex<Vec<*mut NetworkResource>>,
inbound: VecDeque<Vec<u8>>,
outbound: VecDeque<Vec<u8>>,
txds: Vec<Txd>,
txd_i: usize,
port: Rtl8139Port,
}
impl Rtl8139 {
pub fn new(mut pci: PciConfig) -> Box<Self> {
let pci_id = unsafe { pci.read(0x00) };
let revision = (unsafe { pci.read(0x08) } & 0xFF) as u8;
if pci_id == 0x813910EC && revision < 0x20 {
debugln!("Not an 8139C+ compatible chip")
}
let base = unsafe { pci.read(0x10) as usize };
let irq = unsafe { pci.read(0x3C) as u8 & 0xF };
let mut module = box Rtl8139 {
pci: pci,
base: base & 0xFFFFFFF0,
memory_mapped: base & 1 == 0,
irq: irq,
resources: Intex::new(Vec::new()),
inbound: VecDeque::new(),
outbound: VecDeque::new(),
txds: Vec::new(),
txd_i: 0,
port: Rtl8139Port::new((base & 0xFFFFFFF0) as u16),
};
unsafe { module.init() };
module
}
unsafe fn init(&mut self) {
debug::d("RTL8139 on: ");
debug::dh(self.base);
if self.memory_mapped {
debug::d(" memory mapped");
} else {
debug::d(" port mapped");
}
debug::d(" IRQ: ");
debug::dbh(self.irq);
self.pci.flag(4, 4, true); // Bus mastering
let base = self.base as u16;
self.port.config1.write(0);
self.port.cr.write(RTL8139_CR_RST);
while self.port.cr.read() & RTL8139_CR_RST!= 0 {}
debug::d(" MAC: ");
MAC_ADDR = MacAddr {
bytes: [self.port.idr[0].read(),
self.port.idr[1].read(),
self.port.idr[2].read(),
self.port.idr[3].read(),
self.port.idr[4].read(),
self.port.idr[5].read()],
};
debug::d(&MAC_ADDR.to_string());
let receive_buffer = memory::alloc(10240);
self.port.rbstart.write(receive_buffer as u32);
for i in 0..4 {
self.txds.push(Txd {
address_port: Pio::<u32>::new(base + 0x20 + (i as u16) * 4),
status_port: Pio::<u32>::new(base + 0x10 + (i as u16) * 4),
buffer: memory::alloc(4096),
});
}
self.port.imr.write(RTL8139_ISR_TOK | RTL8139_ISR_ROK);
debug::d(" IMR: ");
debug::dh(self.port.imr.read() as usize);
self.port.cr.write(RTL8139_CR_RE | RTL8139_CR_TE);
debug::d(" CMD: ");
debug::dbh(self.port.cr.read());
self.port.rcr.write(RTL8139_RCR_WRAP | RTL8139_RCR_AR | RTL8139_RCR_AB | RTL8139_RCR_AM |
RTL8139_RCR_APM);
debug::d(" RCR: ");
debug::dh(self.port.rcr.read() as usize);
self.port.tcr.writef(RTL8139_TCR_IFG, true);
debug::d(" TCR: ");
debug::dh(self.port.tcr.read() as usize);
debug::dl();
}
unsafe fn receive_inbound(&mut self) {
let receive_buffer = self.port.rbstart.read() as usize;
let mut capr = (self.port.capr.read() + 16) as usize;
let cbr = self.port.cbr.read() as usize;
while capr!= cbr {
let frame_addr = receive_buffer + capr + 4;
let frame_status = ptr::read((receive_buffer + capr) as *const u16) as usize;
let frame_len = ptr::read((receive_buffer + capr + 2) as *const u16) as usize;
debug::d("Recv ");
debug::dh(capr as usize);
debug::d(" ");
debug::dh(frame_status);
debug::d(" ");
debug::dh(frame_addr);
debug::d(" ");
debug::dh(frame_len);
debug::dl();
self.inbound.push_back(Vec::from(slice::from_raw_parts(frame_addr as *const u8, frame_len - 4)));
capr = capr + frame_len + 4;
capr = (capr + 3) & (0xFFFFFFFF - 3);
if capr >= 8192 {
capr -= 8192
}
self.port.capr.write((capr as u16) - 16);
}
}
unsafe fn send_outbound(&mut self) {
while let Some(bytes) = self.outbound.pop_front() {
if let Some(ref mut txd) = self.txds.get_mut(self.txd_i) {
if bytes.len() < 4096
|
else {
debug::dl();
debug::d("RTL8139: Frame too long for transmit: ");
debug::dd(bytes.len());
debug::dl();
}
} else {
debug::d("RTL8139: TXD Overflow!\n");
self.txd_i = 0;
}
}
}
}
impl KScheme for Rtl8139 {
fn scheme(&self) -> &str {
"network"
}
fn open(&mut self, _: Url, _: usize) -> Result<Box<Resource>> {
Ok(NetworkResource::new(self))
}
fn on_irq(&mut self, irq: u8) {
if irq == self.irq {
let isr = self.port.isr.read();
self.port.isr.write(isr);
// dh(isr as usize);
// dl();
self.sync();
}
}
}
impl NetworkScheme for Rtl8139 {
fn add(&mut self, resource: *mut NetworkResource) {
self.resources.lock().push(resource);
}
fn remove(&mut self, resource: *mut NetworkResource) {
let mut resources = self.resources.lock();
let mut i = 0;
while i < resources.len() {
let mut remove = false;
match resources.get(i) {
Some(ptr) => if *ptr == resource {
remove = true;
} else {
i += 1;
},
None => break,
}
if remove {
resources.remove(i);
}
}
}
fn sync(&mut self) {
unsafe {
{
let resources = self.resources.lock();
for resource in resources.iter() {
while let Some(bytes) = (**resource).outbound.lock().pop_front() {
self.outbound.push_back(bytes);
}
}
}
self.send_outbound();
self.receive_inbound();
{
let resources = self.resources.lock();
while let Some(bytes) = self.inbound.pop_front() {
for resource in resources.iter() {
(**resource).inbound.lock().push_back(bytes.clone());
}
}
}
}
}
}
|
{
while !txd.status_port.readf(RTL8139_TSR_OWN) {}
debug::d("Send ");
debug::dh(self.txd_i as usize);
debug::d(" ");
debug::dh(txd.status_port.read() as usize);
debug::d(" ");
debug::dh(txd.buffer);
debug::d(" ");
debug::dh(bytes.len() & 0xFFF);
debug::dl();
::memcpy(txd.buffer as *mut u8, bytes.as_ptr(), bytes.len());
txd.address_port.write(txd.buffer as u32);
txd.status_port.write(bytes.len() as u32 & 0xFFF);
self.txd_i = (self.txd_i + 1) % 4;
}
|
conditional_block
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for text properties.
use app_units::Au;
use cssparser::Parser;
use parser::ParserContext;
use properties::animated_properties::Animatable;
use style_traits::ParseError;
use values::animated::ToAnimatedZero;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
/// A generic value for the `initial-letter` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum InitialLetter<Number, Integer> {
/// `normal`
Normal,
/// `<number> <integer>?`
Specified(Number, Option<Integer>),
}
impl<N, I> InitialLetter<N, I> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
InitialLetter::Normal
}
}
/// A generic spacing value for the `letter-spacing` and `word-spacing` properties.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum Spacing<Value> {
/// `normal`
Normal,
/// `<value>`
Value(Value),
}
impl<Value> Spacing<Value> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
Spacing::Normal
}
/// Parses.
#[inline]
pub fn
|
<'i, 't, F>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
parse: F)
-> Result<Self, ParseError<'i>>
where F: FnOnce(&ParserContext, &mut Parser<'i, 't>) -> Result<Value, ParseError<'i>>
{
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(Spacing::Normal);
}
parse(context, input).map(Spacing::Value)
}
/// Returns the spacing value, if not `normal`.
#[inline]
pub fn value(&self) -> Option<&Value> {
match *self {
Spacing::Normal => None,
Spacing::Value(ref value) => Some(value),
}
}
}
impl<Value> Animatable for Spacing<Value>
where Value: Animatable + From<Au>,
{
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
if let (&Spacing::Normal, &Spacing::Normal) = (self, other) {
return Ok(Spacing::Normal);
}
let zero = Value::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.add_weighted(other, self_portion, other_portion).map(Spacing::Value)
}
}
impl<V> ComputeSquaredDistance for Spacing<V>
where
V: ComputeSquaredDistance + From<Au>,
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
let zero = V::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.compute_squared_distance(other)
}
}
impl<V> ToAnimatedZero for Spacing<V>
where
V: From<Au>,
{
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}
/// A generic value for the `line-height` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, ComputeSquaredDistance, Copy, Debug, HasViewportPercentage, PartialEq, ToAnimatedValue, ToCss)]
pub enum LineHeight<Number, LengthOrPercentage> {
/// `normal`
Normal,
/// `-moz-block-height`
#[cfg(feature = "gecko")]
MozBlockHeight,
/// `<number>`
Number(Number),
/// `<length-or-percentage>`
Length(LengthOrPercentage),
}
impl<N, L> LineHeight<N, L> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
LineHeight::Normal
}
}
|
parse_with
|
identifier_name
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for text properties.
use app_units::Au;
use cssparser::Parser;
use parser::ParserContext;
use properties::animated_properties::Animatable;
use style_traits::ParseError;
use values::animated::ToAnimatedZero;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
/// A generic value for the `initial-letter` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum InitialLetter<Number, Integer> {
/// `normal`
Normal,
/// `<number> <integer>?`
Specified(Number, Option<Integer>),
}
impl<N, I> InitialLetter<N, I> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
InitialLetter::Normal
}
}
/// A generic spacing value for the `letter-spacing` and `word-spacing` properties.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
|
}
impl<Value> Spacing<Value> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
Spacing::Normal
}
/// Parses.
#[inline]
pub fn parse_with<'i, 't, F>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
parse: F)
-> Result<Self, ParseError<'i>>
where F: FnOnce(&ParserContext, &mut Parser<'i, 't>) -> Result<Value, ParseError<'i>>
{
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(Spacing::Normal);
}
parse(context, input).map(Spacing::Value)
}
/// Returns the spacing value, if not `normal`.
#[inline]
pub fn value(&self) -> Option<&Value> {
match *self {
Spacing::Normal => None,
Spacing::Value(ref value) => Some(value),
}
}
}
impl<Value> Animatable for Spacing<Value>
where Value: Animatable + From<Au>,
{
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
if let (&Spacing::Normal, &Spacing::Normal) = (self, other) {
return Ok(Spacing::Normal);
}
let zero = Value::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.add_weighted(other, self_portion, other_portion).map(Spacing::Value)
}
}
impl<V> ComputeSquaredDistance for Spacing<V>
where
V: ComputeSquaredDistance + From<Au>,
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
let zero = V::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.compute_squared_distance(other)
}
}
impl<V> ToAnimatedZero for Spacing<V>
where
V: From<Au>,
{
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}
/// A generic value for the `line-height` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, ComputeSquaredDistance, Copy, Debug, HasViewportPercentage, PartialEq, ToAnimatedValue, ToCss)]
pub enum LineHeight<Number, LengthOrPercentage> {
/// `normal`
Normal,
/// `-moz-block-height`
#[cfg(feature = "gecko")]
MozBlockHeight,
/// `<number>`
Number(Number),
/// `<length-or-percentage>`
Length(LengthOrPercentage),
}
impl<N, L> LineHeight<N, L> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
LineHeight::Normal
}
}
|
pub enum Spacing<Value> {
/// `normal`
Normal,
/// `<value>`
Value(Value),
|
random_line_split
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for text properties.
use app_units::Au;
use cssparser::Parser;
use parser::ParserContext;
use properties::animated_properties::Animatable;
use style_traits::ParseError;
use values::animated::ToAnimatedZero;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
/// A generic value for the `initial-letter` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum InitialLetter<Number, Integer> {
/// `normal`
Normal,
/// `<number> <integer>?`
Specified(Number, Option<Integer>),
}
impl<N, I> InitialLetter<N, I> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
InitialLetter::Normal
}
}
/// A generic spacing value for the `letter-spacing` and `word-spacing` properties.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum Spacing<Value> {
/// `normal`
Normal,
/// `<value>`
Value(Value),
}
impl<Value> Spacing<Value> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
Spacing::Normal
}
/// Parses.
#[inline]
pub fn parse_with<'i, 't, F>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
parse: F)
-> Result<Self, ParseError<'i>>
where F: FnOnce(&ParserContext, &mut Parser<'i, 't>) -> Result<Value, ParseError<'i>>
{
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(Spacing::Normal);
}
parse(context, input).map(Spacing::Value)
}
/// Returns the spacing value, if not `normal`.
#[inline]
pub fn value(&self) -> Option<&Value> {
match *self {
Spacing::Normal => None,
Spacing::Value(ref value) => Some(value),
}
}
}
impl<Value> Animatable for Spacing<Value>
where Value: Animatable + From<Au>,
{
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
if let (&Spacing::Normal, &Spacing::Normal) = (self, other) {
return Ok(Spacing::Normal);
}
let zero = Value::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.add_weighted(other, self_portion, other_portion).map(Spacing::Value)
}
}
impl<V> ComputeSquaredDistance for Spacing<V>
where
V: ComputeSquaredDistance + From<Au>,
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()>
|
}
impl<V> ToAnimatedZero for Spacing<V>
where
V: From<Au>,
{
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}
/// A generic value for the `line-height` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, ComputeSquaredDistance, Copy, Debug, HasViewportPercentage, PartialEq, ToAnimatedValue, ToCss)]
pub enum LineHeight<Number, LengthOrPercentage> {
/// `normal`
Normal,
/// `-moz-block-height`
#[cfg(feature = "gecko")]
MozBlockHeight,
/// `<number>`
Number(Number),
/// `<length-or-percentage>`
Length(LengthOrPercentage),
}
impl<N, L> LineHeight<N, L> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
LineHeight::Normal
}
}
|
{
let zero = V::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.compute_squared_distance(other)
}
|
identifier_body
|
text.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic types for text properties.
use app_units::Au;
use cssparser::Parser;
use parser::ParserContext;
use properties::animated_properties::Animatable;
use style_traits::ParseError;
use values::animated::ToAnimatedZero;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
/// A generic value for the `initial-letter` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum InitialLetter<Number, Integer> {
/// `normal`
Normal,
/// `<number> <integer>?`
Specified(Number, Option<Integer>),
}
impl<N, I> InitialLetter<N, I> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
InitialLetter::Normal
}
}
/// A generic spacing value for the `letter-spacing` and `word-spacing` properties.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, HasViewportPercentage, PartialEq, ToComputedValue, ToCss)]
pub enum Spacing<Value> {
/// `normal`
Normal,
/// `<value>`
Value(Value),
}
impl<Value> Spacing<Value> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
Spacing::Normal
}
/// Parses.
#[inline]
pub fn parse_with<'i, 't, F>(
context: &ParserContext,
input: &mut Parser<'i, 't>,
parse: F)
-> Result<Self, ParseError<'i>>
where F: FnOnce(&ParserContext, &mut Parser<'i, 't>) -> Result<Value, ParseError<'i>>
{
if input.try(|i| i.expect_ident_matching("normal")).is_ok() {
return Ok(Spacing::Normal);
}
parse(context, input).map(Spacing::Value)
}
/// Returns the spacing value, if not `normal`.
#[inline]
pub fn value(&self) -> Option<&Value> {
match *self {
Spacing::Normal => None,
Spacing::Value(ref value) => Some(value),
}
}
}
impl<Value> Animatable for Spacing<Value>
where Value: Animatable + From<Au>,
{
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
if let (&Spacing::Normal, &Spacing::Normal) = (self, other)
|
let zero = Value::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.add_weighted(other, self_portion, other_portion).map(Spacing::Value)
}
}
impl<V> ComputeSquaredDistance for Spacing<V>
where
V: ComputeSquaredDistance + From<Au>,
{
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
let zero = V::from(Au(0));
let this = self.value().unwrap_or(&zero);
let other = other.value().unwrap_or(&zero);
this.compute_squared_distance(other)
}
}
impl<V> ToAnimatedZero for Spacing<V>
where
V: From<Au>,
{
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) }
}
/// A generic value for the `line-height` property.
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, ComputeSquaredDistance, Copy, Debug, HasViewportPercentage, PartialEq, ToAnimatedValue, ToCss)]
pub enum LineHeight<Number, LengthOrPercentage> {
/// `normal`
Normal,
/// `-moz-block-height`
#[cfg(feature = "gecko")]
MozBlockHeight,
/// `<number>`
Number(Number),
/// `<length-or-percentage>`
Length(LengthOrPercentage),
}
impl<N, L> LineHeight<N, L> {
/// Returns `normal`.
#[inline]
pub fn normal() -> Self {
LineHeight::Normal
}
}
|
{
return Ok(Spacing::Normal);
}
|
conditional_block
|
layout_debug.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Supports writing a trace file created during each layout scope
//! that can be viewed by an external tool to make layout debugging easier.
#![macro_use]
use flow;
use flow_ref::FlowRef;
use rustc_serialize::json;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::fs::File;
use std::io::Write;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
thread_local!(static STATE_KEY: RefCell<Option<State>> = RefCell::new(None));
static DEBUG_ID_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Scope;
#[macro_export]
macro_rules! layout_debug_scope(
($($arg:tt)*) => (
if cfg!(debug_assertions) {
layout_debug::Scope::new(format!($($arg)*))
} else {
layout_debug::Scope
}
)
);
#[derive(RustcEncodable)]
struct ScopeData {
name: String,
pre: String,
post: String,
children: Vec<Box<ScopeData>>,
}
impl ScopeData {
fn new(name: String, pre: String) -> ScopeData {
ScopeData {
name: name,
pre: pre,
post: String::new(),
children: vec!(),
}
}
}
struct State {
flow_root: FlowRef,
scope_stack: Vec<Box<ScopeData>>,
}
/// A layout debugging scope. The entire state of the flow tree
/// will be output at the beginning and end of this scope.
impl Scope {
pub fn
|
(name: String) -> Scope {
STATE_KEY.with(|ref r| {
match *r.borrow_mut() {
Some(ref mut state) => {
let flow_trace = json::encode(&flow::base(&*state.flow_root)).unwrap();
let data = box ScopeData::new(name.clone(), flow_trace);
state.scope_stack.push(data);
}
None => {}
}
});
Scope
}
}
#[cfg(debug_assertions)]
impl Drop for Scope {
fn drop(&mut self) {
STATE_KEY.with(|ref r| {
match *r.borrow_mut() {
Some(ref mut state) => {
let mut current_scope = state.scope_stack.pop().unwrap();
current_scope.post = json::encode(&flow::base(&*state.flow_root)).unwrap();
let previous_scope = state.scope_stack.last_mut().unwrap();
previous_scope.children.push(current_scope);
}
None => {}
}
});
}
}
/// Generate a unique ID. This is used for items such as Fragment
/// which are often reallocated but represent essentially the
/// same data.
pub fn generate_unique_debug_id() -> u16 {
DEBUG_ID_COUNTER.fetch_add(1, Ordering::SeqCst) as u16
}
/// Begin a layout debug trace. If this has not been called,
/// creating debug scopes has no effect.
pub fn begin_trace(flow_root: FlowRef) {
assert!(STATE_KEY.with(|ref r| r.borrow().is_none()));
STATE_KEY.with(|ref r| {
let flow_trace = json::encode(&flow::base(&*flow_root)).unwrap();
let state = State {
scope_stack: vec![box ScopeData::new("root".to_owned(), flow_trace)],
flow_root: flow_root.clone(),
};
*r.borrow_mut() = Some(state);
});
}
/// End the debug layout trace. This will write the layout
/// trace to disk in the current directory. The output
/// file can then be viewed with an external tool.
pub fn end_trace() {
let mut task_state = STATE_KEY.with(|ref r| r.borrow_mut().take().unwrap());
assert!(task_state.scope_stack.len() == 1);
let mut root_scope = task_state.scope_stack.pop().unwrap();
root_scope.post = json::encode(&flow::base(&*task_state.flow_root)).unwrap();
let result = json::encode(&root_scope).unwrap();
let mut file = File::create("layout_trace.json").unwrap();
file.write_all(result.as_bytes()).unwrap();
}
|
new
|
identifier_name
|
layout_debug.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Supports writing a trace file created during each layout scope
//! that can be viewed by an external tool to make layout debugging easier.
#![macro_use]
use flow;
use flow_ref::FlowRef;
use rustc_serialize::json;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::fs::File;
use std::io::Write;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
thread_local!(static STATE_KEY: RefCell<Option<State>> = RefCell::new(None));
static DEBUG_ID_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
pub struct Scope;
#[macro_export]
macro_rules! layout_debug_scope(
($($arg:tt)*) => (
if cfg!(debug_assertions) {
layout_debug::Scope::new(format!($($arg)*))
} else {
layout_debug::Scope
}
)
);
#[derive(RustcEncodable)]
struct ScopeData {
name: String,
pre: String,
post: String,
children: Vec<Box<ScopeData>>,
}
impl ScopeData {
fn new(name: String, pre: String) -> ScopeData {
ScopeData {
name: name,
pre: pre,
post: String::new(),
children: vec!(),
}
}
}
struct State {
flow_root: FlowRef,
|
}
/// A layout debugging scope. The entire state of the flow tree
/// will be output at the beginning and end of this scope.
impl Scope {
pub fn new(name: String) -> Scope {
STATE_KEY.with(|ref r| {
match *r.borrow_mut() {
Some(ref mut state) => {
let flow_trace = json::encode(&flow::base(&*state.flow_root)).unwrap();
let data = box ScopeData::new(name.clone(), flow_trace);
state.scope_stack.push(data);
}
None => {}
}
});
Scope
}
}
#[cfg(debug_assertions)]
impl Drop for Scope {
fn drop(&mut self) {
STATE_KEY.with(|ref r| {
match *r.borrow_mut() {
Some(ref mut state) => {
let mut current_scope = state.scope_stack.pop().unwrap();
current_scope.post = json::encode(&flow::base(&*state.flow_root)).unwrap();
let previous_scope = state.scope_stack.last_mut().unwrap();
previous_scope.children.push(current_scope);
}
None => {}
}
});
}
}
/// Generate a unique ID. This is used for items such as Fragment
/// which are often reallocated but represent essentially the
/// same data.
pub fn generate_unique_debug_id() -> u16 {
DEBUG_ID_COUNTER.fetch_add(1, Ordering::SeqCst) as u16
}
/// Begin a layout debug trace. If this has not been called,
/// creating debug scopes has no effect.
pub fn begin_trace(flow_root: FlowRef) {
assert!(STATE_KEY.with(|ref r| r.borrow().is_none()));
STATE_KEY.with(|ref r| {
let flow_trace = json::encode(&flow::base(&*flow_root)).unwrap();
let state = State {
scope_stack: vec![box ScopeData::new("root".to_owned(), flow_trace)],
flow_root: flow_root.clone(),
};
*r.borrow_mut() = Some(state);
});
}
/// End the debug layout trace. This will write the layout
/// trace to disk in the current directory. The output
/// file can then be viewed with an external tool.
pub fn end_trace() {
let mut task_state = STATE_KEY.with(|ref r| r.borrow_mut().take().unwrap());
assert!(task_state.scope_stack.len() == 1);
let mut root_scope = task_state.scope_stack.pop().unwrap();
root_scope.post = json::encode(&flow::base(&*task_state.flow_root)).unwrap();
let result = json::encode(&root_scope).unwrap();
let mut file = File::create("layout_trace.json").unwrap();
file.write_all(result.as_bytes()).unwrap();
}
|
scope_stack: Vec<Box<ScopeData>>,
|
random_line_split
|
helpers.rs
|
use crate::pp::Printer;
use std::borrow::Cow;
impl Printer {
pub fn word_space<W: Into<Cow<'static, str>>>(&mut self, w: W) {
self.word(w);
self.space();
}
pub fn popen(&mut self) {
self.word("(");
}
pub fn pclose(&mut self) {
|
self.word(")");
}
pub fn hardbreak_if_not_bol(&mut self) {
if!self.is_beginning_of_line() {
self.hardbreak()
}
}
pub fn space_if_not_bol(&mut self) {
if!self.is_beginning_of_line() {
self.space();
}
}
pub fn nbsp(&mut self) {
self.word(" ")
}
pub fn word_nbsp<S: Into<Cow<'static, str>>>(&mut self, w: S) {
self.word(w);
self.nbsp()
}
}
|
random_line_split
|
|
helpers.rs
|
use crate::pp::Printer;
use std::borrow::Cow;
impl Printer {
pub fn word_space<W: Into<Cow<'static, str>>>(&mut self, w: W) {
self.word(w);
self.space();
}
pub fn popen(&mut self) {
self.word("(");
}
pub fn pclose(&mut self) {
self.word(")");
}
pub fn hardbreak_if_not_bol(&mut self) {
if!self.is_beginning_of_line()
|
}
pub fn space_if_not_bol(&mut self) {
if!self.is_beginning_of_line() {
self.space();
}
}
pub fn nbsp(&mut self) {
self.word(" ")
}
pub fn word_nbsp<S: Into<Cow<'static, str>>>(&mut self, w: S) {
self.word(w);
self.nbsp()
}
}
|
{
self.hardbreak()
}
|
conditional_block
|
helpers.rs
|
use crate::pp::Printer;
use std::borrow::Cow;
impl Printer {
pub fn word_space<W: Into<Cow<'static, str>>>(&mut self, w: W)
|
pub fn popen(&mut self) {
self.word("(");
}
pub fn pclose(&mut self) {
self.word(")");
}
pub fn hardbreak_if_not_bol(&mut self) {
if!self.is_beginning_of_line() {
self.hardbreak()
}
}
pub fn space_if_not_bol(&mut self) {
if!self.is_beginning_of_line() {
self.space();
}
}
pub fn nbsp(&mut self) {
self.word(" ")
}
pub fn word_nbsp<S: Into<Cow<'static, str>>>(&mut self, w: S) {
self.word(w);
self.nbsp()
}
}
|
{
self.word(w);
self.space();
}
|
identifier_body
|
helpers.rs
|
use crate::pp::Printer;
use std::borrow::Cow;
impl Printer {
pub fn word_space<W: Into<Cow<'static, str>>>(&mut self, w: W) {
self.word(w);
self.space();
}
pub fn popen(&mut self) {
self.word("(");
}
pub fn pclose(&mut self) {
self.word(")");
}
pub fn
|
(&mut self) {
if!self.is_beginning_of_line() {
self.hardbreak()
}
}
pub fn space_if_not_bol(&mut self) {
if!self.is_beginning_of_line() {
self.space();
}
}
pub fn nbsp(&mut self) {
self.word(" ")
}
pub fn word_nbsp<S: Into<Cow<'static, str>>>(&mut self, w: S) {
self.word(w);
self.nbsp()
}
}
|
hardbreak_if_not_bol
|
identifier_name
|
lib.rs
|
#![recursion_limit = "128"]
#[macro_use]
extern crate log;
#[macro_use]
extern crate quick_error;
mod bearer_token_client;
mod blob_sas_builder;
pub mod client;
mod client_endpoint;
mod connection_string;
mod connection_string_builder;
mod container_sas_builder;
mod hyper_client_endpoint;
mod into_azure_path;
pub mod key_client;
pub mod prelude;
mod rest_client;
pub mod shared_access_signature;
pub use self::connection_string::{ConnectionString, EndpointProtocol};
pub use self::connection_string_builder::ConnectionStringBuilder;
pub use self::into_azure_path::IntoAzurePath;
pub use self::rest_client::{
get_default_json_mime, get_json_mime_fullmetadata, get_json_mime_nometadata, perform_request,
ServiceType,
};
use crate::key_client::KeyClient;
use azure_sdk_core::errors::AzureError;
use azure_sdk_core::headers::COPY_ID;
use azure_sdk_core::util::HeaderMapExt;
pub use client::Client;
pub use client_endpoint::ClientEndpoint;
use http::HeaderMap;
pub use hyper_client_endpoint::HyperClientEndpoint;
pub trait ClientRequired<'a, C>
where
C: Client,
{
fn client(&self) -> &'a C;
}
pub trait KeyClientRequired<'a> {
fn key_client(&self) -> &'a KeyClient;
}
pub trait SharedAccessSignatureSupport<'a> {
type O;
fn with_shared_access_signature(
self,
signature: &'a shared_access_signature::SharedAccessSignature,
) -> Self::O;
}
pub trait SharedAccessSignatureRequired<'a> {
fn shared_access_signature(&self) -> &'a shared_access_signature::SharedAccessSignature;
}
#[derive(Debug, Clone, PartialEq)]
pub struct IPRange {
pub start: std::net::IpAddr,
pub end: std::net::IpAddr,
}
pub type CopyId = uuid::Uuid;
pub fn
|
(headers: &HeaderMap) -> Result<CopyId, AzureError> {
let copy_id = headers
.get_as_str(COPY_ID)
.ok_or_else(|| AzureError::HeaderNotFound(COPY_ID.to_owned()))?;
Ok(uuid::Uuid::parse_str(copy_id)?)
}
|
copy_id_from_headers
|
identifier_name
|
lib.rs
|
#![recursion_limit = "128"]
#[macro_use]
extern crate log;
#[macro_use]
extern crate quick_error;
mod bearer_token_client;
mod blob_sas_builder;
pub mod client;
mod client_endpoint;
mod connection_string;
mod connection_string_builder;
mod container_sas_builder;
mod hyper_client_endpoint;
mod into_azure_path;
pub mod key_client;
pub mod prelude;
mod rest_client;
pub mod shared_access_signature;
pub use self::connection_string::{ConnectionString, EndpointProtocol};
pub use self::connection_string_builder::ConnectionStringBuilder;
pub use self::into_azure_path::IntoAzurePath;
pub use self::rest_client::{
get_default_json_mime, get_json_mime_fullmetadata, get_json_mime_nometadata, perform_request,
ServiceType,
};
use crate::key_client::KeyClient;
use azure_sdk_core::errors::AzureError;
use azure_sdk_core::headers::COPY_ID;
use azure_sdk_core::util::HeaderMapExt;
pub use client::Client;
pub use client_endpoint::ClientEndpoint;
use http::HeaderMap;
pub use hyper_client_endpoint::HyperClientEndpoint;
pub trait ClientRequired<'a, C>
where
C: Client,
{
fn client(&self) -> &'a C;
}
pub trait KeyClientRequired<'a> {
fn key_client(&self) -> &'a KeyClient;
}
pub trait SharedAccessSignatureSupport<'a> {
type O;
fn with_shared_access_signature(
self,
signature: &'a shared_access_signature::SharedAccessSignature,
) -> Self::O;
}
pub trait SharedAccessSignatureRequired<'a> {
fn shared_access_signature(&self) -> &'a shared_access_signature::SharedAccessSignature;
}
#[derive(Debug, Clone, PartialEq)]
pub struct IPRange {
pub start: std::net::IpAddr,
pub end: std::net::IpAddr,
}
pub type CopyId = uuid::Uuid;
pub fn copy_id_from_headers(headers: &HeaderMap) -> Result<CopyId, AzureError> {
|
Ok(uuid::Uuid::parse_str(copy_id)?)
}
|
let copy_id = headers
.get_as_str(COPY_ID)
.ok_or_else(|| AzureError::HeaderNotFound(COPY_ID.to_owned()))?;
|
random_line_split
|
lib.rs
|
// DO NOT EDIT!
// This file was generated automatically from'src/mako/api/lib.rs.mako'
// DO NOT EDIT!
//! This documentation was generated from *logging* crate version *0.1.8+20150326*, where *20150326* is the exact revision of the *logging:v1beta3* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.8*.
//! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/logging1_beta3).
//! # Features
//!
//! Handle the following *Resources* with ease from the central [hub](struct.Logging.html)...
//!
//! * projects
//! * [*log services indexes list*](struct.ProjectLogServiceIndexeListCall.html), [*log services list*](struct.ProjectLogServiceListCall.html), [*log services sinks create*](struct.ProjectLogServiceSinkCreateCall.html), [*log services sinks delete*](struct.ProjectLogServiceSinkDeleteCall.html), [*log services sinks get*](struct.ProjectLogServiceSinkGetCall.html), [*log services sinks list*](struct.ProjectLogServiceSinkListCall.html), [*log services sinks update*](struct.ProjectLogServiceSinkUpdateCall.html), [*logs delete*](struct.ProjectLogDeleteCall.html), [*logs entries write*](struct.ProjectLogEntryWriteCall.html), [*logs list*](struct.ProjectLogListCall.html), [*logs sinks create*](struct.ProjectLogSinkCreateCall.html), [*logs sinks delete*](struct.ProjectLogSinkDeleteCall.html), [*logs sinks get*](struct.ProjectLogSinkGetCall.html), [*logs sinks list*](struct.ProjectLogSinkListCall.html) and [*logs sinks update*](struct.ProjectLogSinkUpdateCall.html)
//!
//!
//!
|
//! # Structure of this Library
//!
//! The API is structured into the following primary items:
//!
//! * **[Hub](struct.Logging.html)**
//! * a central object to maintain state and allow accessing all *Activities*
//! * creates [*Method Builders*](trait.MethodsBuilder.html) which in turn
//! allow access to individual [*Call Builders*](trait.CallBuilder.html)
//! * **[Resources](trait.Resource.html)**
//! * primary types that you can apply *Activities* to
//! * a collection of properties and *Parts*
//! * **[Parts](trait.Part.html)**
//! * a collection of properties
//! * never directly used in *Activities*
//! * **[Activities](trait.CallBuilder.html)**
//! * operations to apply to *Resources*
//!
//! All *structures* are marked with applicable traits to further categorize them and ease browsing.
//!
//! Generally speaking, you can invoke *Activities* like this:
//!
//! ```Rust,ignore
//! let r = hub.resource().activity(...).doit()
//! ```
//!
//! Or specifically...
//!
//! ```ignore
//! let r = hub.projects().logs_sinks_get(...).doit()
//! let r = hub.projects().log_services_sinks_update(...).doit()
//! let r = hub.projects().logs_sinks_update(...).doit()
//! let r = hub.projects().log_services_sinks_create(...).doit()
//! let r = hub.projects().logs_sinks_create(...).doit()
//! let r = hub.projects().log_services_sinks_get(...).doit()
//! ```
//!
//! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities`
//! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be
//! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired.
//! The `doit()` method performs the actual communication with the server and returns the respective result.
//!
//! # Usage
//!
//! ## Setting up your Project
//!
//! To use this library, you would put the following lines into your `Cargo.toml` file:
//!
//! ```toml
//! [dependencies]
//! google-logging1_beta3 = "*"
//! ```
//!
//! ## A complete example
//!
//! ```test_harness,no_run
//! extern crate hyper;
//! extern crate yup_oauth2 as oauth2;
//! extern crate google_logging1_beta3 as logging1_beta3;
//! use logging1_beta3::LogSink;
//! use logging1_beta3::{Result, Error};
//! # #[test] fn egal() {
//! use std::default::Default;
//! use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
//! use logging1_beta3::Logging;
//!
//! // Get an ApplicationSecret instance by some means. It contains the `client_id` and
//! // `client_secret`, among other things.
//! let secret: ApplicationSecret = Default::default();
//! // Instantiate the authenticator. It will choose a suitable authentication flow for you,
//! // unless you replace `None` with the desired Flow.
//! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
//! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
//! // retrieve them from storage.
//! let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
//! hyper::Client::new(),
//! <MemoryStorage as Default>::default(), None);
//! let mut hub = Logging::new(hyper::Client::new(), auth);
//! // As the method needs a request, you would usually fill it with the desired information
//! // into the respective structure. Some of the parts shown here might not be applicable!
//! // Values shown here are possibly random and not representative!
//! let mut req = LogSink::default();
//!
//! // You can configure optional parameters by calling the respective setters at will, and
//! // execute the final call using `doit()`.
//! // Values shown here are possibly random and not representative!
//! let result = hub.projects().log_services_sinks_update(req, "projectsId", "logServicesId", "sinksId")
//! .doit();
//!
//! match result {
//! Err(e) => match e {
//! // The Error enum provides details about what exactly happened.
//! // You can also just use its `Debug`, `Display` or `Error` traits
//! Error::HttpError(_)
//! |Error::MissingAPIKey
//! |Error::MissingToken(_)
//! |Error::Cancelled
//! |Error::UploadSizeLimitExceeded(_, _)
//! |Error::Failure(_)
//! |Error::BadRequest(_)
//! |Error::FieldClash(_)
//! |Error::JsonDecodeError(_, _) => println!("{}", e),
//! },
//! Ok(res) => println!("Success: {:?}", res),
//! }
//! # }
//! ```
//! ## Handling Errors
//!
//! All errors produced by the system are provided either as [Result](enum.Result.html) enumeration as return value of
//! the doit() methods, or handed as possibly intermediate results to either the
//! [Hub Delegate](trait.Delegate.html), or the [Authenticator Delegate](../yup-oauth2/trait.AuthenticatorDelegate.html).
//!
//! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This
//! makes the system potentially resilient to all kinds of errors.
//!
//! ## Uploads and Downloads
//! If a method supports downloads, the response body, which is part of the [Result](enum.Result.html), should be
//! read by you to obtain the media.
//! If such a method also supports a [Response Result](trait.ResponseResult.html), it will return that by default.
//! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making
//! this call: `.param("alt", "media")`.
//!
//! Methods supporting uploads can do so using up to 2 different protocols:
//! *simple* and *resumable*. The distinctiveness of each is represented by customized
//! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively.
//!
//! ## Customization and Callbacks
//!
//! You may alter the way an `doit()` method is called by providing a [delegate](trait.Delegate.html) to the
//! [Method Builder](trait.CallBuilder.html) before making the final `doit()` call.
//! Respective methods will be called to provide progress information, as well as determine whether the system should
//! retry on failure.
//!
//! The [delegate trait](trait.Delegate.html) is default-implemented, allowing you to customize it with minimal effort.
//!
//! ## Optional Parts in Server-Requests
//!
//! All structures provided by this library are made to be [enocodable](trait.RequestValue.html) and
//! [decodable](trait.ResponseResult.html) via *json*. Optionals are used to indicate that partial requests are responses
//! are valid.
//! Most optionals are are considered [Parts](trait.Part.html) which are identifiable by name, which will be sent to
//! the server to indicate either the set parts of the request or the desired parts in the response.
//!
//! ## Builder Arguments
//!
//! Using [method builders](trait.CallBuilder.html), you are able to prepare an action call by repeatedly calling it's methods.
//! These will always take a single argument, for which the following statements are true.
//!
//! * [PODs][wiki-pod] are handed by copy
//! * strings are passed as `&str`
//! * [request values](trait.RequestValue.html) are moved
//!
//! Arguments will always be copied or cloned into the builder, to make them independent of their original life times.
//!
//! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure
//! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern
//! [google-go-api]: https://github.com/google/google-api-go-client
//!
//!
// Unused attributes happen thanks to defined, but unused structures
// We don't warn about this, as depending on the API, some data structures or facilities are never used.
// Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any
// unused imports in fully featured APIs. Same with unused_mut....
#![allow(unused_imports, unused_mut, dead_code)]
include!(concat!(env!("OUT_DIR"), "/lib.rs"));
|
//!
//! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](../index.html).
//!
|
random_line_split
|
functions.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Lightweight utilities. To optimize compile time, do not put complex
//! functions or functions depending on other crates here.
use cpython::{py_fn, PyModule, PyResult, Python};
use cpython_ext::PyNone;
/// Populate an existing module so it contains utilities.
pub(crate) fn populate_module(py: Python<'_>, module: &PyModule) -> PyResult<PyNone> {
module.add(
py,
"sleep",
py_fn!(py, sleep(seconds: f64, gil: bool = false)),
)?;
Ok(PyNone)
}
fn sleep(py: Python, seconds: f64, gil: bool) -> PyResult<PyNone> {
let duration = std::time::Duration::from_micros((seconds * 1e6) as u64);
if gil {
std::thread::sleep(duration);
} else {
|
Ok(PyNone)
}
|
py.allow_threads(|| {
std::thread::sleep(duration);
});
}
|
random_line_split
|
functions.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Lightweight utilities. To optimize compile time, do not put complex
//! functions or functions depending on other crates here.
use cpython::{py_fn, PyModule, PyResult, Python};
use cpython_ext::PyNone;
/// Populate an existing module so it contains utilities.
pub(crate) fn populate_module(py: Python<'_>, module: &PyModule) -> PyResult<PyNone> {
module.add(
py,
"sleep",
py_fn!(py, sleep(seconds: f64, gil: bool = false)),
)?;
Ok(PyNone)
}
fn
|
(py: Python, seconds: f64, gil: bool) -> PyResult<PyNone> {
let duration = std::time::Duration::from_micros((seconds * 1e6) as u64);
if gil {
std::thread::sleep(duration);
} else {
py.allow_threads(|| {
std::thread::sleep(duration);
});
}
Ok(PyNone)
}
|
sleep
|
identifier_name
|
functions.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Lightweight utilities. To optimize compile time, do not put complex
//! functions or functions depending on other crates here.
use cpython::{py_fn, PyModule, PyResult, Python};
use cpython_ext::PyNone;
/// Populate an existing module so it contains utilities.
pub(crate) fn populate_module(py: Python<'_>, module: &PyModule) -> PyResult<PyNone> {
module.add(
py,
"sleep",
py_fn!(py, sleep(seconds: f64, gil: bool = false)),
)?;
Ok(PyNone)
}
fn sleep(py: Python, seconds: f64, gil: bool) -> PyResult<PyNone>
|
{
let duration = std::time::Duration::from_micros((seconds * 1e6) as u64);
if gil {
std::thread::sleep(duration);
} else {
py.allow_threads(|| {
std::thread::sleep(duration);
});
}
Ok(PyNone)
}
|
identifier_body
|
|
functions.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! Lightweight utilities. To optimize compile time, do not put complex
//! functions or functions depending on other crates here.
use cpython::{py_fn, PyModule, PyResult, Python};
use cpython_ext::PyNone;
/// Populate an existing module so it contains utilities.
pub(crate) fn populate_module(py: Python<'_>, module: &PyModule) -> PyResult<PyNone> {
module.add(
py,
"sleep",
py_fn!(py, sleep(seconds: f64, gil: bool = false)),
)?;
Ok(PyNone)
}
fn sleep(py: Python, seconds: f64, gil: bool) -> PyResult<PyNone> {
let duration = std::time::Duration::from_micros((seconds * 1e6) as u64);
if gil {
std::thread::sleep(duration);
} else
|
Ok(PyNone)
}
|
{
py.allow_threads(|| {
std::thread::sleep(duration);
});
}
|
conditional_block
|
threads.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Modules/syscalls/threads.rs
//! Thread management calls
use kernel::prelude::*;
use ObjectHandle;
use Error;
use values;
use SyscallArg;
use kernel::memory::freeze::FreezeMut;
//use kernel::threads::get_process_local;
/// Current process type (provides an object handle for IPC)
pub struct CurProcess;
impl ::objects::Object for CurProcess
{
const CLASS: u16 = values::CLASS_CORE_THISPROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64, Error>
{
match call
{
values::CORE_THISPROCESS_RECVOBJ => {
let class = try!( <u16>::get_arg(&mut args) );
let idx = try!( <usize>::get_arg(&mut args) );
Ok( ::objects::get_unclaimed(class, idx) )
},
values::CORE_THISPROCESS_RECVMSG => {
let dest = try!( <FreezeMut<[u8]>>::get_arg(&mut args) );
todo!("CORE_THISPROCESS_RECVMSG - {:p}+{}", dest.as_ptr(), dest.len());
},
_ => todo!("CurProcess::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::wait_for_obj(obj);
ret += 1;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::clear_wait_for_obj(obj);
ret |= values::EV_THISPROCESS_RECVOBJ;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
}
#[inline(never)]
pub fn exit(status: u32) {
::kernel::threads::exit_process(status);
}
#[inline(never)]
pub fn terminate() {
todo!("terminate()");
}
#[inline(never)]
pub fn newthread(sp: usize, ip: usize) -> ObjectHandle {
todo!("newthread(sp={:#x},ip={:#x})", sp, ip);
}
#[inline(never)]
pub fn newprocess(name: &str, ip: usize, sp: usize, clone_start: usize, clone_end: usize) -> ObjectHandle {
// 1. Create a new process image (virtual address space)
let mut process = ::kernel::threads::ProcessHandle::new(name, clone_start, clone_end);
// 3. Create a new thread using that process image with the specified ip/sp
process.start_root_thread(ip, sp);
struct Process(::kernel::threads::ProcessHandle);
impl ::objects::Object for Process
{
const CLASS: u16 = values::CLASS_CORE_PROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64,Error>
{
match call
{
// Request termination of child process
values::CORE_PROCESS_KILL => todo!("CORE_PROCESS_KILL"),
// Send one of this process' objects to the child process
values::CORE_PROCESS_SENDOBJ => {
let handle = try!(<u32>::get_arg(&mut args));
::objects::give_object(&self.0, handle).map(|_| 0)
},
// Send an IPC message to teh child process
values::CORE_PROCESS_SENDMSG => todo!("CORE_PROCESS_SENDMSG"),
_ => todo!("Process::handle_syscall({},...)", call),
}
}
fn
|
(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
self.0.bind_wait_terminate(obj);
ret += 1;
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
if self.0.clear_wait_terminate(obj) {
ret |= values::EV_PROCESS_TERMINATED;
}
}
ret
}
}
::objects::new_object( Process(process) )
}
// ret: number of events triggered
#[inline(never)]
pub fn wait(events: &mut [values::WaitItem], wake_time_mono: u64) -> Result<u32,Error>
{
let mut waiter = ::kernel::threads::SleepObject::new("wait");
let mut num_bound = 0;
for ev in events.iter() {
num_bound += try!(::objects::wait_on_object(ev.object, ev.flags, &mut waiter));
}
if num_bound == 0 && wake_time_mono ==!0 {
// Attempting to sleep on no events with an infinite timeout! Would sleep forever
log_error!("TODO: What to do when a thread tries to sleep forever");
waiter.wait();
}
// A wake time of 0 means to not sleep at all, just check the status of the events
// TODO: There should be a more efficient way of doing this, than binding only to unbind again
if wake_time_mono!= 0 {
//!0 indicates an unbounded wait (no need to set a wakeup time)
if wake_time_mono!=!0 {
todo!("Set a wakeup timer at {}", wake_time_mono);
//waiter.wait_until(wake_time_mono);
}
else {
waiter.wait();
}
}
Ok( events.iter_mut().fold(0, |total,ev| total + ::objects::clear_wait(ev.object, ev.flags, &mut waiter).unwrap()) )
}
|
bind_wait
|
identifier_name
|
threads.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Modules/syscalls/threads.rs
//! Thread management calls
use kernel::prelude::*;
use ObjectHandle;
use Error;
use values;
use SyscallArg;
use kernel::memory::freeze::FreezeMut;
//use kernel::threads::get_process_local;
/// Current process type (provides an object handle for IPC)
pub struct CurProcess;
impl ::objects::Object for CurProcess
{
const CLASS: u16 = values::CLASS_CORE_THISPROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64, Error>
{
match call
{
values::CORE_THISPROCESS_RECVOBJ => {
let class = try!( <u16>::get_arg(&mut args) );
let idx = try!( <usize>::get_arg(&mut args) );
Ok( ::objects::get_unclaimed(class, idx) )
},
values::CORE_THISPROCESS_RECVMSG => {
let dest = try!( <FreezeMut<[u8]>>::get_arg(&mut args) );
todo!("CORE_THISPROCESS_RECVMSG - {:p}+{}", dest.as_ptr(), dest.len());
},
_ => todo!("CurProcess::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::wait_for_obj(obj);
ret += 1;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::clear_wait_for_obj(obj);
ret |= values::EV_THISPROCESS_RECVOBJ;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
}
#[inline(never)]
pub fn exit(status: u32) {
::kernel::threads::exit_process(status);
}
#[inline(never)]
pub fn terminate() {
todo!("terminate()");
}
#[inline(never)]
pub fn newthread(sp: usize, ip: usize) -> ObjectHandle {
todo!("newthread(sp={:#x},ip={:#x})", sp, ip);
}
#[inline(never)]
pub fn newprocess(name: &str, ip: usize, sp: usize, clone_start: usize, clone_end: usize) -> ObjectHandle {
// 1. Create a new process image (virtual address space)
let mut process = ::kernel::threads::ProcessHandle::new(name, clone_start, clone_end);
// 3. Create a new thread using that process image with the specified ip/sp
process.start_root_thread(ip, sp);
struct Process(::kernel::threads::ProcessHandle);
impl ::objects::Object for Process
{
const CLASS: u16 = values::CLASS_CORE_PROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64,Error>
{
match call
{
// Request termination of child process
values::CORE_PROCESS_KILL => todo!("CORE_PROCESS_KILL"),
// Send one of this process' objects to the child process
values::CORE_PROCESS_SENDOBJ => {
let handle = try!(<u32>::get_arg(&mut args));
::objects::give_object(&self.0, handle).map(|_| 0)
},
// Send an IPC message to teh child process
values::CORE_PROCESS_SENDMSG => todo!("CORE_PROCESS_SENDMSG"),
_ => todo!("Process::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
|
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
self.0.bind_wait_terminate(obj);
ret += 1;
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
if self.0.clear_wait_terminate(obj) {
ret |= values::EV_PROCESS_TERMINATED;
}
}
ret
}
}
::objects::new_object( Process(process) )
}
// ret: number of events triggered
#[inline(never)]
pub fn wait(events: &mut [values::WaitItem], wake_time_mono: u64) -> Result<u32,Error>
{
let mut waiter = ::kernel::threads::SleepObject::new("wait");
let mut num_bound = 0;
for ev in events.iter() {
num_bound += try!(::objects::wait_on_object(ev.object, ev.flags, &mut waiter));
}
if num_bound == 0 && wake_time_mono ==!0 {
// Attempting to sleep on no events with an infinite timeout! Would sleep forever
log_error!("TODO: What to do when a thread tries to sleep forever");
waiter.wait();
}
// A wake time of 0 means to not sleep at all, just check the status of the events
// TODO: There should be a more efficient way of doing this, than binding only to unbind again
if wake_time_mono!= 0 {
//!0 indicates an unbounded wait (no need to set a wakeup time)
if wake_time_mono!=!0 {
todo!("Set a wakeup timer at {}", wake_time_mono);
//waiter.wait_until(wake_time_mono);
}
else {
waiter.wait();
}
}
Ok( events.iter_mut().fold(0, |total,ev| total + ::objects::clear_wait(ev.object, ev.flags, &mut waiter).unwrap()) )
}
|
random_line_split
|
|
threads.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Modules/syscalls/threads.rs
//! Thread management calls
use kernel::prelude::*;
use ObjectHandle;
use Error;
use values;
use SyscallArg;
use kernel::memory::freeze::FreezeMut;
//use kernel::threads::get_process_local;
/// Current process type (provides an object handle for IPC)
pub struct CurProcess;
impl ::objects::Object for CurProcess
{
const CLASS: u16 = values::CLASS_CORE_THISPROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64, Error>
{
match call
{
values::CORE_THISPROCESS_RECVOBJ => {
let class = try!( <u16>::get_arg(&mut args) );
let idx = try!( <usize>::get_arg(&mut args) );
Ok( ::objects::get_unclaimed(class, idx) )
},
values::CORE_THISPROCESS_RECVMSG => {
let dest = try!( <FreezeMut<[u8]>>::get_arg(&mut args) );
todo!("CORE_THISPROCESS_RECVMSG - {:p}+{}", dest.as_ptr(), dest.len());
},
_ => todo!("CurProcess::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::wait_for_obj(obj);
ret += 1;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::clear_wait_for_obj(obj);
ret |= values::EV_THISPROCESS_RECVOBJ;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
}
#[inline(never)]
pub fn exit(status: u32) {
::kernel::threads::exit_process(status);
}
#[inline(never)]
pub fn terminate() {
todo!("terminate()");
}
#[inline(never)]
pub fn newthread(sp: usize, ip: usize) -> ObjectHandle {
todo!("newthread(sp={:#x},ip={:#x})", sp, ip);
}
#[inline(never)]
pub fn newprocess(name: &str, ip: usize, sp: usize, clone_start: usize, clone_end: usize) -> ObjectHandle {
// 1. Create a new process image (virtual address space)
let mut process = ::kernel::threads::ProcessHandle::new(name, clone_start, clone_end);
// 3. Create a new thread using that process image with the specified ip/sp
process.start_root_thread(ip, sp);
struct Process(::kernel::threads::ProcessHandle);
impl ::objects::Object for Process
{
const CLASS: u16 = values::CLASS_CORE_PROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64,Error>
{
match call
{
// Request termination of child process
values::CORE_PROCESS_KILL => todo!("CORE_PROCESS_KILL"),
// Send one of this process' objects to the child process
values::CORE_PROCESS_SENDOBJ => {
let handle = try!(<u32>::get_arg(&mut args));
::objects::give_object(&self.0, handle).map(|_| 0)
},
// Send an IPC message to teh child process
values::CORE_PROCESS_SENDMSG => todo!("CORE_PROCESS_SENDMSG"),
_ => todo!("Process::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
self.0.bind_wait_terminate(obj);
ret += 1;
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
if self.0.clear_wait_terminate(obj) {
ret |= values::EV_PROCESS_TERMINATED;
}
}
ret
}
}
::objects::new_object( Process(process) )
}
// ret: number of events triggered
#[inline(never)]
pub fn wait(events: &mut [values::WaitItem], wake_time_mono: u64) -> Result<u32,Error>
|
}
else {
waiter.wait();
}
}
Ok( events.iter_mut().fold(0, |total,ev| total + ::objects::clear_wait(ev.object, ev.flags, &mut waiter).unwrap()) )
}
|
{
let mut waiter = ::kernel::threads::SleepObject::new("wait");
let mut num_bound = 0;
for ev in events.iter() {
num_bound += try!(::objects::wait_on_object(ev.object, ev.flags, &mut waiter));
}
if num_bound == 0 && wake_time_mono == !0 {
// Attempting to sleep on no events with an infinite timeout! Would sleep forever
log_error!("TODO: What to do when a thread tries to sleep forever");
waiter.wait();
}
// A wake time of 0 means to not sleep at all, just check the status of the events
// TODO: There should be a more efficient way of doing this, than binding only to unbind again
if wake_time_mono != 0 {
// !0 indicates an unbounded wait (no need to set a wakeup time)
if wake_time_mono != !0 {
todo!("Set a wakeup timer at {}", wake_time_mono);
//waiter.wait_until(wake_time_mono);
|
identifier_body
|
threads.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Modules/syscalls/threads.rs
//! Thread management calls
use kernel::prelude::*;
use ObjectHandle;
use Error;
use values;
use SyscallArg;
use kernel::memory::freeze::FreezeMut;
//use kernel::threads::get_process_local;
/// Current process type (provides an object handle for IPC)
pub struct CurProcess;
impl ::objects::Object for CurProcess
{
const CLASS: u16 = values::CLASS_CORE_THISPROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64, Error>
{
match call
{
values::CORE_THISPROCESS_RECVOBJ => {
let class = try!( <u16>::get_arg(&mut args) );
let idx = try!( <usize>::get_arg(&mut args) );
Ok( ::objects::get_unclaimed(class, idx) )
},
values::CORE_THISPROCESS_RECVMSG => {
let dest = try!( <FreezeMut<[u8]>>::get_arg(&mut args) );
todo!("CORE_THISPROCESS_RECVMSG - {:p}+{}", dest.as_ptr(), dest.len());
},
_ => todo!("CurProcess::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::wait_for_obj(obj);
ret += 1;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32
{
let mut ret = 0;
if flags & values::EV_THISPROCESS_RECVOBJ!= 0 {
::objects::clear_wait_for_obj(obj);
ret |= values::EV_THISPROCESS_RECVOBJ;
}
if flags & values::EV_THISPROCESS_RECVMSG!= 0 {
todo!("EV_THISPROCESS_RECVMSG");
}
ret
}
}
#[inline(never)]
pub fn exit(status: u32) {
::kernel::threads::exit_process(status);
}
#[inline(never)]
pub fn terminate() {
todo!("terminate()");
}
#[inline(never)]
pub fn newthread(sp: usize, ip: usize) -> ObjectHandle {
todo!("newthread(sp={:#x},ip={:#x})", sp, ip);
}
#[inline(never)]
pub fn newprocess(name: &str, ip: usize, sp: usize, clone_start: usize, clone_end: usize) -> ObjectHandle {
// 1. Create a new process image (virtual address space)
let mut process = ::kernel::threads::ProcessHandle::new(name, clone_start, clone_end);
// 3. Create a new thread using that process image with the specified ip/sp
process.start_root_thread(ip, sp);
struct Process(::kernel::threads::ProcessHandle);
impl ::objects::Object for Process
{
const CLASS: u16 = values::CLASS_CORE_PROCESS;
fn class(&self) -> u16 { Self::CLASS }
fn as_any(&self) -> &Any { self }
fn handle_syscall(&self, call: u16, mut args: &[usize]) -> Result<u64,Error>
{
match call
{
// Request termination of child process
values::CORE_PROCESS_KILL => todo!("CORE_PROCESS_KILL"),
// Send one of this process' objects to the child process
values::CORE_PROCESS_SENDOBJ => {
let handle = try!(<u32>::get_arg(&mut args));
::objects::give_object(&self.0, handle).map(|_| 0)
},
// Send an IPC message to teh child process
values::CORE_PROCESS_SENDMSG => todo!("CORE_PROCESS_SENDMSG"),
_ => todo!("Process::handle_syscall({},...)", call),
}
}
fn bind_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0 {
self.0.bind_wait_terminate(obj);
ret += 1;
}
ret
}
fn clear_wait(&self, flags: u32, obj: &mut ::kernel::threads::SleepObject) -> u32 {
let mut ret = 0;
// Wait for child process to terminate
if flags & values::EV_PROCESS_TERMINATED!= 0
|
ret
}
}
::objects::new_object( Process(process) )
}
// ret: number of events triggered
#[inline(never)]
pub fn wait(events: &mut [values::WaitItem], wake_time_mono: u64) -> Result<u32,Error>
{
let mut waiter = ::kernel::threads::SleepObject::new("wait");
let mut num_bound = 0;
for ev in events.iter() {
num_bound += try!(::objects::wait_on_object(ev.object, ev.flags, &mut waiter));
}
if num_bound == 0 && wake_time_mono ==!0 {
// Attempting to sleep on no events with an infinite timeout! Would sleep forever
log_error!("TODO: What to do when a thread tries to sleep forever");
waiter.wait();
}
// A wake time of 0 means to not sleep at all, just check the status of the events
// TODO: There should be a more efficient way of doing this, than binding only to unbind again
if wake_time_mono!= 0 {
//!0 indicates an unbounded wait (no need to set a wakeup time)
if wake_time_mono!=!0 {
todo!("Set a wakeup timer at {}", wake_time_mono);
//waiter.wait_until(wake_time_mono);
}
else {
waiter.wait();
}
}
Ok( events.iter_mut().fold(0, |total,ev| total + ::objects::clear_wait(ev.object, ev.flags, &mut waiter).unwrap()) )
}
|
{
if self.0.clear_wait_terminate(obj) {
ret |= values::EV_PROCESS_TERMINATED;
}
}
|
conditional_block
|
pointing.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Pointing", inherited=True, gecko_name="UserInterface") %>
<%helpers:longhand name="cursor" animatable="False" spec="https://drafts.csswg.org/css-ui/#cursor">
pub use self::computed_value::T as SpecifiedValue;
use values::HasViewportPercentage;
use values::computed::ComputedValueAsSpecified;
use values::specified::url::SpecifiedUrl;
impl ComputedValueAsSpecified for SpecifiedValue {}
no_viewport_percentage!(SpecifiedValue);
pub mod computed_value {
use std::fmt;
use style_traits::cursor::Cursor;
use style_traits::ToCss;
use values::specified::url::SpecifiedUrl;
#[derive(Clone, PartialEq, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Keyword {
AutoCursor,
SpecifiedCursor(Cursor),
}
#[cfg(not(feature = "gecko"))]
pub type T = Keyword;
#[cfg(feature = "gecko")]
#[derive(Clone, PartialEq, Debug)]
pub struct Image {
pub url: SpecifiedUrl,
pub hotspot: Option<(f32, f32)>,
}
#[cfg(feature = "gecko")]
#[derive(Clone, PartialEq, Debug)]
pub struct T {
pub images: Vec<Image>,
pub keyword: Keyword,
}
impl ToCss for Keyword {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Keyword::AutoCursor => dest.write_str("auto"),
Keyword::SpecifiedCursor(c) => c.to_css(dest),
}
}
}
#[cfg(feature = "gecko")]
impl ToCss for Image {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.url.to_css(dest));
if let Some((x, y)) = self.hotspot {
try!(dest.write_str(" "));
try!(x.to_css(dest));
try!(dest.write_str(" "));
try!(y.to_css(dest));
}
Ok(())
}
}
#[cfg(feature = "gecko")]
impl ToCss for T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
for url in &self.images {
try!(url.to_css(dest));
try!(dest.write_str(", "));
}
self.keyword.to_css(dest)
}
}
}
#[cfg(not(feature = "gecko"))]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::Keyword::AutoCursor
}
#[cfg(feature = "gecko")]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T {
images: vec![],
keyword: computed_value::Keyword::AutoCursor
}
}
impl Parse for computed_value::Keyword {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<computed_value::Keyword, ()> {
use std::ascii::AsciiExt;
use style_traits::cursor::Cursor;
let ident = try!(input.expect_ident());
if ident.eq_ignore_ascii_case("auto") {
Ok(computed_value::Keyword::AutoCursor)
} else {
|
#[cfg(feature = "gecko")]
fn parse_image(context: &ParserContext, input: &mut Parser) -> Result<computed_value::Image, ()> {
Ok(computed_value::Image {
url: try!(SpecifiedUrl::parse(context, input)),
hotspot: match input.try(|input| input.expect_number()) {
Ok(number) => Some((number, try!(input.expect_number()))),
Err(()) => None,
},
})
}
#[cfg(not(feature = "gecko"))]
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
computed_value::Keyword::parse(context, input)
}
/// cursor: [<url> [<number> <number>]?]# [auto | default |...]
#[cfg(feature = "gecko")]
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
let mut images = vec![];
loop {
match input.try(|input| parse_image(context, input)) {
Ok(image) => images.push(image),
Err(()) => break,
}
try!(input.expect_comma());
}
Ok(computed_value::T {
images: images,
keyword: try!(computed_value::Keyword::parse(context, input)),
})
}
</%helpers:longhand>
// NB: `pointer-events: auto` (and use of `pointer-events` in anything that isn't SVG, in fact)
// is nonstandard, slated for CSS4-UI.
// TODO(pcwalton): SVG-only values.
${helpers.single_keyword("pointer-events", "auto none", animatable=False,
spec="https://www.w3.org/TR/SVG11/interact.html#PointerEventsProperty")}
${helpers.single_keyword("-moz-user-input", "auto none enabled disabled",
products="gecko", gecko_ffi_name="mUserInput",
gecko_enum_prefix="StyleUserInput",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-input)")}
${helpers.single_keyword("-moz-user-modify", "read-only read-write write-only",
products="gecko", gecko_ffi_name="mUserModify",
gecko_enum_prefix="StyleUserModify",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-modify)")}
${helpers.single_keyword("-moz-user-focus",
"none ignore normal select-after select-before select-menu select-same select-all",
products="gecko", gecko_ffi_name="mUserFocus",
gecko_enum_prefix="StyleUserFocus",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-focus)")}
|
Cursor::from_css_keyword(&ident).map(computed_value::Keyword::SpecifiedCursor)
}
}
}
|
random_line_split
|
pointing.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Pointing", inherited=True, gecko_name="UserInterface") %>
<%helpers:longhand name="cursor" animatable="False" spec="https://drafts.csswg.org/css-ui/#cursor">
pub use self::computed_value::T as SpecifiedValue;
use values::HasViewportPercentage;
use values::computed::ComputedValueAsSpecified;
use values::specified::url::SpecifiedUrl;
impl ComputedValueAsSpecified for SpecifiedValue {}
no_viewport_percentage!(SpecifiedValue);
pub mod computed_value {
use std::fmt;
use style_traits::cursor::Cursor;
use style_traits::ToCss;
use values::specified::url::SpecifiedUrl;
#[derive(Clone, PartialEq, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Keyword {
AutoCursor,
SpecifiedCursor(Cursor),
}
#[cfg(not(feature = "gecko"))]
pub type T = Keyword;
#[cfg(feature = "gecko")]
#[derive(Clone, PartialEq, Debug)]
pub struct Image {
pub url: SpecifiedUrl,
pub hotspot: Option<(f32, f32)>,
}
#[cfg(feature = "gecko")]
#[derive(Clone, PartialEq, Debug)]
pub struct T {
pub images: Vec<Image>,
pub keyword: Keyword,
}
impl ToCss for Keyword {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Keyword::AutoCursor => dest.write_str("auto"),
Keyword::SpecifiedCursor(c) => c.to_css(dest),
}
}
}
#[cfg(feature = "gecko")]
impl ToCss for Image {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.url.to_css(dest));
if let Some((x, y)) = self.hotspot {
try!(dest.write_str(" "));
try!(x.to_css(dest));
try!(dest.write_str(" "));
try!(y.to_css(dest));
}
Ok(())
}
}
#[cfg(feature = "gecko")]
impl ToCss for T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
for url in &self.images {
try!(url.to_css(dest));
try!(dest.write_str(", "));
}
self.keyword.to_css(dest)
}
}
}
#[cfg(not(feature = "gecko"))]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::Keyword::AutoCursor
}
#[cfg(feature = "gecko")]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T {
images: vec![],
keyword: computed_value::Keyword::AutoCursor
}
}
impl Parse for computed_value::Keyword {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<computed_value::Keyword, ()> {
use std::ascii::AsciiExt;
use style_traits::cursor::Cursor;
let ident = try!(input.expect_ident());
if ident.eq_ignore_ascii_case("auto") {
Ok(computed_value::Keyword::AutoCursor)
} else {
Cursor::from_css_keyword(&ident).map(computed_value::Keyword::SpecifiedCursor)
}
}
}
#[cfg(feature = "gecko")]
fn parse_image(context: &ParserContext, input: &mut Parser) -> Result<computed_value::Image, ()> {
Ok(computed_value::Image {
url: try!(SpecifiedUrl::parse(context, input)),
hotspot: match input.try(|input| input.expect_number()) {
Ok(number) => Some((number, try!(input.expect_number()))),
Err(()) => None,
},
})
}
#[cfg(not(feature = "gecko"))]
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()>
|
/// cursor: [<url> [<number> <number>]?]# [auto | default |...]
#[cfg(feature = "gecko")]
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
let mut images = vec![];
loop {
match input.try(|input| parse_image(context, input)) {
Ok(image) => images.push(image),
Err(()) => break,
}
try!(input.expect_comma());
}
Ok(computed_value::T {
images: images,
keyword: try!(computed_value::Keyword::parse(context, input)),
})
}
</%helpers:longhand>
// NB: `pointer-events: auto` (and use of `pointer-events` in anything that isn't SVG, in fact)
// is nonstandard, slated for CSS4-UI.
// TODO(pcwalton): SVG-only values.
${helpers.single_keyword("pointer-events", "auto none", animatable=False,
spec="https://www.w3.org/TR/SVG11/interact.html#PointerEventsProperty")}
${helpers.single_keyword("-moz-user-input", "auto none enabled disabled",
products="gecko", gecko_ffi_name="mUserInput",
gecko_enum_prefix="StyleUserInput",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-input)")}
${helpers.single_keyword("-moz-user-modify", "read-only read-write write-only",
products="gecko", gecko_ffi_name="mUserModify",
gecko_enum_prefix="StyleUserModify",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-modify)")}
${helpers.single_keyword("-moz-user-focus",
"none ignore normal select-after select-before select-menu select-same select-all",
products="gecko", gecko_ffi_name="mUserFocus",
gecko_enum_prefix="StyleUserFocus",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-focus)")}
|
{
computed_value::Keyword::parse(context, input)
}
|
identifier_body
|
pointing.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Pointing", inherited=True, gecko_name="UserInterface") %>
<%helpers:longhand name="cursor" animatable="False" spec="https://drafts.csswg.org/css-ui/#cursor">
pub use self::computed_value::T as SpecifiedValue;
use values::HasViewportPercentage;
use values::computed::ComputedValueAsSpecified;
use values::specified::url::SpecifiedUrl;
impl ComputedValueAsSpecified for SpecifiedValue {}
no_viewport_percentage!(SpecifiedValue);
pub mod computed_value {
use std::fmt;
use style_traits::cursor::Cursor;
use style_traits::ToCss;
use values::specified::url::SpecifiedUrl;
#[derive(Clone, PartialEq, Copy, Debug)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Keyword {
AutoCursor,
SpecifiedCursor(Cursor),
}
#[cfg(not(feature = "gecko"))]
pub type T = Keyword;
#[cfg(feature = "gecko")]
#[derive(Clone, PartialEq, Debug)]
pub struct Image {
pub url: SpecifiedUrl,
pub hotspot: Option<(f32, f32)>,
}
#[cfg(feature = "gecko")]
#[derive(Clone, PartialEq, Debug)]
pub struct T {
pub images: Vec<Image>,
pub keyword: Keyword,
}
impl ToCss for Keyword {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
match *self {
Keyword::AutoCursor => dest.write_str("auto"),
Keyword::SpecifiedCursor(c) => c.to_css(dest),
}
}
}
#[cfg(feature = "gecko")]
impl ToCss for Image {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
try!(self.url.to_css(dest));
if let Some((x, y)) = self.hotspot {
try!(dest.write_str(" "));
try!(x.to_css(dest));
try!(dest.write_str(" "));
try!(y.to_css(dest));
}
Ok(())
}
}
#[cfg(feature = "gecko")]
impl ToCss for T {
fn
|
<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
for url in &self.images {
try!(url.to_css(dest));
try!(dest.write_str(", "));
}
self.keyword.to_css(dest)
}
}
}
#[cfg(not(feature = "gecko"))]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::Keyword::AutoCursor
}
#[cfg(feature = "gecko")]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T {
images: vec![],
keyword: computed_value::Keyword::AutoCursor
}
}
impl Parse for computed_value::Keyword {
fn parse(_context: &ParserContext, input: &mut Parser) -> Result<computed_value::Keyword, ()> {
use std::ascii::AsciiExt;
use style_traits::cursor::Cursor;
let ident = try!(input.expect_ident());
if ident.eq_ignore_ascii_case("auto") {
Ok(computed_value::Keyword::AutoCursor)
} else {
Cursor::from_css_keyword(&ident).map(computed_value::Keyword::SpecifiedCursor)
}
}
}
#[cfg(feature = "gecko")]
// Parses a single `<url> [<number> <number>]?` cursor image component; the
// optional pair of numbers is the image hotspot.
fn parse_image(context: &ParserContext, input: &mut Parser) -> Result<computed_value::Image, ()> {
Ok(computed_value::Image {
url: try!(SpecifiedUrl::parse(context, input)),
// If a first number is present, a second one is mandatory.
hotspot: match input.try(|input| input.expect_number()) {
Ok(number) => Some((number, try!(input.expect_number()))),
Err(()) => None,
},
})
}
#[cfg(not(feature = "gecko"))]
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
computed_value::Keyword::parse(context, input)
}
/// cursor: [<url> [<number> <number>]?]# [auto | default |...]
#[cfg(feature = "gecko")]
// Parses the gecko form of `cursor`: zero or more comma-separated
// `<url> [<x> <y>]?` images followed by a mandatory keyword.
pub fn parse(context: &ParserContext, input: &mut Parser) -> Result<SpecifiedValue, ()> {
let mut images = vec![];
loop {
match input.try(|input| parse_image(context, input)) {
Ok(image) => images.push(image),
Err(()) => break,
}
// Every image must be followed by a comma; the keyword comes last.
try!(input.expect_comma());
}
Ok(computed_value::T {
images: images,
keyword: try!(computed_value::Keyword::parse(context, input)),
})
}
</%helpers:longhand>
// NB: `pointer-events: auto` (and use of `pointer-events` in anything that isn't SVG, in fact)
// is nonstandard, slated for CSS4-UI.
// TODO(pcwalton): SVG-only values.
${helpers.single_keyword("pointer-events", "auto none", animatable=False,
spec="https://www.w3.org/TR/SVG11/interact.html#PointerEventsProperty")}
${helpers.single_keyword("-moz-user-input", "auto none enabled disabled",
products="gecko", gecko_ffi_name="mUserInput",
gecko_enum_prefix="StyleUserInput",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-input)")}
${helpers.single_keyword("-moz-user-modify", "read-only read-write write-only",
products="gecko", gecko_ffi_name="mUserModify",
gecko_enum_prefix="StyleUserModify",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-modify)")}
${helpers.single_keyword("-moz-user-focus",
"none ignore normal select-after select-before select-menu select-same select-all",
products="gecko", gecko_ffi_name="mUserFocus",
gecko_enum_prefix="StyleUserFocus",
gecko_inexhaustive=True,
animatable=False,
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-focus)")}
|
to_css
|
identifier_name
|
alias.rs
|
use crate::{
analysis::{namespaces, rust_type::rust_type},
codegen::general,
config::gobjects::GObject,
env::Env,
file_saver,
library::*,
traits::*,
};
use std::{
io::{prelude::*, Result},
path::Path,
};
pub fn
|
(env: &Env, root_path: &Path, mod_rs: &mut Vec<String>) {
let configs: Vec<&GObject> = env
.config
.objects
.values()
.filter(|c| {
c.status.need_generate() && c.type_id.map_or(false, |tid| tid.ns_id == namespaces::MAIN)
})
.collect();
let mut has_any = false;
for config in &configs {
if let Type::Alias(_) = *env.library.type_(config.type_id.unwrap()) {
has_any = true;
break;
}
}
if!has_any {
return;
}
let path = root_path.join("alias.rs");
file_saver::save_to_file(path, env.config.make_backup, |w| {
general::start_comments(w, &env.config)?;
writeln!(w)?;
writeln!(w, "#[allow(unused_imports)]")?;
writeln!(w, "use auto::*;")?;
writeln!(w)?;
mod_rs.push("\nmod alias;".into());
for config in &configs {
if let Type::Alias(ref alias) = *env.library.type_(config.type_id.unwrap()) {
mod_rs.push(format!("pub use self::alias::{};", alias.name));
generate_alias(env, w, alias, config)?;
}
}
Ok(())
});
}
/// Writes one `pub type <name> = <target>;` line for `alias` to `w`.
///
/// The target type is resolved through the analysis layer's `rust_type`;
/// the `GObject` config parameter is currently unused here.
fn generate_alias(env: &Env, w: &mut dyn Write, alias: &Alias, _: &GObject) -> Result<()> {
let typ = rust_type(env, alias.typ).into_string();
writeln!(w, "pub type {} = {};", alias.name, typ)?;
Ok(())
}
|
generate
|
identifier_name
|
alias.rs
|
use crate::{
analysis::{namespaces, rust_type::rust_type},
codegen::general,
config::gobjects::GObject,
env::Env,
file_saver,
library::*,
traits::*,
};
use std::{
io::{prelude::*, Result},
path::Path,
};
/// Generates `alias.rs` under `root_path`, containing a `pub type`
/// definition for every configured object in the main namespace whose
/// library type is an alias.
///
/// When no configured object is an alias, nothing is written and `mod_rs`
/// is left untouched; otherwise the module declaration and one re-export
/// line per alias are appended to `mod_rs`.
pub fn generate(env: &Env, root_path: &Path, mod_rs: &mut Vec<String>) {
    // Objects that need generation and live in the main namespace.
    let configs: Vec<&GObject> = env
        .config
        .objects
        .values()
        .filter(|c| {
            c.status.need_generate() && c.type_id.map_or(false, |tid| tid.ns_id == namespaces::MAIN)
        })
        .collect();
    // Bail out early rather than creating an empty alias.rs.
    let has_any = configs.iter().any(|c| {
        match *env.library.type_(c.type_id.unwrap()) {
            Type::Alias(_) => true,
            _ => false,
        }
    });
    if !has_any {
        return;
    }
    let path = root_path.join("alias.rs");
    file_saver::save_to_file(path, env.config.make_backup, |w| {
        general::start_comments(w, &env.config)?;
        writeln!(w)?;
        writeln!(w, "#[allow(unused_imports)]")?;
        writeln!(w, "use auto::*;")?;
        writeln!(w)?;
        mod_rs.push("\nmod alias;".into());
        for config in &configs {
            if let Type::Alias(ref alias) = *env.library.type_(config.type_id.unwrap()) {
                mod_rs.push(format!("pub use self::alias::{};", alias.name));
                generate_alias(env, w, alias, config)?;
            }
        }
        Ok(())
    });
}
fn generate_alias(env: &Env, w: &mut dyn Write, alias: &Alias, _: &GObject) -> Result<()>
|
{
let typ = rust_type(env, alias.typ).into_string();
writeln!(w, "pub type {} = {};", alias.name, typ)?;
Ok(())
}
|
identifier_body
|
|
alias.rs
|
use crate::{
analysis::{namespaces, rust_type::rust_type},
codegen::general,
config::gobjects::GObject,
env::Env,
file_saver,
library::*,
traits::*,
};
use std::{
io::{prelude::*, Result},
path::Path,
};
pub fn generate(env: &Env, root_path: &Path, mod_rs: &mut Vec<String>) {
let configs: Vec<&GObject> = env
.config
.objects
.values()
.filter(|c| {
c.status.need_generate() && c.type_id.map_or(false, |tid| tid.ns_id == namespaces::MAIN)
})
.collect();
let mut has_any = false;
for config in &configs {
if let Type::Alias(_) = *env.library.type_(config.type_id.unwrap()) {
has_any = true;
break;
}
}
if!has_any {
return;
}
let path = root_path.join("alias.rs");
file_saver::save_to_file(path, env.config.make_backup, |w| {
general::start_comments(w, &env.config)?;
writeln!(w)?;
writeln!(w, "#[allow(unused_imports)]")?;
writeln!(w, "use auto::*;")?;
writeln!(w)?;
mod_rs.push("\nmod alias;".into());
for config in &configs {
if let Type::Alias(ref alias) = *env.library.type_(config.type_id.unwrap()) {
|
}
Ok(())
});
}
/// Emit the Rust alias definition (`pub type NAME = TARGET;`) for `alias`.
fn generate_alias(env: &Env, w: &mut dyn Write, alias: &Alias, _: &GObject) -> Result<()> {
    // `writeln!` on an io::Write already yields the io::Result<()> we return.
    writeln!(
        w,
        "pub type {} = {};",
        alias.name,
        rust_type(env, alias.typ).into_string()
    )
}
|
mod_rs.push(format!("pub use self::alias::{};", alias.name));
generate_alias(env, w, alias, config)?;
}
|
random_line_split
|
alias.rs
|
use crate::{
analysis::{namespaces, rust_type::rust_type},
codegen::general,
config::gobjects::GObject,
env::Env,
file_saver,
library::*,
traits::*,
};
use std::{
io::{prelude::*, Result},
path::Path,
};
pub fn generate(env: &Env, root_path: &Path, mod_rs: &mut Vec<String>) {
let configs: Vec<&GObject> = env
.config
.objects
.values()
.filter(|c| {
c.status.need_generate() && c.type_id.map_or(false, |tid| tid.ns_id == namespaces::MAIN)
})
.collect();
let mut has_any = false;
for config in &configs {
if let Type::Alias(_) = *env.library.type_(config.type_id.unwrap()) {
has_any = true;
break;
}
}
if!has_any
|
let path = root_path.join("alias.rs");
file_saver::save_to_file(path, env.config.make_backup, |w| {
general::start_comments(w, &env.config)?;
writeln!(w)?;
writeln!(w, "#[allow(unused_imports)]")?;
writeln!(w, "use auto::*;")?;
writeln!(w)?;
mod_rs.push("\nmod alias;".into());
for config in &configs {
if let Type::Alias(ref alias) = *env.library.type_(config.type_id.unwrap()) {
mod_rs.push(format!("pub use self::alias::{};", alias.name));
generate_alias(env, w, alias, config)?;
}
}
Ok(())
});
}
/// Writes the `pub type <name> = <target>;` line for a single alias.
/// The target is rendered via the analysis layer's `rust_type`.
fn generate_alias(env: &Env, w: &mut dyn Write, alias: &Alias, _: &GObject) -> Result<()> {
let typ = rust_type(env, alias.typ).into_string();
writeln!(w, "pub type {} = {};", alias.name, typ)?;
Ok(())
}
|
{
return;
}
|
conditional_block
|
whoami.rs
|
#![crate_id(name="whoami", version="1.0.0", author="KokaKiwi")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.21 */
#![allow(non_camel_case_types)]
#![feature(macro_rules)]
extern crate getopts;
extern crate libc;
use std::io::print;
use std::os;
use std::str;
use c_types::{c_passwd, getpwuid};
#[path = "../common/util.rs"] mod util;
#[path = "../common/c_types.rs"] mod c_types;
extern {
pub fn geteuid() -> libc::c_int;
}
// Returns the name of the effective user by looking up the effective uid
// in the passwd database.
//
// Unsafe: dereferences the raw pointer returned by getpwuid.
// NOTE(review): assumes getpwuid never returns NULL here — TODO confirm.
unsafe fn getusername() -> String {
let passwd: *c_passwd = getpwuid(geteuid());
let pw_name: *libc::c_char = (*passwd).pw_name;
let name = str::raw::from_c_str(pw_name);
name
}
static NAME: &'static str = "whoami";
#[allow(dead_code)]
fn main() { uumain(os::args()); }
pub fn
|
(args: Vec<String>) {
let program = args.get(0).as_slice();
let opts = [
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f.to_err_msg()),
};
if matches.opt_present("help") {
println!("whoami 1.0.0");
println!("");
println!("Usage:");
println!(" {:s}", program);
println!("");
print(getopts::usage("print effective userid", opts).as_slice());
return;
}
if matches.opt_present("version") {
println!("whoami 1.0.0");
return;
}
exec();
}
// Prints the effective user's name, followed by a newline, to stdout.
pub fn exec() {
unsafe {
let username = getusername();
println!("{:s}", username);
}
}
|
uumain
|
identifier_name
|
whoami.rs
|
#![crate_id(name="whoami", version="1.0.0", author="KokaKiwi")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.21 */
#![allow(non_camel_case_types)]
#![feature(macro_rules)]
extern crate getopts;
extern crate libc;
use std::io::print;
use std::os;
use std::str;
use c_types::{c_passwd, getpwuid};
#[path = "../common/util.rs"] mod util;
#[path = "../common/c_types.rs"] mod c_types;
extern {
pub fn geteuid() -> libc::c_int;
}
unsafe fn getusername() -> String {
let passwd: *c_passwd = getpwuid(geteuid());
let pw_name: *libc::c_char = (*passwd).pw_name;
let name = str::raw::from_c_str(pw_name);
name
}
static NAME: &'static str = "whoami";
#[allow(dead_code)]
fn main() { uumain(os::args()); }
pub fn uumain(args: Vec<String>) {
let program = args.get(0).as_slice();
let opts = [
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f.to_err_msg()),
};
if matches.opt_present("help") {
println!("whoami 1.0.0");
println!("");
println!("Usage:");
println!(" {:s}", program);
println!("");
print(getopts::usage("print effective userid", opts).as_slice());
return;
}
if matches.opt_present("version")
|
exec();
}
pub fn exec() {
unsafe {
let username = getusername();
println!("{:s}", username);
}
}
|
{
println!("whoami 1.0.0");
return;
}
|
conditional_block
|
whoami.rs
|
#![crate_id(name="whoami", version="1.0.0", author="KokaKiwi")]
/*
* This file is part of the uutils coreutils package.
*
* (c) Jordi Boggiano <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.21 */
#![allow(non_camel_case_types)]
#![feature(macro_rules)]
extern crate getopts;
extern crate libc;
use std::io::print;
use std::os;
use std::str;
use c_types::{c_passwd, getpwuid};
#[path = "../common/util.rs"] mod util;
#[path = "../common/c_types.rs"] mod c_types;
extern {
pub fn geteuid() -> libc::c_int;
}
// Fetches the effective user's name from the passwd entry for geteuid().
// Unsafe: dereferences the raw passwd pointer; presumably the entry
// always exists for the current euid — verify against callers.
unsafe fn getusername() -> String {
let passwd: *c_passwd = getpwuid(geteuid());
let pw_name: *libc::c_char = (*passwd).pw_name;
let name = str::raw::from_c_str(pw_name);
name
}
static NAME: &'static str = "whoami";
#[allow(dead_code)]
fn main()
|
// Entry point: parses command-line flags, prints help or version when
// requested, otherwise falls through to `exec()` to print the user name.
pub fn uumain(args: Vec<String>) {
let program = args.get(0).as_slice();
// Only two flags are recognised: -h/--help and -V/--version.
let opts = [
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => m,
// crash! aborts the process with exit code 1 on a parse error.
Err(f) => crash!(1, "{}", f.to_err_msg()),
};
if matches.opt_present("help") {
println!("whoami 1.0.0");
println!("");
println!("Usage:");
println!(" {:s}", program);
println!("");
print(getopts::usage("print effective userid", opts).as_slice());
return;
}
if matches.opt_present("version") {
println!("whoami 1.0.0");
return;
}
exec();
}
// Looks up and prints the effective user name (one line on stdout).
pub fn exec() {
unsafe {
let username = getusername();
println!("{:s}", username);
}
}
|
{ uumain(os::args()); }
|
identifier_body
|
whoami.rs
|
#![crate_id(name="whoami", version="1.0.0", author="KokaKiwi")]
/*
* This file is part of the uutils coreutils package.
*
|
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/* last synced with: whoami (GNU coreutils) 8.21 */
#![allow(non_camel_case_types)]
#![feature(macro_rules)]
extern crate getopts;
extern crate libc;
use std::io::print;
use std::os;
use std::str;
use c_types::{c_passwd, getpwuid};
#[path = "../common/util.rs"] mod util;
#[path = "../common/c_types.rs"] mod c_types;
extern {
pub fn geteuid() -> libc::c_int;
}
unsafe fn getusername() -> String {
let passwd: *c_passwd = getpwuid(geteuid());
let pw_name: *libc::c_char = (*passwd).pw_name;
let name = str::raw::from_c_str(pw_name);
name
}
static NAME: &'static str = "whoami";
#[allow(dead_code)]
fn main() { uumain(os::args()); }
pub fn uumain(args: Vec<String>) {
let program = args.get(0).as_slice();
let opts = [
getopts::optflag("h", "help", "display this help and exit"),
getopts::optflag("V", "version", "output version information and exit"),
];
let matches = match getopts::getopts(args.tail(), opts) {
Ok(m) => m,
Err(f) => crash!(1, "{}", f.to_err_msg()),
};
if matches.opt_present("help") {
println!("whoami 1.0.0");
println!("");
println!("Usage:");
println!(" {:s}", program);
println!("");
print(getopts::usage("print effective userid", opts).as_slice());
return;
}
if matches.opt_present("version") {
println!("whoami 1.0.0");
return;
}
exec();
}
pub fn exec() {
unsafe {
let username = getusername();
println!("{:s}", username);
}
}
|
* (c) Jordi Boggiano <[email protected]>
|
random_line_split
|
mod.rs
|
mod control_handle;
mod control_base;
mod window;
mod button;
mod check_box;
mod radio_button;
mod text_input;
mod label;
mod image_frame;
#[cfg(feature = "textbox")]
mod text_box;
#[cfg(feature = "rich-textbox")]
mod rich_text_box;
#[cfg(feature = "rich-textbox")]
mod rich_label;
#[cfg(feature = "status-bar")]
mod status_bar;
#[cfg(feature = "tooltip")]
mod tooltip;
#[cfg(feature = "trackbar")]
mod track_bar;
#[cfg(feature = "menu")]
mod menu;
#[cfg(feature = "timer")]
mod timer;
#[cfg(feature = "animation-timer")]
mod animation_timer;
#[cfg(feature = "notice")]
mod notice;
#[cfg(feature = "combobox")]
mod combo_box;
#[cfg(feature = "listbox")]
mod list_box;
#[cfg(feature = "datetime-picker")]
mod date_picker;
#[cfg(feature = "progress-bar")]
mod progress_bar;
#[cfg(feature = "tabs")]
mod tabs;
#[cfg(feature = "tree-view")]
mod treeview;
#[cfg(all(feature = "tree-view-iterator", feature = "tree-view") )]
mod treeview_iterator;
#[cfg(feature = "tray-notification")]
mod tray_notification;
#[cfg(feature = "message-window")]
mod message_window;
#[cfg(feature = "list-view")]
mod list_view;
#[cfg(feature = "number-select")]
mod number_select;
#[cfg(feature = "extern-canvas")]
mod extern_canvas;
#[cfg(feature = "frame")]
mod frame;
#[cfg(feature = "scroll-bar")]
mod scroll_bar;
#[cfg(feature = "plotting")]
mod plotters;
mod handle_from_control;
pub use control_handle::ControlHandle;
pub use control_base::{ControlBase, HwndBuilder, TimerBuilder as BaseTimerBuilder, OtherBuilder};
pub use window::{Window, WindowBuilder, WindowFlags};
pub use button::{Button, ButtonBuilder, ButtonFlags};
pub use check_box::{CheckBox, CheckBoxBuilder, CheckBoxState, CheckBoxFlags};
pub use radio_button::{RadioButton, RadioButtonBuilder, RadioButtonState, RadioButtonFlags};
pub use text_input::{TextInput, TextInputBuilder, TextInputFlags};
pub use label::{Label, LabelBuilder, LabelFlags};
pub use image_frame::{ImageFrame, ImageFrameBuilder, ImageFrameFlags};
#[cfg(feature = "textbox")]
pub use text_box::{TextBox, TextBoxBuilder, TextBoxFlags};
#[cfg(feature = "rich-textbox")]
pub use rich_text_box::*;
#[cfg(feature = "rich-textbox")]
pub use rich_label::*;
#[cfg(feature = "status-bar")]
pub use status_bar::{StatusBar, StatusBarBuilder};
#[cfg(feature = "tooltip")]
pub use tooltip::{Tooltip, TooltipBuilder, TooltipIcon};
#[cfg(feature = "trackbar")]
pub use track_bar::{TrackBar, TrackBarBuilder, TrackBarFlags};
#[cfg(feature = "menu")]
pub use menu::{Menu, MenuBuilder, MenuItem, MenuSeparator, MenuItemBuilder, PopupMenuFlags};
#[cfg(feature = "menu")]
pub use control_base::HmenuBuilder;
#[cfg(feature = "timer")]
#[allow(deprecated)]
pub use timer::{Timer, TimerBuilder};
#[cfg(feature = "animation-timer")]
#[allow(deprecated)]
pub use animation_timer::{AnimationTimer, AnimationTimerBuilder};
#[cfg(feature = "notice")]
pub use notice::{Notice, NoticeSender, NoticeBuilder};
#[cfg(feature = "combobox")]
pub use combo_box::{ComboBox, ComboBoxFlags, ComboBoxBuilder};
#[cfg(feature = "listbox")]
pub use list_box::{ListBox, ListBoxFlags, ListBoxBuilder};
#[cfg(feature = "datetime-picker")]
pub use date_picker::{DatePicker, DatePickerValue, DatePickerFlags, DatePickerBuilder};
#[cfg(feature = "progress-bar")]
pub use progress_bar::{ProgressBar, ProgressBarState, ProgressBarFlags, ProgressBarBuilder};
#[cfg(feature = "tabs")]
pub use tabs::{TabsContainer, Tab, TabsContainerFlags, TabsContainerBuilder, TabBuilder};
#[cfg(feature = "tree-view")]
pub use treeview::{TreeView, TreeViewBuilder, TreeItem, TreeInsert, TreeItemAction, ExpandState, TreeItemState, TreeViewFlags};
#[cfg(all(feature = "tree-view-iterator", feature = "tree-view") )]
pub use treeview_iterator::TreeViewIterator;
#[cfg(feature = "tray-notification")]
pub use tray_notification::{TrayNotificationFlags, TrayNotification, TrayNotificationBuilder};
#[cfg(feature = "message-window")]
pub use message_window::{MessageWindow, MessageWindowBuilder};
#[cfg(feature = "list-view")]
pub use list_view::{ListView, ListViewStyle, ListViewBuilder, ListViewFlags, ListViewExFlags, InsertListViewItem, ListViewItem, InsertListViewColumn, ListViewColumn, ListViewColumnSortArrow, ListViewColumnFlags};
#[cfg(all(feature="list-view", feature="image-list"))]
pub use list_view::ListViewImageListType;
#[cfg(feature = "number-select")]
pub use number_select::{NumberSelect, NumberSelectBuilder, NumberSelectFlags, NumberSelectData};
#[cfg(feature = "extern-canvas")]
pub use extern_canvas::{ExternCanvas, ExternCanvasBuilder, ExternCanvasFlags};
#[cfg(feature = "frame")]
pub use frame::{Frame, FrameBuilder, FrameFlags};
#[cfg(feature = "scroll-bar")]
pub use scroll_bar::{ScrollBar, ScrollBarBuilder, ScrollBarFlags};
#[cfg(feature = "plotting")]
pub use self::plotters::{Plotters, PlottersBuilder, PlottersDrawingArea, PlottersBackend, PlottersError};
|
pub use handle_from_control::*;
|
random_line_split
|
|
kernel_log.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/kernel_log.rs
/// Kernel log output (and debug terminal)
//
// Manages a set of windows giving a view into the kernel
// - Kernel log (current) : Contains the most recent kernel log messages
// - Logo
// - TODO: Kernel log (history) : A searchable/filterable/scrollable kernel log
// - TODO: Console
#[allow(unused_imports)]
use kernel::prelude::*;
use super::windows::{WindowGroupHandle,WindowHandle};
use super::{Colour,Dims,Pos,Rect};
use core::fmt;
use kernel::sync::mutex::{LazyMutex,HeldLazyMutex};
// Bitmap font used by this module is in another file
include!("../../../Graphics/font_cp437_8x16.rs");
// Raw bitmap logo (already encoded with dims and as a rust array)
include!("../../../Graphics/logo.rs");
struct KernelLog
{
_wgh: WindowGroupHandle,
wh: WindowHandle,
_logo_wh: WindowHandle,
cur_line: u32,
#[allow(dead_code)]
buffer_handle: super::windows::BufHandle,
}
/// Character position
#[derive(Copy,Clone,Debug)]
struct CharPos(u32,u32);
struct LogWriter
{
log: HeldLazyMutex<'static,KernelLog>,
pos: CharPos,
colour: Colour,
}
/// Trait to provde 'is_combining', used by render code
trait UnicodeCombining
{
fn is_combining(&self) -> bool;
}
const C_CELL_DIMS: Dims = Dims { w: 8, h: 16 };
static S_KERNEL_LOG: LazyMutex<KernelLog> = lazymutex_init!();
#[doc(hidden)]
/// One-time GUI kernel-log initialisation: creates the log and logo
/// windows (via `KernelLog::new`) and prints the version/build banner.
pub fn init()
{
// Create window (and structure)
S_KERNEL_LOG.init(|| KernelLog::new());
//super::register_dims_update(|| S_KERNEL_LOG.lock().update_dims());
//S_KERNEL_LOG.lock().register_input();
{
use core::fmt::Write;
// First banner line: kernel version, in green.
{
let mut w = LogWriter::new();
w.set_colour(Colour::def_green());
write!(&mut w, "{}", ::kernel::VERSION_STRING).unwrap();
}
// Second banner line: build string, in yellow.
{
let mut w = LogWriter::new();
w.set_colour(Colour::def_yellow());
write!(&mut w, "> {}", ::kernel::BUILD_STRING).unwrap();
}
}
// Populate kernel logging window with accumulated logs
// TODO:
// Register to recieve logs
}
impl KernelLog
{
fn
|
() -> KernelLog
{
// TODO: Register to somehow be informed when dimensions change
// - Is this particular call bad for bypassing the GUI? Or is this acceptable
let max_dims = match ::kernel::metadevs::video::get_display_for_pos( Pos::new(0,0) )
{
Some(display) => display.dims(),
None => {
log_warning!("No display at (0,0)");
Dims::new(0,0)
},
};
// Kernel's window group
let mut wgh = WindowGroupHandle::alloc("Kernel");
// - Log Window
let mut wh = wgh.create_window("Kernel Log");
//wh.set_pos(Pos::new(0,0));
//wh.resize(max_dims);
wh.maximise();
let log_buf_handle = wh.get_buffer();
// - Fancy logo window
let dims = Dims::new(S_LOGO_DIMS.0,S_LOGO_DIMS.1);
let mut logo_wh = wgh.create_window("Logo");
logo_wh.set_pos(Pos::new(max_dims.w-dims.w, 0));
logo_wh.resize(dims);
logo_wh.blit_rect( Rect::new_pd(Pos::new(0,0),dims), &S_LOGO_DATA, dims.w as usize );
if max_dims!= Dims::new(0,0)
{
// > Show windows in reverse render order
wh.show();
logo_wh.show();
}
// Return struct populated with above handles
KernelLog {
_wgh: wgh,
wh: wh,
_logo_wh: logo_wh,
cur_line: 0,
buffer_handle: log_buf_handle,
}
}
/// Scroll the display up a step, revealing a new line
fn scroll_up(&mut self)
{
self.cur_line += 1;
}
/// Write a string to the log display (at the given character position)
fn write_text(&mut self, mut pos: CharPos, colour: Colour, text: &str) -> CharPos
{
// Advance the cursor only when `putc` consumed a new cell; combining
// characters are drawn over the previous cell and return false.
for c in text.chars()
{
if self.putc(pos, colour, c)
{
pos = pos.next();
}
}
// Position one past the last rendered cell.
pos
}
/// Flush changes
fn flush(&mut self)
{
// Poke the WM and tell it to reblit us
self.wh.redraw();
}
/// Writes a single codepoint to the display
///
/// Returns true if the character caused a cell change (i.e. it wasn't a combining character)
fn putc(&mut self, pos: CharPos, colour: Colour, c: char) -> bool
{
// If the character was a combining AND it's not at the start of a line,
// render atop the previous cell
if c.is_combining() && pos.col() > 0 {
self.render_char(pos.prev(), colour, c);
// No new cell consumed: caller must not advance the cursor.
false
}
// Otherwise, wipe the cell and render into it
else {
self.clear_cell(pos);
self.render_char(pos, colour, c);
true
}
}
// Low-level rendering
/// Clear a character cell
fn clear_cell(&mut self, pos: CharPos)
{
self.wh.fill_rect( Rect{ pos : pos.to_pixels(), dims: C_CELL_DIMS }, Colour::def_black());
}
/// Actually does the rendering
fn render_char(&mut self, pos: CharPos, colour: Colour, cp: char)
{
// No-op when the backing buffer has zero width (no display found).
if self.buffer_handle.dims().width() == 0 {
return ;
}
let idx = unicode_to_cp437(cp);
//log_trace!("KernelLog::render_char({:?}, {:?}, '{}') idx={}", pos, colour, cp, idx);
// 8x16 glyph: one byte per row, MSB is the leftmost pixel.
let bitmap = &S_FONTDATA[idx as usize];
// Actual render!
let Pos { x: bx, y: by } = pos.to_pixels();
for row in (0.. 16)
{
let byte = &bitmap[row as usize];
let r = self.buffer_handle.scanline_rgn_mut(by as usize + row, bx as usize, 8);
for col in (0usize.. 8)
{
// `-` binds tighter than `>>`, so this tests bit (7 - col):
// pixels are written left-to-right from the MSB down.
if (byte >> 7-col) & 1!= 0 {
r[col] = colour.as_argb32();
}
}
}
}
}
impl CharPos
{
// CharPos is (row, column) in character cells (see struct CharPos(u32,u32)).
fn col(&self) -> u32 { self.1 }
// Next / previous cell on the same row. `prev` underflows at column 0,
// so callers must only use it when col() > 0 (as `putc` does).
fn next(self) -> CharPos { CharPos(self.0, self.1+1) }
fn prev(self) -> CharPos { CharPos(self.0, self.1-1) }
// Top-left pixel of this character cell, scaled by the 8x16 cell size.
fn to_pixels(self) -> Pos {
Pos::new( (self.1 * C_CELL_DIMS.w) as u32, (self.0 * C_CELL_DIMS.h) as u32 )
}
}
impl LogWriter
{
pub fn new() -> LogWriter
{
// Each writer holds the log lock for its entire lifetime and claims a
// fresh line by scrolling the display up before writing.
let mut log = S_KERNEL_LOG.lock();
log.scroll_up();
LogWriter {
// scroll_up incremented cur_line, so write on the line just revealed.
pos: CharPos(log.cur_line-1,0),
colour: Colour::def_white(),
log: log,
}
}
pub fn set_colour(&mut self, colour: Colour)
{
self.colour = colour;
}
}
impl fmt::Write for LogWriter
{
// Renders the string at the writer's current cursor and advances it.
fn write_str(&mut self, s: &str) -> fmt::Result
{
self.pos = self.log.write_text(self.pos, self.colour, s);
Ok( () )
}
}
impl ::core::ops::Drop for LogWriter
{
fn drop(&mut self)
{
self.log.flush();
}
}
impl UnicodeCombining for char
{
// True for codepoints in the Unicode combining-mark blocks; the log
// renderer draws these over the preceding cell instead of a new one.
fn is_combining(&self) -> bool
{
match *self as u32
{
// Ranges from wikipedia:Combining_Character
0x0300... 0x036F => true,
0x1AB0... 0x1AFF => true,
0x1DC0... 0x1DFF => true,
0x20D0... 0x20FF => true,
0xFE20... 0xFE2F => true,
_ => false,
}
}
}
|
new
|
identifier_name
|
kernel_log.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/kernel_log.rs
/// Kernel log output (and debug terminal)
//
// Manages a set of windows giving a view into the kernel
// - Kernel log (current) : Contains the most recent kernel log messages
// - Logo
// - TODO: Kernel log (history) : A searchable/filterable/scrollable kernel log
// - TODO: Console
#[allow(unused_imports)]
use kernel::prelude::*;
use super::windows::{WindowGroupHandle,WindowHandle};
use super::{Colour,Dims,Pos,Rect};
use core::fmt;
use kernel::sync::mutex::{LazyMutex,HeldLazyMutex};
// Bitmap font used by this module is in another file
include!("../../../Graphics/font_cp437_8x16.rs");
// Raw bitmap logo (already encoded with dims and as a rust array)
include!("../../../Graphics/logo.rs");
struct KernelLog
{
_wgh: WindowGroupHandle,
wh: WindowHandle,
_logo_wh: WindowHandle,
cur_line: u32,
#[allow(dead_code)]
buffer_handle: super::windows::BufHandle,
}
/// Character position
#[derive(Copy,Clone,Debug)]
struct CharPos(u32,u32);
struct LogWriter
{
log: HeldLazyMutex<'static,KernelLog>,
pos: CharPos,
colour: Colour,
}
/// Trait to provde 'is_combining', used by render code
trait UnicodeCombining
{
fn is_combining(&self) -> bool;
}
const C_CELL_DIMS: Dims = Dims { w: 8, h: 16 };
static S_KERNEL_LOG: LazyMutex<KernelLog> = lazymutex_init!();
#[doc(hidden)]
pub fn init()
{
// Create window (and structure)
S_KERNEL_LOG.init(|| KernelLog::new());
//super::register_dims_update(|| S_KERNEL_LOG.lock().update_dims());
//S_KERNEL_LOG.lock().register_input();
{
use core::fmt::Write;
{
let mut w = LogWriter::new();
w.set_colour(Colour::def_green());
write!(&mut w, "{}", ::kernel::VERSION_STRING).unwrap();
}
{
let mut w = LogWriter::new();
w.set_colour(Colour::def_yellow());
write!(&mut w, "> {}", ::kernel::BUILD_STRING).unwrap();
}
}
// Populate kernel logging window with accumulated logs
// TODO:
// Register to recieve logs
}
impl KernelLog
{
/// Creates the kernel log state: a window group holding a maximised
/// log window plus a logo window pinned to the display's top-right.
fn new() -> KernelLog
{
// TODO: Register to somehow be informed when dimensions change
// - Is this particular call bad for bypassing the GUI? Or is this acceptable
let max_dims = match ::kernel::metadevs::video::get_display_for_pos( Pos::new(0,0) )
{
Some(display) => display.dims(),
// Fall back to 0x0 when no display covers the origin.
None => {
log_warning!("No display at (0,0)");
Dims::new(0,0)
},
};
// Kernel's window group
let mut wgh = WindowGroupHandle::alloc("Kernel");
// - Log Window
let mut wh = wgh.create_window("Kernel Log");
//wh.set_pos(Pos::new(0,0));
//wh.resize(max_dims);
wh.maximise();
let log_buf_handle = wh.get_buffer();
// - Fancy logo window
let dims = Dims::new(S_LOGO_DIMS.0,S_LOGO_DIMS.1);
let mut logo_wh = wgh.create_window("Logo");
logo_wh.set_pos(Pos::new(max_dims.w-dims.w, 0));
logo_wh.resize(dims);
logo_wh.blit_rect( Rect::new_pd(Pos::new(0,0),dims), &S_LOGO_DATA, dims.w as usize );
// Only show the windows when a real display was found (dims non-zero).
if max_dims!= Dims::new(0,0)
{
// > Show windows in reverse render order
wh.show();
logo_wh.show();
}
// Return struct populated with above handles
KernelLog {
_wgh: wgh,
wh: wh,
_logo_wh: logo_wh,
cur_line: 0,
buffer_handle: log_buf_handle,
}
}
/// Scroll the display up a step, revealing a new line
fn scroll_up(&mut self)
{
self.cur_line += 1;
}
/// Write a string to the log display (at the given character position)
fn write_text(&mut self, mut pos: CharPos, colour: Colour, text: &str) -> CharPos
{
for c in text.chars()
{
if self.putc(pos, colour, c)
{
pos = pos.next();
}
}
pos
}
/// Flush changes
fn flush(&mut self)
{
// Poke the WM and tell it to reblit us
self.wh.redraw();
}
/// Writes a single codepoint to the display
///
/// Returns true if the character caused a cell change (i.e. it wasn't a combining character)
fn putc(&mut self, pos: CharPos, colour: Colour, c: char) -> bool
{
// If the character was a combining AND it's not at the start of a line,
// render atop the previous cell
if c.is_combining() && pos.col() > 0 {
self.render_char(pos.prev(), colour, c);
false
}
// Otherwise, wipe the cell and render into it
else {
self.clear_cell(pos);
self.render_char(pos, colour, c);
true
}
}
// Low-level rendering
/// Clear a character cell
fn clear_cell(&mut self, pos: CharPos)
{
self.wh.fill_rect( Rect{ pos : pos.to_pixels(), dims: C_CELL_DIMS }, Colour::def_black());
}
/// Actually does the rendering
fn render_char(&mut self, pos: CharPos, colour: Colour, cp: char)
{
if self.buffer_handle.dims().width() == 0 {
return ;
}
let idx = unicode_to_cp437(cp);
//log_trace!("KernelLog::render_char({:?}, {:?}, '{}') idx={}", pos, colour, cp, idx);
let bitmap = &S_FONTDATA[idx as usize];
// Actual render!
let Pos { x: bx, y: by } = pos.to_pixels();
for row in (0.. 16)
{
let byte = &bitmap[row as usize];
let r = self.buffer_handle.scanline_rgn_mut(by as usize + row, bx as usize, 8);
for col in (0usize.. 8)
{
if (byte >> 7-col) & 1!= 0 {
r[col] = colour.as_argb32();
}
}
}
}
}
impl CharPos
{
fn col(&self) -> u32 { self.1 }
fn next(self) -> CharPos { CharPos(self.0, self.1+1) }
fn prev(self) -> CharPos { CharPos(self.0, self.1-1) }
fn to_pixels(self) -> Pos
|
}
impl LogWriter
{
pub fn new() -> LogWriter
{
let mut log = S_KERNEL_LOG.lock();
log.scroll_up();
LogWriter {
pos: CharPos(log.cur_line-1,0),
colour: Colour::def_white(),
log: log,
}
}
pub fn set_colour(&mut self, colour: Colour)
{
self.colour = colour;
}
}
impl fmt::Write for LogWriter
{
fn write_str(&mut self, s: &str) -> fmt::Result
{
self.pos = self.log.write_text(self.pos, self.colour, s);
Ok( () )
}
}
impl ::core::ops::Drop for LogWriter
{
fn drop(&mut self)
{
self.log.flush();
}
}
impl UnicodeCombining for char
{
fn is_combining(&self) -> bool
{
match *self as u32
{
// Ranges from wikipedia:Combining_Character
0x0300... 0x036F => true,
0x1AB0... 0x1AFF => true,
0x1DC0... 0x1DFF => true,
0x20D0... 0x20FF => true,
0xFE20... 0xFE2F => true,
_ => false,
}
}
}
|
{
Pos::new( (self.1 * C_CELL_DIMS.w) as u32, (self.0 * C_CELL_DIMS.h) as u32 )
}
|
identifier_body
|
kernel_log.rs
|
// "Tifflin" Kernel
// - By John Hodge (thePowersGang)
//
// Core/gui/kernel_log.rs
/// Kernel log output (and debug terminal)
//
// Manages a set of windows giving a view into the kernel
// - Kernel log (current) : Contains the most recent kernel log messages
// - Logo
// - TODO: Kernel log (history) : A searchable/filterable/scrollable kernel log
// - TODO: Console
#[allow(unused_imports)]
use kernel::prelude::*;
use super::windows::{WindowGroupHandle,WindowHandle};
use super::{Colour,Dims,Pos,Rect};
use core::fmt;
use kernel::sync::mutex::{LazyMutex,HeldLazyMutex};
// Bitmap font used by this module is in another file
include!("../../../Graphics/font_cp437_8x16.rs");
// Raw bitmap logo (already encoded with dims and as a rust array)
include!("../../../Graphics/logo.rs");
struct KernelLog
{
_wgh: WindowGroupHandle,
wh: WindowHandle,
_logo_wh: WindowHandle,
cur_line: u32,
#[allow(dead_code)]
buffer_handle: super::windows::BufHandle,
}
/// Character position
#[derive(Copy,Clone,Debug)]
struct CharPos(u32,u32);
struct LogWriter
{
log: HeldLazyMutex<'static,KernelLog>,
pos: CharPos,
colour: Colour,
}
/// Trait to provde 'is_combining', used by render code
trait UnicodeCombining
{
fn is_combining(&self) -> bool;
}
const C_CELL_DIMS: Dims = Dims { w: 8, h: 16 };
static S_KERNEL_LOG: LazyMutex<KernelLog> = lazymutex_init!();
#[doc(hidden)]
pub fn init()
{
// Create window (and structure)
S_KERNEL_LOG.init(|| KernelLog::new());
//super::register_dims_update(|| S_KERNEL_LOG.lock().update_dims());
//S_KERNEL_LOG.lock().register_input();
{
use core::fmt::Write;
{
let mut w = LogWriter::new();
w.set_colour(Colour::def_green());
write!(&mut w, "{}", ::kernel::VERSION_STRING).unwrap();
}
{
let mut w = LogWriter::new();
w.set_colour(Colour::def_yellow());
write!(&mut w, "> {}", ::kernel::BUILD_STRING).unwrap();
}
}
// Populate kernel logging window with accumulated logs
// TODO:
// Register to recieve logs
}
impl KernelLog
{
fn new() -> KernelLog
{
// TODO: Register to somehow be informed when dimensions change
// - Is this particular call bad for bypassing the GUI? Or is this acceptable
let max_dims = match ::kernel::metadevs::video::get_display_for_pos( Pos::new(0,0) )
{
Some(display) => display.dims(),
None => {
log_warning!("No display at (0,0)");
Dims::new(0,0)
},
};
// Kernel's window group
let mut wgh = WindowGroupHandle::alloc("Kernel");
// - Log Window
let mut wh = wgh.create_window("Kernel Log");
//wh.set_pos(Pos::new(0,0));
//wh.resize(max_dims);
wh.maximise();
let log_buf_handle = wh.get_buffer();
// - Fancy logo window
let dims = Dims::new(S_LOGO_DIMS.0,S_LOGO_DIMS.1);
let mut logo_wh = wgh.create_window("Logo");
logo_wh.set_pos(Pos::new(max_dims.w-dims.w, 0));
logo_wh.resize(dims);
logo_wh.blit_rect( Rect::new_pd(Pos::new(0,0),dims), &S_LOGO_DATA, dims.w as usize );
if max_dims!= Dims::new(0,0)
{
// > Show windows in reverse render order
wh.show();
logo_wh.show();
}
// Return struct populated with above handles
KernelLog {
_wgh: wgh,
wh: wh,
_logo_wh: logo_wh,
cur_line: 0,
buffer_handle: log_buf_handle,
}
}
/// Scroll the display up a step, revealing a new line
fn scroll_up(&mut self)
{
self.cur_line += 1;
}
/// Write a string to the log display (at the given character position)
fn write_text(&mut self, mut pos: CharPos, colour: Colour, text: &str) -> CharPos
{
for c in text.chars()
{
if self.putc(pos, colour, c)
{
pos = pos.next();
}
}
pos
}
/// Flush changes
|
{
// Poke the WM and tell it to reblit us
self.wh.redraw();
}
/// Writes a single codepoint to the display
///
/// Returns true if the character caused a cell change (i.e. it wasn't a combining character)
fn putc(&mut self, pos: CharPos, colour: Colour, c: char) -> bool
{
// If the character was a combining AND it's not at the start of a line,
// render atop the previous cell
if c.is_combining() && pos.col() > 0 {
self.render_char(pos.prev(), colour, c);
false
}
// Otherwise, wipe the cell and render into it
else {
self.clear_cell(pos);
self.render_char(pos, colour, c);
true
}
}
// Low-level rendering
/// Clear a character cell
fn clear_cell(&mut self, pos: CharPos)
{
self.wh.fill_rect( Rect{ pos : pos.to_pixels(), dims: C_CELL_DIMS }, Colour::def_black());
}
/// Actually does the rendering
fn render_char(&mut self, pos: CharPos, colour: Colour, cp: char)
{
if self.buffer_handle.dims().width() == 0 {
return ;
}
let idx = unicode_to_cp437(cp);
//log_trace!("KernelLog::render_char({:?}, {:?}, '{}') idx={}", pos, colour, cp, idx);
let bitmap = &S_FONTDATA[idx as usize];
// Actual render!
let Pos { x: bx, y: by } = pos.to_pixels();
for row in (0.. 16)
{
let byte = &bitmap[row as usize];
let r = self.buffer_handle.scanline_rgn_mut(by as usize + row, bx as usize, 8);
for col in (0usize.. 8)
{
if (byte >> 7-col) & 1!= 0 {
r[col] = colour.as_argb32();
}
}
}
}
}
impl CharPos
{
fn col(&self) -> u32 { self.1 }
fn next(self) -> CharPos { CharPos(self.0, self.1+1) }
fn prev(self) -> CharPos { CharPos(self.0, self.1-1) }
fn to_pixels(self) -> Pos {
Pos::new( (self.1 * C_CELL_DIMS.w) as u32, (self.0 * C_CELL_DIMS.h) as u32 )
}
}
impl LogWriter
{
pub fn new() -> LogWriter
{
let mut log = S_KERNEL_LOG.lock();
log.scroll_up();
LogWriter {
pos: CharPos(log.cur_line-1,0),
colour: Colour::def_white(),
log: log,
}
}
pub fn set_colour(&mut self, colour: Colour)
{
self.colour = colour;
}
}
impl fmt::Write for LogWriter
{
fn write_str(&mut self, s: &str) -> fmt::Result
{
self.pos = self.log.write_text(self.pos, self.colour, s);
Ok( () )
}
}
impl ::core::ops::Drop for LogWriter
{
fn drop(&mut self)
{
self.log.flush();
}
}
impl UnicodeCombining for char
{
fn is_combining(&self) -> bool
{
match *self as u32
{
// Ranges from wikipedia:Combining_Character
0x0300... 0x036F => true,
0x1AB0... 0x1AFF => true,
0x1DC0... 0x1DFF => true,
0x20D0... 0x20FF => true,
0xFE20... 0xFE2F => true,
_ => false,
}
}
}
|
fn flush(&mut self)
|
random_line_split
|
closable_tcp_stream.rs
|
// Copyright 2015 The tiny-http Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::{Read, Write};
use std::io::Result as IoResult;
use std::net::{SocketAddr, TcpStream, Shutdown};
pub struct ClosableTcpStream {
stream: TcpStream,
close_read: bool,
close_write: bool,
}
impl ClosableTcpStream {
pub fn new(stream: TcpStream, close_read: bool, close_write: bool) -> ClosableTcpStream {
ClosableTcpStream {
stream: stream,
close_read: close_read,
close_write: close_write,
}
}
pub fn peer_addr(&mut self) -> IoResult<SocketAddr> {
self.stream.peer_addr()
}
}
impl Drop for ClosableTcpStream {
fn drop(&mut self) {
if self.close_read {
self.stream.shutdown(Shutdown::Read).ok(); // ignoring outcome
}
if self.close_write
|
}
}
impl Read for ClosableTcpStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
self.stream.read(buf)
}
}
impl Write for ClosableTcpStream {
fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.stream.flush()
}
}
|
{
self.stream.shutdown(Shutdown::Write).ok(); // ignoring outcome
}
|
conditional_block
|
closable_tcp_stream.rs
|
// Copyright 2015 The tiny-http Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::{Read, Write};
use std::io::Result as IoResult;
use std::net::{SocketAddr, TcpStream, Shutdown};
pub struct ClosableTcpStream {
stream: TcpStream,
close_read: bool,
close_write: bool,
}
impl ClosableTcpStream {
pub fn new(stream: TcpStream, close_read: bool, close_write: bool) -> ClosableTcpStream {
ClosableTcpStream {
stream: stream,
close_read: close_read,
close_write: close_write,
}
}
pub fn peer_addr(&mut self) -> IoResult<SocketAddr> {
self.stream.peer_addr()
}
}
|
impl Drop for ClosableTcpStream {
fn drop(&mut self) {
if self.close_read {
self.stream.shutdown(Shutdown::Read).ok(); // ignoring outcome
}
if self.close_write {
self.stream.shutdown(Shutdown::Write).ok(); // ignoring outcome
}
}
}
impl Read for ClosableTcpStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
self.stream.read(buf)
}
}
impl Write for ClosableTcpStream {
fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.stream.flush()
}
}
|
random_line_split
|
|
closable_tcp_stream.rs
|
// Copyright 2015 The tiny-http Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::{Read, Write};
use std::io::Result as IoResult;
use std::net::{SocketAddr, TcpStream, Shutdown};
pub struct ClosableTcpStream {
stream: TcpStream,
close_read: bool,
close_write: bool,
}
impl ClosableTcpStream {
pub fn new(stream: TcpStream, close_read: bool, close_write: bool) -> ClosableTcpStream {
ClosableTcpStream {
stream: stream,
close_read: close_read,
close_write: close_write,
}
}
pub fn peer_addr(&mut self) -> IoResult<SocketAddr> {
self.stream.peer_addr()
}
}
impl Drop for ClosableTcpStream {
fn drop(&mut self) {
if self.close_read {
self.stream.shutdown(Shutdown::Read).ok(); // ignoring outcome
}
if self.close_write {
self.stream.shutdown(Shutdown::Write).ok(); // ignoring outcome
}
}
}
impl Read for ClosableTcpStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize>
|
}
impl Write for ClosableTcpStream {
fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.stream.flush()
}
}
|
{
self.stream.read(buf)
}
|
identifier_body
|
closable_tcp_stream.rs
|
// Copyright 2015 The tiny-http Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::io::{Read, Write};
use std::io::Result as IoResult;
use std::net::{SocketAddr, TcpStream, Shutdown};
pub struct
|
{
stream: TcpStream,
close_read: bool,
close_write: bool,
}
impl ClosableTcpStream {
pub fn new(stream: TcpStream, close_read: bool, close_write: bool) -> ClosableTcpStream {
ClosableTcpStream {
stream: stream,
close_read: close_read,
close_write: close_write,
}
}
pub fn peer_addr(&mut self) -> IoResult<SocketAddr> {
self.stream.peer_addr()
}
}
impl Drop for ClosableTcpStream {
fn drop(&mut self) {
if self.close_read {
self.stream.shutdown(Shutdown::Read).ok(); // ignoring outcome
}
if self.close_write {
self.stream.shutdown(Shutdown::Write).ok(); // ignoring outcome
}
}
}
impl Read for ClosableTcpStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> {
self.stream.read(buf)
}
}
impl Write for ClosableTcpStream {
fn write(&mut self, buf: &[u8]) -> IoResult<usize> {
self.stream.write(buf)
}
fn flush(&mut self) -> IoResult<()> {
self.stream.flush()
}
}
|
ClosableTcpStream
|
identifier_name
|
mod.rs
|
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec};
use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::{Error, EBADF, EINVAL, ENOENT, Result};
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_CUR, SEEK_END, SEEK_SET};
use syscall::scheme::Scheme;
mod context;
mod cpu;
mod exe;
mod iostat;
mod scheme;
mod scheme_num;
mod syscall;
mod uname;
struct Handle {
path: &'static [u8],
data: Vec<u8>,
mode: u16,
seek: usize
}
type SysFn = Fn() -> Result<Vec<u8>> + Send + Sync;
/// System information scheme
pub struct SysScheme {
next_id: AtomicUsize,
files: BTreeMap<&'static [u8], Box<SysFn>>,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl SysScheme {
pub fn new() -> SysScheme {
let mut files: BTreeMap<&'static [u8], Box<SysFn>> = BTreeMap::new();
files.insert(b"context", Box::new(move || context::resource()));
files.insert(b"cpu", Box::new(move || cpu::resource()));
files.insert(b"exe", Box::new(move || exe::resource()));
files.insert(b"iostat", Box::new(move || iostat::resource()));
files.insert(b"scheme", Box::new(move || scheme::resource()));
files.insert(b"scheme_num", Box::new(move || scheme_num::resource()));
files.insert(b"syscall", Box::new(move || syscall::resource()));
files.insert(b"uname", Box::new(move || uname::resource()));
SysScheme {
next_id: AtomicUsize::new(0),
files: files,
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for SysScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?;
let path_trimmed = path_utf8.trim_matches('/');
if path_trimmed.is_empty() {
let mut data = Vec::new();
for entry in self.files.iter() {
if! data.is_empty() {
data.push(b'\n');
}
data.extend_from_slice(entry.0);
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: b"",
data: data,
mode: MODE_DIR | 0o444,
seek: 0
});
return Ok(id)
} else {
//Have to iterate to get the path without allocation
for entry in self.files.iter() {
if entry.0 == &path_trimmed.as_bytes() {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: entry.0,
data: entry.1()?,
mode: MODE_FILE | 0o444,
seek: 0
});
return Ok(id)
}
}
}
Err(Error::new(ENOENT))
}
fn dup(&self, id: usize, buf: &[u8]) -> Result<usize> {
if! buf.is_empty() {
return Err(Error::new(EINVAL));
}
let (path, data, mode, seek) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.path, handle.data.clone(), handle.mode, handle.seek)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: path,
data: data,
mode: mode,
seek: seek
});
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
let mut handles = self.handles.write();
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
while i < buffer.len() && handle.seek < handle.data.len() {
buffer[i] = handle.data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
handle.seek = match whence {
SEEK_SET => cmp::min(handle.data.len(), pos),
SEEK_CUR => cmp::max(0, cmp::min(handle.data.len() as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(handle.data.len() as isize, handle.data.len() as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
}
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
let scheme_path = b"sys:";
while i < buf.len() && i < scheme_path.len() {
buf[i] = scheme_path[i];
i += 1;
}
let mut j = 0;
while i < buf.len() && j < handle.path.len() {
buf[i] = handle.path[j];
i += 1;
j += 1;
}
Ok(i)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
stat.st_mode = handle.mode;
stat.st_uid = 0;
stat.st_gid = 0;
stat.st_size = handle.data.len() as u64;
Ok(0)
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn
|
(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}
|
close
|
identifier_name
|
mod.rs
|
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec};
use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::{Error, EBADF, EINVAL, ENOENT, Result};
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_CUR, SEEK_END, SEEK_SET};
use syscall::scheme::Scheme;
mod context;
mod cpu;
mod exe;
mod iostat;
mod scheme;
mod scheme_num;
mod syscall;
mod uname;
struct Handle {
path: &'static [u8],
data: Vec<u8>,
mode: u16,
seek: usize
}
type SysFn = Fn() -> Result<Vec<u8>> + Send + Sync;
/// System information scheme
pub struct SysScheme {
next_id: AtomicUsize,
files: BTreeMap<&'static [u8], Box<SysFn>>,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl SysScheme {
pub fn new() -> SysScheme {
let mut files: BTreeMap<&'static [u8], Box<SysFn>> = BTreeMap::new();
files.insert(b"context", Box::new(move || context::resource()));
files.insert(b"cpu", Box::new(move || cpu::resource()));
files.insert(b"exe", Box::new(move || exe::resource()));
files.insert(b"iostat", Box::new(move || iostat::resource()));
files.insert(b"scheme", Box::new(move || scheme::resource()));
files.insert(b"scheme_num", Box::new(move || scheme_num::resource()));
files.insert(b"syscall", Box::new(move || syscall::resource()));
files.insert(b"uname", Box::new(move || uname::resource()));
SysScheme {
next_id: AtomicUsize::new(0),
files: files,
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for SysScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?;
let path_trimmed = path_utf8.trim_matches('/');
if path_trimmed.is_empty() {
let mut data = Vec::new();
for entry in self.files.iter() {
if! data.is_empty() {
data.push(b'\n');
}
data.extend_from_slice(entry.0);
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: b"",
data: data,
mode: MODE_DIR | 0o444,
seek: 0
});
return Ok(id)
} else {
//Have to iterate to get the path without allocation
for entry in self.files.iter() {
if entry.0 == &path_trimmed.as_bytes() {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: entry.0,
data: entry.1()?,
mode: MODE_FILE | 0o444,
seek: 0
});
return Ok(id)
}
}
}
Err(Error::new(ENOENT))
}
fn dup(&self, id: usize, buf: &[u8]) -> Result<usize> {
if! buf.is_empty() {
return Err(Error::new(EINVAL));
}
let (path, data, mode, seek) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.path, handle.data.clone(), handle.mode, handle.seek)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: path,
data: data,
mode: mode,
seek: seek
});
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize> {
let mut handles = self.handles.write();
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
while i < buffer.len() && handle.seek < handle.data.len() {
buffer[i] = handle.data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
handle.seek = match whence {
SEEK_SET => cmp::min(handle.data.len(), pos),
SEEK_CUR => cmp::max(0, cmp::min(handle.data.len() as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(handle.data.len() as isize, handle.data.len() as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
|
}
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
let scheme_path = b"sys:";
while i < buf.len() && i < scheme_path.len() {
buf[i] = scheme_path[i];
i += 1;
}
let mut j = 0;
while i < buf.len() && j < handle.path.len() {
buf[i] = handle.path[j];
i += 1;
j += 1;
}
Ok(i)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
stat.st_mode = handle.mode;
stat.st_uid = 0;
stat.st_gid = 0;
stat.st_size = handle.data.len() as u64;
Ok(0)
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}
|
random_line_split
|
|
mod.rs
|
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec};
use core::{cmp, str};
use core::sync::atomic::{AtomicUsize, Ordering};
use spin::RwLock;
use syscall::data::Stat;
use syscall::error::{Error, EBADF, EINVAL, ENOENT, Result};
use syscall::flag::{MODE_DIR, MODE_FILE, SEEK_CUR, SEEK_END, SEEK_SET};
use syscall::scheme::Scheme;
mod context;
mod cpu;
mod exe;
mod iostat;
mod scheme;
mod scheme_num;
mod syscall;
mod uname;
struct Handle {
path: &'static [u8],
data: Vec<u8>,
mode: u16,
seek: usize
}
type SysFn = Fn() -> Result<Vec<u8>> + Send + Sync;
/// System information scheme
pub struct SysScheme {
next_id: AtomicUsize,
files: BTreeMap<&'static [u8], Box<SysFn>>,
handles: RwLock<BTreeMap<usize, Handle>>
}
impl SysScheme {
pub fn new() -> SysScheme {
let mut files: BTreeMap<&'static [u8], Box<SysFn>> = BTreeMap::new();
files.insert(b"context", Box::new(move || context::resource()));
files.insert(b"cpu", Box::new(move || cpu::resource()));
files.insert(b"exe", Box::new(move || exe::resource()));
files.insert(b"iostat", Box::new(move || iostat::resource()));
files.insert(b"scheme", Box::new(move || scheme::resource()));
files.insert(b"scheme_num", Box::new(move || scheme_num::resource()));
files.insert(b"syscall", Box::new(move || syscall::resource()));
files.insert(b"uname", Box::new(move || uname::resource()));
SysScheme {
next_id: AtomicUsize::new(0),
files: files,
handles: RwLock::new(BTreeMap::new())
}
}
}
impl Scheme for SysScheme {
fn open(&self, path: &[u8], _flags: usize, _uid: u32, _gid: u32) -> Result<usize> {
let path_utf8 = str::from_utf8(path).or(Err(Error::new(ENOENT)))?;
let path_trimmed = path_utf8.trim_matches('/');
if path_trimmed.is_empty() {
let mut data = Vec::new();
for entry in self.files.iter() {
if! data.is_empty() {
data.push(b'\n');
}
data.extend_from_slice(entry.0);
}
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: b"",
data: data,
mode: MODE_DIR | 0o444,
seek: 0
});
return Ok(id)
} else {
//Have to iterate to get the path without allocation
for entry in self.files.iter() {
if entry.0 == &path_trimmed.as_bytes() {
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: entry.0,
data: entry.1()?,
mode: MODE_FILE | 0o444,
seek: 0
});
return Ok(id)
}
}
}
Err(Error::new(ENOENT))
}
fn dup(&self, id: usize, buf: &[u8]) -> Result<usize> {
if! buf.is_empty() {
return Err(Error::new(EINVAL));
}
let (path, data, mode, seek) = {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
(handle.path, handle.data.clone(), handle.mode, handle.seek)
};
let id = self.next_id.fetch_add(1, Ordering::SeqCst);
self.handles.write().insert(id, Handle {
path: path,
data: data,
mode: mode,
seek: seek
});
Ok(id)
}
fn read(&self, id: usize, buffer: &mut [u8]) -> Result<usize>
|
fn seek(&self, id: usize, pos: usize, whence: usize) -> Result<usize> {
let mut handles = self.handles.write();
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
handle.seek = match whence {
SEEK_SET => cmp::min(handle.data.len(), pos),
SEEK_CUR => cmp::max(0, cmp::min(handle.data.len() as isize, handle.seek as isize + pos as isize)) as usize,
SEEK_END => cmp::max(0, cmp::min(handle.data.len() as isize, handle.data.len() as isize + pos as isize)) as usize,
_ => return Err(Error::new(EINVAL))
};
Ok(handle.seek)
}
fn fpath(&self, id: usize, buf: &mut [u8]) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
let scheme_path = b"sys:";
while i < buf.len() && i < scheme_path.len() {
buf[i] = scheme_path[i];
i += 1;
}
let mut j = 0;
while i < buf.len() && j < handle.path.len() {
buf[i] = handle.path[j];
i += 1;
j += 1;
}
Ok(i)
}
fn fstat(&self, id: usize, stat: &mut Stat) -> Result<usize> {
let handles = self.handles.read();
let handle = handles.get(&id).ok_or(Error::new(EBADF))?;
stat.st_mode = handle.mode;
stat.st_uid = 0;
stat.st_gid = 0;
stat.st_size = handle.data.len() as u64;
Ok(0)
}
fn fsync(&self, _id: usize) -> Result<usize> {
Ok(0)
}
fn close(&self, id: usize) -> Result<usize> {
self.handles.write().remove(&id).ok_or(Error::new(EBADF)).and(Ok(0))
}
}
|
{
let mut handles = self.handles.write();
let handle = handles.get_mut(&id).ok_or(Error::new(EBADF))?;
let mut i = 0;
while i < buffer.len() && handle.seek < handle.data.len() {
buffer[i] = handle.data[handle.seek];
i += 1;
handle.seek += 1;
}
Ok(i)
}
|
identifier_body
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{self, PreorderFlowTraversal};
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use gfx::display_list::OpaqueNode;
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use std::mem;
use style::atomic_refcell::AtomicRefCell;
use style::context::{LocalStyleContext, SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{StylingMode, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT};
use style::traversal::{DomTraversalContext, put_thread_local_bloom_filter};
use style::traversal::{recalc_style_at, remove_from_bloom_filter};
use style::traversal::take_thread_local_bloom_filter;
use util::opts;
use wrapper::{GetRawData, LayoutNodeHelpers, LayoutNodeLayoutData};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
#[allow(unsafe_code)]
impl<'lc, N> DomTraversalContext<N> for RecalcStyleAndConstructFlows<'lc>
where N: LayoutNode + TNode,
N::ConcreteElement: TElement
{
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: N) {
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
node.initialize_data();
if node.is_text_node() {
// FIXME(bholley): Stop doing this silly work to maintain broken bloom filter
// invariants.
//
// Longer version: The bloom filter is entirely busted for parallel traversal. Because
// parallel traversal is breadth-first, each sibling rejects the bloom filter set up
// by the previous sibling (which is valid for children, not siblings) and recreates
// it. Similarly, the fixup performed in the bottom-up traversal is useless, because
// threads perform flow construction up the parent chain until they find a parent with
// other unprocessed children, at which point they bail to the work queue and find a
// different node.
//
// Nevertheless, the remove_from_bloom_filter call at the end of flow construction
// asserts that the bloom filter is valid for the current node. This breaks when we
// stop calling recalc_style_at for text nodes, because the recursive chain of
// construct_flows_at calls is no longer necessarily rooted in a call that sets up the
// thread-local bloom filter for the leaf node.
//
// The bloom filter stuff is all going to be rewritten, so we just hackily duplicate
// the bloom filter manipulation from recalc_style_at to maintain invariants.
let parent = node.parent_node().unwrap().as_element();
let bf = take_thread_local_bloom_filter(parent, self.root, self.context.shared_context());
put_thread_local_bloom_filter(bf, &node.to_unsafe(), self.context.shared_context());
} else {
let el = node.as_element().unwrap();
recalc_style_at::<_, _, Self>(&self.context, self.root, el);
}
}
fn process_postorder(&self, node: N) {
construct_flows_at(&self.context, self.root, node);
}
fn should_traverse_child(parent: N::ConcreteElement, child: N) -> bool {
// If the parent is display:none, we don't need to do anything.
if parent.is_display_none() {
return false;
}
match child.as_element() {
// Elements should be traversed if they need styling or flow construction.
Some(el) => el.styling_mode()!= StylingMode::Stop ||
el.as_node().to_threadsafe().restyle_damage()!= RestyleDamage::empty(),
// Text nodes never need styling. However, there are two cases they may need
|
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
None => child.get_raw_data().is_none() ||
parent.as_node().to_threadsafe().restyle_damage()!= RestyleDamage::empty(),
}
}
unsafe fn ensure_element_data(element: &N::ConcreteElement) -> &AtomicRefCell<ElementData> {
element.as_node().initialize_data();
element.get_data().unwrap()
}
unsafe fn clear_element_data(element: &N::ConcreteElement) {
element.as_node().clear_data();
}
fn local_context(&self) -> &LocalStyleContext {
self.context.local_context()
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
tnode.clear_restyle_damage();
}
unsafe { node.clear_dirty_bits(); }
remove_from_bloom_filter(context, root, node);
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub shared_context: &'a SharedStyleContext,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.shared_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a SharedLayoutContext,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
let new_stacking_context =
flow::base(flow).stacking_context_id!= self.state.stacking_context_id();
if new_stacking_context {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
}
let new_scroll_root =
flow::base(flow).scroll_root_id!= self.state.scroll_root_id();
if new_scroll_root {
self.state.push_scroll_root_id(flow::base(flow).scroll_root_id);
}
if self.should_process() {
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
}
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
if new_stacking_context {
self.state.pop_stacking_context_id();
}
if new_scroll_root {
self.state.pop_scroll_root_id();
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
}
|
random_line_split
|
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{self, PreorderFlowTraversal};
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use gfx::display_list::OpaqueNode;
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use std::mem;
use style::atomic_refcell::AtomicRefCell;
use style::context::{LocalStyleContext, SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{StylingMode, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT};
use style::traversal::{DomTraversalContext, put_thread_local_bloom_filter};
use style::traversal::{recalc_style_at, remove_from_bloom_filter};
use style::traversal::take_thread_local_bloom_filter;
use util::opts;
use wrapper::{GetRawData, LayoutNodeHelpers, LayoutNodeLayoutData};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
#[allow(unsafe_code)]
impl<'lc, N> DomTraversalContext<N> for RecalcStyleAndConstructFlows<'lc>
where N: LayoutNode + TNode,
N::ConcreteElement: TElement
{
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: N) {
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
node.initialize_data();
if node.is_text_node()
|
let parent = node.parent_node().unwrap().as_element();
let bf = take_thread_local_bloom_filter(parent, self.root, self.context.shared_context());
put_thread_local_bloom_filter(bf, &node.to_unsafe(), self.context.shared_context());
}
else {
let el = node.as_element().unwrap();
recalc_style_at::<_, _, Self>(&self.context, self.root, el);
}
}
fn process_postorder(&self, node: N) {
construct_flows_at(&self.context, self.root, node);
}
fn should_traverse_child(parent: N::ConcreteElement, child: N) -> bool {
// If the parent is display:none, we don't need to do anything.
if parent.is_display_none() {
return false;
}
match child.as_element() {
// Elements should be traversed if they need styling or flow construction.
Some(el) => el.styling_mode()!= StylingMode::Stop ||
el.as_node().to_threadsafe().restyle_damage()!= RestyleDamage::empty(),
// Text nodes never need styling. However, there are two cases they may need
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
None => child.get_raw_data().is_none() ||
parent.as_node().to_threadsafe().restyle_damage()!= RestyleDamage::empty(),
}
}
unsafe fn ensure_element_data(element: &N::ConcreteElement) -> &AtomicRefCell<ElementData> {
element.as_node().initialize_data();
element.get_data().unwrap()
}
unsafe fn clear_element_data(element: &N::ConcreteElement) {
element.as_node().clear_data();
}
fn local_context(&self) -> &LocalStyleContext {
self.context.local_context()
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
tnode.clear_restyle_damage();
}
unsafe { node.clear_dirty_bits(); }
remove_from_bloom_filter(context, root, node);
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub shared_context: &'a SharedStyleContext,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.shared_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
pub layout_context: &'a SharedLayoutContext,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
let new_stacking_context =
flow::base(flow).stacking_context_id!= self.state.stacking_context_id();
if new_stacking_context {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
}
let new_scroll_root =
flow::base(flow).scroll_root_id!= self.state.scroll_root_id();
if new_scroll_root {
self.state.push_scroll_root_id(flow::base(flow).scroll_root_id);
}
if self.should_process() {
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
}
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
if new_stacking_context {
self.state.pop_stacking_context_id();
}
if new_scroll_root {
self.state.pop_scroll_root_id();
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
}
|
{
// FIXME(bholley): Stop doing this silly work to maintain broken bloom filter
// invariants.
//
// Longer version: The bloom filter is entirely busted for parallel traversal. Because
// parallel traversal is breadth-first, each sibling rejects the bloom filter set up
// by the previous sibling (which is valid for children, not siblings) and recreates
// it. Similarly, the fixup performed in the bottom-up traversal is useless, because
// threads perform flow construction up the parent chain until they find a parent with
// other unprocessed children, at which point they bail to the work queue and find a
// different node.
//
// Nevertheless, the remove_from_bloom_filter call at the end of flow construction
// asserts that the bloom filter is valid for the current node. This breaks when we
// stop calling recalc_style_at for text nodes, because the recursive chain of
// construct_flows_at calls is no longer necessarily rooted in a call that sets up the
// thread-local bloom filter for the leaf node.
//
// The bloom filter stuff is all going to be rewritten, so we just hackily duplicate
// the bloom filter manipulation from recalc_style_at to maintain invariants.
|
conditional_block
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{self, PreorderFlowTraversal};
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use gfx::display_list::OpaqueNode;
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use std::mem;
use style::atomic_refcell::AtomicRefCell;
use style::context::{LocalStyleContext, SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{StylingMode, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT};
use style::traversal::{DomTraversalContext, put_thread_local_bloom_filter};
use style::traversal::{recalc_style_at, remove_from_bloom_filter};
use style::traversal::take_thread_local_bloom_filter;
use util::opts;
use wrapper::{GetRawData, LayoutNodeHelpers, LayoutNodeLayoutData};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
#[allow(unsafe_code)]
impl<'lc, N> DomTraversalContext<N> for RecalcStyleAndConstructFlows<'lc>
where N: LayoutNode + TNode,
N::ConcreteElement: TElement
{
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
fn process_preorder(&self, node: N) {
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
node.initialize_data();
if node.is_text_node() {
// FIXME(bholley): Stop doing this silly work to maintain broken bloom filter
// invariants.
//
// Longer version: The bloom filter is entirely busted for parallel traversal. Because
// parallel traversal is breadth-first, each sibling rejects the bloom filter set up
// by the previous sibling (which is valid for children, not siblings) and recreates
// it. Similarly, the fixup performed in the bottom-up traversal is useless, because
// threads perform flow construction up the parent chain until they find a parent with
// other unprocessed children, at which point they bail to the work queue and find a
// different node.
//
// Nevertheless, the remove_from_bloom_filter call at the end of flow construction
// asserts that the bloom filter is valid for the current node. This breaks when we
// stop calling recalc_style_at for text nodes, because the recursive chain of
// construct_flows_at calls is no longer necessarily rooted in a call that sets up the
// thread-local bloom filter for the leaf node.
//
// The bloom filter stuff is all going to be rewritten, so we just hackily duplicate
// the bloom filter manipulation from recalc_style_at to maintain invariants.
let parent = node.parent_node().unwrap().as_element();
let bf = take_thread_local_bloom_filter(parent, self.root, self.context.shared_context());
put_thread_local_bloom_filter(bf, &node.to_unsafe(), self.context.shared_context());
} else {
let el = node.as_element().unwrap();
recalc_style_at::<_, _, Self>(&self.context, self.root, el);
}
}
fn process_postorder(&self, node: N) {
construct_flows_at(&self.context, self.root, node);
}
fn should_traverse_child(parent: N::ConcreteElement, child: N) -> bool {
// If the parent is display:none, we don't need to do anything.
if parent.is_display_none() {
return false;
}
match child.as_element() {
// Elements should be traversed if they need styling or flow construction.
Some(el) => el.styling_mode()!= StylingMode::Stop ||
el.as_node().to_threadsafe().restyle_damage()!= RestyleDamage::empty(),
// Text nodes never need styling. However, there are two cases they may need
// flow construction:
// (1) They child doesn't yet have layout data (preorder traversal initializes it).
// (2) The parent element has restyle damage (so the text flow also needs fixup).
None => child.get_raw_data().is_none() ||
parent.as_node().to_threadsafe().restyle_damage()!= RestyleDamage::empty(),
}
}
unsafe fn ensure_element_data(element: &N::ConcreteElement) -> &AtomicRefCell<ElementData> {
element.as_node().initialize_data();
element.get_data().unwrap()
}
unsafe fn clear_element_data(element: &N::ConcreteElement) {
element.as_node().clear_data();
}
fn local_context(&self) -> &LocalStyleContext {
self.context.local_context()
}
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform. Return true to continue or false to stop.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows for styled nodes.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
// Construct flows for this node.
{
let tnode = node.to_threadsafe();
// Always reconstruct if incremental layout is turned off.
let nonincremental_layout = opts::get().nonincremental_layout;
if nonincremental_layout || tnode.restyle_damage()!= RestyleDamage::empty() ||
node.as_element().map_or(false, |el| el.has_dirty_descendants()) {
let mut flow_constructor = FlowConstructor::new(context);
if nonincremental_layout ||!flow_constructor.repair_if_possible(&tnode) {
flow_constructor.process(&tnode);
debug!("Constructed flow for {:x}: {:x}",
tnode.debug_id(),
tnode.flow_debug_id());
}
}
tnode.clear_restyle_damage();
}
unsafe { node.clear_dirty_bits(); }
remove_from_bloom_filter(context, root, node);
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.bubble_inline_sizes();
flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
}
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
pub shared_context: &'a SharedStyleContext,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.assign_inline_sizes(self.shared_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
}
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation countainer is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
#[derive(Copy, Clone)]
pub struct
|
<'a> {
pub layout_context: &'a SharedLayoutContext,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
pub struct BuildDisplayList<'a> {
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
#[inline]
pub fn traverse(&mut self, flow: &mut Flow) {
let new_stacking_context =
flow::base(flow).stacking_context_id!= self.state.stacking_context_id();
if new_stacking_context {
self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
}
let new_scroll_root =
flow::base(flow).scroll_root_id!= self.state.scroll_root_id();
if new_scroll_root {
self.state.push_scroll_root_id(flow::base(flow).scroll_root_id);
}
if self.should_process() {
flow.build_display_list(&mut self.state);
flow::mut_base(flow).restyle_damage.remove(REPAINT);
}
for kid in flow::child_iter_mut(flow) {
self.traverse(kid);
}
if new_stacking_context {
self.state.pop_stacking_context_id();
}
if new_scroll_root {
self.state.pop_scroll_root_id();
}
}
#[inline]
fn should_process(&self) -> bool {
true
}
}
|
ComputeAbsolutePositions
|
identifier_name
|
traversal.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Traversals over the DOM and flow trees, running the layout computations.
use construct::FlowConstructor;
use context::{LayoutContext, SharedLayoutContext};
use display_list_builder::DisplayListBuildState;
use flow::{self, PreorderFlowTraversal};
use flow::{CAN_BE_FRAGMENTED, Flow, ImmutableFlowUtils, PostorderFlowTraversal};
use gfx::display_list::OpaqueNode;
use script_layout_interface::wrapper_traits::{LayoutNode, ThreadSafeLayoutNode};
use std::mem;
use style::atomic_refcell::AtomicRefCell;
use style::context::{LocalStyleContext, SharedStyleContext, StyleContext};
use style::data::ElementData;
use style::dom::{StylingMode, TElement, TNode};
use style::selector_parser::RestyleDamage;
use style::servo::restyle_damage::{BUBBLE_ISIZES, REFLOW, REFLOW_OUT_OF_FLOW, REPAINT};
use style::traversal::{DomTraversalContext, put_thread_local_bloom_filter};
use style::traversal::{recalc_style_at, remove_from_bloom_filter};
use style::traversal::take_thread_local_bloom_filter;
use util::opts;
use wrapper::{GetRawData, LayoutNodeHelpers, LayoutNodeLayoutData};
pub struct RecalcStyleAndConstructFlows<'lc> {
context: LayoutContext<'lc>,
root: OpaqueNode,
}
#[allow(unsafe_code)]
impl<'lc, N> DomTraversalContext<N> for RecalcStyleAndConstructFlows<'lc>
where N: LayoutNode + TNode,
N::ConcreteElement: TElement
{
type SharedContext = SharedLayoutContext;
#[allow(unsafe_code)]
fn new<'a>(shared: &'a Self::SharedContext, root: OpaqueNode) -> Self {
// FIXME(bholley): This transmutation from &'a to &'lc is very unfortunate, but I haven't
// found a way to avoid it despite spending several days on it (and consulting Manishearth,
// brson, and nmatsakis).
//
// The crux of the problem is that parameterizing DomTraversalContext on the lifetime of
// the SharedContext doesn't work for a variety of reasons [1]. However, the code in
// parallel.rs needs to be able to use the DomTraversalContext trait (or something similar)
// to stack-allocate a struct (a generalized LayoutContext<'a>) that holds a borrowed
// SharedContext, which means that the struct needs to be parameterized on a lifetime.
// Given the aforementioned constraint, the only way to accomplish this is to avoid
// propagating the borrow lifetime from the struct to the trait, but that means that the
// new() method on the trait cannot require the lifetime of its argument to match the
// lifetime of the Self object it creates.
//
// This could be solved with an associated type with an unbound lifetime parameter, but
// that would require higher-kinded types, which don't exist yet and probably aren't coming
// for a while.
//
// So we transmute. :-( This is safe because the DomTravesalContext is stack-allocated on
// the worker thread while processing a WorkUnit, whereas the borrowed SharedContext is
// live for the entire duration of the restyle. This really could _almost_ compile: all
// we'd need to do is change the signature to to |new<'a: 'lc>|, and everything would
// work great. But we can't do that, because that would cause a mismatch with the signature
// in the trait we're implementing, and we can't mention 'lc in that trait at all for the
// reasons described above.
//
// [1] For example, the WorkQueue type needs to be parameterized on the concrete type of
// DomTraversalContext::SharedContext, and the WorkQueue lifetime is similar to that of the
// LayoutThread, generally much longer than that of a given SharedLayoutContext borrow.
let shared_lc: &'lc SharedLayoutContext = unsafe { mem::transmute(shared) };
RecalcStyleAndConstructFlows {
context: LayoutContext::new(shared_lc),
root: root,
}
}
/// Preorder hook: ensures layout data exists for `node`, then either
/// recalculates style (element nodes) or maintains the thread-local bloom
/// filter (text nodes — see the long comment below for why).
fn process_preorder(&self, node: N) {
// FIXME(pcwalton): Stop allocating here. Ideally this should just be
// done by the HTML parser.
node.initialize_data();
if node.is_text_node() {
// FIXME(bholley): Stop doing this silly work to maintain broken bloom filter
// invariants.
//
// Longer version: The bloom filter is entirely busted for parallel traversal. Because
// parallel traversal is breadth-first, each sibling rejects the bloom filter set up
// by the previous sibling (which is valid for children, not siblings) and recreates
// it. Similarly, the fixup performed in the bottom-up traversal is useless, because
// threads perform flow construction up the parent chain until they find a parent with
// other unprocessed children, at which point they bail to the work queue and find a
// different node.
//
// Nevertheless, the remove_from_bloom_filter call at the end of flow construction
// asserts that the bloom filter is valid for the current node. This breaks when we
// stop calling recalc_style_at for text nodes, because the recursive chain of
// construct_flows_at calls is no longer necessarily rooted in a call that sets up the
// thread-local bloom filter for the leaf node.
//
// The bloom filter stuff is all going to be rewritten, so we just hackily duplicate
// the bloom filter manipulation from recalc_style_at to maintain invariants.
let parent = node.parent_node().unwrap().as_element();
let bf = take_thread_local_bloom_filter(parent, self.root, self.context.shared_context());
put_thread_local_bloom_filter(bf, &node.to_unsafe(), self.context.shared_context());
} else {
let el = node.as_element().unwrap();
recalc_style_at::<_, _, Self>(&self.context, self.root, el);
}
}
/// Postorder hook: constructs flows for `node` once all of its children
/// have been processed.
fn process_postorder(&self, node: N) {
construct_flows_at(&self.context, self.root, node);
}
/// Decides whether `child` needs to be visited by this traversal.
fn should_traverse_child(parent: N::ConcreteElement, child: N) -> bool {
    // Children of a display:none element generate no boxes, so the whole
    // subtree can be skipped.
    if parent.is_display_none() {
        return false;
    }

    if let Some(el) = child.as_element() {
        // An element needs a visit when it still requires styling, or when
        // it carries restyle damage that flow construction must repair.
        return el.styling_mode() != StylingMode::Stop ||
               el.as_node().to_threadsafe().restyle_damage() != RestyleDamage::empty();
    }

    // Text nodes never need styling. However, there are two cases they may
    // need flow construction:
    // (1) The child doesn't yet have layout data (preorder traversal
    //     initializes it).
    // (2) The parent element has restyle damage (so the text flow also
    //     needs fixup).
    child.get_raw_data().is_none() ||
        parent.as_node().to_threadsafe().restyle_damage() != RestyleDamage::empty()
}
/// Lazily initializes the per-element data for `element` and returns a
/// reference to it.
unsafe fn ensure_element_data(element: &N::ConcreteElement) -> &AtomicRefCell<ElementData> {
element.as_node().initialize_data();
// `initialize_data()` above guarantees the data exists, so this
// `unwrap()` cannot fail.
element.get_data().unwrap()
}
/// Drops the per-element data for `element`.
unsafe fn clear_element_data(element: &N::ConcreteElement) {
element.as_node().clear_data();
}
fn local_context(&self) -> &LocalStyleContext
|
}
/// A bottom-up, parallelizable traversal.
pub trait PostorderNodeMutTraversal<ConcreteThreadSafeLayoutNode: ThreadSafeLayoutNode> {
/// The operation to perform on each node.
// NOTE(review): the previous doc comment said "Return true to continue or
// false to stop", but the signature returns `()`; the stale wording has
// been removed.
fn process(&mut self, node: &ConcreteThreadSafeLayoutNode);
}
/// The flow construction traversal, which builds flows (or repairs existing
/// ones) for styled nodes, then clears the node's damage and dirty bits and
/// fixes up the thread-local bloom filter.
#[inline]
#[allow(unsafe_code)]
fn construct_flows_at<'a, N: LayoutNode>(context: &'a LayoutContext<'a>, root: OpaqueNode, node: N) {
    // Construct flows for this node.
    {
        let thread_safe_node = node.to_threadsafe();

        // With incremental layout turned off we always rebuild from scratch.
        let always_rebuild = opts::get().nonincremental_layout;

        // Short-circuit order matches the original: only query damage and
        // dirty descendants when the cheaper checks don't already decide.
        let needs_construction = always_rebuild ||
            thread_safe_node.restyle_damage() != RestyleDamage::empty() ||
            node.as_element().map_or(false, |el| el.has_dirty_descendants());

        if needs_construction {
            let mut constructor = FlowConstructor::new(context);
            // Try an in-place repair first; fall back to full construction.
            if always_rebuild || !constructor.repair_if_possible(&thread_safe_node) {
                constructor.process(&thread_safe_node);
                debug!("Constructed flow for {:x}: {:x}",
                       thread_safe_node.debug_id(),
                       thread_safe_node.flow_debug_id());
            }
        }

        // Flow construction (or repair) has consumed the damage.
        thread_safe_node.clear_restyle_damage();
    }

    unsafe { node.clear_dirty_bits(); }
    remove_from_bloom_filter(context, root, node);
}
/// The bubble-inline-sizes traversal, the first part of layout computation. This computes
/// preferred and intrinsic inline-sizes and bubbles them up the tree.
pub struct BubbleISizes<'a> {
/// The layout context shared by this traversal.
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for BubbleISizes<'a> {
    /// Only flows whose damage includes `BUBBLE_ISIZES` need processing.
    #[inline]
    fn should_process(&self, flow: &mut Flow) -> bool {
        flow::base(flow).restyle_damage.contains(BUBBLE_ISIZES)
    }

    /// Computes intrinsic inline-sizes for `flow` and clears the
    /// corresponding damage bit.
    #[inline]
    fn process(&self, flow: &mut Flow) {
        flow.bubble_inline_sizes();
        flow::mut_base(flow).restyle_damage.remove(BUBBLE_ISIZES);
    }
}
/// The assign-inline-sizes traversal. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignISizes<'a> {
/// The shared style context passed to `Flow::assign_inline_sizes`.
pub shared_context: &'a SharedStyleContext,
}
impl<'a> PreorderFlowTraversal for AssignISizes<'a> {
    /// Inline-size assignment is only required when the flow carries reflow
    /// damage.
    #[inline]
    fn should_process(&self, flow: &mut Flow) -> bool {
        flow::base(flow).restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW)
    }

    /// Assigns inline sizes to `flow`.
    #[inline]
    fn process(&self, flow: &mut Flow) {
        flow.assign_inline_sizes(self.shared_context);
    }
}
/// The assign-block-sizes-and-store-overflow traversal, the last (and most expensive) part of
/// layout computation. Determines the final block-sizes for all layout objects and computes
/// positions. In Gecko this corresponds to `Reflow`.
#[derive(Copy, Clone)]
pub struct AssignBSizes<'a> {
/// The layout context passed to `Flow::assign_block_size`.
pub layout_context: &'a LayoutContext<'a>,
}
impl<'a> PostorderFlowTraversal for AssignBSizes<'a> {
/// Assigns the final block size to `flow`, unless floats might still flow
/// through it (in which case its inorder parent is responsible).
#[inline]
fn process(&self, flow: &mut Flow) {
// Can't do anything with anything that floats might flow through until we reach their
// inorder parent.
//
// NB: We must return without resetting the restyle bits for these, as we haven't actually
// reflowed anything!
if flow.floats_might_flow_through() {
return
}
flow.assign_block_size(self.layout_context);
}
/// Flows with reflow damage need processing — except fragmentable ones,
/// which are handled recursively by their fragmentation container.
#[inline]
fn should_process(&self, flow: &mut Flow) -> bool {
let base = flow::base(flow);
base.restyle_damage.intersects(REFLOW_OUT_OF_FLOW | REFLOW) &&
// The fragmentation container is responsible for calling Flow::fragment recursively
!base.flags.contains(CAN_BE_FRAGMENTED)
}
}
/// The absolute-position computation traversal.
#[derive(Copy, Clone)]
pub struct ComputeAbsolutePositions<'a> {
/// The shared layout context passed to `Flow::compute_absolute_position`.
pub layout_context: &'a SharedLayoutContext,
}
impl<'a> PreorderFlowTraversal for ComputeAbsolutePositions<'a> {
/// Computes the absolute position of `flow`.
// NOTE(review): unlike the other traversals here, this impl does not
// override `should_process`, so it presumably runs for every flow via the
// trait's default — confirm against the trait definition.
#[inline]
fn process(&self, flow: &mut Flow) {
flow.compute_absolute_position(self.layout_context);
}
}
/// The display-list construction traversal.
pub struct BuildDisplayList<'a> {
/// The state accumulated while building the display list.
pub state: DisplayListBuildState<'a>,
}
impl<'a> BuildDisplayList<'a> {
    /// Recursively builds display list items for `flow` and its descendants,
    /// pushing and popping stacking-context and scroll-root ids whenever the
    /// traversal crosses one of their boundaries.
    #[inline]
    pub fn traverse(&mut self, flow: &mut Flow) {
        let stacking_context_changed =
            flow::base(flow).stacking_context_id != self.state.stacking_context_id();
        if stacking_context_changed {
            self.state.push_stacking_context_id(flow::base(flow).stacking_context_id);
        }

        let scroll_root_changed =
            flow::base(flow).scroll_root_id != self.state.scroll_root_id();
        if scroll_root_changed {
            self.state.push_scroll_root_id(flow::base(flow).scroll_root_id);
        }

        if self.should_process() {
            flow.build_display_list(&mut self.state);
            // Building the display list repairs repaint damage.
            flow::mut_base(flow).restyle_damage.remove(REPAINT);
        }

        for child in flow::child_iter_mut(flow) {
            self.traverse(child);
        }

        // Pop in the same order we pushed checks were made (stacking context
        // first, then scroll root), mirroring the original implementation.
        if stacking_context_changed {
            self.state.pop_stacking_context_id();
        }
        if scroll_root_changed {
            self.state.pop_scroll_root_id();
        }
    }

    /// Whether the current flow should emit display items. Currently always
    /// true; kept as a hook for future filtering.
    #[inline]
    fn should_process(&self) -> bool {
        true
    }
}
|
{
self.context.local_context()
}
|
identifier_body
|
rtdeps.rs
|
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module contains the linkage attributes to all runtime dependencies of
//! the standard library This varies per-platform, but these libraries are
//! necessary for running libstd.
#![unstable(feature = "std_misc")]
// All platforms need to link to rustrt
#[cfg(not(test))]
#[link(name = "rust_builtin", kind = "static")]
extern {}
// LLVM implements the `frem` instruction as a call to `fmod`, which lives in
|
#[cfg(all(target_os = "linux", not(target_env = "musl")))]
#[link(name = "dl")]
#[link(name = "pthread")]
extern {}
#[cfg(target_os = "android")]
#[link(name = "dl")]
#[link(name = "log")]
extern {}
#[cfg(target_os = "freebsd")]
#[link(name = "execinfo")]
#[link(name = "pthread")]
extern {}
#[cfg(any(target_os = "dragonfly",
target_os = "bitrig",
target_os = "openbsd"))]
#[link(name = "pthread")]
extern {}
#[cfg(target_os = "macos")]
#[link(name = "System")]
extern {}
#[cfg(target_os = "ios")]
#[link(name = "System")]
extern {}
|
// libm. Hence, we must explicitly link to it.
//
// On Linux, librt and libdl are indirect dependencies via std,
// and binutils 2.22+ won't add them automatically
|
random_line_split
|
applicable_declarations.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Applicable declarations management.
use properties::PropertyDeclarationBlock;
use rule_tree::{CascadeLevel, StyleSource};
use servo_arc::Arc;
use shared_lock::Locked;
use smallvec::SmallVec;
use std::fmt::{self, Debug};
use std::mem;
/// List of applicable declarations. This is a transient structure that shuttles
/// declarations between selector matching and inserting into the rule tree, and
/// therefore we want to avoid heap-allocation where possible.
///
/// In measurements on wikipedia, we pretty much never have more than 8 applicable
/// declarations, so we could consider making this 8 entries instead of 16.
/// However, it may depend a lot on workload, and stack space is cheap.
pub type ApplicableDeclarationList = SmallVec<[ApplicableDeclarationBlock; 16]>;
/// Blink uses 18 bits to store source order, and does not check overflow [1].
/// That's a limit that could be reached in realistic webpages, so we use
/// 24 bits and enforce defined behavior in the overflow case.
///
/// Note that the value of 24 is also hard-coded into the level() accessor,
/// which does a byte-aligned load of the 4th byte. If you change this value
/// you'll need to change that as well.
///
/// [1] https://cs.chromium.org/chromium/src/third_party/WebKit/Source/core/css/
/// RuleSet.h?l=128&rcl=90140ab80b84d0f889abc253410f44ed54ae04f3
const SOURCE_ORDER_BITS: usize = 24;
/// Mask selecting the low `SOURCE_ORDER_BITS` bits of the packed word.
const SOURCE_ORDER_MASK: u32 = (1 << SOURCE_ORDER_BITS) - 1;
/// Largest representable source order; larger values saturate to this.
const SOURCE_ORDER_MAX: u32 = SOURCE_ORDER_MASK;
/// Stores the source order of a block and the cascade level it belongs to.
// Packed layout: low 24 bits = source order, high byte = cascade level.
#[derive(Clone, Copy, Eq, MallocSizeOf, PartialEq)]
struct SourceOrderAndCascadeLevel(u32);
impl SourceOrderAndCascadeLevel {
/// Packs `source_order` (saturated to `SOURCE_ORDER_MAX`) into the low
/// 24 bits and `cascade_level` into the high byte.
fn new(source_order: u32, cascade_level: CascadeLevel) -> SourceOrderAndCascadeLevel {
let mut bits = ::std::cmp::min(source_order, SOURCE_ORDER_MAX);
bits |= (cascade_level as u8 as u32) << SOURCE_ORDER_BITS;
SourceOrderAndCascadeLevel(bits)
}
/// Unpacks the source order from the low 24 bits.
fn order(&self) -> u32 {
self.0 & SOURCE_ORDER_MASK
}
/// Unpacks the cascade level from the high byte.
fn level(&self) -> CascadeLevel {
unsafe {
// Transmute rather than shifting so that we're sure the compiler
// emits a simple byte-aligned load.
// NOTE(review): indexing byte 3 assumes a little-endian target; on a
// big-endian target the cascade level would live in byte 0. Confirm
// supported platforms before relying on this, or shift instead.
let as_bytes: [u8; 4] = mem::transmute(self.0);
CascadeLevel::from_byte(as_bytes[3])
}
}
}
impl Debug for SourceOrderAndCascadeLevel {
/// Formats the unpacked `order` and `level` components rather than the
/// raw packed integer.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("SourceOrderAndCascadeLevel")
.field("order", &self.order())
.field("level", &self.level())
.finish()
}
}
/// A property declaration together with its precedence among rules of equal
/// specificity so that we can sort them.
///
/// This represents the declarations in a given declaration block for a given
/// importance.
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub struct ApplicableDeclarationBlock {
/// The style source, either a style rule, or a property declaration block.
#[ignore_malloc_size_of = "Arc"]
pub source: StyleSource,
/// The source order of the block, and the cascade level it belongs to.
// Packed together; see `SourceOrderAndCascadeLevel` for the encoding.
order_and_level: SourceOrderAndCascadeLevel,
/// The specificity of the selector this block is represented by.
pub specificity: u32,
}
impl ApplicableDeclarationBlock {
/// Constructs an applicable declaration block from a given property
/// declaration block and importance.
#[inline]
pub fn
|
(
declarations: Arc<Locked<PropertyDeclarationBlock>>,
level: CascadeLevel,
) -> Self {
ApplicableDeclarationBlock {
source: StyleSource::Declarations(declarations),
order_and_level: SourceOrderAndCascadeLevel::new(0, level),
specificity: 0,
}
}
/// Constructs an applicable declaration block from the given components
#[inline]
pub fn new(source: StyleSource, order: u32, level: CascadeLevel, specificity: u32) -> Self {
ApplicableDeclarationBlock {
source: source,
order_and_level: SourceOrderAndCascadeLevel::new(order, level),
specificity: specificity,
}
}
/// Returns the source order of the block.
#[inline]
pub fn source_order(&self) -> u32 {
self.order_and_level.order()
}
/// Returns the cascade level of the block.
#[inline]
pub fn level(&self) -> CascadeLevel {
self.order_and_level.level()
}
/// Convenience method to consume self and return the source alongside the
/// level.
#[inline]
pub fn order_and_level(self) -> (StyleSource, CascadeLevel) {
let level = self.level();
(self.source, level)
}
}
|
from_declarations
|
identifier_name
|
applicable_declarations.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Applicable declarations management.
use properties::PropertyDeclarationBlock;
use rule_tree::{CascadeLevel, StyleSource};
use servo_arc::Arc;
use shared_lock::Locked;
use smallvec::SmallVec;
use std::fmt::{self, Debug};
use std::mem;
/// List of applicable declarations. This is a transient structure that shuttles
/// declarations between selector matching and inserting into the rule tree, and
/// therefore we want to avoid heap-allocation where possible.
///
/// In measurements on wikipedia, we pretty much never have more than 8 applicable
/// declarations, so we could consider making this 8 entries instead of 16.
/// However, it may depend a lot on workload, and stack space is cheap.
pub type ApplicableDeclarationList = SmallVec<[ApplicableDeclarationBlock; 16]>;
/// Blink uses 18 bits to store source order, and does not check overflow [1].
/// That's a limit that could be reached in realistic webpages, so we use
/// 24 bits and enforce defined behavior in the overflow case.
///
/// Note that the value of 24 is also hard-coded into the level() accessor,
/// which does a byte-aligned load of the 4th byte. If you change this value
/// you'll need to change that as well.
///
/// [1] https://cs.chromium.org/chromium/src/third_party/WebKit/Source/core/css/
/// RuleSet.h?l=128&rcl=90140ab80b84d0f889abc253410f44ed54ae04f3
const SOURCE_ORDER_BITS: usize = 24;
const SOURCE_ORDER_MASK: u32 = (1 << SOURCE_ORDER_BITS) - 1;
const SOURCE_ORDER_MAX: u32 = SOURCE_ORDER_MASK;
/// Stores the source order of a block and the cascade level it belongs to.
#[derive(Clone, Copy, Eq, MallocSizeOf, PartialEq)]
struct SourceOrderAndCascadeLevel(u32);
impl SourceOrderAndCascadeLevel {
    /// Packs a (saturated) source order and a cascade level into one `u32`.
    fn new(source_order: u32, cascade_level: CascadeLevel) -> SourceOrderAndCascadeLevel {
        // Saturate rather than overflow into the cascade-level byte.
        let clamped = ::std::cmp::min(source_order, SOURCE_ORDER_MAX);
        let packed = clamped | ((cascade_level as u8 as u32) << SOURCE_ORDER_BITS);
        SourceOrderAndCascadeLevel(packed)
    }

    /// Unpacks the source order from the low bits.
    fn order(&self) -> u32 {
        self.0 & SOURCE_ORDER_MASK
    }

    /// Unpacks the cascade level from the high byte.
    fn level(&self) -> CascadeLevel {
        unsafe {
            // Read the high byte via transmute so the compiler emits a plain
            // byte-aligned load rather than a shift.
            let bytes: [u8; 4] = mem::transmute(self.0);
            CascadeLevel::from_byte(bytes[3])
        }
    }
}
impl Debug for SourceOrderAndCascadeLevel {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
/// A property declaration together with its precedence among rules of equal
/// specificity so that we can sort them.
///
/// This represents the declarations in a given declaration block for a given
/// importance.
#[derive(Clone, Debug, MallocSizeOf, PartialEq)]
pub struct ApplicableDeclarationBlock {
/// The style source, either a style rule, or a property declaration block.
#[ignore_malloc_size_of = "Arc"]
pub source: StyleSource,
/// The source order of the block, and the cascade level it belongs to.
order_and_level: SourceOrderAndCascadeLevel,
/// The specificity of the selector this block is represented by.
pub specificity: u32,
}
impl ApplicableDeclarationBlock {
    /// Builds a block from a bare property declaration list, using a source
    /// order and specificity of zero.
    #[inline]
    pub fn from_declarations(
        declarations: Arc<Locked<PropertyDeclarationBlock>>,
        level: CascadeLevel,
    ) -> Self {
        Self::new(StyleSource::Declarations(declarations), 0, level, 0)
    }

    /// Builds a block from all of its components.
    #[inline]
    pub fn new(source: StyleSource, order: u32, level: CascadeLevel, specificity: u32) -> Self {
        ApplicableDeclarationBlock {
            source,
            order_and_level: SourceOrderAndCascadeLevel::new(order, level),
            specificity,
        }
    }

    /// The source order recorded for this block.
    #[inline]
    pub fn source_order(&self) -> u32 {
        self.order_and_level.order()
    }

    /// The cascade level recorded for this block.
    #[inline]
    pub fn level(&self) -> CascadeLevel {
        self.order_and_level.level()
    }

    /// Consumes `self`, yielding the style source together with its cascade
    /// level.
    #[inline]
    pub fn order_and_level(self) -> (StyleSource, CascadeLevel) {
        // Read the level before `source` is moved out.
        let level = self.level();
        (self.source, level)
    }
}
|
{
f.debug_struct("SourceOrderAndCascadeLevel")
.field("order", &self.order())
.field("level", &self.level())
.finish()
}
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.