file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
redundant_else.rs
|
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Block, Expr, ExprKind, Stmt, StmtKind};
use rustc_ast::visit::{walk_expr, Visitor};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for `else` blocks that can be removed without changing semantics.
///
/// ### Why is this bad?
/// The `else` block adds unnecessary indentation and verbosity.
///
/// ### Known problems
/// Some may prefer to keep the `else` block for clarity.
///
/// ### Example
/// ```rust
/// fn my_func(count: u32) {
/// if count == 0 {
/// print!("Nothing to do");
/// return;
/// } else {
/// print!("Moving on...");
/// }
/// }
/// ```
/// Use instead:
/// ```rust
/// fn my_func(count: u32) {
/// if count == 0 {
/// print!("Nothing to do");
/// return;
/// }
/// print!("Moving on...");
/// }
/// ```
pub REDUNDANT_ELSE,
pedantic,
"`else` branch that can be removed without changing semantics"
}
declare_lint_pass!(RedundantElse => [REDUNDANT_ELSE]);
impl EarlyLintPass for RedundantElse {
fn check_stmt(&mut self, cx: &EarlyContext<'_>, stmt: &Stmt)
|
// else if else
ExprKind::If(_, next_then, Some(next_els)) => {
then = next_then;
els = next_els;
continue;
},
// else if without else
ExprKind::If(..) => return,
// done
_ => break,
}
}
span_lint_and_help(
cx,
REDUNDANT_ELSE,
els.span,
"redundant else block",
None,
"remove the `else` block and move the contents out",
);
}
}
/// Call `check` functions to check if an expression always breaks control flow
#[derive(Default)]
struct BreakVisitor {
is_break: bool,
}
impl<'ast> Visitor<'ast> for BreakVisitor {
fn visit_block(&mut self, block: &'ast Block) {
self.is_break = match block.stmts.as_slice() {
[.., last] => self.check_stmt(last),
_ => false,
};
}
fn visit_expr(&mut self, expr: &'ast Expr) {
self.is_break = match expr.kind {
ExprKind::Break(..) | ExprKind::Continue(..) | ExprKind::Ret(..) => true,
ExprKind::Match(_, ref arms) => arms.iter().all(|arm| self.check_expr(&arm.body)),
ExprKind::If(_, ref then, Some(ref els)) => self.check_block(then) && self.check_expr(els),
ExprKind::If(_, _, None)
// ignore loops for simplicity
| ExprKind::While(..) | ExprKind::ForLoop(..) | ExprKind::Loop(..) => false,
_ => {
walk_expr(self, expr);
return;
},
};
}
}
impl BreakVisitor {
fn check<T>(&mut self, item: T, visit: fn(&mut Self, T)) -> bool {
visit(self, item);
std::mem::replace(&mut self.is_break, false)
}
fn check_block(&mut self, block: &Block) -> bool {
self.check(block, Self::visit_block)
}
fn check_expr(&mut self, expr: &Expr) -> bool {
self.check(expr, Self::visit_expr)
}
fn check_stmt(&mut self, stmt: &Stmt) -> bool {
self.check(stmt, Self::visit_stmt)
}
}
|
{
if in_external_macro(cx.sess, stmt.span) {
return;
}
// Only look at expressions that are a whole statement
let expr: &Expr = match &stmt.kind {
StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr,
_ => return,
};
// if else
let (mut then, mut els): (&Block, &Expr) = match &expr.kind {
ExprKind::If(_, then, Some(els)) => (then, els),
_ => return,
};
loop {
if !BreakVisitor::default().check_block(then) {
// then block does not always break
return;
}
match &els.kind {
|
identifier_body
|
redundant_else.rs
|
use clippy_utils::diagnostics::span_lint_and_help;
use rustc_ast::ast::{Block, Expr, ExprKind, Stmt, StmtKind};
use rustc_ast::visit::{walk_expr, Visitor};
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
declare_clippy_lint! {
/// ### What it does
/// Checks for `else` blocks that can be removed without changing semantics.
///
/// ### Why is this bad?
/// The `else` block adds unnecessary indentation and verbosity.
///
/// ### Known problems
/// Some may prefer to keep the `else` block for clarity.
///
/// ### Example
/// ```rust
/// fn my_func(count: u32) {
/// if count == 0 {
/// print!("Nothing to do");
/// return;
/// } else {
/// print!("Moving on...");
/// }
/// }
/// ```
/// Use instead:
/// ```rust
/// fn my_func(count: u32) {
/// if count == 0 {
/// print!("Nothing to do");
/// return;
/// }
/// print!("Moving on...");
/// }
/// ```
pub REDUNDANT_ELSE,
pedantic,
"`else` branch that can be removed without changing semantics"
}
declare_lint_pass!(RedundantElse => [REDUNDANT_ELSE]);
impl EarlyLintPass for RedundantElse {
fn check_stmt(&mut self, cx: &EarlyContext<'_>, stmt: &Stmt) {
if in_external_macro(cx.sess, stmt.span) {
return;
}
// Only look at expressions that are a whole statement
let expr: &Expr = match &stmt.kind {
StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr,
_ => return,
};
// if else
let (mut then, mut els): (&Block, &Expr) = match &expr.kind {
ExprKind::If(_, then, Some(els)) => (then, els),
_ => return,
};
loop {
if!BreakVisitor::default().check_block(then) {
// then block does not always break
return;
}
match &els.kind {
// else if else
ExprKind::If(_, next_then, Some(next_els)) => {
then = next_then;
els = next_els;
continue;
},
// else if without else
ExprKind::If(..) => return,
// done
_ => break,
}
}
span_lint_and_help(
cx,
REDUNDANT_ELSE,
els.span,
"redundant else block",
None,
"remove the `else` block and move the contents out",
);
}
}
/// Call `check` functions to check if an expression always breaks control flow
#[derive(Default)]
struct BreakVisitor {
is_break: bool,
}
impl<'ast> Visitor<'ast> for BreakVisitor {
fn visit_block(&mut self, block: &'ast Block) {
self.is_break = match block.stmts.as_slice() {
[.., last] => self.check_stmt(last),
_ => false,
};
}
fn visit_expr(&mut self, expr: &'ast Expr) {
self.is_break = match expr.kind {
ExprKind::Break(..) | ExprKind::Continue(..) | ExprKind::Ret(..) => true,
ExprKind::Match(_, ref arms) => arms.iter().all(|arm| self.check_expr(&arm.body)),
ExprKind::If(_, ref then, Some(ref els)) => self.check_block(then) && self.check_expr(els),
ExprKind::If(_, _, None)
// ignore loops for simplicity
| ExprKind::While(..) | ExprKind::ForLoop(..) | ExprKind::Loop(..) => false,
_ => {
walk_expr(self, expr);
return;
},
};
}
}
impl BreakVisitor {
fn check<T>(&mut self, item: T, visit: fn(&mut Self, T)) -> bool {
visit(self, item);
std::mem::replace(&mut self.is_break, false)
}
fn check_block(&mut self, block: &Block) -> bool {
self.check(block, Self::visit_block)
}
fn check_expr(&mut self, expr: &Expr) -> bool {
self.check(expr, Self::visit_expr)
}
fn check_stmt(&mut self, stmt: &Stmt) -> bool {
self.check(stmt, Self::visit_stmt)
|
}
|
}
|
random_line_split
|
debug.rs
|
use winapi::um::{d3d11, d3d11_1, d3dcommon};
use wio::{com::ComPtr, wide::ToWide};
use std::{env, ffi::OsStr, fmt};
#[must_use]
pub struct DebugScope {
annotation: ComPtr<d3d11_1::ID3DUserDefinedAnnotation>,
}
impl DebugScope {
// TODO: Not used currently in release, will be used in the future
#[allow(dead_code)]
pub fn with_name(
context: &ComPtr<d3d11::ID3D11DeviceContext>,
args: fmt::Arguments,
) -> Option<Self> {
let name = format!("{}", args);
// debugging with visual studio and its ilk *really* doesn't like calling this on a
// deferred context when replaying a capture, compared to renderdoc
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
// TODO: find a better way to detect either if RD or VS is active debugger
if env::var("GFX_NO_RENDERDOC").is_ok() {
return None;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.BeginEvent(msg.as_ptr() as _);
}
Some(DebugScope { annotation })
}
}
impl Drop for DebugScope {
fn
|
(&mut self) {
unsafe {
self.annotation.EndEvent();
}
}
}
pub fn debug_marker(context: &ComPtr<d3d11::ID3D11DeviceContext>, name: &str) {
// same here
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
if env::var("GFX_NO_RENDERDOC").is_ok() {
return;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.SetMarker(msg.as_ptr() as _);
}
}
pub fn verify_debug_ascii(name: &str) -> bool {
let res = name.is_ascii();
if!res {
error!("DX11 buffer names must be ASCII");
}
res
}
/// Set the debug name of a resource.
///
/// Must be ASCII.
///
/// SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name(resource: &d3d11::ID3D11DeviceChild, name: &str) {
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
}
/// Set the debug name of a resource with a given suffix.
///
/// Must be ASCII.
///
/// The given string will be mutated to add the suffix, then restored to it's original state.
/// This saves an allocation. SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name_with_suffix(
resource: &d3d11::ID3D11DeviceChild,
name: &mut String,
suffix: &str,
) {
name.push_str(suffix);
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
name.drain((name.len() - suffix.len())..);
}
|
drop
|
identifier_name
|
debug.rs
|
use winapi::um::{d3d11, d3d11_1, d3dcommon};
use wio::{com::ComPtr, wide::ToWide};
use std::{env, ffi::OsStr, fmt};
#[must_use]
pub struct DebugScope {
annotation: ComPtr<d3d11_1::ID3DUserDefinedAnnotation>,
}
impl DebugScope {
// TODO: Not used currently in release, will be used in the future
#[allow(dead_code)]
pub fn with_name(
context: &ComPtr<d3d11::ID3D11DeviceContext>,
args: fmt::Arguments,
) -> Option<Self> {
let name = format!("{}", args);
// debugging with visual studio and its ilk *really* doesn't like calling this on a
// deferred context when replaying a capture, compared to renderdoc
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
// TODO: find a better way to detect either if RD or VS is active debugger
if env::var("GFX_NO_RENDERDOC").is_ok() {
return None;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.BeginEvent(msg.as_ptr() as _);
}
Some(DebugScope { annotation })
}
}
impl Drop for DebugScope {
fn drop(&mut self) {
unsafe {
self.annotation.EndEvent();
}
}
}
pub fn debug_marker(context: &ComPtr<d3d11::ID3D11DeviceContext>, name: &str)
|
pub fn verify_debug_ascii(name: &str) -> bool {
let res = name.is_ascii();
if!res {
error!("DX11 buffer names must be ASCII");
}
res
}
/// Set the debug name of a resource.
///
/// Must be ASCII.
///
/// SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name(resource: &d3d11::ID3D11DeviceChild, name: &str) {
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
}
/// Set the debug name of a resource with a given suffix.
///
/// Must be ASCII.
///
/// The given string will be mutated to add the suffix, then restored to it's original state.
/// This saves an allocation. SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name_with_suffix(
resource: &d3d11::ID3D11DeviceChild,
name: &mut String,
suffix: &str,
) {
name.push_str(suffix);
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
name.drain((name.len() - suffix.len())..);
}
|
{
// same here
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
if env::var("GFX_NO_RENDERDOC").is_ok() {
return;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.SetMarker(msg.as_ptr() as _);
}
}
|
identifier_body
|
debug.rs
|
use winapi::um::{d3d11, d3d11_1, d3dcommon};
use wio::{com::ComPtr, wide::ToWide};
use std::{env, ffi::OsStr, fmt};
#[must_use]
pub struct DebugScope {
annotation: ComPtr<d3d11_1::ID3DUserDefinedAnnotation>,
}
impl DebugScope {
// TODO: Not used currently in release, will be used in the future
#[allow(dead_code)]
pub fn with_name(
context: &ComPtr<d3d11::ID3D11DeviceContext>,
args: fmt::Arguments,
) -> Option<Self> {
let name = format!("{}", args);
// debugging with visual studio and its ilk *really* doesn't like calling this on a
// deferred context when replaying a capture, compared to renderdoc
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
// TODO: find a better way to detect either if RD or VS is active debugger
if env::var("GFX_NO_RENDERDOC").is_ok() {
return None;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.BeginEvent(msg.as_ptr() as _);
}
Some(DebugScope { annotation })
}
}
impl Drop for DebugScope {
fn drop(&mut self) {
unsafe {
self.annotation.EndEvent();
}
}
}
pub fn debug_marker(context: &ComPtr<d3d11::ID3D11DeviceContext>, name: &str) {
// same here
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
if env::var("GFX_NO_RENDERDOC").is_ok() {
return;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.SetMarker(msg.as_ptr() as _);
}
}
pub fn verify_debug_ascii(name: &str) -> bool {
let res = name.is_ascii();
if!res
|
res
}
/// Set the debug name of a resource.
///
/// Must be ASCII.
///
/// SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name(resource: &d3d11::ID3D11DeviceChild, name: &str) {
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
}
/// Set the debug name of a resource with a given suffix.
///
/// Must be ASCII.
///
/// The given string will be mutated to add the suffix, then restored to it's original state.
/// This saves an allocation. SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name_with_suffix(
resource: &d3d11::ID3D11DeviceChild,
name: &mut String,
suffix: &str,
) {
name.push_str(suffix);
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
name.drain((name.len() - suffix.len())..);
}
|
{
error!("DX11 buffer names must be ASCII");
}
|
conditional_block
|
debug.rs
|
use winapi::um::{d3d11, d3d11_1, d3dcommon};
use wio::{com::ComPtr, wide::ToWide};
use std::{env, ffi::OsStr, fmt};
#[must_use]
pub struct DebugScope {
annotation: ComPtr<d3d11_1::ID3DUserDefinedAnnotation>,
}
impl DebugScope {
// TODO: Not used currently in release, will be used in the future
#[allow(dead_code)]
pub fn with_name(
context: &ComPtr<d3d11::ID3D11DeviceContext>,
args: fmt::Arguments,
) -> Option<Self> {
let name = format!("{}", args);
// debugging with visual studio and its ilk *really* doesn't like calling this on a
// deferred context when replaying a capture, compared to renderdoc
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
// TODO: find a better way to detect either if RD or VS is active debugger
if env::var("GFX_NO_RENDERDOC").is_ok() {
return None;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.BeginEvent(msg.as_ptr() as _);
}
Some(DebugScope { annotation })
}
}
impl Drop for DebugScope {
fn drop(&mut self) {
unsafe {
self.annotation.EndEvent();
}
}
}
pub fn debug_marker(context: &ComPtr<d3d11::ID3D11DeviceContext>, name: &str) {
// same here
if unsafe { context.GetType() } == d3d11::D3D11_DEVICE_CONTEXT_DEFERRED {
if env::var("GFX_NO_RENDERDOC").is_ok() {
return;
}
}
let annotation = context
.cast::<d3d11_1::ID3DUserDefinedAnnotation>()
.unwrap();
let msg: &OsStr = name.as_ref();
let msg: Vec<u16> = msg.to_wide_null();
unsafe {
annotation.SetMarker(msg.as_ptr() as _);
}
}
pub fn verify_debug_ascii(name: &str) -> bool {
let res = name.is_ascii();
if!res {
error!("DX11 buffer names must be ASCII");
}
res
}
/// Set the debug name of a resource.
///
/// Must be ASCII.
///
/// SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name(resource: &d3d11::ID3D11DeviceChild, name: &str) {
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
}
|
/// Must be ASCII.
///
/// The given string will be mutated to add the suffix, then restored to it's original state.
/// This saves an allocation. SetPrivateData will copy the data internally so the data doesn't need to live.
pub fn set_debug_name_with_suffix(
resource: &d3d11::ID3D11DeviceChild,
name: &mut String,
suffix: &str,
) {
name.push_str(suffix);
unsafe {
resource.SetPrivateData(
(&d3dcommon::WKPDID_D3DDebugObjectName) as *const _,
name.len() as _,
name.as_ptr() as *const _,
);
}
name.drain((name.len() - suffix.len())..);
}
|
/// Set the debug name of a resource with a given suffix.
///
|
random_line_split
|
edge-trigger-test.rs
|
/// Ensure all sockets operate on edge trigger mode.
extern crate amy;
use std::net::{TcpListener, TcpStream};
use std::thread;
use std::str;
use std::io::{Read, Write};
use amy::{
Poller,
Event,
};
const IP: &'static str = "127.0.0.1:10008";
/// This test ensures that only one write event is received, even if no data is written. On a level
/// triggered system, write events would come on every poll.
#[test]
fn edge_trigger()
|
sock.set_nonblocking(true).unwrap();
// Create the poller and registrar
let mut poller = Poller::new().unwrap();
let registrar = poller.get_registrar();
// The socket should become writable once it's connected
let id = registrar.register(&sock, Event::Write).unwrap();
let notifications = poller.wait(250).unwrap();
assert_eq!(1, notifications.len());
assert_eq!(id, notifications[0].id);
assert_eq!(Event::Write, notifications[0].event);
// Poll as second time. There should be no notification, since the socket is edge triggered.
let notifications = poller.wait(250).unwrap();
assert_eq!(0, notifications.len());
// Tell the listening thread to stop itself
sock.write("stop".as_bytes()).unwrap();
}
|
{
// Spawn a listening thread and accept one connection
thread::spawn(|| {
let listener = TcpListener::bind(IP).unwrap();
let (mut sock, _) = listener.accept().unwrap();
// When the test completes, the client will send a "stop" message to shutdown the server.
let mut buf = String::new();
sock.read_to_string(&mut buf).unwrap();
});
// Setup a client socket in non-blocking mode
// Loop until we connect because the listener needs to start first
let mut sock;
loop {
if let Ok(s) = TcpStream::connect(IP) {
sock = s;
break;
}
}
|
identifier_body
|
edge-trigger-test.rs
|
/// Ensure all sockets operate on edge trigger mode.
extern crate amy;
use std::net::{TcpListener, TcpStream};
use std::thread;
use std::str;
use std::io::{Read, Write};
use amy::{
Poller,
Event,
};
const IP: &'static str = "127.0.0.1:10008";
/// This test ensures that only one write event is received, even if no data is written. On a level
/// triggered system, write events would come on every poll.
#[test]
fn
|
() {
// Spawn a listening thread and accept one connection
thread::spawn(|| {
let listener = TcpListener::bind(IP).unwrap();
let (mut sock, _) = listener.accept().unwrap();
// When the test completes, the client will send a "stop" message to shutdown the server.
let mut buf = String::new();
sock.read_to_string(&mut buf).unwrap();
});
// Setup a client socket in non-blocking mode
// Loop until we connect because the listener needs to start first
let mut sock;
loop {
if let Ok(s) = TcpStream::connect(IP) {
sock = s;
break;
}
}
sock.set_nonblocking(true).unwrap();
// Create the poller and registrar
let mut poller = Poller::new().unwrap();
let registrar = poller.get_registrar();
// The socket should become writable once it's connected
let id = registrar.register(&sock, Event::Write).unwrap();
let notifications = poller.wait(250).unwrap();
assert_eq!(1, notifications.len());
assert_eq!(id, notifications[0].id);
assert_eq!(Event::Write, notifications[0].event);
// Poll as second time. There should be no notification, since the socket is edge triggered.
let notifications = poller.wait(250).unwrap();
assert_eq!(0, notifications.len());
// Tell the listening thread to stop itself
sock.write("stop".as_bytes()).unwrap();
}
|
edge_trigger
|
identifier_name
|
edge-trigger-test.rs
|
/// Ensure all sockets operate on edge trigger mode.
extern crate amy;
use std::net::{TcpListener, TcpStream};
use std::thread;
use std::str;
use std::io::{Read, Write};
use amy::{
Poller,
Event,
|
/// triggered system, write events would come on every poll.
#[test]
fn edge_trigger() {
// Spawn a listening thread and accept one connection
thread::spawn(|| {
let listener = TcpListener::bind(IP).unwrap();
let (mut sock, _) = listener.accept().unwrap();
// When the test completes, the client will send a "stop" message to shutdown the server.
let mut buf = String::new();
sock.read_to_string(&mut buf).unwrap();
});
// Setup a client socket in non-blocking mode
// Loop until we connect because the listener needs to start first
let mut sock;
loop {
if let Ok(s) = TcpStream::connect(IP) {
sock = s;
break;
}
}
sock.set_nonblocking(true).unwrap();
// Create the poller and registrar
let mut poller = Poller::new().unwrap();
let registrar = poller.get_registrar();
// The socket should become writable once it's connected
let id = registrar.register(&sock, Event::Write).unwrap();
let notifications = poller.wait(250).unwrap();
assert_eq!(1, notifications.len());
assert_eq!(id, notifications[0].id);
assert_eq!(Event::Write, notifications[0].event);
// Poll as second time. There should be no notification, since the socket is edge triggered.
let notifications = poller.wait(250).unwrap();
assert_eq!(0, notifications.len());
// Tell the listening thread to stop itself
sock.write("stop".as_bytes()).unwrap();
}
|
};
const IP: &'static str = "127.0.0.1:10008";
/// This test ensures that only one write event is received, even if no data is written. On a level
|
random_line_split
|
edge-trigger-test.rs
|
/// Ensure all sockets operate on edge trigger mode.
extern crate amy;
use std::net::{TcpListener, TcpStream};
use std::thread;
use std::str;
use std::io::{Read, Write};
use amy::{
Poller,
Event,
};
const IP: &'static str = "127.0.0.1:10008";
/// This test ensures that only one write event is received, even if no data is written. On a level
/// triggered system, write events would come on every poll.
#[test]
fn edge_trigger() {
// Spawn a listening thread and accept one connection
thread::spawn(|| {
let listener = TcpListener::bind(IP).unwrap();
let (mut sock, _) = listener.accept().unwrap();
// When the test completes, the client will send a "stop" message to shutdown the server.
let mut buf = String::new();
sock.read_to_string(&mut buf).unwrap();
});
// Setup a client socket in non-blocking mode
// Loop until we connect because the listener needs to start first
let mut sock;
loop {
if let Ok(s) = TcpStream::connect(IP)
|
}
sock.set_nonblocking(true).unwrap();
// Create the poller and registrar
let mut poller = Poller::new().unwrap();
let registrar = poller.get_registrar();
// The socket should become writable once it's connected
let id = registrar.register(&sock, Event::Write).unwrap();
let notifications = poller.wait(250).unwrap();
assert_eq!(1, notifications.len());
assert_eq!(id, notifications[0].id);
assert_eq!(Event::Write, notifications[0].event);
// Poll as second time. There should be no notification, since the socket is edge triggered.
let notifications = poller.wait(250).unwrap();
assert_eq!(0, notifications.len());
// Tell the listening thread to stop itself
sock.write("stop".as_bytes()).unwrap();
}
|
{
sock = s;
break;
}
|
conditional_block
|
iter.rs
|
//! Collection of iterators over a graph.
//!
//! This module contains various iterators, meant to be returned from the different
//! iterator functions.
use std::collections::HashMap;
use std::collections::btree_map::Iter as BTreeIter;
use std::cell::Ref;
use graph::graph::Element;
use vec_map::Keys;
macro_rules! iter_impl {
(
$name:ident < $($typaram:ident),* >,
$subtype:ident < $($stparam:ty),* >
) => {
/// Iterator for a Graph structure (see [Graph](../struct.Graph.html)).
pub struct $name <'a, $($typaram),*>
where $($typaram: 'a),*
{
element : $subtype <'a, $($stparam),* >
}
impl<'a, $($typaram),*> $name< 'a, $($typaram),*>
where $($typaram: 'a),*
{
/// Create a new iterator from a parent structure.
pub fn new(element : $subtype <'a, $($stparam),* > ) -> Self {
$name {
element : element
}
}
}
}
}
iter_impl!(ConnIdVal<E>, BTreeIter<usize, E>);
iter_impl!(IterEdges<E>, ConnIdVal<E>);
iter_impl!(IterConnIds<E>, ConnIdVal<E>);
iter_impl!(ListIds<V, E>, Keys<Element<V, E>>);
/// Iterator for a root structure.
///
/// The root iterator transforms a linked hashmap into a string of indices.
pub struct Root<'a> {
tree : Ref<'a, HashMap<usize, Option<usize>>>,
start : usize,
}
impl<'a> Root<'a> {
/// Create a new iterator for a linked hashmap.
pub fn new(tree : Ref<'a, HashMap<usize, Option<usize>>>, start : usize) -> Root<'a> {
Root {tree : tree, start : start}
}
}
impl<'a, E : 'a> Iterator for ConnIdVal<'a, E> {
type Item = (usize, &'a E);
fn next(&mut self) -> Option<(usize, &'a E)> {
self.element.next().map(|(&n, e)| (n, e))
}
}
impl<'a, E : 'a> Iterator for IterEdges<'a, E> {
type Item = &'a E;
fn next(&mut self) -> Option<&'a E> {
self.element.next().map(|x| x.1)
}
}
impl<'a, E : 'a> Iterator for IterConnIds<'a, E> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.element.next().map(|x| x.0)
}
}
impl<'a, V : 'a, E : 'a> Iterator for ListIds<'a, V, E> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.element.next()
}
}
impl<'a> Iterator for Root<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
let res = self.tree.get(&self.start).and_then(|&t| t);
if let Some(t) = res
|
res
}
}
|
{self.start = t;}
|
conditional_block
|
iter.rs
|
//! Collection of iterators over a graph.
//!
//! This module contains various iterators, meant to be returned from the different
//! iterator functions.
use std::collections::HashMap;
use std::collections::btree_map::Iter as BTreeIter;
use std::cell::Ref;
use graph::graph::Element;
use vec_map::Keys;
macro_rules! iter_impl {
(
$name:ident < $($typaram:ident),* >,
$subtype:ident < $($stparam:ty),* >
) => {
/// Iterator for a Graph structure (see [Graph](../struct.Graph.html)).
pub struct $name <'a, $($typaram),*>
where $($typaram: 'a),*
{
element : $subtype <'a, $($stparam),* >
}
impl<'a, $($typaram),*> $name< 'a, $($typaram),*>
where $($typaram: 'a),*
{
/// Create a new iterator from a parent structure.
pub fn new(element : $subtype <'a, $($stparam),* > ) -> Self {
$name {
element : element
}
}
}
}
}
iter_impl!(ConnIdVal<E>, BTreeIter<usize, E>);
iter_impl!(IterEdges<E>, ConnIdVal<E>);
iter_impl!(IterConnIds<E>, ConnIdVal<E>);
iter_impl!(ListIds<V, E>, Keys<Element<V, E>>);
/// Iterator for a root structure.
///
/// The root iterator transforms a linked hashmap into a string of indices.
pub struct Root<'a> {
tree : Ref<'a, HashMap<usize, Option<usize>>>,
start : usize,
}
impl<'a> Root<'a> {
/// Create a new iterator for a linked hashmap.
pub fn
|
(tree : Ref<'a, HashMap<usize, Option<usize>>>, start : usize) -> Root<'a> {
Root {tree : tree, start : start}
}
}
impl<'a, E : 'a> Iterator for ConnIdVal<'a, E> {
type Item = (usize, &'a E);
fn next(&mut self) -> Option<(usize, &'a E)> {
self.element.next().map(|(&n, e)| (n, e))
}
}
impl<'a, E : 'a> Iterator for IterEdges<'a, E> {
type Item = &'a E;
fn next(&mut self) -> Option<&'a E> {
self.element.next().map(|x| x.1)
}
}
impl<'a, E : 'a> Iterator for IterConnIds<'a, E> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.element.next().map(|x| x.0)
}
}
impl<'a, V : 'a, E : 'a> Iterator for ListIds<'a, V, E> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.element.next()
}
}
impl<'a> Iterator for Root<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
let res = self.tree.get(&self.start).and_then(|&t| t);
if let Some(t) = res {self.start = t;}
res
}
}
|
new
|
identifier_name
|
iter.rs
|
//! Collection of iterators over a graph.
//!
//! This module contains various iterators, meant to be returned from the different
//! iterator functions.
use std::collections::HashMap;
use std::collections::btree_map::Iter as BTreeIter;
use std::cell::Ref;
use graph::graph::Element;
use vec_map::Keys;
macro_rules! iter_impl {
(
$name:ident < $($typaram:ident),* >,
$subtype:ident < $($stparam:ty),* >
) => {
/// Iterator for a Graph structure (see [Graph](../struct.Graph.html)).
pub struct $name <'a, $($typaram),*>
where $($typaram: 'a),*
{
element : $subtype <'a, $($stparam),* >
}
impl<'a, $($typaram),*> $name< 'a, $($typaram),*>
where $($typaram: 'a),*
{
/// Create a new iterator from a parent structure.
pub fn new(element : $subtype <'a, $($stparam),* > ) -> Self {
$name {
element : element
}
}
}
}
}
iter_impl!(ConnIdVal<E>, BTreeIter<usize, E>);
iter_impl!(IterEdges<E>, ConnIdVal<E>);
iter_impl!(IterConnIds<E>, ConnIdVal<E>);
iter_impl!(ListIds<V, E>, Keys<Element<V, E>>);
/// Iterator for a root structure.
///
/// The root iterator transforms a linked hashmap into a string of indices.
pub struct Root<'a> {
tree : Ref<'a, HashMap<usize, Option<usize>>>,
start : usize,
}
impl<'a> Root<'a> {
/// Create a new iterator for a linked hashmap.
pub fn new(tree : Ref<'a, HashMap<usize, Option<usize>>>, start : usize) -> Root<'a>
|
}
impl<'a, E : 'a> Iterator for ConnIdVal<'a, E> {
type Item = (usize, &'a E);
fn next(&mut self) -> Option<(usize, &'a E)> {
self.element.next().map(|(&n, e)| (n, e))
}
}
impl<'a, E : 'a> Iterator for IterEdges<'a, E> {
type Item = &'a E;
fn next(&mut self) -> Option<&'a E> {
self.element.next().map(|x| x.1)
}
}
impl<'a, E : 'a> Iterator for IterConnIds<'a, E> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.element.next().map(|x| x.0)
}
}
impl<'a, V : 'a, E : 'a> Iterator for ListIds<'a, V, E> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
self.element.next()
}
}
impl<'a> Iterator for Root<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
let res = self.tree.get(&self.start).and_then(|&t| t);
if let Some(t) = res {self.start = t;}
res
}
}
|
{
Root {tree : tree, start : start}
}
|
identifier_body
|
iter.rs
|
//! Collection of iterators over a graph.
//!
//! This module contains various iterators, meant to be returned from the different
//! iterator functions.
use std::collections::HashMap;
use std::collections::btree_map::Iter as BTreeIter;
use std::cell::Ref;
use graph::graph::Element;
use vec_map::Keys;
macro_rules! iter_impl {
(
$name:ident < $($typaram:ident),* >,
$subtype:ident < $($stparam:ty),* >
) => {
/// Iterator for a Graph structure (see [Graph](../struct.Graph.html)).
pub struct $name <'a, $($typaram),*>
where $($typaram: 'a),*
{
element : $subtype <'a, $($stparam),* >
}
impl<'a, $($typaram),*> $name< 'a, $($typaram),*>
where $($typaram: 'a),*
{
/// Create a new iterator from a parent structure.
pub fn new(element : $subtype <'a, $($stparam),* > ) -> Self {
$name {
element : element
}
}
}
}
}
iter_impl!(ConnIdVal<E>, BTreeIter<usize, E>);
iter_impl!(IterEdges<E>, ConnIdVal<E>);
iter_impl!(IterConnIds<E>, ConnIdVal<E>);
iter_impl!(ListIds<V, E>, Keys<Element<V, E>>);
/// Iterator for a root structure.
///
/// Walks a linked hashmap (node index -> optional successor index),
/// yielding the chain of indices reachable from a starting node.
pub struct Root<'a> {
    // Borrowed link table: maps an index to its successor, if any.
    tree : Ref<'a, HashMap<usize, Option<usize>>>,
    // Cursor: the index whose successor is looked up next.
    start : usize,
}

impl<'a> Root<'a> {
    /// Create a new iterator for a linked hashmap, starting at `start`.
    pub fn new(tree : Ref<'a, HashMap<usize, Option<usize>>>, start : usize) -> Root<'a> {
        Root {
            tree : tree,
            start : start,
        }
    }
}
impl<'a, E : 'a> Iterator for ConnIdVal<'a, E> {
    type Item = (usize, &'a E);

    /// Yields `(node id, edge)` pairs, copying the id out of the borrow.
    fn next(&mut self) -> Option<(usize, &'a E)> {
        match self.element.next() {
            Some((&id, edge)) => Some((id, edge)),
            None => None,
        }
    }
}
impl<'a, E : 'a> Iterator for IterEdges<'a, E> {
    type Item = &'a E;

    /// Yields only the edge component of each `(id, edge)` pair.
    fn next(&mut self) -> Option<&'a E> {
        self.element.next().map(|(_, edge)| edge)
    }
}
impl<'a, E : 'a> Iterator for IterConnIds<'a, E> {
    type Item = usize;

    /// Yields only the node-id component of each `(id, edge)` pair.
    fn next(&mut self) -> Option<usize> {
        self.element.next().map(|(id, _)| id)
    }
}
|
impl<'a, V : 'a, E : 'a> Iterator for ListIds<'a, V, E> {
    type Item = usize;
    /// Delegates directly to the wrapped key iterator.
    fn next(&mut self) -> Option<usize> {
        self.element.next()
    }
}
impl<'a> Iterator for Root<'a> {
    type Item = usize;

    /// Looks up the successor of the current node. On a hit the cursor is
    /// advanced and the successor yielded; iteration ends when the current
    /// node is absent from the table or has no successor.
    fn next(&mut self) -> Option<usize> {
        match self.tree.get(&self.start).and_then(|&next| next) {
            Some(next) => {
                self.start = next;
                Some(next)
            }
            None => None,
        }
    }
}
|
random_line_split
|
|
hidclass.rs
|
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::guiddef::GUID;
use shared::minwindef::{DWORD, PUCHAR, UCHAR, ULONG, USHORT};
use um::winioctl::{
FILE_ANY_ACCESS, FILE_DEVICE_KEYBOARD, METHOD_BUFFERED, METHOD_IN_DIRECT, METHOD_NEITHER,
METHOD_OUT_DIRECT,
};
use um::winnt::BOOLEAN;
DEFINE_GUID!{GUID_DEVINTERFACE_HID,
0x4D1E55B2, 0xF16F, 0x11CF, 0x88, 0xCB, 0x00, 0x11, 0x11, 0x00, 0x00, 0x30}
pub const GUID_CLASS_INPUT: GUID = GUID_DEVINTERFACE_HID;
DEFINE_GUID!{GUID_HID_INTERFACE_NOTIFY,
0x2c4e2e88, 0x25e6, 0x4c33, 0x88, 0x2f, 0x3d, 0x82, 0xe6, 0x07, 0x36, 0x81}
DEFINE_GUID!{GUID_HID_INTERFACE_HIDPARSE,
0xf5c315a5, 0x69ac, 0x4bc2, 0x92, 0x79, 0xd0, 0xb6, 0x45, 0x76, 0xf4, 0x4b}
// FIXME devpropkey stuff
pub const HID_REVISION: DWORD = 0x00000001;
// Codes built with HID_BUFFER_CTL_CODE (macro defined elsewhere —
// presumably METHOD_BUFFERED transfers; confirm against winioctl).
pub const IOCTL_HID_GET_DRIVER_CONFIG: DWORD = HID_BUFFER_CTL_CODE!(100);
pub const IOCTL_HID_SET_DRIVER_CONFIG: DWORD = HID_BUFFER_CTL_CODE!(101);
pub const IOCTL_HID_GET_POLL_FREQUENCY_MSEC: DWORD = HID_BUFFER_CTL_CODE!(102);
pub const IOCTL_HID_SET_POLL_FREQUENCY_MSEC: DWORD = HID_BUFFER_CTL_CODE!(103);
pub const IOCTL_GET_NUM_DEVICE_INPUT_BUFFERS: DWORD = HID_BUFFER_CTL_CODE!(104);
pub const IOCTL_SET_NUM_DEVICE_INPUT_BUFFERS: DWORD = HID_BUFFER_CTL_CODE!(105);
pub const IOCTL_HID_GET_COLLECTION_INFORMATION: DWORD = HID_BUFFER_CTL_CODE!(106);
pub const IOCTL_HID_ENABLE_WAKE_ON_SX: DWORD = HID_BUFFER_CTL_CODE!(107);
pub const IOCTL_HID_SET_S0_IDLE_TIMEOUT: DWORD = HID_BUFFER_CTL_CODE!(108);
// Codes built with HID_CTL_CODE.
pub const IOCTL_HID_GET_COLLECTION_DESCRIPTOR: DWORD = HID_CTL_CODE!(100);
pub const IOCTL_HID_FLUSH_QUEUE: DWORD = HID_CTL_CODE!(101);
// Codes built with HID_IN_CTL_CODE (input-direction report transfers).
pub const IOCTL_HID_SET_FEATURE: DWORD = HID_IN_CTL_CODE!(100);
pub const IOCTL_HID_SET_OUTPUT_REPORT: DWORD = HID_IN_CTL_CODE!(101);
// Codes built with HID_OUT_CTL_CODE (output-direction report/descriptor/string
// queries).
pub const IOCTL_HID_GET_FEATURE: DWORD = HID_OUT_CTL_CODE!(100);
pub const IOCTL_GET_PHYSICAL_DESCRIPTOR: DWORD = HID_OUT_CTL_CODE!(102);
pub const IOCTL_HID_GET_HARDWARE_ID: DWORD = HID_OUT_CTL_CODE!(103);
pub const IOCTL_HID_GET_INPUT_REPORT: DWORD = HID_OUT_CTL_CODE!(104);
pub const IOCTL_HID_GET_OUTPUT_REPORT: DWORD = HID_OUT_CTL_CODE!(105);
pub const IOCTL_HID_GET_MANUFACTURER_STRING: DWORD = HID_OUT_CTL_CODE!(110);
pub const IOCTL_HID_GET_PRODUCT_STRING: DWORD = HID_OUT_CTL_CODE!(111);
pub const IOCTL_HID_GET_SERIALNUMBER_STRING: DWORD = HID_OUT_CTL_CODE!(112);
pub const IOCTL_HID_GET_INDEXED_STRING: DWORD = HID_OUT_CTL_CODE!(120);
pub const IOCTL_HID_GET_MS_GENRE_DESCRIPTOR: DWORD = HID_OUT_CTL_CODE!(121);
pub const IOCTL_HID_ENABLE_SECURE_READ: DWORD = HID_CTL_CODE!(130);
pub const IOCTL_HID_DISABLE_SECURE_READ: DWORD = HID_CTL_CODE!(131);
pub const IOCTL_HID_DEVICERESET_NOTIFICATION: DWORD = HID_CTL_CODE!(140);
STRUCT!{struct HID_XFER_PACKET {
reportBuffer: PUCHAR,
reportBufferLen: ULONG,
|
reportId: UCHAR,
}}
pub type PHID_XFER_PACKET = *mut HID_XFER_PACKET;
//FIXME Stuff for NT_INCLUDED
// NOTE(review): field names suggest this is the reply for
// IOCTL_HID_GET_COLLECTION_INFORMATION — confirm against the Windows DDK.
STRUCT!{struct HID_COLLECTION_INFORMATION {
    DescriptorSize: ULONG,
    Polled: BOOLEAN,
    Reserved1: [UCHAR; 1],
    VendorID: USHORT,
    ProductID: USHORT,
    VersionNumber: USHORT,
}}
pub type PHID_COLLECTION_INFORMATION = *mut HID_COLLECTION_INFORMATION;
// NOTE(review): presumably the payload for IOCTL_HID_GET/SET_DRIVER_CONFIG.
STRUCT!{struct HID_DRIVER_CONFIG {
    Size: ULONG,
    RingBufferSize: ULONG,
}}
pub type PHID_DRIVER_CONFIG = *mut HID_DRIVER_CONFIG;
|
random_line_split
|
|
termbox.rs
|
#[link(name = "termbox", vers = "0.1.0")];
#[crate_type = "lib"];
/*!
*
* A lightweight curses alternative wrapping the termbox library.
*
* # SYNOPSIS
*
* A hello world for the terminal:
*
* extern mod std;
* extern mod termbox;
*
* use tb = termbox;
*
* fn main() {
* tb::init();
* tb::print(1, 1, tb::bold, tb::white, tb::black, "Hello, world!");
* tb::present();
* std::timer::sleep(std::uv_global_loop::get(), 1000);
* tb::shutdown();
* }
*
* # DESCRIPTION
*
* Output is double-buffered.
*
* TODO
*
* # EXAMPLES
*
* TODO
*
*/
use libc::{c_int,c_uint};
use ff = foreign;
/*
* Foreign functions from termbox.
*/
#[link_name="termbox"]
extern mod foreign {
fn tb_init() -> c_int;
fn tb_shutdown();
fn tb_width() -> c_uint;
fn tb_height() -> c_uint;
fn tb_clear();
fn tb_present();
fn tb_set_cursor(cx: c_int, cy: c_int);
fn tb_change_cell(x: c_uint, y: c_uint, ch: u32, fg: u16, bg: u16);
fn tb_select_input_mode(mode: c_int) -> c_int;
fn tb_set_clear_attributes(fg: u16, bg: u16);
fn tb_peek_event(ev: *raw_event, timeout: c_uint) -> c_int;
fn tb_poll_event(ev: *raw_event) -> c_int;
}
pub fn init() -> int {
ff::tb_init() as int
}
pub fn
|
() {
ff::tb_shutdown();
}
pub fn width() -> uint {
ff::tb_width() as uint
}
pub fn height() -> uint {
ff::tb_height() as uint
}
/**
* Clear buffer.
*/
pub fn clear() {
ff::tb_clear();
}
/**
* Write buffer to terminal.
*/
pub fn present() {
ff::tb_present();
}
pub fn set_cursor(cx: int, cy: int) {
ff::tb_set_cursor(cx as c_int, cy as c_int);
}
// low-level wrapper
fn change_cell(x: uint, y: uint, ch: u32, fg: u16, bg: u16) {
ff::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg);
}
// Convert from enums to u16
fn convert_color(c: color) -> u16 {
match c {
black => 0x00,
red => 0x01,
green => 0x02,
yellow => 0x03,
blue => 0x04,
magenta => 0x05,
cyan => 0x06,
white => 0x07
}
}
fn convert_style(sty: style) -> u16 {
match sty {
normal => 0x00,
bold => 0x10,
underline => 0x20,
bold_underline => 0x30
}
}
/**
* Print a string to the buffer. Leftmost charater is at (x, y).
*/
pub fn print(x: uint, y: uint, sty: style, fg: color, bg: color, s: &str) {
let fg: u16 = convert_color(fg) | convert_style(sty);
let bg: u16 = convert_color(bg);
for s.each_chari |i, ch| {
ff::tb_change_cell((x + i) as c_uint, y as c_uint, ch as u32, fg, bg);
}
}
/**
* Print a charater to the buffer.
*/
pub fn print_ch(x: uint, y: uint, sty: style, fg: color, bg: color, ch: char) {
let fg: u16 = convert_color(fg) | convert_style(sty);
let bg: u16 = convert_color(bg);
ff::tb_change_cell(x as c_uint, y as c_uint, ch as u32, fg, bg);
}
enum color {
black,
red,
green,
yellow,
blue,
magenta,
cyan,
white
}
enum style {
normal,
bold,
underline,
bold_underline
}
// Convenience functions
fn with_term(f: fn~()) {
init();
let res = task::try(f);
shutdown();
if result::is_err(&res) {
error!("with_term: An error occured.");
}
}
/*
* The event type matches struct tb_event from termbox.h
*/
struct raw_event {
mut e_type: u8,
mut e_mod: u8,
mut key: u16,
mut ch: u32,
mut w: i32,
mut h: i32
}
fn nil_raw_event() -> raw_event {
raw_event{mut e_type: 0, mut e_mod: 0, mut key: 0, mut ch: 0, mut w: 0, mut h: 0}
}
enum event {
key_event({md: u8, key: u16, ch: u32}),
resize_event({w: i32, h: i32}),
no_event
}
/**
* Get an event if within timeout milliseconds, otherwise return no_event.
*/
pub fn peek_event(timeout: uint) -> event {
let ev = nil_raw_event();
let rc = ff::tb_peek_event(ptr::addr_of(&ev), timeout as c_uint);
unpack_event(rc, &ev)
}
/**
* Blocking function to return next event.
*/
pub fn poll_event() -> event {
let ev = nil_raw_event();
let rc = ff::tb_poll_event(ptr::addr_of(&ev));
unpack_event(rc, &ev)
}
/* helper fn
*
* ev_type
* 0 -> no event
* 1 -> key
* 2 -> resize
* -1 -> error
*/
fn unpack_event(ev_type: c_int, ev: &raw_event) -> event {
match ev_type {
0 => no_event,
1 => key_event({md: ev.e_mod, key: ev.key, ch: ev.ch}),
2 => resize_event({w: ev.w, h: ev.h}),
_ => fail
}
}
|
shutdown
|
identifier_name
|
termbox.rs
|
#[link(name = "termbox", vers = "0.1.0")];
#[crate_type = "lib"];
/*!
*
* A lightweight curses alternative wrapping the termbox library.
*
* # SYNOPSIS
*
* A hello world for the terminal:
*
* extern mod std;
* extern mod termbox;
*
* use tb = termbox;
*
* fn main() {
* tb::init();
* tb::print(1, 1, tb::bold, tb::white, tb::black, "Hello, world!");
* tb::present();
* std::timer::sleep(std::uv_global_loop::get(), 1000);
* tb::shutdown();
* }
*
* # DESCRIPTION
*
* Output is double-buffered.
*
* TODO
*
* # EXAMPLES
*
* TODO
*
*/
use libc::{c_int,c_uint};
use ff = foreign;
/*
* Foreign functions from termbox.
*/
#[link_name="termbox"]
extern mod foreign {
fn tb_init() -> c_int;
fn tb_shutdown();
fn tb_width() -> c_uint;
fn tb_height() -> c_uint;
fn tb_clear();
fn tb_present();
fn tb_set_cursor(cx: c_int, cy: c_int);
fn tb_change_cell(x: c_uint, y: c_uint, ch: u32, fg: u16, bg: u16);
fn tb_select_input_mode(mode: c_int) -> c_int;
fn tb_set_clear_attributes(fg: u16, bg: u16);
fn tb_peek_event(ev: *raw_event, timeout: c_uint) -> c_int;
fn tb_poll_event(ev: *raw_event) -> c_int;
}
pub fn init() -> int {
ff::tb_init() as int
}
pub fn shutdown() {
ff::tb_shutdown();
}
pub fn width() -> uint
|
pub fn height() -> uint {
ff::tb_height() as uint
}
/**
* Clear buffer.
*/
pub fn clear() {
ff::tb_clear();
}
/**
* Write buffer to terminal.
*/
pub fn present() {
ff::tb_present();
}
pub fn set_cursor(cx: int, cy: int) {
ff::tb_set_cursor(cx as c_int, cy as c_int);
}
// low-level wrapper
fn change_cell(x: uint, y: uint, ch: u32, fg: u16, bg: u16) {
ff::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg);
}
// Convert from enums to u16
fn convert_color(c: color) -> u16 {
match c {
black => 0x00,
red => 0x01,
green => 0x02,
yellow => 0x03,
blue => 0x04,
magenta => 0x05,
cyan => 0x06,
white => 0x07
}
}
fn convert_style(sty: style) -> u16 {
match sty {
normal => 0x00,
bold => 0x10,
underline => 0x20,
bold_underline => 0x30
}
}
/**
* Print a string to the buffer. Leftmost charater is at (x, y).
*/
pub fn print(x: uint, y: uint, sty: style, fg: color, bg: color, s: &str) {
let fg: u16 = convert_color(fg) | convert_style(sty);
let bg: u16 = convert_color(bg);
for s.each_chari |i, ch| {
ff::tb_change_cell((x + i) as c_uint, y as c_uint, ch as u32, fg, bg);
}
}
/**
* Print a charater to the buffer.
*/
pub fn print_ch(x: uint, y: uint, sty: style, fg: color, bg: color, ch: char) {
let fg: u16 = convert_color(fg) | convert_style(sty);
let bg: u16 = convert_color(bg);
ff::tb_change_cell(x as c_uint, y as c_uint, ch as u32, fg, bg);
}
enum color {
black,
red,
green,
yellow,
blue,
magenta,
cyan,
white
}
enum style {
normal,
bold,
underline,
bold_underline
}
// Convenience functions
fn with_term(f: fn~()) {
init();
let res = task::try(f);
shutdown();
if result::is_err(&res) {
error!("with_term: An error occured.");
}
}
/*
* The event type matches struct tb_event from termbox.h
*/
struct raw_event {
mut e_type: u8,
mut e_mod: u8,
mut key: u16,
mut ch: u32,
mut w: i32,
mut h: i32
}
fn nil_raw_event() -> raw_event {
raw_event{mut e_type: 0, mut e_mod: 0, mut key: 0, mut ch: 0, mut w: 0, mut h: 0}
}
enum event {
key_event({md: u8, key: u16, ch: u32}),
resize_event({w: i32, h: i32}),
no_event
}
/**
* Get an event if within timeout milliseconds, otherwise return no_event.
*/
pub fn peek_event(timeout: uint) -> event {
let ev = nil_raw_event();
let rc = ff::tb_peek_event(ptr::addr_of(&ev), timeout as c_uint);
unpack_event(rc, &ev)
}
/**
* Blocking function to return next event.
*/
pub fn poll_event() -> event {
let ev = nil_raw_event();
let rc = ff::tb_poll_event(ptr::addr_of(&ev));
unpack_event(rc, &ev)
}
/* helper fn
*
* ev_type
* 0 -> no event
* 1 -> key
* 2 -> resize
* -1 -> error
*/
fn unpack_event(ev_type: c_int, ev: &raw_event) -> event {
match ev_type {
0 => no_event,
1 => key_event({md: ev.e_mod, key: ev.key, ch: ev.ch}),
2 => resize_event({w: ev.w, h: ev.h}),
_ => fail
}
}
|
{
ff::tb_width() as uint
}
|
identifier_body
|
termbox.rs
|
#[link(name = "termbox", vers = "0.1.0")];
#[crate_type = "lib"];
/*!
*
* A lightweight curses alternative wrapping the termbox library.
*
* # SYNOPSIS
*
* A hello world for the terminal:
*
* extern mod std;
* extern mod termbox;
*
* use tb = termbox;
*
* fn main() {
* tb::init();
* tb::print(1, 1, tb::bold, tb::white, tb::black, "Hello, world!");
* tb::present();
* std::timer::sleep(std::uv_global_loop::get(), 1000);
* tb::shutdown();
* }
*
* # DESCRIPTION
*
* Output is double-buffered.
*
* TODO
*
* # EXAMPLES
*
* TODO
*
*/
use libc::{c_int,c_uint};
use ff = foreign;
/*
* Foreign functions from termbox.
*/
#[link_name="termbox"]
extern mod foreign {
fn tb_init() -> c_int;
fn tb_shutdown();
fn tb_width() -> c_uint;
fn tb_height() -> c_uint;
fn tb_clear();
fn tb_present();
fn tb_set_cursor(cx: c_int, cy: c_int);
|
fn tb_peek_event(ev: *raw_event, timeout: c_uint) -> c_int;
fn tb_poll_event(ev: *raw_event) -> c_int;
}
/// Initialize termbox; returns the C library's status code as an int.
pub fn init() -> int {
    ff::tb_init() as int
}
/// Shut termbox down, restoring the terminal state.
pub fn shutdown() {
    ff::tb_shutdown();
}
/// Terminal width in cells.
pub fn width() -> uint {
    ff::tb_width() as uint
}
/// Terminal height in cells.
pub fn height() -> uint {
    ff::tb_height() as uint
}
/**
 * Clear buffer.
 */
pub fn clear() {
    ff::tb_clear();
}
/**
 * Write buffer to terminal.
 */
pub fn present() {
    ff::tb_present();
}
/// Move the visible cursor to cell (cx, cy).
pub fn set_cursor(cx: int, cy: int) {
    ff::tb_set_cursor(cx as c_int, cy as c_int);
}
// low-level wrapper around the raw FFI cell write
fn change_cell(x: uint, y: uint, ch: u32, fg: u16, bg: u16) {
    ff::tb_change_cell(x as c_uint, y as c_uint, ch, fg, bg);
}
// Convert from enums to u16
// (values presumably mirror the TB_* color constants in termbox.h — verify)
fn convert_color(c: color) -> u16 {
    match c {
        black => 0x00,
        red => 0x01,
        green => 0x02,
        yellow => 0x03,
        blue => 0x04,
        magenta => 0x05,
        cyan => 0x06,
        white => 0x07
    }
}
// Style bits live above the color bits and are OR'd into the
// foreground attribute by print/print_ch.
fn convert_style(sty: style) -> u16 {
    match sty {
        normal => 0x00,
        bold => 0x10,
        underline => 0x20,
        bold_underline => 0x30
    }
}
/**
 * Print a string to the buffer. Leftmost character is at (x, y).
 */
pub fn print(x: uint, y: uint, sty: style, fg: color, bg: color, s: &str) {
    // Style bits ride along in the foreground attribute word.
    let fg: u16 = convert_color(fg) | convert_style(sty);
    let bg: u16 = convert_color(bg);
    for s.each_chari |i, ch| {
        ff::tb_change_cell((x + i) as c_uint, y as c_uint, ch as u32, fg, bg);
    }
}
/**
 * Print a single character to the buffer at (x, y).
 */
pub fn print_ch(x: uint, y: uint, sty: style, fg: color, bg: color, ch: char) {
    let fg: u16 = convert_color(fg) | convert_style(sty);
    let bg: u16 = convert_color(bg);
    ff::tb_change_cell(x as c_uint, y as c_uint, ch as u32, fg, bg);
}
// Cell colors accepted by print/print_ch; mapped to u16 by convert_color.
enum color {
    black,
    red,
    green,
    yellow,
    blue,
    magenta,
    cyan,
    white
}
// Text attributes; bold_underline combines both bits (0x10 | 0x20).
enum style {
    normal,
    bold,
    underline,
    bold_underline
}
// Convenience functions
// Runs `f` bracketed by init()/shutdown() so the terminal is restored even
// if the task fails; a failure is logged rather than propagated.
fn with_term(f: fn~()) {
    init();
    let res = task::try(f);
    shutdown();
    if result::is_err(&res) {
        error!("with_term: An error occured.");
    }
}
/*
 * The event type matches struct tb_event from termbox.h
 */
struct raw_event {
    mut e_type: u8,
    mut e_mod: u8,
    mut key: u16,
    mut ch: u32,
    mut w: i32,
    mut h: i32
}
// Zeroed raw_event, used as an out-parameter for the FFI event calls.
fn nil_raw_event() -> raw_event {
    raw_event{mut e_type: 0, mut e_mod: 0, mut key: 0, mut ch: 0, mut w: 0, mut h: 0}
}
// High-level event, unpacked from a raw_event by unpack_event().
enum event {
    key_event({md: u8, key: u16, ch: u32}),
    resize_event({w: i32, h: i32}),
    no_event
}
/**
 * Get an event if within timeout milliseconds, otherwise return no_event.
 */
pub fn peek_event(timeout: uint) -> event {
    let ev = nil_raw_event();
    let rc = ff::tb_peek_event(ptr::addr_of(&ev), timeout as c_uint);
    unpack_event(rc, &ev)
}
/**
 * Blocking function to return next event.
 */
pub fn poll_event() -> event {
    let ev = nil_raw_event();
    let rc = ff::tb_poll_event(ptr::addr_of(&ev));
    unpack_event(rc, &ev)
}
/* helper fn
 *
 * Translates the C return code into the high-level event enum:
 * 0 -> no event
 * 1 -> key
 * 2 -> resize
 * -1 -> error (any unrecognized code fails the task)
 */
fn unpack_event(ev_type: c_int, ev: &raw_event) -> event {
    match ev_type {
        0 => no_event,
        1 => key_event({md: ev.e_mod, key: ev.key, ch: ev.ch}),
        2 => resize_event({w: ev.w, h: ev.h}),
        _ => fail
    }
}
|
fn tb_change_cell(x: c_uint, y: c_uint, ch: u32, fg: u16, bg: u16);
fn tb_select_input_mode(mode: c_int) -> c_int;
fn tb_set_clear_attributes(fg: u16, bg: u16);
|
random_line_split
|
mod_resolver.rs
|
use std::io;
use std::path::PathBuf;
use super::read_config;
use crate::{FileName, Input, Session};
/// Formats `input_file_name` with the config found next to it and asserts
/// that every path in `exp_misformatted_files` appears in the error report
/// (i.e. every expected nested module was actually resolved and checked).
fn verify_mod_resolution(input_file_name: &str, exp_misformatted_files: &[&str]) {
    let config = read_config(&PathBuf::from(input_file_name));
    let mut session = Session::<io::Stdout>::new(config, None);
    let report = session
        .format(Input::File(input_file_name.into()))
        .expect("Should not have had any execution errors");
    let errors_by_file = &report.internal.borrow().0;
    for expected in exp_misformatted_files.iter() {
        let key = FileName::Real(PathBuf::from(expected));
        assert!(errors_by_file.contains_key(&key));
    }
}
#[test]
fn nested_out_of_line_mods_loaded() {
    // See also https://github.com/rust-lang/rustfmt/issues/4874
    // Both nested out-of-line modules must be discovered and reported.
    verify_mod_resolution(
        "tests/mod-resolver/issue-4874/main.rs",
        &[
            "tests/mod-resolver/issue-4874/bar/baz.rs",
            "tests/mod-resolver/issue-4874/foo/qux.rs",
        ],
    );
}
#[test]
fn
|
() {
// See also https://github.com/rust-lang/rustfmt/issues/5063
verify_mod_resolution(
"tests/mod-resolver/issue-5063/main.rs",
&[
"tests/mod-resolver/issue-5063/foo/bar/baz.rs",
"tests/mod-resolver/issue-5063/foo.rs",
],
);
}
|
out_of_line_nested_inline_within_out_of_line
|
identifier_name
|
mod_resolver.rs
|
use std::io;
use std::path::PathBuf;
use super::read_config;
use crate::{FileName, Input, Session};
fn verify_mod_resolution(input_file_name: &str, exp_misformatted_files: &[&str]) {
let input_file = PathBuf::from(input_file_name);
let config = read_config(&input_file);
let mut session = Session::<io::Stdout>::new(config, None);
let report = session
.format(Input::File(input_file_name.into()))
.expect("Should not have had any execution errors");
let errors_by_file = &report.internal.borrow().0;
for exp_file in exp_misformatted_files {
assert!(errors_by_file.contains_key(&FileName::Real(PathBuf::from(exp_file))));
}
}
#[test]
fn nested_out_of_line_mods_loaded()
|
#[test]
fn out_of_line_nested_inline_within_out_of_line() {
// See also https://github.com/rust-lang/rustfmt/issues/5063
verify_mod_resolution(
"tests/mod-resolver/issue-5063/main.rs",
&[
"tests/mod-resolver/issue-5063/foo/bar/baz.rs",
"tests/mod-resolver/issue-5063/foo.rs",
],
);
}
|
{
// See also https://github.com/rust-lang/rustfmt/issues/4874
verify_mod_resolution(
"tests/mod-resolver/issue-4874/main.rs",
&[
"tests/mod-resolver/issue-4874/bar/baz.rs",
"tests/mod-resolver/issue-4874/foo/qux.rs",
],
);
}
|
identifier_body
|
mod_resolver.rs
|
use std::io;
use std::path::PathBuf;
use super::read_config;
use crate::{FileName, Input, Session};
fn verify_mod_resolution(input_file_name: &str, exp_misformatted_files: &[&str]) {
let input_file = PathBuf::from(input_file_name);
let config = read_config(&input_file);
let mut session = Session::<io::Stdout>::new(config, None);
let report = session
.format(Input::File(input_file_name.into()))
.expect("Should not have had any execution errors");
let errors_by_file = &report.internal.borrow().0;
for exp_file in exp_misformatted_files {
assert!(errors_by_file.contains_key(&FileName::Real(PathBuf::from(exp_file))));
}
}
#[test]
fn nested_out_of_line_mods_loaded() {
// See also https://github.com/rust-lang/rustfmt/issues/4874
verify_mod_resolution(
"tests/mod-resolver/issue-4874/main.rs",
&[
|
);
}
#[test]
fn out_of_line_nested_inline_within_out_of_line() {
// See also https://github.com/rust-lang/rustfmt/issues/5063
verify_mod_resolution(
"tests/mod-resolver/issue-5063/main.rs",
&[
"tests/mod-resolver/issue-5063/foo/bar/baz.rs",
"tests/mod-resolver/issue-5063/foo.rs",
],
);
}
|
"tests/mod-resolver/issue-4874/bar/baz.rs",
"tests/mod-resolver/issue-4874/foo/qux.rs",
],
|
random_line_split
|
filecache_test.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::cache::cache::Cache;
use crate::cache::filecache::Filecache;
use std::fs;
use std::path::Path;
#[test]
fn test_dircache() {
use std::env;
let mut dir = env::temp_dir();
dir.push("t_rex_test");
let basepath = format!("{}", &dir.display());
let _ = fs::remove_dir_all(&basepath);
let cache = Filecache {
basepath: basepath,
baseurl: Some("http://localhost:6767".to_string()),
};
let path = "tileset/0/1/2.pbf";
let fullpath = format!("{}/{}", cache.basepath, path);
let obj = "0123456789";
// Cache miss
assert_eq!(cache.read(path, |_| {}), false);
// Write into cache
let _ = cache.write(path, obj.as_bytes());
assert!(Path::new(&fullpath).exists());
// Cache hit
assert_eq!(cache.read(path, |_| {}), true);
// Read from cache
|
let _ = f.read_to_string(&mut s);
});
assert_eq!(&s, "0123456789");
}
|
let mut s = String::new();
cache.read(path, |f| {
|
random_line_split
|
filecache_test.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::cache::cache::Cache;
use crate::cache::filecache::Filecache;
use std::fs;
use std::path::Path;
#[test]
fn
|
() {
use std::env;
let mut dir = env::temp_dir();
dir.push("t_rex_test");
let basepath = format!("{}", &dir.display());
let _ = fs::remove_dir_all(&basepath);
let cache = Filecache {
basepath: basepath,
baseurl: Some("http://localhost:6767".to_string()),
};
let path = "tileset/0/1/2.pbf";
let fullpath = format!("{}/{}", cache.basepath, path);
let obj = "0123456789";
// Cache miss
assert_eq!(cache.read(path, |_| {}), false);
// Write into cache
let _ = cache.write(path, obj.as_bytes());
assert!(Path::new(&fullpath).exists());
// Cache hit
assert_eq!(cache.read(path, |_| {}), true);
// Read from cache
let mut s = String::new();
cache.read(path, |f| {
let _ = f.read_to_string(&mut s);
});
assert_eq!(&s, "0123456789");
}
|
test_dircache
|
identifier_name
|
filecache_test.rs
|
//
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::cache::cache::Cache;
use crate::cache::filecache::Filecache;
use std::fs;
use std::path::Path;
#[test]
fn test_dircache()
|
let _ = cache.write(path, obj.as_bytes());
assert!(Path::new(&fullpath).exists());
// Cache hit
assert_eq!(cache.read(path, |_| {}), true);
// Read from cache
let mut s = String::new();
cache.read(path, |f| {
let _ = f.read_to_string(&mut s);
});
assert_eq!(&s, "0123456789");
}
|
{
use std::env;
let mut dir = env::temp_dir();
dir.push("t_rex_test");
let basepath = format!("{}", &dir.display());
let _ = fs::remove_dir_all(&basepath);
let cache = Filecache {
basepath: basepath,
baseurl: Some("http://localhost:6767".to_string()),
};
let path = "tileset/0/1/2.pbf";
let fullpath = format!("{}/{}", cache.basepath, path);
let obj = "0123456789";
// Cache miss
assert_eq!(cache.read(path, |_| {}), false);
// Write into cache
|
identifier_body
|
itemrt.rs
|
//! Item rare tables for GC/BB. The ItemRT.gsl is organized much in the same
//! way as the ItemPT archive.
use std::io::{Read, Write, Cursor};
use std::io;
use psoserial::Serial;
use psoserial::util::*;
/// Parsed rare table entry. There are 101 Enemy entries and 30 Box entries in
/// a full rare table file.
#[derive(Clone, Copy, Debug, Default)]
pub struct RtEntry {
    // Encoded drop probability; decode with `probability()`.
    pub prob: u8,
    // Encoded item bytes; decode with `item_data()`.
    pub item_data: [u8; 3]
}
impl Serial for RtEntry {
    /// Writes `prob` followed by the three `item_data` bytes, in order.
    fn serialize(&self, dst: &mut Write) -> io::Result<()> {
        try!(self.prob.serialize(dst));
        try!(self.item_data.serialize(dst));
        Ok(())
    }
    /// Reads the fields back in the same order `serialize` wrote them.
    fn deserialize(src: &mut Read) -> io::Result<Self> {
        Ok(RtEntry {
            prob: try!(Serial::deserialize(src)),
            item_data: try!(Serial::deserialize(src))
        })
    }
}
impl RtEntry {
    /// Derive the drop probability from the encoded value.
    ///
    /// This function was derived from the Sylverant codebase, which itself
    /// comes from research done by Lee, much like the rest of the data formats.
    pub fn probability(&self) -> f64 {
        // High 5 bits select a shift (clamped at zero), low 3 bits a mantissa.
        let raw_shift = (self.prob >> 3) as isize - 4;
        let shift = if raw_shift < 0 { 0 } else { raw_shift };
        let expanded = ((2 << shift) * ((self.prob & 7) + 7)) as f64;
        // Normalize against the full 32-bit range.
        expanded / (0x100000000u64 as f64)
    }

    /// Derive item data from the encoded value: the three stored bytes
    /// assembled little-endian into a u32.
    pub fn item_data(&self) -> u32 {
        (self.item_data[0] as u32)
            | ((self.item_data[1] as u32) << 8)
            | ((self.item_data[2] as u32) << 16)
    }
}
/// Full rare drop table for a single section ID.
#[derive(Clone, Debug)]
pub struct
|
{
pub enemy_rares: Vec<RtEntry>,
pub box_rares: Vec<RtEntry>
}
impl Serial for RtSet {
    /// Writes the fixed layout: 101 enemy entries followed by 30 box entries.
    fn serialize(&self, dst: &mut Write) -> io::Result<()> {
        try!(write_array(&self.enemy_rares, 101, dst));
        try!(write_array(&self.box_rares, 30, dst));
        Ok(())
    }
    /// Reads the same fixed 101 + 30 entry layout back.
    fn deserialize(src: &mut Read) -> io::Result<Self> {
        let enemy_rares = try!(read_array(101, src));
        let box_rares = try!(read_array(30, src));
        Ok(RtSet {
            enemy_rares: enemy_rares,
            box_rares: box_rares
        })
    }
}
/// Full rare drop table for a full episode.
#[derive(Clone, Debug)]
pub struct ItemRT {
    // One RtSet per section ID; always exactly 10 entries.
    sections: Vec<RtSet>
}

impl ItemRT {
    /// Parse one `RtSet` from each of exactly 10 section buffers.
    ///
    /// Returns an `Other` I/O error when the buffer count is wrong, and
    /// propagates any deserialization error from the individual sections.
    pub fn load_from_buffers(files: &[&[u8]]) -> io::Result<ItemRT> {
        if files.len() != 10 {
            return Err(io::Error::new(io::ErrorKind::Other, "Not enough files, need 10"));
        }
        let mut sections = Vec::with_capacity(10);
        for buf in files.iter() {
            let mut reader = Cursor::new(buf);
            sections.push(try!(RtSet::deserialize(&mut reader)));
        }
        Ok(ItemRT { sections: sections })
    }
}
|
RtSet
|
identifier_name
|
itemrt.rs
|
//! Item rare tables for GC/BB. The ItemRT.gsl is organized much in the same
//! way as the ItemPT archive.
use std::io::{Read, Write, Cursor};
use std::io;
use psoserial::Serial;
use psoserial::util::*;
/// Parsed rare table entry. There are 101 Enemy entries and 30 Box entries in
/// a full rare table file.
#[derive(Clone, Copy, Debug, Default)]
pub struct RtEntry {
pub prob: u8,
pub item_data: [u8; 3]
}
impl Serial for RtEntry {
fn serialize(&self, dst: &mut Write) -> io::Result<()> {
try!(self.prob.serialize(dst));
try!(self.item_data.serialize(dst));
Ok(())
}
fn deserialize(src: &mut Read) -> io::Result<Self> {
Ok(RtEntry {
prob: try!(Serial::deserialize(src)),
item_data: try!(Serial::deserialize(src))
})
}
}
impl RtEntry {
/// Derive the drop probability from the encoded value.
///
/// This function was derived from the Sylverant codebase, which itself
/// comes from research done by Lee, much like the rest of the data formats.
pub fn probability(&self) -> f64 {
let tmp = match (self.prob >> 3) as isize - 4 {
v if v < 0 => 0,
v => v
};
let expanded = ((2 << tmp) * ((self.prob & 7) + 7)) as f64;
expanded / (0x100000000u64 as f64)
}
/// Derive item data from the encoded value.
pub fn item_data(&self) -> u32 {
(self.item_data[0] as u32)
| ((self.item_data[1] as u32) << 8)
| ((self.item_data[2] as u32) << 16)
}
}
/// Full rare drop table for a single section ID.
#[derive(Clone, Debug)]
pub struct RtSet {
pub enemy_rares: Vec<RtEntry>,
pub box_rares: Vec<RtEntry>
}
impl Serial for RtSet {
fn serialize(&self, dst: &mut Write) -> io::Result<()> {
try!(write_array(&self.enemy_rares, 101, dst));
try!(write_array(&self.box_rares, 30, dst));
Ok(())
}
fn deserialize(src: &mut Read) -> io::Result<Self>
|
}
/// Full rare drop table for a full episode.
#[derive(Clone, Debug)]
pub struct ItemRT {
sections: Vec<RtSet>
}
impl ItemRT {
pub fn load_from_buffers(files: &[&[u8]]) -> io::Result<ItemRT> {
if files.len()!= 10 {
return Err(io::Error::new(io::ErrorKind::Other, "Not enough files, need 10"));
}
let mut sections = Vec::with_capacity(10);
for f in files.iter() {
let mut cursor = Cursor::new(f);
let section = try!(RtSet::deserialize(&mut cursor));
sections.push(section);
}
Ok(ItemRT {
sections: sections
})
}
}
|
{
let enemy_rares = try!(read_array(101, src));
let box_rares = try!(read_array(30, src));
Ok(RtSet {
enemy_rares: enemy_rares,
box_rares: box_rares
})
}
|
identifier_body
|
itemrt.rs
|
//! Item rare tables for GC/BB. The ItemRT.gsl is organized much in the same
//! way as the ItemPT archive.
use std::io::{Read, Write, Cursor};
use std::io;
use psoserial::Serial;
use psoserial::util::*;
/// Parsed rare table entry. There are 101 Enemy entries and 30 Box entries in
/// a full rare table file.
#[derive(Clone, Copy, Debug, Default)]
pub struct RtEntry {
pub prob: u8,
pub item_data: [u8; 3]
}
impl Serial for RtEntry {
fn serialize(&self, dst: &mut Write) -> io::Result<()> {
try!(self.prob.serialize(dst));
|
Ok(())
}
fn deserialize(src: &mut Read) -> io::Result<Self> {
Ok(RtEntry {
prob: try!(Serial::deserialize(src)),
item_data: try!(Serial::deserialize(src))
})
}
}
impl RtEntry {
/// Derive the drop probability from the encoded value.
///
/// This function was derived from the Sylverant codebase, which itself
/// comes from research done by Lee, much like the rest of the data formats.
pub fn probability(&self) -> f64 {
let tmp = match (self.prob >> 3) as isize - 4 {
v if v < 0 => 0,
v => v
};
let expanded = ((2 << tmp) * ((self.prob & 7) + 7)) as f64;
expanded / (0x100000000u64 as f64)
}
/// Derive item data from the encoded value.
pub fn item_data(&self) -> u32 {
(self.item_data[0] as u32)
| ((self.item_data[1] as u32) << 8)
| ((self.item_data[2] as u32) << 16)
}
}
/// Full rare drop table for a single section ID.
#[derive(Clone, Debug)]
pub struct RtSet {
pub enemy_rares: Vec<RtEntry>,
pub box_rares: Vec<RtEntry>
}
impl Serial for RtSet {
fn serialize(&self, dst: &mut Write) -> io::Result<()> {
try!(write_array(&self.enemy_rares, 101, dst));
try!(write_array(&self.box_rares, 30, dst));
Ok(())
}
fn deserialize(src: &mut Read) -> io::Result<Self> {
let enemy_rares = try!(read_array(101, src));
let box_rares = try!(read_array(30, src));
Ok(RtSet {
enemy_rares: enemy_rares,
box_rares: box_rares
})
}
}
/// Full rare drop table for a full episode.
#[derive(Clone, Debug)]
pub struct ItemRT {
sections: Vec<RtSet>
}
impl ItemRT {
pub fn load_from_buffers(files: &[&[u8]]) -> io::Result<ItemRT> {
if files.len()!= 10 {
return Err(io::Error::new(io::ErrorKind::Other, "Not enough files, need 10"));
}
let mut sections = Vec::with_capacity(10);
for f in files.iter() {
let mut cursor = Cursor::new(f);
let section = try!(RtSet::deserialize(&mut cursor));
sections.push(section);
}
Ok(ItemRT {
sections: sections
})
}
}
|
try!(self.item_data.serialize(dst));
|
random_line_split
|
TestSin.rs
|
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
float __attribute__((kernel)) testSinFloatFloat(float in) {
return sin(in);
|
}
float2 __attribute__((kernel)) testSinFloat2Float2(float2 in) {
return sin(in);
}
float3 __attribute__((kernel)) testSinFloat3Float3(float3 in) {
return sin(in);
}
float4 __attribute__((kernel)) testSinFloat4Float4(float4 in) {
return sin(in);
}
|
random_line_split
|
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec, VecDeque};
use core::cmp::Ordering;
use core::mem;
use spin::Mutex;
use context::arch;
use context::file::FileDescriptor;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use device;
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::{Event, SigAction};
use syscall::flag::SIG_DFL;
use sync::{WaitMap, WaitQueue};
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
int_like!(ContextId, AtomicContextId, usize, AtomicUsize);
/// The status of a context - used for scheduling
/// See `syscall::process::waitpid` and the `sync` module for examples of usage
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Stopped(usize),
Exited(usize)
}
#[derive(Copy, Clone, Debug)]
pub struct WaitpidKey {
pub pid: Option<ContextId>,
pub pgid: Option<ContextId>,
}
impl Ord for WaitpidKey {
fn cmp(&self, other: &WaitpidKey) -> Ordering {
// If both have pid set, compare that
if let Some(s_pid) = self.pid {
if let Some(o_pid) = other.pid {
return s_pid.cmp(&o_pid);
}
}
// If both have pgid set, compare that
if let Some(s_pgid) = self.pgid {
if let Some(o_pgid) = other.pgid {
return s_pgid.cmp(&o_pgid);
}
}
// If either has pid set, it is greater
if self.pid.is_some() {
return Ordering::Greater;
}
if other.pid.is_some() {
return Ordering::Less;
}
// If either has pgid set, it is greater
if self.pgid.is_some() {
return Ordering::Greater;
}
if other.pgid.is_some() {
return Ordering::Less;
}
// If all pid and pgid are None, they are equal
Ordering::Equal
}
}
impl PartialOrd for WaitpidKey {
fn partial_cmp(&self, other: &WaitpidKey) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for WaitpidKey {
fn eq(&self, other: &WaitpidKey) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for WaitpidKey {}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: ContextId,
/// The group ID of this context
pub pgid: ContextId,
/// The ID of the parent context
pub ppid: ContextId,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The real namespace id
pub rns: SchemeNamespace,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// The effective namespace id
pub ens: SchemeNamespace,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Current system call
pub syscall: Option<(usize, usize, usize, usize, usize, usize)>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<WaitpidKey, (ContextId, usize)>>,
/// Context should handle pending signals
pub pending: VecDeque<u8>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::Context,
/// Kernel FX - used to store SIMD and FPU registers on context switch
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Kernel signal backup
pub ksig: Option<(arch::Context, Option<Box<[u8]>>, Option<Box<[u8]>>)>,
|
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User signal stack
pub sigstack: Option<Memory>,
/// User Thread local storage
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Box<[u8]>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Singal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
}
impl Context {
pub fn new(id: ContextId) -> Context {
Context {
id: id,
pgid: id,
ppid: ContextId::from(0),
ruid: 0,
rgid: 0,
rns: SchemeNamespace::from(0),
euid: 0,
egid: 0,
ens: SchemeNamespace::from(0),
status: Status::Blocked,
running: false,
cpu_id: None,
syscall: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
pending: VecDeque::new(),
wake: None,
arch: arch::Context::new(),
kfx: None,
kstack: None,
ksig: None,
ksig_restore: false,
image: Vec::new(),
heap: None,
stack: None,
sigstack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new().into_boxed_slice())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new())),
actions: Arc::new(Mutex::new(vec![(
SigAction {
sa_handler: unsafe { mem::transmute(SIG_DFL) },
sa_mask: [0; 2],
sa_flags: 0,
},
0
); 128])),
}
}
/// Make a relative path absolute
/// Given a cwd of "scheme:/path"
/// This function will turn "foo" into "scheme:/path/foo"
/// "/foo" will turn into "scheme:/foo"
/// "bar:/foo" will be used directly, as it is already absolute
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
let mut canon = if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
let mut canon = if!path.starts_with(b"/") {
let mut c = cwd.clone();
if! c.ends_with(b"/") {
c.push(b'/');
}
c
} else {
cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec()
};
canon.extend_from_slice(&path);
canon
} else {
path.to_vec()
};
// NOTE: assumes the scheme does not include anything like "../" or "./"
let mut result = {
let parts = canon.split(|&c| c == b'/')
.filter(|&part| part!= b".")
.rev()
.scan(0, |nskip, part| {
if part == b"." {
Some(None)
} else if part == b".." {
*nskip += 1;
Some(None)
} else if *nskip > 0 {
*nskip -= 1;
Some(None)
} else {
Some(Some(part))
}
})
.filter_map(|x| x)
.filter(|x|!x.is_empty())
.collect::<Vec<_>>();
parts
.iter()
.rev()
.fold(Vec::new(), |mut vec, &part| {
vec.extend_from_slice(part);
vec.push(b'/');
vec
})
};
result.pop(); // remove extra '/'
// replace with the root of the scheme if it's empty
if result.is_empty() {
let pos = canon.iter()
.position(|&b| b == b':')
.map_or(canon.len(), |p| p + 1);
canon.truncate(pos);
canon
} else {
result
}
}
/// Block the context, and return true if it was runnable before being blocked
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
/// Unblock context, and return true if it was blocked before being marked runnable
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if cfg!(feature = "multi_core") {
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { device::local_apic::LOCAL_APIC.set_icr(3 << 18 | 1 << 14 | 0x40) };
}
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: FileDescriptor) -> Option<FileHandle> {
self.add_file_min(file, 0)
}
/// Add a file to the lowest available slot greater than or equal to min.
/// Return the file descriptor number or None if no slot was found
pub fn add_file_min(&self, file: FileDescriptor, min: usize) -> Option<FileHandle> {
let mut files = self.files.lock();
for (i, file_option) in files.iter_mut().enumerate() {
if file_option.is_none() && i >= min {
*file_option = Some(file);
return Some(FileHandle::from(i));
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
if len >= min {
files.push(Some(file));
Some(FileHandle::from(len))
} else {
drop(files);
self.insert_file(FileHandle::from(min), file)
}
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let files = self.files.lock();
if i.into() < files.len() {
files[i.into()].clone()
} else {
None
}
}
/// Insert a file with a specific handle number. This is used by dup2
/// Return the file descriptor number or None if the slot was not empty, or i was invalid
pub fn insert_file(&self, i: FileHandle, file: FileDescriptor) -> Option<FileHandle> {
let mut files = self.files.lock();
if i.into() < super::CONTEXT_MAX_FILES {
while i.into() >= files.len() {
files.push(None);
}
if files[i.into()].is_none() {
files[i.into()] = Some(file);
Some(i)
} else {
None
}
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let mut files = self.files.lock();
if i.into() < files.len() {
files[i.into()].take()
} else {
None
}
}
}
|
/// Restore ksig context on next switch
pub ksig_restore: bool,
/// Executable image
|
random_line_split
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec, VecDeque};
use core::cmp::Ordering;
use core::mem;
use spin::Mutex;
use context::arch;
use context::file::FileDescriptor;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use device;
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::{Event, SigAction};
use syscall::flag::SIG_DFL;
use sync::{WaitMap, WaitQueue};
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
int_like!(ContextId, AtomicContextId, usize, AtomicUsize);
/// The status of a context - used for scheduling
/// See `syscall::process::waitpid` and the `sync` module for examples of usage
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Stopped(usize),
Exited(usize)
}
#[derive(Copy, Clone, Debug)]
pub struct WaitpidKey {
pub pid: Option<ContextId>,
pub pgid: Option<ContextId>,
}
impl Ord for WaitpidKey {
fn cmp(&self, other: &WaitpidKey) -> Ordering {
// If both have pid set, compare that
if let Some(s_pid) = self.pid {
if let Some(o_pid) = other.pid {
return s_pid.cmp(&o_pid);
}
}
// If both have pgid set, compare that
if let Some(s_pgid) = self.pgid {
if let Some(o_pgid) = other.pgid {
return s_pgid.cmp(&o_pgid);
}
}
// If either has pid set, it is greater
if self.pid.is_some() {
return Ordering::Greater;
}
if other.pid.is_some() {
return Ordering::Less;
}
// If either has pgid set, it is greater
if self.pgid.is_some() {
return Ordering::Greater;
}
if other.pgid.is_some() {
return Ordering::Less;
}
// If all pid and pgid are None, they are equal
Ordering::Equal
}
}
impl PartialOrd for WaitpidKey {
fn partial_cmp(&self, other: &WaitpidKey) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for WaitpidKey {
fn eq(&self, other: &WaitpidKey) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for WaitpidKey {}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: ContextId,
/// The group ID of this context
pub pgid: ContextId,
/// The ID of the parent context
pub ppid: ContextId,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The real namespace id
pub rns: SchemeNamespace,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// The effective namespace id
pub ens: SchemeNamespace,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Current system call
pub syscall: Option<(usize, usize, usize, usize, usize, usize)>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<WaitpidKey, (ContextId, usize)>>,
/// Context should handle pending signals
pub pending: VecDeque<u8>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::Context,
/// Kernel FX - used to store SIMD and FPU registers on context switch
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Kernel signal backup
pub ksig: Option<(arch::Context, Option<Box<[u8]>>, Option<Box<[u8]>>)>,
/// Restore ksig context on next switch
pub ksig_restore: bool,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User signal stack
pub sigstack: Option<Memory>,
/// User Thread local storage
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Box<[u8]>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Singal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
}
impl Context {
pub fn new(id: ContextId) -> Context {
Context {
id: id,
pgid: id,
ppid: ContextId::from(0),
ruid: 0,
rgid: 0,
rns: SchemeNamespace::from(0),
euid: 0,
egid: 0,
ens: SchemeNamespace::from(0),
status: Status::Blocked,
running: false,
cpu_id: None,
syscall: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
pending: VecDeque::new(),
wake: None,
arch: arch::Context::new(),
kfx: None,
kstack: None,
ksig: None,
ksig_restore: false,
image: Vec::new(),
heap: None,
stack: None,
sigstack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new().into_boxed_slice())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new())),
actions: Arc::new(Mutex::new(vec![(
SigAction {
sa_handler: unsafe { mem::transmute(SIG_DFL) },
sa_mask: [0; 2],
sa_flags: 0,
},
0
); 128])),
}
}
/// Make a relative path absolute
/// Given a cwd of "scheme:/path"
/// This function will turn "foo" into "scheme:/path/foo"
/// "/foo" will turn into "scheme:/foo"
/// "bar:/foo" will be used directly, as it is already absolute
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
let mut canon = if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
let mut canon = if!path.starts_with(b"/") {
let mut c = cwd.clone();
if! c.ends_with(b"/") {
c.push(b'/');
}
c
} else {
cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec()
};
canon.extend_from_slice(&path);
canon
} else {
path.to_vec()
};
// NOTE: assumes the scheme does not include anything like "../" or "./"
let mut result = {
let parts = canon.split(|&c| c == b'/')
.filter(|&part| part!= b".")
.rev()
.scan(0, |nskip, part| {
if part == b"." {
Some(None)
} else if part == b".." {
*nskip += 1;
Some(None)
} else if *nskip > 0 {
*nskip -= 1;
Some(None)
} else {
Some(Some(part))
}
})
.filter_map(|x| x)
.filter(|x|!x.is_empty())
.collect::<Vec<_>>();
parts
.iter()
.rev()
.fold(Vec::new(), |mut vec, &part| {
vec.extend_from_slice(part);
vec.push(b'/');
vec
})
};
result.pop(); // remove extra '/'
// replace with the root of the scheme if it's empty
if result.is_empty() {
let pos = canon.iter()
.position(|&b| b == b':')
.map_or(canon.len(), |p| p + 1);
canon.truncate(pos);
canon
} else {
result
}
}
/// Block the context, and return true if it was runnable before being blocked
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
/// Unblock context, and return true if it was blocked before being marked runnable
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if cfg!(feature = "multi_core") {
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { device::local_apic::LOCAL_APIC.set_icr(3 << 18 | 1 << 14 | 0x40) };
}
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: FileDescriptor) -> Option<FileHandle> {
self.add_file_min(file, 0)
}
/// Add a file to the lowest available slot greater than or equal to min.
/// Return the file descriptor number or None if no slot was found
pub fn
|
(&self, file: FileDescriptor, min: usize) -> Option<FileHandle> {
let mut files = self.files.lock();
for (i, file_option) in files.iter_mut().enumerate() {
if file_option.is_none() && i >= min {
*file_option = Some(file);
return Some(FileHandle::from(i));
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
if len >= min {
files.push(Some(file));
Some(FileHandle::from(len))
} else {
drop(files);
self.insert_file(FileHandle::from(min), file)
}
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let files = self.files.lock();
if i.into() < files.len() {
files[i.into()].clone()
} else {
None
}
}
/// Insert a file with a specific handle number. This is used by dup2
/// Return the file descriptor number or None if the slot was not empty, or i was invalid
pub fn insert_file(&self, i: FileHandle, file: FileDescriptor) -> Option<FileHandle> {
let mut files = self.files.lock();
if i.into() < super::CONTEXT_MAX_FILES {
while i.into() >= files.len() {
files.push(None);
}
if files[i.into()].is_none() {
files[i.into()] = Some(file);
Some(i)
} else {
None
}
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let mut files = self.files.lock();
if i.into() < files.len() {
files[i.into()].take()
} else {
None
}
}
}
|
add_file_min
|
identifier_name
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec, VecDeque};
use core::cmp::Ordering;
use core::mem;
use spin::Mutex;
use context::arch;
use context::file::FileDescriptor;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use device;
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::{Event, SigAction};
use syscall::flag::SIG_DFL;
use sync::{WaitMap, WaitQueue};
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
int_like!(ContextId, AtomicContextId, usize, AtomicUsize);
/// The status of a context - used for scheduling
/// See `syscall::process::waitpid` and the `sync` module for examples of usage
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Stopped(usize),
Exited(usize)
}
#[derive(Copy, Clone, Debug)]
pub struct WaitpidKey {
pub pid: Option<ContextId>,
pub pgid: Option<ContextId>,
}
impl Ord for WaitpidKey {
fn cmp(&self, other: &WaitpidKey) -> Ordering {
// If both have pid set, compare that
if let Some(s_pid) = self.pid {
if let Some(o_pid) = other.pid {
return s_pid.cmp(&o_pid);
}
}
// If both have pgid set, compare that
if let Some(s_pgid) = self.pgid {
if let Some(o_pgid) = other.pgid {
return s_pgid.cmp(&o_pgid);
}
}
// If either has pid set, it is greater
if self.pid.is_some() {
return Ordering::Greater;
}
if other.pid.is_some() {
return Ordering::Less;
}
// If either has pgid set, it is greater
if self.pgid.is_some() {
return Ordering::Greater;
}
if other.pgid.is_some() {
return Ordering::Less;
}
// If all pid and pgid are None, they are equal
Ordering::Equal
}
}
impl PartialOrd for WaitpidKey {
fn partial_cmp(&self, other: &WaitpidKey) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for WaitpidKey {
fn eq(&self, other: &WaitpidKey) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for WaitpidKey {}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: ContextId,
/// The group ID of this context
pub pgid: ContextId,
/// The ID of the parent context
pub ppid: ContextId,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The real namespace id
pub rns: SchemeNamespace,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// The effective namespace id
pub ens: SchemeNamespace,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Current system call
pub syscall: Option<(usize, usize, usize, usize, usize, usize)>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<WaitpidKey, (ContextId, usize)>>,
/// Context should handle pending signals
pub pending: VecDeque<u8>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::Context,
/// Kernel FX - used to store SIMD and FPU registers on context switch
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Kernel signal backup
pub ksig: Option<(arch::Context, Option<Box<[u8]>>, Option<Box<[u8]>>)>,
/// Restore ksig context on next switch
pub ksig_restore: bool,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User signal stack
pub sigstack: Option<Memory>,
/// User Thread local storage
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Box<[u8]>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Singal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
}
impl Context {
pub fn new(id: ContextId) -> Context {
Context {
id: id,
pgid: id,
ppid: ContextId::from(0),
ruid: 0,
rgid: 0,
rns: SchemeNamespace::from(0),
euid: 0,
egid: 0,
ens: SchemeNamespace::from(0),
status: Status::Blocked,
running: false,
cpu_id: None,
syscall: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
pending: VecDeque::new(),
wake: None,
arch: arch::Context::new(),
kfx: None,
kstack: None,
ksig: None,
ksig_restore: false,
image: Vec::new(),
heap: None,
stack: None,
sigstack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new().into_boxed_slice())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new())),
actions: Arc::new(Mutex::new(vec![(
SigAction {
sa_handler: unsafe { mem::transmute(SIG_DFL) },
sa_mask: [0; 2],
sa_flags: 0,
},
0
); 128])),
}
}
/// Make a relative path absolute
/// Given a cwd of "scheme:/path"
/// This function will turn "foo" into "scheme:/path/foo"
/// "/foo" will turn into "scheme:/foo"
/// "bar:/foo" will be used directly, as it is already absolute
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
let mut canon = if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
let mut canon = if!path.starts_with(b"/") {
let mut c = cwd.clone();
if! c.ends_with(b"/") {
c.push(b'/');
}
c
} else {
cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec()
};
canon.extend_from_slice(&path);
canon
} else {
path.to_vec()
};
// NOTE: assumes the scheme does not include anything like "../" or "./"
let mut result = {
let parts = canon.split(|&c| c == b'/')
.filter(|&part| part!= b".")
.rev()
.scan(0, |nskip, part| {
if part == b"." {
Some(None)
} else if part == b".." {
*nskip += 1;
Some(None)
} else if *nskip > 0 {
*nskip -= 1;
Some(None)
} else {
Some(Some(part))
}
})
.filter_map(|x| x)
.filter(|x|!x.is_empty())
.collect::<Vec<_>>();
parts
.iter()
.rev()
.fold(Vec::new(), |mut vec, &part| {
vec.extend_from_slice(part);
vec.push(b'/');
vec
})
};
result.pop(); // remove extra '/'
// replace with the root of the scheme if it's empty
if result.is_empty() {
let pos = canon.iter()
.position(|&b| b == b':')
.map_or(canon.len(), |p| p + 1);
canon.truncate(pos);
canon
} else {
result
}
}
/// Block the context, and return true if it was runnable before being blocked
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
/// Unblock context, and return true if it was blocked before being marked runnable
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if cfg!(feature = "multi_core") {
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { device::local_apic::LOCAL_APIC.set_icr(3 << 18 | 1 << 14 | 0x40) };
}
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: FileDescriptor) -> Option<FileHandle>
|
/// Add a file to the lowest available slot greater than or equal to min.
/// Return the file descriptor number or None if no slot was found
pub fn add_file_min(&self, file: FileDescriptor, min: usize) -> Option<FileHandle> {
let mut files = self.files.lock();
for (i, file_option) in files.iter_mut().enumerate() {
if file_option.is_none() && i >= min {
*file_option = Some(file);
return Some(FileHandle::from(i));
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
if len >= min {
files.push(Some(file));
Some(FileHandle::from(len))
} else {
drop(files);
self.insert_file(FileHandle::from(min), file)
}
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let files = self.files.lock();
if i.into() < files.len() {
files[i.into()].clone()
} else {
None
}
}
/// Insert a file with a specific handle number. This is used by dup2
/// Return the file descriptor number or None if the slot was not empty, or i was invalid
pub fn insert_file(&self, i: FileHandle, file: FileDescriptor) -> Option<FileHandle> {
let mut files = self.files.lock();
if i.into() < super::CONTEXT_MAX_FILES {
while i.into() >= files.len() {
files.push(None);
}
if files[i.into()].is_none() {
files[i.into()] = Some(file);
Some(i)
} else {
None
}
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let mut files = self.files.lock();
if i.into() < files.len() {
files[i.into()].take()
} else {
None
}
}
}
|
{
self.add_file_min(file, 0)
}
|
identifier_body
|
context.rs
|
use alloc::arc::Arc;
use alloc::boxed::Box;
use alloc::{BTreeMap, Vec, VecDeque};
use core::cmp::Ordering;
use core::mem;
use spin::Mutex;
use context::arch;
use context::file::FileDescriptor;
use context::memory::{Grant, Memory, SharedMemory, Tls};
use device;
use scheme::{SchemeNamespace, FileHandle};
use syscall::data::{Event, SigAction};
use syscall::flag::SIG_DFL;
use sync::{WaitMap, WaitQueue};
/// Unique identifier for a context (i.e. `pid`).
use ::core::sync::atomic::AtomicUsize;
int_like!(ContextId, AtomicContextId, usize, AtomicUsize);
/// The status of a context - used for scheduling
/// See `syscall::process::waitpid` and the `sync` module for examples of usage
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Status {
Runnable,
Blocked,
Stopped(usize),
Exited(usize)
}
#[derive(Copy, Clone, Debug)]
pub struct WaitpidKey {
pub pid: Option<ContextId>,
pub pgid: Option<ContextId>,
}
impl Ord for WaitpidKey {
fn cmp(&self, other: &WaitpidKey) -> Ordering {
// If both have pid set, compare that
if let Some(s_pid) = self.pid {
if let Some(o_pid) = other.pid {
return s_pid.cmp(&o_pid);
}
}
// If both have pgid set, compare that
if let Some(s_pgid) = self.pgid {
if let Some(o_pgid) = other.pgid {
return s_pgid.cmp(&o_pgid);
}
}
// If either has pid set, it is greater
if self.pid.is_some() {
return Ordering::Greater;
}
if other.pid.is_some() {
return Ordering::Less;
}
// If either has pgid set, it is greater
if self.pgid.is_some() {
return Ordering::Greater;
}
if other.pgid.is_some() {
return Ordering::Less;
}
// If all pid and pgid are None, they are equal
Ordering::Equal
}
}
impl PartialOrd for WaitpidKey {
fn partial_cmp(&self, other: &WaitpidKey) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for WaitpidKey {
fn eq(&self, other: &WaitpidKey) -> bool {
self.cmp(other) == Ordering::Equal
}
}
impl Eq for WaitpidKey {}
/// A context, which identifies either a process or a thread
#[derive(Debug)]
pub struct Context {
/// The ID of this context
pub id: ContextId,
/// The group ID of this context
pub pgid: ContextId,
/// The ID of the parent context
pub ppid: ContextId,
/// The real user id
pub ruid: u32,
/// The real group id
pub rgid: u32,
/// The real namespace id
pub rns: SchemeNamespace,
/// The effective user id
pub euid: u32,
/// The effective group id
pub egid: u32,
/// The effective namespace id
pub ens: SchemeNamespace,
/// Status of context
pub status: Status,
/// Context running or not
pub running: bool,
/// CPU ID, if locked
pub cpu_id: Option<usize>,
/// Current system call
pub syscall: Option<(usize, usize, usize, usize, usize, usize)>,
/// Context is halting parent
pub vfork: bool,
/// Context is being waited on
pub waitpid: Arc<WaitMap<WaitpidKey, (ContextId, usize)>>,
/// Context should handle pending signals
pub pending: VecDeque<u8>,
/// Context should wake up at specified time
pub wake: Option<(u64, u64)>,
/// The architecture specific context
pub arch: arch::Context,
/// Kernel FX - used to store SIMD and FPU registers on context switch
pub kfx: Option<Box<[u8]>>,
/// Kernel stack
pub kstack: Option<Box<[u8]>>,
/// Kernel signal backup
pub ksig: Option<(arch::Context, Option<Box<[u8]>>, Option<Box<[u8]>>)>,
/// Restore ksig context on next switch
pub ksig_restore: bool,
/// Executable image
pub image: Vec<SharedMemory>,
/// User heap
pub heap: Option<SharedMemory>,
/// User stack
pub stack: Option<Memory>,
/// User signal stack
pub sigstack: Option<Memory>,
/// User Thread local storage
pub tls: Option<Tls>,
/// User grants
pub grants: Arc<Mutex<Vec<Grant>>>,
/// The name of the context
pub name: Arc<Mutex<Box<[u8]>>>,
/// The current working directory
pub cwd: Arc<Mutex<Vec<u8>>>,
/// Kernel events
pub events: Arc<WaitQueue<Event>>,
/// The process environment
pub env: Arc<Mutex<BTreeMap<Box<[u8]>, Arc<Mutex<Vec<u8>>>>>>,
/// The open files in the scheme
pub files: Arc<Mutex<Vec<Option<FileDescriptor>>>>,
/// Singal actions
pub actions: Arc<Mutex<Vec<(SigAction, usize)>>>,
}
impl Context {
pub fn new(id: ContextId) -> Context {
Context {
id: id,
pgid: id,
ppid: ContextId::from(0),
ruid: 0,
rgid: 0,
rns: SchemeNamespace::from(0),
euid: 0,
egid: 0,
ens: SchemeNamespace::from(0),
status: Status::Blocked,
running: false,
cpu_id: None,
syscall: None,
vfork: false,
waitpid: Arc::new(WaitMap::new()),
pending: VecDeque::new(),
wake: None,
arch: arch::Context::new(),
kfx: None,
kstack: None,
ksig: None,
ksig_restore: false,
image: Vec::new(),
heap: None,
stack: None,
sigstack: None,
tls: None,
grants: Arc::new(Mutex::new(Vec::new())),
name: Arc::new(Mutex::new(Vec::new().into_boxed_slice())),
cwd: Arc::new(Mutex::new(Vec::new())),
events: Arc::new(WaitQueue::new()),
env: Arc::new(Mutex::new(BTreeMap::new())),
files: Arc::new(Mutex::new(Vec::new())),
actions: Arc::new(Mutex::new(vec![(
SigAction {
sa_handler: unsafe { mem::transmute(SIG_DFL) },
sa_mask: [0; 2],
sa_flags: 0,
},
0
); 128])),
}
}
/// Make a relative path absolute
/// Given a cwd of "scheme:/path"
/// This function will turn "foo" into "scheme:/path/foo"
/// "/foo" will turn into "scheme:/foo"
/// "bar:/foo" will be used directly, as it is already absolute
pub fn canonicalize(&self, path: &[u8]) -> Vec<u8> {
let mut canon = if path.iter().position(|&b| b == b':').is_none() {
let cwd = self.cwd.lock();
let mut canon = if!path.starts_with(b"/") {
let mut c = cwd.clone();
if! c.ends_with(b"/") {
c.push(b'/');
}
c
} else {
cwd[..cwd.iter().position(|&b| b == b':').map_or(1, |i| i + 1)].to_vec()
};
canon.extend_from_slice(&path);
canon
} else {
path.to_vec()
};
// NOTE: assumes the scheme does not include anything like "../" or "./"
let mut result = {
let parts = canon.split(|&c| c == b'/')
.filter(|&part| part!= b".")
.rev()
.scan(0, |nskip, part| {
if part == b"." {
Some(None)
} else if part == b".." {
*nskip += 1;
Some(None)
} else if *nskip > 0 {
*nskip -= 1;
Some(None)
} else {
Some(Some(part))
}
})
.filter_map(|x| x)
.filter(|x|!x.is_empty())
.collect::<Vec<_>>();
parts
.iter()
.rev()
.fold(Vec::new(), |mut vec, &part| {
vec.extend_from_slice(part);
vec.push(b'/');
vec
})
};
result.pop(); // remove extra '/'
// replace with the root of the scheme if it's empty
if result.is_empty() {
let pos = canon.iter()
.position(|&b| b == b':')
.map_or(canon.len(), |p| p + 1);
canon.truncate(pos);
canon
} else {
result
}
}
/// Block the context, and return true if it was runnable before being blocked
pub fn block(&mut self) -> bool {
if self.status == Status::Runnable {
self.status = Status::Blocked;
true
} else {
false
}
}
/// Unblock context, and return true if it was blocked before being marked runnable
pub fn unblock(&mut self) -> bool {
if self.status == Status::Blocked {
self.status = Status::Runnable;
if cfg!(feature = "multi_core") {
if let Some(cpu_id) = self.cpu_id {
if cpu_id!= ::cpu_id() {
// Send IPI if not on current CPU
// TODO: Make this more architecture independent
unsafe { device::local_apic::LOCAL_APIC.set_icr(3 << 18 | 1 << 14 | 0x40) };
}
}
}
true
} else {
false
}
}
/// Add a file to the lowest available slot.
/// Return the file descriptor number or None if no slot was found
pub fn add_file(&self, file: FileDescriptor) -> Option<FileHandle> {
self.add_file_min(file, 0)
}
/// Add a file to the lowest available slot greater than or equal to min.
/// Return the file descriptor number or None if no slot was found
pub fn add_file_min(&self, file: FileDescriptor, min: usize) -> Option<FileHandle> {
let mut files = self.files.lock();
for (i, file_option) in files.iter_mut().enumerate() {
if file_option.is_none() && i >= min {
*file_option = Some(file);
return Some(FileHandle::from(i));
}
}
let len = files.len();
if len < super::CONTEXT_MAX_FILES {
if len >= min {
files.push(Some(file));
Some(FileHandle::from(len))
} else {
drop(files);
self.insert_file(FileHandle::from(min), file)
}
} else {
None
}
}
/// Get a file
pub fn get_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let files = self.files.lock();
if i.into() < files.len() {
files[i.into()].clone()
} else {
None
}
}
/// Insert a file with a specific handle number. This is used by dup2
/// Return the file descriptor number or None if the slot was not empty, or i was invalid
pub fn insert_file(&self, i: FileHandle, file: FileDescriptor) -> Option<FileHandle> {
let mut files = self.files.lock();
if i.into() < super::CONTEXT_MAX_FILES {
while i.into() >= files.len() {
files.push(None);
}
if files[i.into()].is_none() {
files[i.into()] = Some(file);
Some(i)
} else
|
} else {
None
}
}
/// Remove a file
// TODO: adjust files vector to smaller size if possible
pub fn remove_file(&self, i: FileHandle) -> Option<FileDescriptor> {
let mut files = self.files.lock();
if i.into() < files.len() {
files[i.into()].take()
} else {
None
}
}
}
|
{
None
}
|
conditional_block
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
re: T,
/// Imaginary portion of the complex number
im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(&self.im)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(&self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(r * theta.cos(), r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: ToStr + Num + Ord> ToStr for Cmplx<T> {
fn to_str(&self) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str(), (-self.im).to_str())
} else {
format!("{}+{}i", self.re.to_str(), self.im.to_str())
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else {
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
}
}
#[cfg(test)]
mod test {
#[allow(non_uppercase_statics)];
use super::*;
use std::num::{Zero,One,Real};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Real::pi());
test(_neg1_1i, 0.75 * Real::pi());
test(_05_05i, 0.25 * Real::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::*;
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
|
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, ~"0+0i");
test(_1_0i, ~"1+0i");
test(_0_1i, ~"0+1i");
test(_1_1i, ~"1+1i");
test(_neg1_1i, ~"-1+1i");
test(-_neg1_1i, ~"1-1i");
test(_05_05i, ~"0.5+0.5i");
}
}
|
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
|
random_line_split
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
re: T,
/// Imaginary portion of the complex number
im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(&self.im)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(&self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(r * theta.cos(), r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: ToStr + Num + Ord> ToStr for Cmplx<T> {
fn to_str(&self) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str(), (-self.im).to_str())
} else {
format!("{}+{}i", self.re.to_str(), self.im.to_str())
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else {
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
}
}
#[cfg(test)]
mod test {
#[allow(non_uppercase_statics)];
use super::*;
use std::num::{Zero,One,Real};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn
|
() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Real::pi());
test(_neg1_1i, 0.75 * Real::pi());
test(_05_05i, 0.25 * Real::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::*;
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, ~"0+0i");
test(_1_0i, ~"1+0i");
test(_0_1i, ~"0+1i");
test(_1_1i, ~"1+1i");
test(_neg1_1i, ~"-1+1i");
test(-_neg1_1i, ~"1-1i");
test(_05_05i, ~"0.5+0.5i");
}
}
|
test_conj
|
identifier_name
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
re: T,
/// Imaginary portion of the complex number
im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T
|
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(&self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(r * theta.cos(), r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: ToStr + Num + Ord> ToStr for Cmplx<T> {
fn to_str(&self) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str(), (-self.im).to_str())
} else {
format!("{}+{}i", self.re.to_str(), self.im.to_str())
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else {
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
}
}
#[cfg(test)]
mod test {
#[allow(non_uppercase_statics)];
use super::*;
use std::num::{Zero,One,Real};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Real::pi());
test(_neg1_1i, 0.75 * Real::pi());
test(_05_05i, 0.25 * Real::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::*;
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, ~"0+0i");
test(_1_0i, ~"1+0i");
test(_0_1i, ~"0+1i");
test(_1_1i, ~"1+1i");
test(_neg1_1i, ~"-1+1i");
test(-_neg1_1i, ~"1-1i");
test(_05_05i, ~"0.5+0.5i");
}
}
|
{
self.re.hypot(&self.im)
}
|
identifier_body
|
complex.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Complex numbers.
use std::num::{Zero,One,ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
// FIXME #5734:: Need generic sin/cos for.to/from_polar().
// FIXME #5735: Need generic sqrt to implement.norm().
/// A complex number in Cartesian form.
#[deriving(Eq,Clone)]
pub struct Cmplx<T> {
/// Real portion of the complex number
re: T,
/// Imaginary portion of the complex number
im: T
}
pub type Complex32 = Cmplx<f32>;
pub type Complex64 = Cmplx<f64>;
impl<T: Clone + Num> Cmplx<T> {
/// Create a new Cmplx
#[inline]
pub fn new(re: T, im: T) -> Cmplx<T> {
Cmplx { re: re, im: im }
}
/**
Returns the square of the norm (since `T` doesn't necessarily
have a sqrt function), i.e. `re^2 + im^2`.
*/
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
}
/// Returns the complex conjugate. i.e. `re - i im`
#[inline]
pub fn conj(&self) -> Cmplx<T> {
Cmplx::new(self.re.clone(), -self.im)
}
/// Multiplies `self` by the scalar `t`.
#[inline]
pub fn scale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re * t, self.im * t)
}
/// Divides `self` by the scalar `t`.
#[inline]
pub fn unscale(&self, t: T) -> Cmplx<T> {
Cmplx::new(self.re / t, self.im / t)
}
/// Returns `1/self`
#[inline]
pub fn inv(&self) -> Cmplx<T> {
let norm_sqr = self.norm_sqr();
Cmplx::new(self.re / norm_sqr,
-self.im / norm_sqr)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate |self|
#[inline]
pub fn norm(&self) -> T {
self.re.hypot(&self.im)
}
}
impl<T: Clone + Real> Cmplx<T> {
/// Calculate the principal Arg of self.
#[inline]
pub fn arg(&self) -> T {
self.im.atan2(&self.re)
}
/// Convert to polar form (r, theta), such that `self = r * exp(i
/// * theta)`
#[inline]
pub fn to_polar(&self) -> (T, T) {
(self.norm(), self.arg())
}
/// Convert a polar representation into a complex number.
#[inline]
pub fn from_polar(r: &T, theta: &T) -> Cmplx<T> {
Cmplx::new(r * theta.cos(), r * theta.sin())
}
}
/* arithmetic */
// (a + i b) + (c + i d) == (a + c) + i (b + d)
impl<T: Clone + Num> Add<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn add(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re + other.re, self.im + other.im)
}
}
// (a + i b) - (c + i d) == (a - c) + i (b - d)
impl<T: Clone + Num> Sub<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn sub(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re - other.re, self.im - other.im)
}
}
// (a + i b) * (c + i d) == (a*c - b*d) + i (a*d + b*c)
impl<T: Clone + Num> Mul<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn mul(&self, other: &Cmplx<T>) -> Cmplx<T> {
Cmplx::new(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
}
}
// (a + i b) / (c + i d) == [(a + i b) * (c - i d)] / (c*c + d*d)
// == [(a*c + b*d) / (c*c + d*d)] + i [(b*c - a*d) / (c*c + d*d)]
impl<T: Clone + Num> Div<Cmplx<T>, Cmplx<T>> for Cmplx<T> {
#[inline]
fn div(&self, other: &Cmplx<T>) -> Cmplx<T> {
let norm_sqr = other.norm_sqr();
Cmplx::new((self.re*other.re + self.im*other.im) / norm_sqr,
(self.im*other.re - self.re*other.im) / norm_sqr)
}
}
impl<T: Clone + Num> Neg<Cmplx<T>> for Cmplx<T> {
#[inline]
fn neg(&self) -> Cmplx<T> {
Cmplx::new(-self.re, -self.im)
}
}
/* constants */
impl<T: Clone + Num> Zero for Cmplx<T> {
#[inline]
fn zero() -> Cmplx<T> {
Cmplx::new(Zero::zero(), Zero::zero())
}
#[inline]
fn is_zero(&self) -> bool {
self.re.is_zero() && self.im.is_zero()
}
}
impl<T: Clone + Num> One for Cmplx<T> {
#[inline]
fn one() -> Cmplx<T> {
Cmplx::new(One::one(), Zero::zero())
}
}
/* string conversions */
impl<T: ToStr + Num + Ord> ToStr for Cmplx<T> {
fn to_str(&self) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str(), (-self.im).to_str())
} else {
format!("{}+{}i", self.re.to_str(), self.im.to_str())
}
}
}
impl<T: ToStrRadix + Num + Ord> ToStrRadix for Cmplx<T> {
fn to_str_radix(&self, radix: uint) -> ~str {
if self.im < Zero::zero() {
format!("{}-{}i", self.re.to_str_radix(radix), (-self.im).to_str_radix(radix))
} else
|
}
}
#[cfg(test)]
mod test {
#[allow(non_uppercase_statics)];
use super::*;
use std::num::{Zero,One,Real};
pub static _0_0i : Complex64 = Cmplx { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Cmplx { re: 1.0, im: 0.0 };
pub static _1_1i : Complex64 = Cmplx { re: 1.0, im: 1.0 };
pub static _0_1i : Complex64 = Cmplx { re: 0.0, im: 1.0 };
pub static _neg1_1i : Complex64 = Cmplx { re: -1.0, im: 1.0 };
pub static _05_05i : Complex64 = Cmplx { re: 0.5, im: 0.5 };
pub static all_consts : [Complex64,.. 5] = [_0_0i, _1_0i, _1_1i, _neg1_1i, _05_05i];
#[test]
fn test_consts() {
// check our constants are what Cmplx::new creates
fn test(c : Complex64, r : f64, i: f64) {
assert_eq!(c, Cmplx::new(r,i));
}
test(_0_0i, 0.0, 0.0);
test(_1_0i, 1.0, 0.0);
test(_1_1i, 1.0, 1.0);
test(_neg1_1i, -1.0, 1.0);
test(_05_05i, 0.5, 0.5);
assert_eq!(_0_0i, Zero::zero());
assert_eq!(_1_0i, One::one());
}
#[test]
#[ignore(cfg(target_arch = "x86"))]
// FIXME #7158: (maybe?) currently failing on x86.
fn test_norm() {
fn test(c: Complex64, ns: f64) {
assert_eq!(c.norm_sqr(), ns);
assert_eq!(c.norm(), ns.sqrt())
}
test(_0_0i, 0.0);
test(_1_0i, 1.0);
test(_1_1i, 2.0);
test(_neg1_1i, 2.0);
test(_05_05i, 0.5);
}
#[test]
fn test_scale_unscale() {
assert_eq!(_05_05i.scale(2.0), _1_1i);
assert_eq!(_1_1i.unscale(2.0), _05_05i);
for &c in all_consts.iter() {
assert_eq!(c.scale(2.0).unscale(2.0), c);
}
}
#[test]
fn test_conj() {
for &c in all_consts.iter() {
assert_eq!(c.conj(), Cmplx::new(c.re, -c.im));
assert_eq!(c.conj().conj(), c);
}
}
#[test]
fn test_inv() {
assert_eq!(_1_1i.inv(), _05_05i.conj());
assert_eq!(_1_0i.inv(), _1_0i.inv());
}
#[test]
#[should_fail]
#[ignore]
fn test_inv_zero() {
// FIXME #5736: should this really fail, or just NaN?
_0_0i.inv();
}
#[test]
fn test_arg() {
fn test(c: Complex64, arg: f64) {
assert!((c.arg() - arg).abs() < 1.0e-6)
}
test(_1_0i, 0.0);
test(_1_1i, 0.25 * Real::pi());
test(_neg1_1i, 0.75 * Real::pi());
test(_05_05i, 0.25 * Real::pi());
}
#[test]
fn test_polar_conv() {
fn test(c: Complex64) {
let (r, theta) = c.to_polar();
assert!((c - Cmplx::from_polar(&r, &theta)).norm() < 1e-6);
}
for &c in all_consts.iter() { test(c); }
}
mod arith {
use super::*;
use std::num::Zero;
#[test]
fn test_add() {
assert_eq!(_05_05i + _05_05i, _1_1i);
assert_eq!(_0_1i + _1_0i, _1_1i);
assert_eq!(_1_0i + _neg1_1i, _0_1i);
for &c in all_consts.iter() {
assert_eq!(_0_0i + c, c);
assert_eq!(c + _0_0i, c);
}
}
#[test]
fn test_sub() {
assert_eq!(_05_05i - _05_05i, _0_0i);
assert_eq!(_0_1i - _1_0i, _neg1_1i);
assert_eq!(_0_1i - _neg1_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c - _0_0i, c);
assert_eq!(c - c, _0_0i);
}
}
#[test]
fn test_mul() {
assert_eq!(_05_05i * _05_05i, _0_1i.unscale(2.0));
assert_eq!(_1_1i * _0_1i, _neg1_1i);
// i^2 & i^4
assert_eq!(_0_1i * _0_1i, -_1_0i);
assert_eq!(_0_1i * _0_1i * _0_1i * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(c * _1_0i, c);
assert_eq!(_1_0i * c, c);
}
}
#[test]
fn test_div() {
assert_eq!(_neg1_1i / _0_1i, _1_1i);
for &c in all_consts.iter() {
if c!= Zero::zero() {
assert_eq!(c / c, _1_0i);
}
}
}
#[test]
fn test_neg() {
assert_eq!(-_1_0i + _0_1i, _neg1_1i);
assert_eq!((-_0_1i) * _0_1i, _1_0i);
for &c in all_consts.iter() {
assert_eq!(-(-c), c);
}
}
}
#[test]
fn test_to_str() {
fn test(c : Complex64, s: ~str) {
assert_eq!(c.to_str(), s);
}
test(_0_0i, ~"0+0i");
test(_1_0i, ~"1+0i");
test(_0_1i, ~"0+1i");
test(_1_1i, ~"1+1i");
test(_neg1_1i, ~"-1+1i");
test(-_neg1_1i, ~"1-1i");
test(_05_05i, ~"0.5+0.5i");
}
}
|
{
format!("{}+{}i", self.re.to_str_radix(radix), self.im.to_str_radix(radix))
}
|
conditional_block
|
js.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Smart pointers for the JS-managed DOM objects.
//!
//! The DOM is made up of DOM objects whose lifetime is entirely controlled by
//! the whims of the SpiderMonkey garbage collector. The types in this module
//! are designed to ensure that any interactions with said Rust types only
//! occur on values that will remain alive the entire time.
//!
//! Here is a brief overview of the important types:
//!
//! - `Root<T>`: a stack-based reference to a rooted DOM object.
//! - `JS<T>`: a reference to a DOM object that can automatically be traced by
//! the GC when encountered as a field of a Rust structure.
//!
//! `JS<T>` does not allow access to their inner value without explicitly
//! creating a stack-based root via the `root` method. This returns a `Root<T>`,
//! which causes the JS-owned value to be uncollectable for the duration of the
//! `Root` object's lifetime. A reference to the object can then be obtained
//! from the `Root` object. These references are not allowed to outlive their
//! originating `Root<T>`.
//!
use core::nonzero::NonZero;
use dom::bindings::conversions::DerivedFrom;
use dom::bindings::inheritance::Castable;
use dom::bindings::reflector::{DomObject, Reflector};
use dom::bindings::trace::JSTraceable;
use dom::bindings::trace::trace_reflector;
use dom::node::Node;
use heapsize::HeapSizeOf;
use js::jsapi::{JSObject, JSTracer};
use script_layout_interface::TrustedNodeAddress;
use script_thread::STACK_ROOTS;
use std::cell::UnsafeCell;
use std::default::Default;
use std::hash::{Hash, Hasher};
#[cfg(debug_assertions)]
use std::intrinsics::type_name;
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::rc::Rc;
use style::thread_state;
/// A traced reference to a DOM object
///
/// This type is critical to making garbage collection work with the DOM,
/// but it is very dangerous; if garbage collection happens with a `JS<T>`
/// on the stack, the `JS<T>` can point to freed memory.
///
/// This should only be used as a field in other DOM objects.
#[must_root]
pub struct JS<T> {
ptr: NonZero<*const T>,
}
// JS<T> is similar to Rc<T>, in that it's not always clear how to avoid double-counting.
// For now, we choose not to follow any such pointers.
impl<T> HeapSizeOf for JS<T> {
fn heap_size_of_children(&self) -> usize {
0
}
}
impl<T> JS<T> {
/// Returns `LayoutJS<T>` containing the same pointer.
pub unsafe fn to_layout(&self) -> LayoutJS<T>
|
}
impl<T: DomObject> JS<T> {
/// Create a JS<T> from a &T
#[allow(unrooted_must_root)]
pub fn from_ref(obj: &T) -> JS<T> {
debug_assert!(thread_state::get().is_script());
JS {
ptr: unsafe { NonZero::new(&*obj) },
}
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for JS<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
&self
}
}
impl<T: DomObject> Deref for JS<T> {
type Target = T;
fn deref(&self) -> &T {
debug_assert!(thread_state::get().is_script());
// We can only have &JS<T> from a rooted thing, so it's safe to deref
// it to &T.
unsafe { &**self.ptr }
}
}
unsafe impl<T: DomObject> JSTraceable for JS<T> {
unsafe fn trace(&self, trc: *mut JSTracer) {
#[cfg(debug_assertions)]
let trace_str = format!("for {} on heap", type_name::<T>());
#[cfg(debug_assertions)]
let trace_info = &trace_str[..];
#[cfg(not(debug_assertions))]
let trace_info = "for DOM object on heap";
trace_reflector(trc,
trace_info,
(**self.ptr).reflector());
}
}
/// An unrooted reference to a DOM object for use in layout. `Layout*Helpers`
/// traits must be implemented on this.
#[allow_unrooted_interior]
pub struct LayoutJS<T> {
ptr: NonZero<*const T>,
}
impl<T: Castable> LayoutJS<T> {
/// Cast a DOM object root upwards to one of the interfaces it derives from.
pub fn upcast<U>(&self) -> LayoutJS<U>
where U: Castable,
T: DerivedFrom<U>
{
debug_assert!(thread_state::get().is_layout());
let ptr: *const T = *self.ptr;
LayoutJS {
ptr: unsafe { NonZero::new(ptr as *const U) },
}
}
/// Cast a DOM object downwards to one of the interfaces it might implement.
pub fn downcast<U>(&self) -> Option<LayoutJS<U>>
where U: DerivedFrom<T>
{
debug_assert!(thread_state::get().is_layout());
unsafe {
if (*self.unsafe_get()).is::<U>() {
let ptr: *const T = *self.ptr;
Some(LayoutJS {
ptr: NonZero::new(ptr as *const U),
})
} else {
None
}
}
}
}
impl<T: DomObject> LayoutJS<T> {
/// Get the reflector.
pub unsafe fn get_jsobject(&self) -> *mut JSObject {
debug_assert!(thread_state::get().is_layout());
(**self.ptr).reflector().get_jsobject().get()
}
}
impl<T> Copy for LayoutJS<T> {}
impl<T> PartialEq for JS<T> {
fn eq(&self, other: &JS<T>) -> bool {
self.ptr == other.ptr
}
}
impl<T> Eq for JS<T> {}
impl<T> PartialEq for LayoutJS<T> {
fn eq(&self, other: &LayoutJS<T>) -> bool {
self.ptr == other.ptr
}
}
impl<T> Eq for LayoutJS<T> {}
impl<T> Hash for JS<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.ptr.hash(state)
}
}
impl<T> Hash for LayoutJS<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.ptr.hash(state)
}
}
impl <T> Clone for JS<T> {
#[inline]
#[allow(unrooted_must_root)]
fn clone(&self) -> JS<T> {
debug_assert!(thread_state::get().is_script());
JS {
ptr: self.ptr.clone(),
}
}
}
impl <T> Clone for LayoutJS<T> {
#[inline]
fn clone(&self) -> LayoutJS<T> {
debug_assert!(thread_state::get().is_layout());
LayoutJS {
ptr: self.ptr.clone(),
}
}
}
impl LayoutJS<Node> {
/// Create a new JS-owned value wrapped from an address known to be a
/// `Node` pointer.
pub unsafe fn from_trusted_node_address(inner: TrustedNodeAddress) -> LayoutJS<Node> {
debug_assert!(thread_state::get().is_layout());
let TrustedNodeAddress(addr) = inner;
LayoutJS {
ptr: NonZero::new(addr as *const Node),
}
}
}
/// A holder that provides interior mutability for GC-managed values such as
/// `JS<T>`. Essentially a `Cell<JS<T>>`, but safer.
///
/// This should only be used as a field in other DOM objects; see warning
/// on `JS<T>`.
#[must_root]
#[derive(JSTraceable)]
pub struct MutJS<T: DomObject> {
val: UnsafeCell<JS<T>>,
}
impl<T: DomObject> MutJS<T> {
/// Create a new `MutJS`.
pub fn new(initial: &T) -> MutJS<T> {
debug_assert!(thread_state::get().is_script());
MutJS {
val: UnsafeCell::new(JS::from_ref(initial)),
}
}
/// Set this `MutJS` to the given value.
pub fn set(&self, val: &T) {
debug_assert!(thread_state::get().is_script());
unsafe {
*self.val.get() = JS::from_ref(val);
}
}
/// Get the value in this `MutJS`.
pub fn get(&self) -> Root<T> {
debug_assert!(thread_state::get().is_script());
unsafe {
Root::from_ref(&*ptr::read(self.val.get()))
}
}
}
impl<T: DomObject> HeapSizeOf for MutJS<T> {
fn heap_size_of_children(&self) -> usize {
// See comment on HeapSizeOf for JS<T>.
0
}
}
impl<T: DomObject> PartialEq for MutJS<T> {
fn eq(&self, other: &Self) -> bool {
unsafe {
*self.val.get() == *other.val.get()
}
}
}
impl<T: DomObject + PartialEq> PartialEq<T> for MutJS<T> {
fn eq(&self, other: &T) -> bool {
unsafe {
**self.val.get() == *other
}
}
}
/// A holder that provides interior mutability for GC-managed values such as
/// `JS<T>`, with nullability represented by an enclosing Option wrapper.
/// Essentially a `Cell<Option<JS<T>>>`, but safer.
///
/// This should only be used as a field in other DOM objects; see warning
/// on `JS<T>`.
#[must_root]
#[derive(JSTraceable)]
pub struct MutNullableJS<T: DomObject> {
ptr: UnsafeCell<Option<JS<T>>>,
}
impl<T: DomObject> MutNullableJS<T> {
/// Create a new `MutNullableJS`.
pub fn new(initial: Option<&T>) -> MutNullableJS<T> {
debug_assert!(thread_state::get().is_script());
MutNullableJS {
ptr: UnsafeCell::new(initial.map(JS::from_ref)),
}
}
/// Retrieve a copy of the current inner value. If it is `None`, it is
/// initialized with the result of `cb` first.
pub fn or_init<F>(&self, cb: F) -> Root<T>
where F: FnOnce() -> Root<T>
{
debug_assert!(thread_state::get().is_script());
match self.get() {
Some(inner) => inner,
None => {
let inner = cb();
self.set(Some(&inner));
inner
},
}
}
/// Retrieve a copy of the inner optional `JS<T>` as `LayoutJS<T>`.
/// For use by layout, which can't use safe types like Temporary.
#[allow(unrooted_must_root)]
pub unsafe fn get_inner_as_layout(&self) -> Option<LayoutJS<T>> {
debug_assert!(thread_state::get().is_layout());
ptr::read(self.ptr.get()).map(|js| js.to_layout())
}
/// Get a rooted value out of this object
#[allow(unrooted_must_root)]
pub fn get(&self) -> Option<Root<T>> {
debug_assert!(thread_state::get().is_script());
unsafe {
ptr::read(self.ptr.get()).map(|o| Root::from_ref(&*o))
}
}
/// Set this `MutNullableJS` to the given value.
pub fn set(&self, val: Option<&T>) {
debug_assert!(thread_state::get().is_script());
unsafe {
*self.ptr.get() = val.map(|p| JS::from_ref(p));
}
}
/// Gets the current value out of this object and sets it to `None`.
pub fn take(&self) -> Option<Root<T>> {
let value = self.get();
self.set(None);
value
}
}
impl<T: DomObject> PartialEq for MutNullableJS<T> {
fn eq(&self, other: &Self) -> bool {
unsafe {
*self.ptr.get() == *other.ptr.get()
}
}
}
impl<'a, T: DomObject> PartialEq<Option<&'a T>> for MutNullableJS<T> {
fn eq(&self, other: &Option<&T>) -> bool {
unsafe {
*self.ptr.get() == other.map(JS::from_ref)
}
}
}
impl<T: DomObject> Default for MutNullableJS<T> {
#[allow(unrooted_must_root)]
fn default() -> MutNullableJS<T> {
debug_assert!(thread_state::get().is_script());
MutNullableJS {
ptr: UnsafeCell::new(None),
}
}
}
impl<T: DomObject> HeapSizeOf for MutNullableJS<T> {
fn heap_size_of_children(&self) -> usize {
// See comment on HeapSizeOf for JS<T>.
0
}
}
impl<T: DomObject> LayoutJS<T> {
/// Returns an unsafe pointer to the interior of this JS object. This is
/// the only method that be safely accessed from layout. (The fact that
/// this is unsafe is what necessitates the layout wrappers.)
pub unsafe fn unsafe_get(&self) -> *const T {
debug_assert!(thread_state::get().is_layout());
*self.ptr
}
/// Returns a reference to the interior of this JS object. This method is
/// safe to call because it originates from the layout thread, and it cannot
/// mutate DOM nodes.
pub fn get_for_script(&self) -> &T {
debug_assert!(thread_state::get().is_script());
unsafe { &**self.ptr }
}
}
/// Get a reference out of a rooted value.
pub trait RootedReference<'root> {
/// The type of the reference.
type Ref: 'root;
/// Obtain a reference out of the rooted value.
fn r(&'root self) -> Self::Ref;
}
impl<'root, T: JSTraceable + DomObject + 'root> RootedReference<'root> for [JS<T>] {
type Ref = &'root [&'root T];
fn r(&'root self) -> &'root [&'root T] {
unsafe { mem::transmute(self) }
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for Rc<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
self
}
}
impl<'root, T: RootedReference<'root> + 'root> RootedReference<'root> for Option<T> {
type Ref = Option<T::Ref>;
fn r(&'root self) -> Option<T::Ref> {
self.as_ref().map(RootedReference::r)
}
}
/// A rooting mechanism for reflectors on the stack.
/// LIFO is not required.
///
/// See also [*Exact Stack Rooting - Storing a GCPointer on the CStack*]
/// (https://developer.mozilla.org/en-US/docs/Mozilla/Projects/SpiderMonkey/Internals/GC/Exact_Stack_Rooting).
pub struct RootCollection {
roots: UnsafeCell<Vec<*const Reflector>>,
}
/// A pointer to a RootCollection, for use in global variables.
pub struct RootCollectionPtr(pub *const RootCollection);
impl Copy for RootCollectionPtr {}
impl Clone for RootCollectionPtr {
fn clone(&self) -> RootCollectionPtr {
*self
}
}
impl RootCollection {
/// Create an empty collection of roots
pub fn new() -> RootCollection {
debug_assert!(thread_state::get().is_script());
RootCollection {
roots: UnsafeCell::new(vec![]),
}
}
/// Start tracking a stack-based root
unsafe fn root(&self, untracked_reflector: *const Reflector) {
debug_assert!(thread_state::get().is_script());
let mut roots = &mut *self.roots.get();
roots.push(untracked_reflector);
assert!(!(*untracked_reflector).get_jsobject().is_null())
}
/// Stop tracking a stack-based reflector, asserting if it isn't found.
unsafe fn unroot(&self, tracked_reflector: *const Reflector) {
assert!(!tracked_reflector.is_null());
assert!(!(*tracked_reflector).get_jsobject().is_null());
debug_assert!(thread_state::get().is_script());
let mut roots = &mut *self.roots.get();
match roots.iter().rposition(|r| *r == tracked_reflector) {
Some(idx) => {
roots.remove(idx);
},
None => panic!("Can't remove a root that was never rooted!"),
}
}
}
/// SM Callback that traces the rooted reflectors
pub unsafe fn trace_roots(tracer: *mut JSTracer) {
debug!("tracing stack roots");
STACK_ROOTS.with(|ref collection| {
let RootCollectionPtr(collection) = collection.get().unwrap();
let collection = &*(*collection).roots.get();
for root in collection {
trace_reflector(tracer, "on stack", &**root);
}
});
}
/// A rooted reference to a DOM object.
///
/// The JS value is pinned for the duration of this object's lifetime; roots
/// are additive, so this object's destruction will not invalidate other roots
/// for the same JS value. `Root`s cannot outlive the associated
/// `RootCollection` object.
#[allow_unrooted_interior]
pub struct Root<T: DomObject> {
/// Reference to rooted value that must not outlive this container
ptr: NonZero<*const T>,
/// List that ensures correct dynamic root ordering
root_list: *const RootCollection,
}
impl<T: Castable> Root<T> {
/// Cast a DOM object root upwards to one of the interfaces it derives from.
pub fn upcast<U>(root: Root<T>) -> Root<U>
where U: Castable,
T: DerivedFrom<U>
{
unsafe { mem::transmute(root) }
}
/// Cast a DOM object root downwards to one of the interfaces it might implement.
pub fn downcast<U>(root: Root<T>) -> Option<Root<U>>
where U: DerivedFrom<T>
{
if root.is::<U>() {
Some(unsafe { mem::transmute(root) })
} else {
None
}
}
}
impl<T: DomObject> Root<T> {
/// Create a new stack-bounded root for the provided JS-owned value.
/// It cannot outlive its associated `RootCollection`, and it gives
/// out references which cannot outlive this new `Root`.
pub fn new(unrooted: NonZero<*const T>) -> Root<T> {
debug_assert!(thread_state::get().is_script());
STACK_ROOTS.with(|ref collection| {
let RootCollectionPtr(collection) = collection.get().unwrap();
unsafe { (*collection).root(&*(**unrooted).reflector()) }
Root {
ptr: unrooted,
root_list: collection,
}
})
}
/// Generate a new root from a reference
pub fn from_ref(unrooted: &T) -> Root<T> {
Root::new(unsafe { NonZero::new(&*unrooted) })
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for Root<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
self
}
}
impl<T: DomObject> Deref for Root<T> {
type Target = T;
fn deref(&self) -> &T {
debug_assert!(thread_state::get().is_script());
unsafe { &**self.ptr.deref() }
}
}
impl<T: DomObject + HeapSizeOf> HeapSizeOf for Root<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
impl<T: DomObject> PartialEq for Root<T> {
fn eq(&self, other: &Self) -> bool {
self.ptr == other.ptr
}
}
impl<T: DomObject> Clone for Root<T> {
fn clone(&self) -> Root<T> {
Root::from_ref(&*self)
}
}
impl<T: DomObject> Drop for Root<T> {
fn drop(&mut self) {
unsafe {
(*self.root_list).unroot(self.reflector());
}
}
}
unsafe impl<T: DomObject> JSTraceable for Root<T> {
unsafe fn trace(&self, _: *mut JSTracer) {
// Already traced.
}
}
|
{
debug_assert!(thread_state::get().is_layout());
LayoutJS {
ptr: self.ptr.clone(),
}
}
|
identifier_body
|
js.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Smart pointers for the JS-managed DOM objects.
//!
//! The DOM is made up of DOM objects whose lifetime is entirely controlled by
//! the whims of the SpiderMonkey garbage collector. The types in this module
//! are designed to ensure that any interactions with said Rust types only
//! occur on values that will remain alive the entire time.
//!
//! Here is a brief overview of the important types:
//!
//! - `Root<T>`: a stack-based reference to a rooted DOM object.
//! - `JS<T>`: a reference to a DOM object that can automatically be traced by
//! the GC when encountered as a field of a Rust structure.
//!
//! `JS<T>` does not allow access to their inner value without explicitly
//! creating a stack-based root via the `root` method. This returns a `Root<T>`,
//! which causes the JS-owned value to be uncollectable for the duration of the
//! `Root` object's lifetime. A reference to the object can then be obtained
//! from the `Root` object. These references are not allowed to outlive their
//! originating `Root<T>`.
//!
use core::nonzero::NonZero;
use dom::bindings::conversions::DerivedFrom;
use dom::bindings::inheritance::Castable;
use dom::bindings::reflector::{DomObject, Reflector};
use dom::bindings::trace::JSTraceable;
use dom::bindings::trace::trace_reflector;
use dom::node::Node;
use heapsize::HeapSizeOf;
use js::jsapi::{JSObject, JSTracer};
use script_layout_interface::TrustedNodeAddress;
use script_thread::STACK_ROOTS;
use std::cell::UnsafeCell;
use std::default::Default;
use std::hash::{Hash, Hasher};
#[cfg(debug_assertions)]
use std::intrinsics::type_name;
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::rc::Rc;
use style::thread_state;
/// A traced reference to a DOM object
///
/// This type is critical to making garbage collection work with the DOM,
/// but it is very dangerous; if garbage collection happens with a `JS<T>`
/// on the stack, the `JS<T>` can point to freed memory.
///
/// This should only be used as a field in other DOM objects.
#[must_root]
pub struct JS<T> {
ptr: NonZero<*const T>,
}
// JS<T> is similar to Rc<T>, in that it's not always clear how to avoid double-counting.
// For now, we choose not to follow any such pointers.
impl<T> HeapSizeOf for JS<T> {
fn heap_size_of_children(&self) -> usize {
0
}
}
impl<T> JS<T> {
/// Returns `LayoutJS<T>` containing the same pointer.
pub unsafe fn to_layout(&self) -> LayoutJS<T> {
debug_assert!(thread_state::get().is_layout());
LayoutJS {
ptr: self.ptr.clone(),
}
}
}
impl<T: DomObject> JS<T> {
/// Create a JS<T> from a &T
#[allow(unrooted_must_root)]
pub fn from_ref(obj: &T) -> JS<T> {
debug_assert!(thread_state::get().is_script());
JS {
ptr: unsafe { NonZero::new(&*obj) },
}
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for JS<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
&self
}
}
impl<T: DomObject> Deref for JS<T> {
type Target = T;
fn deref(&self) -> &T {
debug_assert!(thread_state::get().is_script());
// We can only have &JS<T> from a rooted thing, so it's safe to deref
// it to &T.
unsafe { &**self.ptr }
}
}
unsafe impl<T: DomObject> JSTraceable for JS<T> {
unsafe fn trace(&self, trc: *mut JSTracer) {
#[cfg(debug_assertions)]
let trace_str = format!("for {} on heap", type_name::<T>());
#[cfg(debug_assertions)]
let trace_info = &trace_str[..];
#[cfg(not(debug_assertions))]
let trace_info = "for DOM object on heap";
trace_reflector(trc,
trace_info,
(**self.ptr).reflector());
}
}
/// An unrooted reference to a DOM object for use in layout. `Layout*Helpers`
/// traits must be implemented on this.
#[allow_unrooted_interior]
pub struct LayoutJS<T> {
ptr: NonZero<*const T>,
}
impl<T: Castable> LayoutJS<T> {
/// Cast a DOM object root upwards to one of the interfaces it derives from.
pub fn upcast<U>(&self) -> LayoutJS<U>
where U: Castable,
T: DerivedFrom<U>
{
debug_assert!(thread_state::get().is_layout());
let ptr: *const T = *self.ptr;
LayoutJS {
ptr: unsafe { NonZero::new(ptr as *const U) },
}
}
/// Cast a DOM object downwards to one of the interfaces it might implement.
pub fn downcast<U>(&self) -> Option<LayoutJS<U>>
where U: DerivedFrom<T>
{
debug_assert!(thread_state::get().is_layout());
unsafe {
if (*self.unsafe_get()).is::<U>() {
let ptr: *const T = *self.ptr;
Some(LayoutJS {
ptr: NonZero::new(ptr as *const U),
})
} else {
None
}
}
}
}
impl<T: DomObject> LayoutJS<T> {
/// Get the reflector.
pub unsafe fn get_jsobject(&self) -> *mut JSObject {
debug_assert!(thread_state::get().is_layout());
(**self.ptr).reflector().get_jsobject().get()
}
}
impl<T> Copy for LayoutJS<T> {}
impl<T> PartialEq for JS<T> {
fn eq(&self, other: &JS<T>) -> bool {
self.ptr == other.ptr
}
}
impl<T> Eq for JS<T> {}
impl<T> PartialEq for LayoutJS<T> {
fn eq(&self, other: &LayoutJS<T>) -> bool {
self.ptr == other.ptr
}
}
impl<T> Eq for LayoutJS<T> {}
impl<T> Hash for JS<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.ptr.hash(state)
}
}
impl<T> Hash for LayoutJS<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.ptr.hash(state)
}
}
impl <T> Clone for JS<T> {
#[inline]
#[allow(unrooted_must_root)]
fn clone(&self) -> JS<T> {
debug_assert!(thread_state::get().is_script());
JS {
ptr: self.ptr.clone(),
}
}
}
impl <T> Clone for LayoutJS<T> {
#[inline]
fn clone(&self) -> LayoutJS<T> {
debug_assert!(thread_state::get().is_layout());
LayoutJS {
ptr: self.ptr.clone(),
}
}
}
impl LayoutJS<Node> {
/// Create a new JS-owned value wrapped from an address known to be a
/// `Node` pointer.
pub unsafe fn from_trusted_node_address(inner: TrustedNodeAddress) -> LayoutJS<Node> {
debug_assert!(thread_state::get().is_layout());
let TrustedNodeAddress(addr) = inner;
LayoutJS {
ptr: NonZero::new(addr as *const Node),
}
}
}
/// A holder that provides interior mutability for GC-managed values such as
/// `JS<T>`. Essentially a `Cell<JS<T>>`, but safer.
///
/// This should only be used as a field in other DOM objects; see warning
/// on `JS<T>`.
#[must_root]
#[derive(JSTraceable)]
pub struct MutJS<T: DomObject> {
val: UnsafeCell<JS<T>>,
}
|
pub fn new(initial: &T) -> MutJS<T> {
debug_assert!(thread_state::get().is_script());
MutJS {
val: UnsafeCell::new(JS::from_ref(initial)),
}
}
/// Set this `MutJS` to the given value.
pub fn set(&self, val: &T) {
debug_assert!(thread_state::get().is_script());
unsafe {
*self.val.get() = JS::from_ref(val);
}
}
/// Get the value in this `MutJS`.
pub fn get(&self) -> Root<T> {
debug_assert!(thread_state::get().is_script());
unsafe {
Root::from_ref(&*ptr::read(self.val.get()))
}
}
}
impl<T: DomObject> HeapSizeOf for MutJS<T> {
fn heap_size_of_children(&self) -> usize {
// See comment on HeapSizeOf for JS<T>.
0
}
}
impl<T: DomObject> PartialEq for MutJS<T> {
fn eq(&self, other: &Self) -> bool {
unsafe {
*self.val.get() == *other.val.get()
}
}
}
impl<T: DomObject + PartialEq> PartialEq<T> for MutJS<T> {
fn eq(&self, other: &T) -> bool {
unsafe {
**self.val.get() == *other
}
}
}
/// A holder that provides interior mutability for GC-managed values such as
/// `JS<T>`, with nullability represented by an enclosing Option wrapper.
/// Essentially a `Cell<Option<JS<T>>>`, but safer.
///
/// This should only be used as a field in other DOM objects; see warning
/// on `JS<T>`.
#[must_root]
#[derive(JSTraceable)]
pub struct MutNullableJS<T: DomObject> {
ptr: UnsafeCell<Option<JS<T>>>,
}
impl<T: DomObject> MutNullableJS<T> {
/// Create a new `MutNullableJS`.
pub fn new(initial: Option<&T>) -> MutNullableJS<T> {
debug_assert!(thread_state::get().is_script());
MutNullableJS {
ptr: UnsafeCell::new(initial.map(JS::from_ref)),
}
}
/// Retrieve a copy of the current inner value. If it is `None`, it is
/// initialized with the result of `cb` first.
pub fn or_init<F>(&self, cb: F) -> Root<T>
where F: FnOnce() -> Root<T>
{
debug_assert!(thread_state::get().is_script());
match self.get() {
Some(inner) => inner,
None => {
let inner = cb();
self.set(Some(&inner));
inner
},
}
}
/// Retrieve a copy of the inner optional `JS<T>` as `LayoutJS<T>`.
/// For use by layout, which can't use safe types like Temporary.
#[allow(unrooted_must_root)]
pub unsafe fn get_inner_as_layout(&self) -> Option<LayoutJS<T>> {
debug_assert!(thread_state::get().is_layout());
ptr::read(self.ptr.get()).map(|js| js.to_layout())
}
/// Get a rooted value out of this object
#[allow(unrooted_must_root)]
pub fn get(&self) -> Option<Root<T>> {
debug_assert!(thread_state::get().is_script());
unsafe {
ptr::read(self.ptr.get()).map(|o| Root::from_ref(&*o))
}
}
/// Set this `MutNullableJS` to the given value.
pub fn set(&self, val: Option<&T>) {
debug_assert!(thread_state::get().is_script());
unsafe {
*self.ptr.get() = val.map(|p| JS::from_ref(p));
}
}
/// Gets the current value out of this object and sets it to `None`.
pub fn take(&self) -> Option<Root<T>> {
let value = self.get();
self.set(None);
value
}
}
impl<T: DomObject> PartialEq for MutNullableJS<T> {
fn eq(&self, other: &Self) -> bool {
unsafe {
*self.ptr.get() == *other.ptr.get()
}
}
}
impl<'a, T: DomObject> PartialEq<Option<&'a T>> for MutNullableJS<T> {
fn eq(&self, other: &Option<&T>) -> bool {
unsafe {
*self.ptr.get() == other.map(JS::from_ref)
}
}
}
impl<T: DomObject> Default for MutNullableJS<T> {
#[allow(unrooted_must_root)]
fn default() -> MutNullableJS<T> {
debug_assert!(thread_state::get().is_script());
MutNullableJS {
ptr: UnsafeCell::new(None),
}
}
}
impl<T: DomObject> HeapSizeOf for MutNullableJS<T> {
fn heap_size_of_children(&self) -> usize {
// See comment on HeapSizeOf for JS<T>.
0
}
}
impl<T: DomObject> LayoutJS<T> {
/// Returns an unsafe pointer to the interior of this JS object. This is
/// the only method that be safely accessed from layout. (The fact that
/// this is unsafe is what necessitates the layout wrappers.)
pub unsafe fn unsafe_get(&self) -> *const T {
debug_assert!(thread_state::get().is_layout());
*self.ptr
}
/// Returns a reference to the interior of this JS object. This method is
/// safe to call because it originates from the layout thread, and it cannot
/// mutate DOM nodes.
pub fn get_for_script(&self) -> &T {
debug_assert!(thread_state::get().is_script());
unsafe { &**self.ptr }
}
}
/// Get a reference out of a rooted value.
pub trait RootedReference<'root> {
/// The type of the reference.
type Ref: 'root;
/// Obtain a reference out of the rooted value.
fn r(&'root self) -> Self::Ref;
}
impl<'root, T: JSTraceable + DomObject + 'root> RootedReference<'root> for [JS<T>] {
type Ref = &'root [&'root T];
fn r(&'root self) -> &'root [&'root T] {
unsafe { mem::transmute(self) }
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for Rc<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
self
}
}
impl<'root, T: RootedReference<'root> + 'root> RootedReference<'root> for Option<T> {
type Ref = Option<T::Ref>;
fn r(&'root self) -> Option<T::Ref> {
self.as_ref().map(RootedReference::r)
}
}
/// A rooting mechanism for reflectors on the stack.
/// LIFO is not required.
///
/// See also [*Exact Stack Rooting - Storing a GCPointer on the CStack*]
/// (https://developer.mozilla.org/en-US/docs/Mozilla/Projects/SpiderMonkey/Internals/GC/Exact_Stack_Rooting).
pub struct RootCollection {
roots: UnsafeCell<Vec<*const Reflector>>,
}
/// A pointer to a RootCollection, for use in global variables.
pub struct RootCollectionPtr(pub *const RootCollection);
impl Copy for RootCollectionPtr {}
impl Clone for RootCollectionPtr {
fn clone(&self) -> RootCollectionPtr {
*self
}
}
impl RootCollection {
/// Create an empty collection of roots
pub fn new() -> RootCollection {
debug_assert!(thread_state::get().is_script());
RootCollection {
roots: UnsafeCell::new(vec![]),
}
}
/// Start tracking a stack-based root
unsafe fn root(&self, untracked_reflector: *const Reflector) {
debug_assert!(thread_state::get().is_script());
let mut roots = &mut *self.roots.get();
roots.push(untracked_reflector);
assert!(!(*untracked_reflector).get_jsobject().is_null())
}
/// Stop tracking a stack-based reflector, asserting if it isn't found.
unsafe fn unroot(&self, tracked_reflector: *const Reflector) {
assert!(!tracked_reflector.is_null());
assert!(!(*tracked_reflector).get_jsobject().is_null());
debug_assert!(thread_state::get().is_script());
let mut roots = &mut *self.roots.get();
match roots.iter().rposition(|r| *r == tracked_reflector) {
Some(idx) => {
roots.remove(idx);
},
None => panic!("Can't remove a root that was never rooted!"),
}
}
}
/// SM Callback that traces the rooted reflectors
///
/// Iterates the thread-local `STACK_ROOTS` collection and reports every
/// stack-rooted reflector to SpiderMonkey via `trace_reflector`.
///
/// # Safety
/// `tracer` must be a valid `*mut JSTracer` supplied by SpiderMonkey
/// during a GC trace.
pub unsafe fn trace_roots(tracer: *mut JSTracer) {
debug!("tracing stack roots");
STACK_ROOTS.with(|ref collection| {
// The thread-local is expected to be initialized before any rooting
// happens on this thread; `unwrap` panics otherwise.
let RootCollectionPtr(collection) = collection.get().unwrap();
let collection = &*(*collection).roots.get();
for root in collection {
trace_reflector(tracer, "on stack", &**root);
}
});
}
/// A rooted reference to a DOM object.
///
/// The JS value is pinned for the duration of this object's lifetime; roots
/// are additive, so this object's destruction will not invalidate other roots
/// for the same JS value. `Root`s cannot outlive the associated
/// `RootCollection` object.
#[allow_unrooted_interior]
pub struct Root<T: DomObject> {
/// Reference to rooted value that must not outlive this container
ptr: NonZero<*const T>,
/// List that ensures correct dynamic root ordering
root_list: *const RootCollection,
}
impl<T: Castable> Root<T> {
/// Cast a DOM object root upwards to one of the interfaces it derives from.
pub fn upcast<U>(root: Root<T>) -> Root<U>
where U: Castable,
T: DerivedFrom<U>
{
unsafe { mem::transmute(root) }
}
/// Cast a DOM object root downwards to one of the interfaces it might implement.
pub fn downcast<U>(root: Root<T>) -> Option<Root<U>>
where U: DerivedFrom<T>
{
if root.is::<U>() {
Some(unsafe { mem::transmute(root) })
} else {
None
}
}
}
impl<T: DomObject> Root<T> {
/// Create a new stack-bounded root for the provided JS-owned value.
/// It cannot outlive its associated `RootCollection`, and it gives
/// out references which cannot outlive this new `Root`.
pub fn new(unrooted: NonZero<*const T>) -> Root<T> {
debug_assert!(thread_state::get().is_script());
STACK_ROOTS.with(|ref collection| {
let RootCollectionPtr(collection) = collection.get().unwrap();
unsafe { (*collection).root(&*(**unrooted).reflector()) }
Root {
ptr: unrooted,
root_list: collection,
}
})
}
/// Generate a new root from a reference
pub fn from_ref(unrooted: &T) -> Root<T> {
Root::new(unsafe { NonZero::new(&*unrooted) })
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for Root<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
self
}
}
impl<T: DomObject> Deref for Root<T> {
type Target = T;
fn deref(&self) -> &T {
debug_assert!(thread_state::get().is_script());
unsafe { &**self.ptr.deref() }
}
}
impl<T: DomObject + HeapSizeOf> HeapSizeOf for Root<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
impl<T: DomObject> PartialEq for Root<T> {
fn eq(&self, other: &Self) -> bool {
self.ptr == other.ptr
}
}
impl<T: DomObject> Clone for Root<T> {
fn clone(&self) -> Root<T> {
Root::from_ref(&*self)
}
}
impl<T: DomObject> Drop for Root<T> {
fn drop(&mut self) {
unsafe {
(*self.root_list).unroot(self.reflector());
}
}
}
unsafe impl<T: DomObject> JSTraceable for Root<T> {
unsafe fn trace(&self, _: *mut JSTracer) {
// Already traced.
}
}
|
impl<T: DomObject> MutJS<T> {
/// Create a new `MutJS`.
|
random_line_split
|
js.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Smart pointers for the JS-managed DOM objects.
//!
//! The DOM is made up of DOM objects whose lifetime is entirely controlled by
//! the whims of the SpiderMonkey garbage collector. The types in this module
//! are designed to ensure that any interactions with said Rust types only
//! occur on values that will remain alive the entire time.
//!
//! Here is a brief overview of the important types:
//!
//! - `Root<T>`: a stack-based reference to a rooted DOM object.
//! - `JS<T>`: a reference to a DOM object that can automatically be traced by
//! the GC when encountered as a field of a Rust structure.
//!
//! `JS<T>` does not allow access to their inner value without explicitly
//! creating a stack-based root via the `root` method. This returns a `Root<T>`,
//! which causes the JS-owned value to be uncollectable for the duration of the
//! `Root` object's lifetime. A reference to the object can then be obtained
//! from the `Root` object. These references are not allowed to outlive their
//! originating `Root<T>`.
//!
use core::nonzero::NonZero;
use dom::bindings::conversions::DerivedFrom;
use dom::bindings::inheritance::Castable;
use dom::bindings::reflector::{DomObject, Reflector};
use dom::bindings::trace::JSTraceable;
use dom::bindings::trace::trace_reflector;
use dom::node::Node;
use heapsize::HeapSizeOf;
use js::jsapi::{JSObject, JSTracer};
use script_layout_interface::TrustedNodeAddress;
use script_thread::STACK_ROOTS;
use std::cell::UnsafeCell;
use std::default::Default;
use std::hash::{Hash, Hasher};
#[cfg(debug_assertions)]
use std::intrinsics::type_name;
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::rc::Rc;
use style::thread_state;
/// A traced reference to a DOM object
///
/// This type is critical to making garbage collection work with the DOM,
/// but it is very dangerous; if garbage collection happens with a `JS<T>`
/// on the stack, the `JS<T>` can point to freed memory.
///
/// This should only be used as a field in other DOM objects.
#[must_root]
pub struct JS<T> {
ptr: NonZero<*const T>,
}
// JS<T> is similar to Rc<T>, in that it's not always clear how to avoid double-counting.
// For now, we choose not to follow any such pointers.
impl<T> HeapSizeOf for JS<T> {
fn heap_size_of_children(&self) -> usize {
0
}
}
impl<T> JS<T> {
/// Returns `LayoutJS<T>` containing the same pointer.
///
/// # Safety
/// The caller must guarantee the pointee is kept alive for as long as the
/// returned `LayoutJS` is used; only the layout thread may call this
/// (enforced in debug builds by the assertion below).
pub unsafe fn to_layout(&self) -> LayoutJS<T> {
debug_assert!(thread_state::get().is_layout());
LayoutJS {
ptr: self.ptr.clone(),
}
}
}
impl<T: DomObject> JS<T> {
/// Create a JS<T> from a &T
///
/// The reference must originate from rooted data for the resulting `JS<T>`
/// to be sound to store; the `unrooted_must_root` allowance exists because
/// this constructor itself produces an (as yet unstored) `JS<T>`.
#[allow(unrooted_must_root)]
pub fn from_ref(obj: &T) -> JS<T> {
debug_assert!(thread_state::get().is_script());
JS {
// NonZero is valid here: a reference can never be null.
ptr: unsafe { NonZero::new(&*obj) },
}
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for JS<T> {
    type Ref = &'root T;
    /// Obtain a `&T` out of this traced reference.
    ///
    /// `&JS<T>` deref-coerces to `&T` through the `Deref` impl, so returning
    /// `self` directly mirrors the sibling impls for `Root<T>` and `Rc<T>`
    /// (the previous `&self` added a needless extra borrow that relied on a
    /// double deref-coercion).
    fn r(&'root self) -> &'root T {
        self
    }
}
impl<T: DomObject> Deref for JS<T> {
type Target = T;
/// Dereference to the pointed-to DOM object.
///
/// Soundness relies on the invariant stated below: a `&JS<T>` is only
/// obtainable from rooted storage, so the pointee is alive.
fn deref(&self) -> &T {
debug_assert!(thread_state::get().is_script());
// We can only have &JS<T> from a rooted thing, so it's safe to deref
// it to &T.
unsafe { &**self.ptr }
}
}
unsafe impl<T: DomObject> JSTraceable for JS<T> {
/// Report the held reflector to the GC tracer.
///
/// In debug builds the trace label includes the concrete type name via
/// `type_name`; release builds use a fixed string to avoid the `format!`
/// allocation on every trace.
unsafe fn trace(&self, trc: *mut JSTracer) {
#[cfg(debug_assertions)]
let trace_str = format!("for {} on heap", type_name::<T>());
#[cfg(debug_assertions)]
let trace_info = &trace_str[..];
#[cfg(not(debug_assertions))]
let trace_info = "for DOM object on heap";
trace_reflector(trc,
trace_info,
(**self.ptr).reflector());
}
}
/// An unrooted reference to a DOM object for use in layout. `Layout*Helpers`
/// traits must be implemented on this.
#[allow_unrooted_interior]
pub struct LayoutJS<T> {
ptr: NonZero<*const T>,
}
impl<T: Castable> LayoutJS<T> {
/// Cast a DOM object root upwards to one of the interfaces it derives from.
///
/// The raw pointer cast relies on the representation compatibility that
/// `Castable`/`DerivedFrom` guarantee for DOM inheritance (same scheme as
/// the `mem::transmute`-based `Root::upcast` in this file).
pub fn upcast<U>(&self) -> LayoutJS<U>
where U: Castable,
T: DerivedFrom<U>
{
debug_assert!(thread_state::get().is_layout());
let ptr: *const T = *self.ptr;
LayoutJS {
// Pointer is non-null because it came from a NonZero.
ptr: unsafe { NonZero::new(ptr as *const U) },
}
}
/// Cast a DOM object downwards to one of the interfaces it might implement.
///
/// Returns `None` when the dynamic type check (`is::<U>()`) fails.
pub fn downcast<U>(&self) -> Option<LayoutJS<U>>
where U: DerivedFrom<T>
{
debug_assert!(thread_state::get().is_layout());
unsafe {
if (*self.unsafe_get()).is::<U>() {
let ptr: *const T = *self.ptr;
Some(LayoutJS {
ptr: NonZero::new(ptr as *const U),
})
} else {
None
}
}
}
}
impl<T: DomObject> LayoutJS<T> {
/// Get the reflector.
///
/// # Safety
/// Dereferences the raw pointer; the pointee must still be alive. Layout
/// thread only (asserted in debug builds).
pub unsafe fn get_jsobject(&self) -> *mut JSObject {
debug_assert!(thread_state::get().is_layout());
(**self.ptr).reflector().get_jsobject().get()
}
}
impl<T> Copy for LayoutJS<T> {}
impl<T> PartialEq for JS<T> {
fn eq(&self, other: &JS<T>) -> bool {
self.ptr == other.ptr
}
}
impl<T> Eq for JS<T> {}
impl<T> PartialEq for LayoutJS<T> {
fn eq(&self, other: &LayoutJS<T>) -> bool {
self.ptr == other.ptr
}
}
impl<T> Eq for LayoutJS<T> {}
impl<T> Hash for JS<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.ptr.hash(state)
}
}
impl<T> Hash for LayoutJS<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
self.ptr.hash(state)
}
}
impl <T> Clone for JS<T> {
#[inline]
#[allow(unrooted_must_root)]
fn clone(&self) -> JS<T> {
debug_assert!(thread_state::get().is_script());
JS {
ptr: self.ptr.clone(),
}
}
}
impl <T> Clone for LayoutJS<T> {
#[inline]
fn clone(&self) -> LayoutJS<T> {
debug_assert!(thread_state::get().is_layout());
LayoutJS {
ptr: self.ptr.clone(),
}
}
}
impl LayoutJS<Node> {
/// Create a new JS-owned value wrapped from an address known to be a
/// `Node` pointer.
pub unsafe fn from_trusted_node_address(inner: TrustedNodeAddress) -> LayoutJS<Node> {
debug_assert!(thread_state::get().is_layout());
let TrustedNodeAddress(addr) = inner;
LayoutJS {
ptr: NonZero::new(addr as *const Node),
}
}
}
/// A holder that provides interior mutability for GC-managed values such as
/// `JS<T>`. Essentially a `Cell<JS<T>>`, but safer.
///
/// This should only be used as a field in other DOM objects; see warning
/// on `JS<T>`.
#[must_root]
#[derive(JSTraceable)]
pub struct MutJS<T: DomObject> {
val: UnsafeCell<JS<T>>,
}
impl<T: DomObject> MutJS<T> {
/// Create a new `MutJS`.
pub fn new(initial: &T) -> MutJS<T> {
debug_assert!(thread_state::get().is_script());
MutJS {
val: UnsafeCell::new(JS::from_ref(initial)),
}
}
/// Set this `MutJS` to the given value.
pub fn set(&self, val: &T) {
debug_assert!(thread_state::get().is_script());
// Interior mutability through UnsafeCell; script-thread-only access
// (asserted above) is what makes the unchecked write acceptable here.
unsafe {
*self.val.get() = JS::from_ref(val);
}
}
/// Get the value in this `MutJS`.
///
/// Returns a freshly created `Root` so the caller holds the value rooted
/// independently of this cell.
pub fn get(&self) -> Root<T> {
debug_assert!(thread_state::get().is_script());
// ptr::read copies the JS<T> out of the cell without taking a &mut,
// avoiding aliasing issues with concurrent set() on the same thread.
unsafe {
Root::from_ref(&*ptr::read(self.val.get()))
}
}
}
impl<T: DomObject> HeapSizeOf for MutJS<T> {
fn heap_size_of_children(&self) -> usize {
// See comment on HeapSizeOf for JS<T>.
0
}
}
impl<T: DomObject> PartialEq for MutJS<T> {
fn eq(&self, other: &Self) -> bool {
unsafe {
*self.val.get() == *other.val.get()
}
}
}
impl<T: DomObject + PartialEq> PartialEq<T> for MutJS<T> {
fn eq(&self, other: &T) -> bool {
unsafe {
**self.val.get() == *other
}
}
}
/// A holder that provides interior mutability for GC-managed values such as
/// `JS<T>`, with nullability represented by an enclosing Option wrapper.
/// Essentially a `Cell<Option<JS<T>>>`, but safer.
///
/// This should only be used as a field in other DOM objects; see warning
/// on `JS<T>`.
#[must_root]
#[derive(JSTraceable)]
pub struct MutNullableJS<T: DomObject> {
ptr: UnsafeCell<Option<JS<T>>>,
}
impl<T: DomObject> MutNullableJS<T> {
/// Create a new `MutNullableJS`.
pub fn new(initial: Option<&T>) -> MutNullableJS<T> {
debug_assert!(thread_state::get().is_script());
MutNullableJS {
ptr: UnsafeCell::new(initial.map(JS::from_ref)),
}
}
/// Retrieve a copy of the current inner value. If it is `None`, it is
/// initialized with the result of `cb` first.
///
/// The freshly produced value is stored back into the cell before being
/// returned, so subsequent calls observe it.
pub fn or_init<F>(&self, cb: F) -> Root<T>
where F: FnOnce() -> Root<T>
{
debug_assert!(thread_state::get().is_script());
match self.get() {
Some(inner) => inner,
None => {
let inner = cb();
self.set(Some(&inner));
inner
},
}
}
/// Retrieve a copy of the inner optional `JS<T>` as `LayoutJS<T>`.
/// For use by layout, which can't use safe types like Temporary.
#[allow(unrooted_must_root)]
pub unsafe fn get_inner_as_layout(&self) -> Option<LayoutJS<T>> {
debug_assert!(thread_state::get().is_layout());
// Non-destructive copy of the Option<JS<T>> out of the cell.
ptr::read(self.ptr.get()).map(|js| js.to_layout())
}
/// Get a rooted value out of this object
///
/// Returns `None` when the cell is empty; otherwise roots the value so it
/// stays alive independently of this cell.
#[allow(unrooted_must_root)]
pub fn get(&self) -> Option<Root<T>> {
debug_assert!(thread_state::get().is_script());
unsafe {
ptr::read(self.ptr.get()).map(|o| Root::from_ref(&*o))
}
}
/// Set this `MutNullableJS` to the given value.
pub fn set(&self, val: Option<&T>) {
debug_assert!(thread_state::get().is_script());
unsafe {
*self.ptr.get() = val.map(|p| JS::from_ref(p));
}
}
/// Gets the current value out of this object and sets it to `None`.
pub fn take(&self) -> Option<Root<T>> {
let value = self.get();
self.set(None);
value
}
}
impl<T: DomObject> PartialEq for MutNullableJS<T> {
fn eq(&self, other: &Self) -> bool {
unsafe {
*self.ptr.get() == *other.ptr.get()
}
}
}
impl<'a, T: DomObject> PartialEq<Option<&'a T>> for MutNullableJS<T> {
fn eq(&self, other: &Option<&T>) -> bool {
unsafe {
*self.ptr.get() == other.map(JS::from_ref)
}
}
}
impl<T: DomObject> Default for MutNullableJS<T> {
#[allow(unrooted_must_root)]
fn default() -> MutNullableJS<T> {
debug_assert!(thread_state::get().is_script());
MutNullableJS {
ptr: UnsafeCell::new(None),
}
}
}
impl<T: DomObject> HeapSizeOf for MutNullableJS<T> {
fn heap_size_of_children(&self) -> usize {
// See comment on HeapSizeOf for JS<T>.
0
}
}
impl<T: DomObject> LayoutJS<T> {
/// Returns an unsafe pointer to the interior of this JS object. This is
/// the only method that can be safely accessed from layout. (The fact that
/// this is unsafe is what necessitates the layout wrappers.)
pub unsafe fn unsafe_get(&self) -> *const T {
debug_assert!(thread_state::get().is_layout());
*self.ptr
}
/// Returns a reference to the interior of this JS object.
///
/// NOTE(review): the original doc claimed this is "safe because it
/// originates from the layout thread", but the assertion below requires
/// the *script* thread — presumably for script code that receives a
/// `LayoutJS` back from layout. Confirm the intended thread with callers.
pub fn get_for_script(&self) -> &T {
debug_assert!(thread_state::get().is_script());
unsafe { &**self.ptr }
}
}
/// Get a reference out of a rooted value.
pub trait RootedReference<'root> {
/// The type of the reference.
type Ref: 'root;
/// Obtain a reference out of the rooted value.
fn r(&'root self) -> Self::Ref;
}
impl<'root, T: JSTraceable + DomObject + 'root> RootedReference<'root> for [JS<T>] {
type Ref = &'root [&'root T];
fn r(&'root self) -> &'root [&'root T] {
unsafe { mem::transmute(self) }
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for Rc<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
self
}
}
impl<'root, T: RootedReference<'root> + 'root> RootedReference<'root> for Option<T> {
type Ref = Option<T::Ref>;
fn r(&'root self) -> Option<T::Ref> {
self.as_ref().map(RootedReference::r)
}
}
/// A rooting mechanism for reflectors on the stack.
/// LIFO is not required.
///
/// See also [*Exact Stack Rooting - Storing a GCPointer on the CStack*]
/// (https://developer.mozilla.org/en-US/docs/Mozilla/Projects/SpiderMonkey/Internals/GC/Exact_Stack_Rooting).
pub struct RootCollection {
roots: UnsafeCell<Vec<*const Reflector>>,
}
/// A pointer to a RootCollection, for use in global variables.
pub struct RootCollectionPtr(pub *const RootCollection);
impl Copy for RootCollectionPtr {}
impl Clone for RootCollectionPtr {
fn clone(&self) -> RootCollectionPtr {
*self
}
}
impl RootCollection {
/// Create an empty collection of roots
pub fn new() -> RootCollection {
debug_assert!(thread_state::get().is_script());
RootCollection {
roots: UnsafeCell::new(vec![]),
}
}
/// Start tracking a stack-based root
///
/// # Safety
/// `untracked_reflector` must point to a live `Reflector` that outlives
/// its entry in this collection (a matching `unroot` must remove it).
unsafe fn root(&self, untracked_reflector: *const Reflector) {
debug_assert!(thread_state::get().is_script());
let mut roots = &mut *self.roots.get();
roots.push(untracked_reflector);
// A reflector with a null JSObject was never initialized; rooting it
// would be meaningless, so fail loudly.
assert!(!(*untracked_reflector).get_jsobject().is_null())
}
/// Stop tracking a stack-based reflector, asserting if it isn't found.
///
/// # Safety
/// `tracked_reflector` must have been registered via `root` and must
/// still be a valid pointer.
unsafe fn unroot(&self, tracked_reflector: *const Reflector) {
assert!(!tracked_reflector.is_null());
assert!(!(*tracked_reflector).get_jsobject().is_null());
debug_assert!(thread_state::get().is_script());
let mut roots = &mut *self.roots.get();
// rposition: remove the most recent matching entry — roots are
// additive and LIFO order is not required (see type-level docs),
// so duplicates for the same reflector are legal.
match roots.iter().rposition(|r| *r == tracked_reflector) {
Some(idx) => {
roots.remove(idx);
},
None => panic!("Can't remove a root that was never rooted!"),
}
}
}
/// SM Callback that traces the rooted reflectors
pub unsafe fn
|
(tracer: *mut JSTracer) {
debug!("tracing stack roots");
STACK_ROOTS.with(|ref collection| {
let RootCollectionPtr(collection) = collection.get().unwrap();
let collection = &*(*collection).roots.get();
for root in collection {
trace_reflector(tracer, "on stack", &**root);
}
});
}
/// A rooted reference to a DOM object.
///
/// The JS value is pinned for the duration of this object's lifetime; roots
/// are additive, so this object's destruction will not invalidate other roots
/// for the same JS value. `Root`s cannot outlive the associated
/// `RootCollection` object.
#[allow_unrooted_interior]
pub struct Root<T: DomObject> {
/// Reference to rooted value that must not outlive this container
ptr: NonZero<*const T>,
/// List that ensures correct dynamic root ordering
root_list: *const RootCollection,
}
impl<T: Castable> Root<T> {
/// Cast a DOM object root upwards to one of the interfaces it derives from.
pub fn upcast<U>(root: Root<T>) -> Root<U>
where U: Castable,
T: DerivedFrom<U>
{
unsafe { mem::transmute(root) }
}
/// Cast a DOM object root downwards to one of the interfaces it might implement.
pub fn downcast<U>(root: Root<T>) -> Option<Root<U>>
where U: DerivedFrom<T>
{
if root.is::<U>() {
Some(unsafe { mem::transmute(root) })
} else {
None
}
}
}
impl<T: DomObject> Root<T> {
/// Create a new stack-bounded root for the provided JS-owned value.
/// It cannot outlive its associated `RootCollection`, and it gives
/// out references which cannot outlive this new `Root`.
///
/// Registers the value's reflector with the thread-local root collection;
/// the matching `unroot` happens in `Drop for Root<T>`.
pub fn new(unrooted: NonZero<*const T>) -> Root<T> {
debug_assert!(thread_state::get().is_script());
STACK_ROOTS.with(|ref collection| {
// Expected to be initialized before any rooting on this thread;
// `unwrap` panics otherwise.
let RootCollectionPtr(collection) = collection.get().unwrap();
unsafe { (*collection).root(&*(**unrooted).reflector()) }
Root {
ptr: unrooted,
root_list: collection,
}
})
}
/// Generate a new root from a reference
pub fn from_ref(unrooted: &T) -> Root<T> {
// References are never null, so NonZero::new is valid here.
Root::new(unsafe { NonZero::new(&*unrooted) })
}
}
impl<'root, T: DomObject + 'root> RootedReference<'root> for Root<T> {
type Ref = &'root T;
fn r(&'root self) -> &'root T {
self
}
}
impl<T: DomObject> Deref for Root<T> {
type Target = T;
fn deref(&self) -> &T {
debug_assert!(thread_state::get().is_script());
unsafe { &**self.ptr.deref() }
}
}
impl<T: DomObject + HeapSizeOf> HeapSizeOf for Root<T> {
fn heap_size_of_children(&self) -> usize {
(**self).heap_size_of_children()
}
}
impl<T: DomObject> PartialEq for Root<T> {
fn eq(&self, other: &Self) -> bool {
self.ptr == other.ptr
}
}
impl<T: DomObject> Clone for Root<T> {
fn clone(&self) -> Root<T> {
Root::from_ref(&*self)
}
}
impl<T: DomObject> Drop for Root<T> {
fn drop(&mut self) {
unsafe {
(*self.root_list).unroot(self.reflector());
}
}
}
unsafe impl<T: DomObject> JSTraceable for Root<T> {
unsafe fn trace(&self, _: *mut JSTracer) {
// Already traced.
}
}
|
trace_roots
|
identifier_name
|
lib.rs
|
#![feature(quote, plugin_registrar, rustc_private)]
#![feature(std_misc)]
extern crate syntax;
extern crate rustc;
extern crate tempdir;
use std::collections::HashMap;
use std::io;
use std::io::prelude::*;
use std::fs::{self, File};
use std::path::{PathBuf, Path};
use std::process;
use syntax::ast;
use syntax::codemap;
use syntax::ext::base::{self, ExtCtxt, MacResult};
use syntax::fold::Folder;
use syntax::parse::{self, token};
use rustc::plugin::Registry;
use tempdir::TempDir;
mod parser_any_macro;
pub fn run_mixin_command(cx: &ExtCtxt, sp: codemap::Span,
plugin_name: &str, dir: &Path, binary: &str, args: &[String],
file: Option<&Path>) -> Result<process::Output, ()> {
let mut command = process::Command::new(&binary);
command.current_dir(dir)
.args(&args);
if let Some(file) = file {
command.arg(&file);
}
match command.output() {
Ok(o) => Ok(o),
Err(e) => {
let msg = if args.is_empty() {
format!("`{}!`: could not execute `{}`: {}", plugin_name, binary, e)
} else {
format!("`{}!`: could not execute `{}` with argument{} `{}`: {}",
plugin_name,
binary,
if args.len() == 1 { "" } else {"s"},
args.connect("`, `"),
e)
};
cx.span_err(sp, &msg);
Err(())
}
}
}
/// The options passed to the macro.
pub type Options = HashMap<String, Vec<(String, codemap::Span)>>;
pub enum Output {
Interpreted(process::Output),
Compiled(process::Output, String),
}
///
pub struct MixinExpander<F>
where F: Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
name: String,
dir: TempDir,
expander: F
}
impl<F> MixinExpander<F>
where F:'static + Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
pub fn
|
(reg: &Registry, name: String, expander: F)
-> Result<base::SyntaxExtension, ()> {
let dir = match TempDir::new(&format!("rustc_external_mixin_{}", name)) {
Ok(d) => d,
Err(e) => {
reg.sess.span_err(reg.krate_span,
&format!("`{}!`: could not create temporary directory: {}",
name,
e));
return Err(())
}
};
Ok(base::NormalTT(Box::new(MixinExpander {
name: name,
dir: dir,
expander: expander
}), None, false))
}
fn handle(&self, cx: &ExtCtxt, sp: codemap::Span,
output: Output) -> Result<Vec<u8>, ()> {
match output {
Output::Interpreted(out) => {
try!(check_errors_raw(cx, sp, &self.name, "code", &out));
Ok(out.stdout)
}
Output::Compiled(out, exe) => {
try!(check_errors_raw(cx, sp, &self.name, "compiler", &out));
let out = try!(run_mixin_command(cx, sp,
&self.name, self.dir.path(), &exe, &[], None));
try!(check_errors_raw(cx, sp, &self.name, "binary", &out));
Ok(out.stdout)
}
}
}
}
fn check_errors_raw(cx: &ExtCtxt,sp: codemap::Span,
name: &str,
kind: &str,
output: &process::Output) -> Result<(), ()> {
if!output.status.success() {
cx.span_err(sp,
&format!("`{}!`: the {} did not execute successfully: {}",
name, kind, output.status));
let msg = if output.stderr.is_empty() {
"there was no output on stderr".to_string()
} else {
format!("the {} emitted the following on stderr:\n{}",
kind, String::from_utf8_lossy(&output.stderr))
};
cx.parse_sess().span_diagnostic.fileline_note(sp, &msg);
return Err(())
} else if!output.stderr.is_empty() {
cx.span_warn(sp, &format!("`{}!`: the {} ran successfully, but had output on stderr",
name, kind));
let msg = format!("output:\n{}",
String::from_utf8_lossy(&output.stderr));
cx.parse_sess().span_diagnostic.fileline_note(sp, &msg);
}
Ok(())
}
impl<F> base::TTMacroExpander for MixinExpander<F>
where F:'static + Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: codemap::Span,
raw_tts: &[ast::TokenTree])
-> Box<MacResult+'cx>
{
macro_rules! mac_try {
($run: expr, $p: pat => $fmt_str: tt, $($fmt: tt)*) => {
match $run {
Ok(x) => x,
Err($p) => {
cx.span_err(sp,
&format!(concat!("`{}!`: ", $fmt_str), self.name, $($fmt)*));
return base::DummyResult::any(sp)
}
}
}
}
let (tts, option_tts): (_, &[_]) = match raw_tts.get(0) {
Some(&ast::TtDelimited(_, ref delim)) if delim.delim == token::Brace => {
(&raw_tts[1..], &delim.tts[..])
}
_ => (raw_tts, &[])
};
let options = match parse_options(cx, option_tts) {
Ok(o) => o,
Err(_) => return base::DummyResult::any(sp)
};
let code = match base::get_single_str_from_tts(cx, sp, tts,
&format!("`{}!`", self.name)) {
Some(c) => c,
None => return base::DummyResult::any(sp)
};
let lo = tts[0].get_span().lo;
let first_line = cx.codemap().lookup_char_pos(lo).line as u64;
let filename = cx.codemap().span_to_filename(sp);
let path = PathBuf::new(&filename);
let name = if path.is_absolute() {
Path::new(path.file_name().unwrap())
} else {
&*path
};
let code_file = self.dir.path().join(name);
mac_try! {
fs::create_dir_all(code_file.parent().unwrap()),
e => "could not create temporary directory: {}", e
}
let file = mac_try!(File::create(&code_file),
e => "could not create temporary file: {}", e);
let mut file = io::BufWriter::new(file);
mac_try!(io::copy(&mut io::repeat(b'\n').take(first_line - 1), &mut file),
e => "could not write output: {}", e);
mac_try!(file.write(code.as_bytes()),
e => "could not write output: {}", e);
mac_try!(file.flush(),
e => "could not flush output: {}", e);
drop(file);
let output = (self.expander)(cx, sp, options, self.dir.path(), &name)
.and_then(|o| self.handle(cx, sp, o));
let output = match output {
Ok(o) => o,
// handled internally...
Err(_) => return base::DummyResult::any(sp)
};
let emitted_code = mac_try!(String::from_utf8(output),
e => "emitted invalid UTF-8: {}", e);
let name = format!("<{}:{} {}!>", filename, first_line, self.name);
let parser = parse::new_parser_from_source_str(cx.parse_sess(),
cx.cfg(),
name,
emitted_code);
Box::new(parser_any_macro::ParserAnyMacro::new(parser))
}
}
fn parse_options(cx: &mut ExtCtxt, option_tts: &[ast::TokenTree]) -> Result<Options, ()> {
let mut error_occurred = false;
let mut options = HashMap::new();
let mut p = cx.new_parser_from_tts(option_tts);
while p.token!= token::Eof {
// <name> = "..."
let ident = p.parse_ident();
let ident_span = p.last_span;
let key = ident.as_str().to_string();
p.expect(&token::Eq);
let ret = cx.expander().fold_expr(p.parse_expr());
let span = codemap::mk_sp(ident_span.lo, ret.span.hi);
let val_opt = base::expr_to_string(cx, ret, "option must be a string literal")
.map(|(s, _)| s.to_string());
let val = match val_opt {
None => {
error_occurred = true;
while p.token!= token::Comma { p.bump() }
continue
}
Some(v) => v
};
options.entry(key)
.get()
.unwrap_or_else(|v| v.insert(vec![]))
.push((val, span));
if p.token == token::Eof { break }
p.expect(&token::Comma);
}
if error_occurred {
Err(())
} else {
Ok(options)
}
}
|
new
|
identifier_name
|
lib.rs
|
#![feature(quote, plugin_registrar, rustc_private)]
#![feature(std_misc)]
extern crate syntax;
extern crate rustc;
extern crate tempdir;
use std::collections::HashMap;
use std::io;
use std::io::prelude::*;
use std::fs::{self, File};
use std::path::{PathBuf, Path};
use std::process;
use syntax::ast;
use syntax::codemap;
use syntax::ext::base::{self, ExtCtxt, MacResult};
use syntax::fold::Folder;
use syntax::parse::{self, token};
use rustc::plugin::Registry;
use tempdir::TempDir;
mod parser_any_macro;
/// Run `binary` with `args` (plus an optional trailing `file` argument) in
/// directory `dir`, capturing its output.
///
/// On spawn failure, reports a span error through `cx` naming the plugin and
/// the command line, and returns `Err(())`; the process's own exit status is
/// NOT inspected here — callers (e.g. `check_errors_raw`) do that.
pub fn run_mixin_command(cx: &ExtCtxt, sp: codemap::Span,
plugin_name: &str, dir: &Path, binary: &str, args: &[String],
file: Option<&Path>) -> Result<process::Output, ()> {
let mut command = process::Command::new(&binary);
command.current_dir(dir)
.args(&args);
if let Some(file) = file {
command.arg(&file);
}
match command.output() {
Ok(o) => Ok(o),
Err(e) => {
// Tailor the message to whether any arguments were passed, with
// singular/plural agreement on "argument".
let msg = if args.is_empty() {
format!("`{}!`: could not execute `{}`: {}", plugin_name, binary, e)
} else {
format!("`{}!`: could not execute `{}` with argument{} `{}`: {}",
plugin_name,
binary,
if args.len() == 1 { "" } else {"s"},
args.connect("`, `"),
e)
};
cx.span_err(sp, &msg);
Err(())
}
}
}
/// The options passed to the macro.
pub type Options = HashMap<String, Vec<(String, codemap::Span)>>;
pub enum Output {
Interpreted(process::Output),
Compiled(process::Output, String),
}
///
pub struct MixinExpander<F>
where F: Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
name: String,
dir: TempDir,
expander: F
}
impl<F> MixinExpander<F>
where F:'static + Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
/// Build a `MixinExpander` wrapped as a `NormalTT` syntax extension.
///
/// Creates a per-plugin temporary directory used to stage the embedded
/// source; reports a crate-span error and returns `Err(())` if the
/// directory cannot be created.
pub fn new(reg: &Registry, name: String, expander: F)
-> Result<base::SyntaxExtension, ()> {
let dir = match TempDir::new(&format!("rustc_external_mixin_{}", name)) {
Ok(d) => d,
Err(e) => {
reg.sess.span_err(reg.krate_span,
&format!("`{}!`: could not create temporary directory: {}",
name,
e));
return Err(())
}
};
Ok(base::NormalTT(Box::new(MixinExpander {
name: name,
dir: dir,
expander: expander
}), None, false))
}
/// Turn the expander callback's `Output` into the bytes of generated code.
///
/// `Interpreted`: the captured stdout IS the code. `Compiled`: first check
/// the compiler's output, then run the produced executable `exe` and use
/// ITS stdout as the code. Diagnostics are emitted via `check_errors_raw`.
fn handle(&self, cx: &ExtCtxt, sp: codemap::Span,
output: Output) -> Result<Vec<u8>, ()> {
match output {
Output::Interpreted(out) => {
try!(check_errors_raw(cx, sp, &self.name, "code", &out));
Ok(out.stdout)
}
Output::Compiled(out, exe) => {
try!(check_errors_raw(cx, sp, &self.name, "compiler", &out));
let out = try!(run_mixin_command(cx, sp,
&self.name, self.dir.path(), &exe, &[], None));
try!(check_errors_raw(cx, sp, &self.name, "binary", &out));
Ok(out.stdout)
}
}
}
}
/// Inspect a subprocess's `Output` and turn failures into diagnostics.
///
/// Non-zero exit status: emit a span error (plus stderr, if any, as a note)
/// and return `Err(())`. Successful exit but non-empty stderr: emit a warning
/// with the stderr contents and still return `Ok(())`.
fn check_errors_raw(cx: &ExtCtxt,sp: codemap::Span,
name: &str,
kind: &str,
output: &process::Output) -> Result<(), ()> {
if!output.status.success() {
cx.span_err(sp,
&format!("`{}!`: the {} did not execute successfully: {}",
name, kind, output.status));
// Attach stderr (or its absence) as a follow-up note on the same span.
let msg = if output.stderr.is_empty() {
"there was no output on stderr".to_string()
} else {
format!("the {} emitted the following on stderr:\n{}",
kind, String::from_utf8_lossy(&output.stderr))
};
cx.parse_sess().span_diagnostic.fileline_note(sp, &msg);
return Err(())
} else if!output.stderr.is_empty() {
cx.span_warn(sp, &format!("`{}!`: the {} ran successfully, but had output on stderr",
name, kind));
let msg = format!("output:\n{}",
String::from_utf8_lossy(&output.stderr));
cx.parse_sess().span_diagnostic.fileline_note(sp, &msg);
}
Ok(())
}
impl<F> base::TTMacroExpander for MixinExpander<F>
where F:'static + Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
/// Expand `name! { options } "code"`: write the embedded code to a temp
/// file (padded with newlines so its line numbers match the invocation
/// site), run the language-specific expander over it, and re-parse the
/// emitted text as Rust in place of the macro call.
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: codemap::Span,
raw_tts: &[ast::TokenTree])
-> Box<MacResult+'cx>
{
// Local shorthand: unwrap a Result, or report a formatted span error
// and bail out of the expansion with a dummy result.
macro_rules! mac_try {
($run: expr, $p: pat => $fmt_str: tt, $($fmt: tt)*) => {
match $run {
Ok(x) => x,
Err($p) => {
cx.span_err(sp,
&format!(concat!("`{}!`: ", $fmt_str), self.name, $($fmt)*));
return base::DummyResult::any(sp)
}
}
}
}
// An optional leading `{ ... }` token tree carries the option list;
// everything after it is the code string.
let (tts, option_tts): (_, &[_]) = match raw_tts.get(0) {
Some(&ast::TtDelimited(_, ref delim)) if delim.delim == token::Brace => {
(&raw_tts[1..], &delim.tts[..])
}
_ => (raw_tts, &[])
};
let options = match parse_options(cx, option_tts) {
Ok(o) => o,
Err(_) => return base::DummyResult::any(sp)
};
let code = match base::get_single_str_from_tts(cx, sp, tts,
&format!("`{}!`", self.name)) {
Some(c) => c,
None => return base::DummyResult::any(sp)
};
// Line of the code literal in the real source; used below to pad the
// temp file so the foreign tool reports matching line numbers.
let lo = tts[0].get_span().lo;
let first_line = cx.codemap().lookup_char_pos(lo).line as u64;
let filename = cx.codemap().span_to_filename(sp);
let path = PathBuf::new(&filename);
// Keep only the file name for absolute paths so the temp copy stays
// inside our temp dir; relative paths are mirrored as-is.
let name = if path.is_absolute() {
Path::new(path.file_name().unwrap())
} else {
&*path
};
let code_file = self.dir.path().join(name);
mac_try! {
fs::create_dir_all(code_file.parent().unwrap()),
e => "could not create temporary directory: {}", e
}
let file = mac_try!(File::create(&code_file),
e => "could not create temporary file: {}", e);
let mut file = io::BufWriter::new(file);
// Pad with first_line - 1 newlines, then the embedded code.
mac_try!(io::copy(&mut io::repeat(b'\n').take(first_line - 1), &mut file),
e => "could not write output: {}", e);
mac_try!(file.write(code.as_bytes()),
e => "could not write output: {}", e);
mac_try!(file.flush(),
e => "could not flush output: {}", e);
// Close the file before the external tool reads it.
drop(file);
let output = (self.expander)(cx, sp, options, self.dir.path(), &name)
.and_then(|o| self.handle(cx, sp, o));
let output = match output {
Ok(o) => o,
// handled internally...
Err(_) => return base::DummyResult::any(sp)
};
let emitted_code = mac_try!(String::from_utf8(output),
e => "emitted invalid UTF-8: {}", e);
// Synthetic file name so diagnostics in the emitted code point back at
// the macro invocation.
let name = format!("<{}:{} {}!>", filename, first_line, self.name);
let parser = parse::new_parser_from_source_str(cx.parse_sess(),
cx.cfg(),
name,
emitted_code);
Box::new(parser_any_macro::ParserAnyMacro::new(parser))
}
}
/// Parse the comma-separated `<ident> = "string"` option list that may precede
/// the macro's code literal.
///
/// Each value expression is macro-expanded, then coerced to a string literal;
/// options with the same key accumulate into one `Vec`. Returns `Err(())` if
/// any value was not a string literal (the error has already been reported
/// through `cx`); parsing continues past bad values so that all problems are
/// reported in one pass.
fn parse_options(cx: &mut ExtCtxt, option_tts: &[ast::TokenTree]) -> Result<Options, ()> {
    let mut error_occurred = false;
    let mut options = HashMap::new();
    let mut p = cx.new_parser_from_tts(option_tts);
    while p.token != token::Eof {
        // <name> = "..."
        let ident = p.parse_ident();
        let ident_span = p.last_span;
        let key = ident.as_str().to_string();
        p.expect(&token::Eq);
        let ret = cx.expander().fold_expr(p.parse_expr());
        let span = codemap::mk_sp(ident_span.lo, ret.span.hi);
        let val_opt = base::expr_to_string(cx, ret, "option must be a string literal")
            .map(|(s, _)| s.to_string());
        let val = match val_opt {
            None => {
                error_occurred = true;
                // Error recovery: skip ahead to the next comma so later
                // options still get parsed. Also stop at Eof — without that
                // guard a bad *final* value made this loop bump past the end
                // of input forever.
                while p.token != token::Comma && p.token != token::Eof { p.bump() }
                continue
            }
            Some(v) => v
        };
        options.entry(key)
            .get()
            .unwrap_or_else(|v| v.insert(vec![]))
            .push((val, span));
        if p.token == token::Eof { break }
        p.expect(&token::Comma);
    }
    if error_occurred {
        Err(())
    } else {
        Ok(options)
    }
}
|
random_line_split
|
|
lib.rs
|
#![feature(quote, plugin_registrar, rustc_private)]
#![feature(std_misc)]
extern crate syntax;
extern crate rustc;
extern crate tempdir;
use std::collections::HashMap;
use std::io;
use std::io::prelude::*;
use std::fs::{self, File};
use std::path::{PathBuf, Path};
use std::process;
use syntax::ast;
use syntax::codemap;
use syntax::ext::base::{self, ExtCtxt, MacResult};
use syntax::fold::Folder;
use syntax::parse::{self, token};
use rustc::plugin::Registry;
use tempdir::TempDir;
mod parser_any_macro;
pub fn run_mixin_command(cx: &ExtCtxt, sp: codemap::Span,
plugin_name: &str, dir: &Path, binary: &str, args: &[String],
file: Option<&Path>) -> Result<process::Output, ()>
|
args.connect("`, `"),
e)
};
cx.span_err(sp, &msg);
Err(())
}
}
}
/// The options passed to the macro.
pub type Options = HashMap<String, Vec<(String, codemap::Span)>>;
pub enum Output {
Interpreted(process::Output),
Compiled(process::Output, String),
}
///
pub struct MixinExpander<F>
where F: Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
name: String,
dir: TempDir,
expander: F
}
// Constructor and output handling for `MixinExpander`. NOTE(review): this is
// pre-1.0 Rust (`try!`, internal `syntax`/`rustc` plugin APIs) and will not
// build on a modern toolchain; documented as-is.
impl<F> MixinExpander<F>
where F:'static + Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
/// Builds the syntax extension for one mixin macro: creates a dedicated
/// temporary directory (named after the macro) and wraps `expander` in a
/// `NormalTT` extension. On temp-dir failure, emits a compiler error at
/// the crate span and returns `Err(())`.
pub fn new(reg: &Registry, name: String, expander: F)
-> Result<base::SyntaxExtension, ()> {
let dir = match TempDir::new(&format!("rustc_external_mixin_{}", name)) {
Ok(d) => d,
Err(e) => {
reg.sess.span_err(reg.krate_span,
&format!("`{}!`: could not create temporary directory: {}",
name,
e));
return Err(())
}
};
// `None` span + `false`: no definition site span, not allowed in item position.
Ok(base::NormalTT(Box::new(MixinExpander {
name: name,
dir: dir,
expander: expander
}), None, false))
}
/// Converts the expander's `Output` into the raw bytes of emitted code.
/// `Interpreted` output is used directly; `Compiled` output additionally
/// runs the produced executable `exe` from the temp dir and takes *its*
/// stdout. Every stage is vetted through `check_errors_raw`.
fn handle(&self, cx: &ExtCtxt, sp: codemap::Span,
output: Output) -> Result<Vec<u8>, ()> {
match output {
Output::Interpreted(out) => {
try!(check_errors_raw(cx, sp, &self.name, "code", &out));
Ok(out.stdout)
}
Output::Compiled(out, exe) => {
try!(check_errors_raw(cx, sp, &self.name, "compiler", &out));
// No extra args and no input file: the compiled binary is run bare.
let out = try!(run_mixin_command(cx, sp,
&self.name, self.dir.path(), &exe, &[], None));
try!(check_errors_raw(cx, sp, &self.name, "binary", &out));
Ok(out.stdout)
}
}
}
}
/// Validates a subprocess `Output` on behalf of the `name!` macro.
///
/// * Non-zero exit status: emits a span error naming `kind` (a human label
///   such as "code", "compiler", "binary"), attaches the captured stderr
///   (or a "no output" note) as a file/line note, and returns `Err(())`.
/// * Zero exit status but non-empty stderr: emits only a warning plus the
///   stderr contents, and still returns `Ok(())`.
fn check_errors_raw(cx: &ExtCtxt,sp: codemap::Span,
name: &str,
kind: &str,
output: &process::Output) -> Result<(), ()> {
if!output.status.success() {
cx.span_err(sp,
&format!("`{}!`: the {} did not execute successfully: {}",
name, kind, output.status));
let msg = if output.stderr.is_empty() {
"there was no output on stderr".to_string()
} else {
format!("the {} emitted the following on stderr:\n{}",
kind, String::from_utf8_lossy(&output.stderr))
};
cx.parse_sess().span_diagnostic.fileline_note(sp, &msg);
return Err(())
} else if!output.stderr.is_empty() {
cx.span_warn(sp, &format!("`{}!`: the {} ran successfully, but had output on stderr",
name, kind));
let msg = format!("output:\n{}",
String::from_utf8_lossy(&output.stderr));
cx.parse_sess().span_diagnostic.fileline_note(sp, &msg);
}
Ok(())
}
// Main macro-expansion entry point: writes the macro's string argument to a
// temp file, runs the user-supplied expander over it, and re-parses the
// emitted text as Rust. NOTE(review): pre-1.0 Rust; kept byte-identical.
impl<F> base::TTMacroExpander for MixinExpander<F>
where F:'static + Fn(&mut ExtCtxt, codemap::Span,
Options,
&Path,
&Path) -> Result<Output, ()>
{
fn expand<'cx>(&self,
cx: &'cx mut ExtCtxt,
sp: codemap::Span,
raw_tts: &[ast::TokenTree])
-> Box<MacResult+'cx>
{
// Local helper: unwrap a Result, or report "`name!`: <message>" at the
// macro's span and bail out with a dummy expansion result.
macro_rules! mac_try {
($run: expr, $p: pat => $fmt_str: tt, $($fmt: tt)*) => {
match $run {
Ok(x) => x,
Err($p) => {
cx.span_err(sp,
&format!(concat!("`{}!`: ", $fmt_str), self.name, $($fmt)*));
return base::DummyResult::any(sp)
}
}
}
}
// An optional leading `{ ... }` token group carries the key = "value"
// options; everything after it is the macro body proper.
let (tts, option_tts): (_, &[_]) = match raw_tts.get(0) {
Some(&ast::TtDelimited(_, ref delim)) if delim.delim == token::Brace => {
(&raw_tts[1..], &delim.tts[..])
}
_ => (raw_tts, &[])
};
let options = match parse_options(cx, option_tts) {
Ok(o) => o,
Err(_) => return base::DummyResult::any(sp)
};
// The macro body must be a single string literal: the source to expand.
let code = match base::get_single_str_from_tts(cx, sp, tts,
&format!("`{}!`", self.name)) {
Some(c) => c,
None => return base::DummyResult::any(sp)
};
let lo = tts[0].get_span().lo;
let first_line = cx.codemap().lookup_char_pos(lo).line as u64;
let filename = cx.codemap().span_to_filename(sp);
// Use only the file name for absolute invocation paths so the temp file
// stays inside the expander's temp dir; relative paths are kept as-is.
let path = PathBuf::new(&filename);
let name = if path.is_absolute() {
Path::new(path.file_name().unwrap())
} else {
&*path
};
let code_file = self.dir.path().join(name);
mac_try! {
fs::create_dir_all(code_file.parent().unwrap()),
e => "could not create temporary directory: {}", e
}
let file = mac_try!(File::create(&code_file),
e => "could not create temporary file: {}", e);
let mut file = io::BufWriter::new(file);
// Pad with newlines so line numbers inside the temp file match the
// original invocation site (better diagnostics from the external tool).
mac_try!(io::copy(&mut io::repeat(b'\n').take(first_line - 1), &mut file),
e => "could not write output: {}", e);
mac_try!(file.write(code.as_bytes()),
e => "could not write output: {}", e);
mac_try!(file.flush(),
e => "could not flush output: {}", e);
drop(file);
// Run the user callback, then normalize its Output via `handle`.
let output = (self.expander)(cx, sp, options, self.dir.path(), &name)
.and_then(|o| self.handle(cx, sp, o));
let output = match output {
Ok(o) => o,
// handled internally...
Err(_) => return base::DummyResult::any(sp)
};
let emitted_code = mac_try!(String::from_utf8(output),
e => "emitted invalid UTF-8: {}", e);
// Synthetic file name so diagnostics point back at the macro call site.
let name = format!("<{}:{} {}!>", filename, first_line, self.name);
let parser = parse::new_parser_from_source_str(cx.parse_sess(),
cx.cfg(),
name,
emitted_code);
Box::new(parser_any_macro::ParserAnyMacro::new(parser))
}
}
/// Parses the optional `{ key = "value", ... }` block of a mixin invocation
/// into an `Options` map. Values must be string literals (after one round of
/// macro expansion); duplicate keys accumulate into the per-key `Vec`.
/// If any value fails to parse as a string, parsing continues (to report all
/// errors) but the function ultimately returns `Err(())`.
fn parse_options(cx: &mut ExtCtxt, option_tts: &[ast::TokenTree]) -> Result<Options, ()> {
let mut error_occurred = false;
let mut options = HashMap::new();
let mut p = cx.new_parser_from_tts(option_tts);
while p.token!= token::Eof {
// <name> = "..."
let ident = p.parse_ident();
let ident_span = p.last_span;
let key = ident.as_str().to_string();
p.expect(&token::Eq);
// Expand macros inside the value expression before stringifying it.
let ret = cx.expander().fold_expr(p.parse_expr());
// Span covers `key = "value"` for later duplicate/error reporting.
let span = codemap::mk_sp(ident_span.lo, ret.span.hi);
let val_opt = base::expr_to_string(cx, ret, "option must be a string literal")
.map(|(s, _)| s.to_string());
let val = match val_opt {
None => {
error_occurred = true;
// Error recovery: skip tokens up to the next comma.
// NOTE(review): if Eof is reached before a `,`, this loop never
// sees its exit condition — worth confirming p.bump() at Eof is safe.
while p.token!= token::Comma { p.bump() }
continue
}
Some(v) => v
};
// Old unstable HashMap entry API: get-or-insert an empty Vec, then push.
options.entry(key)
.get()
.unwrap_or_else(|v| v.insert(vec![]))
.push((val, span));
if p.token == token::Eof { break }
p.expect(&token::Comma);
}
if error_occurred {
Err(())
} else {
Ok(options)
}
}
|
{
let mut command = process::Command::new(&binary);
command.current_dir(dir)
.args(&args);
if let Some(file) = file {
command.arg(&file);
}
match command.output() {
Ok(o) => Ok(o),
Err(e) => {
let msg = if args.is_empty() {
format!("`{}!`: could not execute `{}`: {}", plugin_name, binary, e)
} else {
format!("`{}!`: could not execute `{}` with argument{} `{}`: {}",
plugin_name,
binary,
if args.len() == 1 { "" } else {"s"},
|
identifier_body
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use anyhow::{anyhow, Error};
use async_trait::async_trait;
use bookmarks::BookmarkTransactionError;
use context::CoreContext;
use mononoke_types::{hash::GitSha1, BonsaiChangesetMut, ChangesetId};
use pushrebase_hook::{
PushrebaseCommitHook, PushrebaseHook, PushrebaseTransactionHook, RebasedChangesets,
};
use sql::Transaction;
use std::{collections::HashMap, sync::Arc};
use bonsai_git_mapping::{
extract_git_sha1_from_bonsai_extra, BonsaiGitMapping, BonsaiGitMappingEntry,
};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct GitMappingPushrebaseHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
}
impl GitMappingPushrebaseHook {
pub fn new(bonsai_git_mapping: Arc<dyn BonsaiGitMapping>) -> Box<dyn PushrebaseHook> {
Box::new(Self { bonsai_git_mapping })
}
}
#[async_trait]
impl PushrebaseHook for GitMappingPushrebaseHook {
async fn prepushrebase(&self) -> Result<Box<dyn PushrebaseCommitHook>, Error> {
let hook = Box::new(GitMappingCommitHook {
bonsai_git_mapping: self.bonsai_git_mapping.clone(),
assignments: HashMap::new(),
}) as Box<dyn PushrebaseCommitHook>;
Ok(hook)
}
}
struct GitMappingCommitHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
assignments: HashMap<ChangesetId, GitSha1>,
}
#[async_trait]
impl PushrebaseCommitHook for GitMappingCommitHook {
fn post_rebase_changeset(
&mut self,
bcs_old: ChangesetId,
bcs_new: &mut BonsaiChangesetMut,
) -> Result<(), Error>
|
/// Converts the commit hook into a transaction hook once rebasing is done.
///
/// Re-keys every recorded git-hash assignment from the *pre*-rebase changeset
/// id to its rebased replacement (looked up in `rebased`), producing the
/// `BonsaiGitMappingEntry` list the transaction hook will persist. Fails if
/// an assigned changeset is missing from the rebased set, or if the rebased
/// set and the assignment map differ in size (defensive sanity check).
async fn into_transaction_hook(
self: Box<Self>,
_ctx: &CoreContext,
rebased: &RebasedChangesets,
) -> Result<Box<dyn PushrebaseTransactionHook>, Error> {
// Let's tie assigned git hashes to rebased Bonsai changesets:
let entries = self
.assignments
.iter()
.map(|(cs_id, git_sha1)| {
let replacement_bcs_id = rebased
.get(cs_id)
.ok_or_else(|| {
let e = format!(
"Commit was assigned a git hash, but is not found in rebased set: {}",
cs_id
);
Error::msg(e)
})?
// `.0` — the rebased changeset id half of the map value.
.0;
Ok(BonsaiGitMappingEntry::new(*git_sha1, replacement_bcs_id))
})
.collect::<Result<Vec<_>, Error>>()?;
// NOTE: This check shouldn't be necessary as long as pushrebase hooks are bug-free, but
// since they're a new addition, let's be conservative.
if rebased.len()!= self.assignments.len() {
return Err(anyhow!(
"Git mapping rebased set ({}) and assignments ({}) have different lengths!",
rebased.len(),
self.assignments.len(),
));
}
Ok(Box::new(GitMappingTransactionHook {
bonsai_git_mapping: self.bonsai_git_mapping,
entries,
}) as Box<dyn PushrebaseTransactionHook>)
}
}
struct GitMappingTransactionHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
entries: Vec<BonsaiGitMappingEntry>,
}
#[async_trait]
impl PushrebaseTransactionHook for GitMappingTransactionHook {
/// Adds every collected bonsai↔git mapping entry to the bookmark
/// transaction. Any failure from the mapping store aborts the
/// transaction via `BookmarkTransactionError::Other`.
async fn populate_transaction(
    &self,
    ctx: &CoreContext,
    txn: Transaction,
) -> Result<Transaction, BookmarkTransactionError> {
    self.bonsai_git_mapping
        .bulk_add_git_mapping_in_transaction(ctx, &self.entries[..], txn)
        .await
        .map_err(|err| BookmarkTransactionError::Other(err.into()))
}
}
|
{
let git_sha1 = extract_git_sha1_from_bonsai_extra(
bcs_new
.extra
.iter()
.map(|(k, v)| (k.as_str(), v.as_slice())),
)?;
if let Some(git_sha1) = git_sha1 {
self.assignments.insert(bcs_old, git_sha1);
}
Ok(())
}
|
identifier_body
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use anyhow::{anyhow, Error};
use async_trait::async_trait;
use bookmarks::BookmarkTransactionError;
use context::CoreContext;
use mononoke_types::{hash::GitSha1, BonsaiChangesetMut, ChangesetId};
use pushrebase_hook::{
PushrebaseCommitHook, PushrebaseHook, PushrebaseTransactionHook, RebasedChangesets,
};
use sql::Transaction;
use std::{collections::HashMap, sync::Arc};
use bonsai_git_mapping::{
extract_git_sha1_from_bonsai_extra, BonsaiGitMapping, BonsaiGitMappingEntry,
};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct GitMappingPushrebaseHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
}
impl GitMappingPushrebaseHook {
pub fn new(bonsai_git_mapping: Arc<dyn BonsaiGitMapping>) -> Box<dyn PushrebaseHook> {
Box::new(Self { bonsai_git_mapping })
}
}
#[async_trait]
impl PushrebaseHook for GitMappingPushrebaseHook {
async fn
|
(&self) -> Result<Box<dyn PushrebaseCommitHook>, Error> {
let hook = Box::new(GitMappingCommitHook {
bonsai_git_mapping: self.bonsai_git_mapping.clone(),
assignments: HashMap::new(),
}) as Box<dyn PushrebaseCommitHook>;
Ok(hook)
}
}
struct GitMappingCommitHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
assignments: HashMap<ChangesetId, GitSha1>,
}
#[async_trait]
impl PushrebaseCommitHook for GitMappingCommitHook {
fn post_rebase_changeset(
&mut self,
bcs_old: ChangesetId,
bcs_new: &mut BonsaiChangesetMut,
) -> Result<(), Error> {
let git_sha1 = extract_git_sha1_from_bonsai_extra(
bcs_new
.extra
.iter()
.map(|(k, v)| (k.as_str(), v.as_slice())),
)?;
if let Some(git_sha1) = git_sha1 {
self.assignments.insert(bcs_old, git_sha1);
}
Ok(())
}
async fn into_transaction_hook(
self: Box<Self>,
_ctx: &CoreContext,
rebased: &RebasedChangesets,
) -> Result<Box<dyn PushrebaseTransactionHook>, Error> {
// Let's tie assigned git hashes to rebased Bonsai changesets:
let entries = self
.assignments
.iter()
.map(|(cs_id, git_sha1)| {
let replacement_bcs_id = rebased
.get(cs_id)
.ok_or_else(|| {
let e = format!(
"Commit was assigned a git hash, but is not found in rebased set: {}",
cs_id
);
Error::msg(e)
})?
.0;
Ok(BonsaiGitMappingEntry::new(*git_sha1, replacement_bcs_id))
})
.collect::<Result<Vec<_>, Error>>()?;
// NOTE: This check shouldn't be necessary as long as pushrebase hooks are bug-free, but
// since they're a new addition, let's be conservative.
if rebased.len()!= self.assignments.len() {
return Err(anyhow!(
"Git mapping rebased set ({}) and assignments ({}) have different lengths!",
rebased.len(),
self.assignments.len(),
));
}
Ok(Box::new(GitMappingTransactionHook {
bonsai_git_mapping: self.bonsai_git_mapping,
entries,
}) as Box<dyn PushrebaseTransactionHook>)
}
}
struct GitMappingTransactionHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
entries: Vec<BonsaiGitMappingEntry>,
}
#[async_trait]
impl PushrebaseTransactionHook for GitMappingTransactionHook {
async fn populate_transaction(
&self,
ctx: &CoreContext,
txn: Transaction,
) -> Result<Transaction, BookmarkTransactionError> {
let txn = self
.bonsai_git_mapping
.bulk_add_git_mapping_in_transaction(ctx, &self.entries[..], txn)
.await
.map_err(|e| BookmarkTransactionError::Other(e.into()))?;
Ok(txn)
}
}
|
prepushrebase
|
identifier_name
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use anyhow::{anyhow, Error};
use async_trait::async_trait;
use bookmarks::BookmarkTransactionError;
use context::CoreContext;
use mononoke_types::{hash::GitSha1, BonsaiChangesetMut, ChangesetId};
use pushrebase_hook::{
PushrebaseCommitHook, PushrebaseHook, PushrebaseTransactionHook, RebasedChangesets,
};
use sql::Transaction;
use std::{collections::HashMap, sync::Arc};
use bonsai_git_mapping::{
extract_git_sha1_from_bonsai_extra, BonsaiGitMapping, BonsaiGitMappingEntry,
};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct GitMappingPushrebaseHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
}
impl GitMappingPushrebaseHook {
pub fn new(bonsai_git_mapping: Arc<dyn BonsaiGitMapping>) -> Box<dyn PushrebaseHook> {
Box::new(Self { bonsai_git_mapping })
}
}
#[async_trait]
impl PushrebaseHook for GitMappingPushrebaseHook {
async fn prepushrebase(&self) -> Result<Box<dyn PushrebaseCommitHook>, Error> {
let hook = Box::new(GitMappingCommitHook {
bonsai_git_mapping: self.bonsai_git_mapping.clone(),
assignments: HashMap::new(),
}) as Box<dyn PushrebaseCommitHook>;
Ok(hook)
}
}
struct GitMappingCommitHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
assignments: HashMap<ChangesetId, GitSha1>,
}
#[async_trait]
impl PushrebaseCommitHook for GitMappingCommitHook {
fn post_rebase_changeset(
&mut self,
bcs_old: ChangesetId,
bcs_new: &mut BonsaiChangesetMut,
) -> Result<(), Error> {
let git_sha1 = extract_git_sha1_from_bonsai_extra(
bcs_new
.extra
.iter()
.map(|(k, v)| (k.as_str(), v.as_slice())),
)?;
if let Some(git_sha1) = git_sha1 {
self.assignments.insert(bcs_old, git_sha1);
}
Ok(())
}
async fn into_transaction_hook(
self: Box<Self>,
_ctx: &CoreContext,
rebased: &RebasedChangesets,
) -> Result<Box<dyn PushrebaseTransactionHook>, Error> {
// Let's tie assigned git hashes to rebased Bonsai changesets:
let entries = self
.assignments
.iter()
.map(|(cs_id, git_sha1)| {
let replacement_bcs_id = rebased
.get(cs_id)
.ok_or_else(|| {
let e = format!(
"Commit was assigned a git hash, but is not found in rebased set: {}",
cs_id
);
Error::msg(e)
})?
.0;
Ok(BonsaiGitMappingEntry::new(*git_sha1, replacement_bcs_id))
})
.collect::<Result<Vec<_>, Error>>()?;
// NOTE: This check shouldn't be necessary as long as pushrebase hooks are bug-free, but
// since they're a new addition, let's be conservative.
if rebased.len()!= self.assignments.len() {
return Err(anyhow!(
"Git mapping rebased set ({}) and assignments ({}) have different lengths!",
rebased.len(),
self.assignments.len(),
));
}
Ok(Box::new(GitMappingTransactionHook {
bonsai_git_mapping: self.bonsai_git_mapping,
entries,
}) as Box<dyn PushrebaseTransactionHook>)
}
}
struct GitMappingTransactionHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
entries: Vec<BonsaiGitMappingEntry>,
}
#[async_trait]
impl PushrebaseTransactionHook for GitMappingTransactionHook {
async fn populate_transaction(
&self,
ctx: &CoreContext,
txn: Transaction,
) -> Result<Transaction, BookmarkTransactionError> {
let txn = self
.bonsai_git_mapping
.bulk_add_git_mapping_in_transaction(ctx, &self.entries[..], txn)
|
}
}
|
.await
.map_err(|e| BookmarkTransactionError::Other(e.into()))?;
Ok(txn)
|
random_line_split
|
lib.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use anyhow::{anyhow, Error};
use async_trait::async_trait;
use bookmarks::BookmarkTransactionError;
use context::CoreContext;
use mononoke_types::{hash::GitSha1, BonsaiChangesetMut, ChangesetId};
use pushrebase_hook::{
PushrebaseCommitHook, PushrebaseHook, PushrebaseTransactionHook, RebasedChangesets,
};
use sql::Transaction;
use std::{collections::HashMap, sync::Arc};
use bonsai_git_mapping::{
extract_git_sha1_from_bonsai_extra, BonsaiGitMapping, BonsaiGitMappingEntry,
};
#[cfg(test)]
mod test;
#[derive(Clone)]
pub struct GitMappingPushrebaseHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
}
impl GitMappingPushrebaseHook {
pub fn new(bonsai_git_mapping: Arc<dyn BonsaiGitMapping>) -> Box<dyn PushrebaseHook> {
Box::new(Self { bonsai_git_mapping })
}
}
#[async_trait]
impl PushrebaseHook for GitMappingPushrebaseHook {
async fn prepushrebase(&self) -> Result<Box<dyn PushrebaseCommitHook>, Error> {
let hook = Box::new(GitMappingCommitHook {
bonsai_git_mapping: self.bonsai_git_mapping.clone(),
assignments: HashMap::new(),
}) as Box<dyn PushrebaseCommitHook>;
Ok(hook)
}
}
struct GitMappingCommitHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
assignments: HashMap<ChangesetId, GitSha1>,
}
#[async_trait]
impl PushrebaseCommitHook for GitMappingCommitHook {
/// Inspects a freshly rebased changeset's extras for an embedded git SHA-1
/// and, when present, records it keyed by the *pre*-rebase changeset id
/// (`bcs_old`); the key is translated to the rebased id later, in
/// `into_transaction_hook`.
fn post_rebase_changeset(
    &mut self,
    bcs_old: ChangesetId,
    bcs_new: &mut BonsaiChangesetMut,
) -> Result<(), Error> {
    let extras = bcs_new
        .extra
        .iter()
        .map(|(key, value)| (key.as_str(), value.as_slice()));
    if let Some(sha1) = extract_git_sha1_from_bonsai_extra(extras)? {
        self.assignments.insert(bcs_old, sha1);
    }
    Ok(())
}
async fn into_transaction_hook(
self: Box<Self>,
_ctx: &CoreContext,
rebased: &RebasedChangesets,
) -> Result<Box<dyn PushrebaseTransactionHook>, Error> {
// Let's tie assigned git hashes to rebased Bonsai changesets:
let entries = self
.assignments
.iter()
.map(|(cs_id, git_sha1)| {
let replacement_bcs_id = rebased
.get(cs_id)
.ok_or_else(|| {
let e = format!(
"Commit was assigned a git hash, but is not found in rebased set: {}",
cs_id
);
Error::msg(e)
})?
.0;
Ok(BonsaiGitMappingEntry::new(*git_sha1, replacement_bcs_id))
})
.collect::<Result<Vec<_>, Error>>()?;
// NOTE: This check shouldn't be necessary as long as pushrebase hooks are bug-free, but
// since they're a new addition, let's be conservative.
if rebased.len()!= self.assignments.len()
|
Ok(Box::new(GitMappingTransactionHook {
bonsai_git_mapping: self.bonsai_git_mapping,
entries,
}) as Box<dyn PushrebaseTransactionHook>)
}
}
struct GitMappingTransactionHook {
bonsai_git_mapping: Arc<dyn BonsaiGitMapping>,
entries: Vec<BonsaiGitMappingEntry>,
}
#[async_trait]
impl PushrebaseTransactionHook for GitMappingTransactionHook {
async fn populate_transaction(
&self,
ctx: &CoreContext,
txn: Transaction,
) -> Result<Transaction, BookmarkTransactionError> {
let txn = self
.bonsai_git_mapping
.bulk_add_git_mapping_in_transaction(ctx, &self.entries[..], txn)
.await
.map_err(|e| BookmarkTransactionError::Other(e.into()))?;
Ok(txn)
}
}
|
{
return Err(anyhow!(
"Git mapping rebased set ({}) and assignments ({}) have different lengths!",
rebased.len(),
self.assignments.len(),
));
}
|
conditional_block
|
test_wait.rs
|
use nix::unistd::*;
use nix::unistd::ForkResult::*;
use nix::sys::signal::*;
use nix::sys::wait::*;
use libc::exit;
#[test]
fn test_wait_signal()
|
#[test]
// Child exits with status 12; the parent's waitpid must observe
// WaitStatus::Exited(child, 12). Uses old `nix` API where fork() returns
// Result<ForkResult, _> with Child/Parent variants imported via glob.
fn test_wait_exit() {
match fork() {
// In the child: terminate immediately with a known exit code.
Ok(Child) => unsafe { exit(12); },
Ok(Parent { child }) => {
assert_eq!(waitpid(child, None), Ok(WaitStatus::Exited(child, 12)));
},
// panic, fork should never fail unless there is a serious problem with the OS
Err(_) => panic!("Error: Fork Failed")
}
}
|
{
match fork() {
Ok(Child) => pause().unwrap_or(()),
Ok(Parent { child }) => {
kill(child, SIGKILL).ok().expect("Error: Kill Failed");
assert_eq!(waitpid(child, None), Ok(WaitStatus::Signaled(child, SIGKILL, false)));
},
// panic, fork should never fail unless there is a serious problem with the OS
Err(_) => panic!("Error: Fork Failed")
}
}
|
identifier_body
|
test_wait.rs
|
use nix::unistd::*;
use nix::unistd::ForkResult::*;
use nix::sys::signal::*;
use nix::sys::wait::*;
use libc::exit;
#[test]
fn
|
() {
match fork() {
Ok(Child) => pause().unwrap_or(()),
Ok(Parent { child }) => {
kill(child, SIGKILL).ok().expect("Error: Kill Failed");
assert_eq!(waitpid(child, None), Ok(WaitStatus::Signaled(child, SIGKILL, false)));
},
// panic, fork should never fail unless there is a serious problem with the OS
Err(_) => panic!("Error: Fork Failed")
}
}
#[test]
fn test_wait_exit() {
match fork() {
Ok(Child) => unsafe { exit(12); },
Ok(Parent { child }) => {
assert_eq!(waitpid(child, None), Ok(WaitStatus::Exited(child, 12)));
},
// panic, fork should never fail unless there is a serious problem with the OS
Err(_) => panic!("Error: Fork Failed")
}
}
|
test_wait_signal
|
identifier_name
|
test_wait.rs
|
use nix::unistd::*;
use nix::unistd::ForkResult::*;
use nix::sys::signal::*;
use nix::sys::wait::*;
use libc::exit;
#[test]
// Child blocks in pause(); the parent SIGKILLs it and waitpid must report
// WaitStatus::Signaled(child, SIGKILL, false) — the `false` is the
// core-dumped flag.
fn test_wait_signal() {
match fork() {
// In the child: sleep until a signal arrives (pause's Err is ignored).
Ok(Child) => pause().unwrap_or(()),
Ok(Parent { child }) => {
kill(child, SIGKILL).ok().expect("Error: Kill Failed");
assert_eq!(waitpid(child, None), Ok(WaitStatus::Signaled(child, SIGKILL, false)));
},
// panic, fork should never fail unless there is a serious problem with the OS
Err(_) => panic!("Error: Fork Failed")
}
}
#[test]
fn test_wait_exit() {
match fork() {
|
assert_eq!(waitpid(child, None), Ok(WaitStatus::Exited(child, 12)));
},
// panic, fork should never fail unless there is a serious problem with the OS
Err(_) => panic!("Error: Fork Failed")
}
}
|
Ok(Child) => unsafe { exit(12); },
Ok(Parent { child }) => {
|
random_line_split
|
baps3-load.rs
|
#![feature(plugin)]
extern crate baps3_protocol;
#[macro_use] extern crate baps3_cli;
extern crate "rustc-serialize" as rustc_serialize;
extern crate docopt;
#[plugin] #[no_link] extern crate docopt_macros;
use std::borrow::ToOwned;
use std::os;
use std::path;
use baps3_cli::{ Baps3, Baps3Error, Baps3Result, verbose_logger };
use baps3_protocol::proto::Message;
docopt!(Args, "
Loads a file into a BAPS3 server.
Usage:
baps3-load -h
baps3-load [-pv] [-t <target>] <file>
Options:
-h, --help Show this message.
-p, --play If set, play the file upon loading.
-v, --verbose Prints a trail of miscellaneous information
about the action.
-t, --target <target> The target BAPS3 server (host:port).
[Default: localhost:1350]
");
fn load(Args { arg_file,
flag_play,
flag_target,
flag_verbose,.. }: Args) -> Baps3Result<()> {
let ap = try!(to_absolute_path_str(&*arg_file));
let log = |&:s:&str| verbose_logger(flag_verbose, s);
let mut baps3 = try!(Baps3::new(log, &*flag_target,
&*(if flag_play { vec!["FileLoad", "PlayStop"] }
else
|
)));
try!(baps3.send(&Message::new("load").arg(&*ap)));
if flag_play {
try!(baps3.send(&Message::new("play")));
}
Ok(())
}
/// Converts a potentially-relative path string to an absolute path string.
fn to_absolute_path_str(rel: &str) -> Baps3Result<String> {
// This is a convoluted, entangled mess of Results and Options.
// I sincerely apologise.
let badpath = |&:| Baps3Error::InvalidPath { path: rel.to_owned() };
path::Path::new_opt(rel)
.ok_or(badpath())
.and_then(|&:p| os::make_absolute(&p).map_err(|_| badpath()))
.and_then(|&:ap| ap.as_str().map(|&:s| s.to_string()).ok_or(badpath()))
}
fn main() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
load(args).unwrap_or_else(|&:e| werr!("error: {}", e));
}
|
{ vec!["FileLoad"] }
|
conditional_block
|
baps3-load.rs
|
#![feature(plugin)]
extern crate baps3_protocol;
#[macro_use] extern crate baps3_cli;
extern crate "rustc-serialize" as rustc_serialize;
extern crate docopt;
#[plugin] #[no_link] extern crate docopt_macros;
use std::borrow::ToOwned;
use std::os;
use std::path;
use baps3_cli::{ Baps3, Baps3Error, Baps3Result, verbose_logger };
|
use baps3_protocol::proto::Message;
docopt!(Args, "
Loads a file into a BAPS3 server.
Usage:
baps3-load -h
baps3-load [-pv] [-t <target>] <file>
Options:
-h, --help Show this message.
-p, --play If set, play the file upon loading.
-v, --verbose Prints a trail of miscellaneous information
about the action.
-t, --target <target> The target BAPS3 server (host:port).
[Default: localhost:1350]
");
/// Sends a `load <absolute-path>` command to the BAPS3 server named in
/// `--target`, followed by a `play` command when `--play` was given.
/// The required server features are checked at connect time: FileLoad
/// always, plus PlayStop when playback is requested.
/// NOTE(review): pre-1.0 Rust (`try!`, `|&:..|` closure syntax); kept as-is.
fn load(Args { arg_file,
flag_play,
flag_target,
flag_verbose,.. }: Args) -> Baps3Result<()> {
let ap = try!(to_absolute_path_str(&*arg_file));
// Logger closure: only prints when --verbose is set.
let log = |&:s:&str| verbose_logger(flag_verbose, s);
let mut baps3 = try!(Baps3::new(log, &*flag_target,
&*(if flag_play { vec!["FileLoad", "PlayStop"] }
else { vec!["FileLoad"] })));
try!(baps3.send(&Message::new("load").arg(&*ap)));
if flag_play {
try!(baps3.send(&Message::new("play")));
}
Ok(())
}
/// Converts a potentially-relative path string to an absolute path string.
///
/// Any failure along the way — unparsable path, absolutization error, or a
/// path that is not valid UTF-8 — maps to `Baps3Error::InvalidPath` carrying
/// the original input.
fn to_absolute_path_str(rel: &str) -> Baps3Result<String> {
// This is a convoluted, entangled mess of Results and Options.
// I sincerely apologise.
let badpath = |&:| Baps3Error::InvalidPath { path: rel.to_owned() };
path::Path::new_opt(rel)
.ok_or(badpath())
.and_then(|&:p| os::make_absolute(&p).map_err(|_| badpath()))
.and_then(|&:ap| ap.as_str().map(|&:s| s.to_string()).ok_or(badpath()))
}
// Entry point: decode docopt-parsed CLI arguments (exiting on usage errors),
// run the load, and print any error to stderr via werr!.
fn main() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
load(args).unwrap_or_else(|&:e| werr!("error: {}", e));
}
|
random_line_split
|
|
baps3-load.rs
|
#![feature(plugin)]
extern crate baps3_protocol;
#[macro_use] extern crate baps3_cli;
extern crate "rustc-serialize" as rustc_serialize;
extern crate docopt;
#[plugin] #[no_link] extern crate docopt_macros;
use std::borrow::ToOwned;
use std::os;
use std::path;
use baps3_cli::{ Baps3, Baps3Error, Baps3Result, verbose_logger };
use baps3_protocol::proto::Message;
docopt!(Args, "
Loads a file into a BAPS3 server.
Usage:
baps3-load -h
baps3-load [-pv] [-t <target>] <file>
Options:
-h, --help Show this message.
-p, --play If set, play the file upon loading.
-v, --verbose Prints a trail of miscellaneous information
about the action.
-t, --target <target> The target BAPS3 server (host:port).
[Default: localhost:1350]
");
fn load(Args { arg_file,
flag_play,
flag_target,
flag_verbose,.. }: Args) -> Baps3Result<()> {
let ap = try!(to_absolute_path_str(&*arg_file));
let log = |&:s:&str| verbose_logger(flag_verbose, s);
let mut baps3 = try!(Baps3::new(log, &*flag_target,
&*(if flag_play { vec!["FileLoad", "PlayStop"] }
else { vec!["FileLoad"] })));
try!(baps3.send(&Message::new("load").arg(&*ap)));
if flag_play {
try!(baps3.send(&Message::new("play")));
}
Ok(())
}
/// Converts a potentially-relative path string to an absolute path string.
fn to_absolute_path_str(rel: &str) -> Baps3Result<String> {
// This is a convoluted, entangled mess of Results and Options.
// I sincerely apologise.
let badpath = |&:| Baps3Error::InvalidPath { path: rel.to_owned() };
path::Path::new_opt(rel)
.ok_or(badpath())
.and_then(|&:p| os::make_absolute(&p).map_err(|_| badpath()))
.and_then(|&:ap| ap.as_str().map(|&:s| s.to_string()).ok_or(badpath()))
}
fn
|
() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
load(args).unwrap_or_else(|&:e| werr!("error: {}", e));
}
|
main
|
identifier_name
|
baps3-load.rs
|
#![feature(plugin)]
extern crate baps3_protocol;
#[macro_use] extern crate baps3_cli;
extern crate "rustc-serialize" as rustc_serialize;
extern crate docopt;
#[plugin] #[no_link] extern crate docopt_macros;
use std::borrow::ToOwned;
use std::os;
use std::path;
use baps3_cli::{ Baps3, Baps3Error, Baps3Result, verbose_logger };
use baps3_protocol::proto::Message;
docopt!(Args, "
Loads a file into a BAPS3 server.
Usage:
baps3-load -h
baps3-load [-pv] [-t <target>] <file>
Options:
-h, --help Show this message.
-p, --play If set, play the file upon loading.
-v, --verbose Prints a trail of miscellaneous information
about the action.
-t, --target <target> The target BAPS3 server (host:port).
[Default: localhost:1350]
");
fn load(Args { arg_file,
flag_play,
flag_target,
flag_verbose,.. }: Args) -> Baps3Result<()>
|
/// Converts a potentially-relative path string to an absolute path string.
fn to_absolute_path_str(rel: &str) -> Baps3Result<String> {
// This is a convoluted, entangled mess of Results and Options.
// I sincerely apologise.
let badpath = |&:| Baps3Error::InvalidPath { path: rel.to_owned() };
path::Path::new_opt(rel)
.ok_or(badpath())
.and_then(|&:p| os::make_absolute(&p).map_err(|_| badpath()))
.and_then(|&:ap| ap.as_str().map(|&:s| s.to_string()).ok_or(badpath()))
}
fn main() {
let args: Args = Args::docopt().decode().unwrap_or_else(|e| e.exit());
load(args).unwrap_or_else(|&:e| werr!("error: {}", e));
}
|
{
let ap = try!(to_absolute_path_str(&*arg_file));
let log = |&:s:&str| verbose_logger(flag_verbose, s);
let mut baps3 = try!(Baps3::new(log, &*flag_target,
&*(if flag_play { vec!["FileLoad", "PlayStop"] }
else { vec!["FileLoad"] })));
try!(baps3.send(&Message::new("load").arg(&*ap)));
if flag_play {
try!(baps3.send(&Message::new("play")));
}
Ok(())
}
|
identifier_body
|
proc-macro-crate-in-paths.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
// force-host
// no-prefer-dynamic
#![crate_type = "proc-macro"]
#![deny(rust_2018_compatibility)]
#![feature(rust_2018_preview)]
extern crate proc_macro;
use proc_macro::TokenStream;
#[proc_macro_derive(Template, attributes(template))]
pub fn
|
(input: TokenStream) -> TokenStream {
input
}
|
derive_template
|
identifier_name
|
proc-macro-crate-in-paths.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-pass
// force-host
// no-prefer-dynamic
#![crate_type = "proc-macro"]
#![deny(rust_2018_compatibility)]
#![feature(rust_2018_preview)]
extern crate proc_macro;
use proc_macro::TokenStream;
#[proc_macro_derive(Template, attributes(template))]
pub fn derive_template(input: TokenStream) -> TokenStream
|
{
input
}
|
identifier_body
|
|
proc-macro-crate-in-paths.rs
|
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
#![crate_type = "proc-macro"]
#![deny(rust_2018_compatibility)]
#![feature(rust_2018_preview)]
extern crate proc_macro;
use proc_macro::TokenStream;
#[proc_macro_derive(Template, attributes(template))]
// Minimal derive used by a compile-pass test: echoes its input token stream
// back unchanged. Registers the `template` helper attribute but ignores it.
pub fn derive_template(input: TokenStream) -> TokenStream {
input
}
|
// compile-pass
// force-host
// no-prefer-dynamic
|
random_line_split
|
unique-send-2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::comm::*;
fn child(c: &SharedChan<~uint>, i: uint)
|
pub fn main() {
let (p, ch) = stream();
let ch = SharedChan::new(ch);
let n = 100u;
let mut expected = 0u;
for uint::range(0u, n) |i| {
let ch = ch.clone();
task::spawn(|| child(&ch, i) );
expected += i;
}
let mut actual = 0u;
for uint::range(0u, n) |_i| {
let j = p.recv();
actual += *j;
}
assert!(expected == actual);
}
|
{
c.send(~i);
}
|
identifier_body
|
unique-send-2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::comm::*;
fn child(c: &SharedChan<~uint>, i: uint) {
c.send(~i);
}
pub fn main() {
let (p, ch) = stream();
let ch = SharedChan::new(ch);
let n = 100u;
let mut expected = 0u;
for uint::range(0u, n) |i| {
let ch = ch.clone();
task::spawn(|| child(&ch, i) );
expected += i;
}
let mut actual = 0u;
for uint::range(0u, n) |_i| {
let j = p.recv();
actual += *j;
|
assert!(expected == actual);
}
|
}
|
random_line_split
|
unique-send-2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::comm::*;
fn
|
(c: &SharedChan<~uint>, i: uint) {
c.send(~i);
}
pub fn main() {
let (p, ch) = stream();
let ch = SharedChan::new(ch);
let n = 100u;
let mut expected = 0u;
for uint::range(0u, n) |i| {
let ch = ch.clone();
task::spawn(|| child(&ch, i) );
expected += i;
}
let mut actual = 0u;
for uint::range(0u, n) |_i| {
let j = p.recv();
actual += *j;
}
assert!(expected == actual);
}
|
child
|
identifier_name
|
mod.rs
|
//
// SOS: the Stupid Operating System
// by Eliza Weisman ([email protected])
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! `x86_64` architecture-specific implementation.
// pub mod cpu;
pub mod drivers;
pub mod interrupts;
#[path = "../x86_all/bda.rs"] pub mod bda;
#[path = "../x86_all/multiboot2.rs"] pub mod multiboot2;
pub const ARCH_BITS: u8 = 64;
extern {
// TODO: It would be really nice if there was a less ugly way of doing
// this... (read: after the Revolution when we add memory regions to the
// heap programmatically.)
#[link_name = "heap_base_addr"]
#[linkage = "external"]
pub static HEAP_BASE: *mut u8;
#[link_name = "heap_top_addr"]
#[linkage = "external"]
pub static HEAP_TOP: *mut u8;
// Of course, we will still need to export the kernel stack addresses like
// this, but it would be nice if they could be, i dont know, not mut u8s
// pointers, like God intended.
#[link_name = "stack_base"]
pub static STACK_BASE: *mut u8;
#[link_name = "stack_top"]
pub static STACK_TOP: *mut u8;
}
use memory::PAddr;
/// Trampoline to ensure we have a correct stack frame for calling [`arch_init`]
///
/// I have no idea why this works, but it does.
///
/// [`arch_init`]: fn.arch_init
#[naked]
#[no_mangle]
pub unsafe extern "C" fn
|
() {
asm!("movabsq $$(stack_top), %rsp");
asm!("mov ax, 0
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
call arch_init"
:::: "intel");
}
/// Entry point for architecture-specific kernel init
///
/// This expects to be passed the address of a valid
/// Multiboot 2 info struct. It's the bootloader's responsibility to ensure
/// that this is passed in the correct register as expected by the calling
/// convention (`edi` on x86). If this isn't there, you can expect to have a
/// bad problem and not go to space today.
#[no_mangle]
pub extern "C" fn arch_init(multiboot_addr: PAddr) {
use cpu::{control_regs, msr};
use elf;
use params::{InitParams, mem};
kinfoln!(dots: ". ", "Beginning `arch_init()` for x86_64");
::io::term::CONSOLE.lock().clear();
::logger::initialize()
.expect("Could not initialize logger!");
// -- Unpack multiboot tag ------------------------------------------------
kinfoln!( dots: ". "
, "trying to unpack multiboot info at {:#p}"
, multiboot_addr);
// try to interpret the structure at the multiboot address as a multiboot
// info struct. if it's invalid, fail.
let boot_info
= unsafe { multiboot2::Info::from(multiboot_addr)
.expect("Could not unpack multiboot2 information!") };
// Extract ELF sections tag from the multiboot info
let elf_sections_tag
= boot_info.elf_sections()
.expect("ELF sections tag required!");
kinfoln!(dots: ". ", "Detecting kernel ELF sections:");
// Extract kernel ELF sections from multiboot info
let mut n_elf_sections = 0;
let kernel_begin
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| {
kinfoln!( dots: ".. ", "{}", s );
kinfoln!( dots: "... ", "flags: [ {:?} ]", s.flags());
s.address() })
.min()
.expect("Could not find kernel start section!\
\nSomething is deeply wrong.");
let kernel_end
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| { n_elf_sections += 1; s.end_address() })
.max()
.expect("Could not find kernel end section!\
\nSomething is deeply wrong.");
kinfoln!( dots: ". ", "Detected {} kernel ELF sections.", n_elf_sections);
kinfoln!( dots: ".. ", "Kernel begins at {:#p} and ends at {:#p}."
, kernel_begin, kernel_end );
let multiboot_end = multiboot_addr + boot_info.length as u64;
kinfoln!( dots: ".. ", "Multiboot info begins at {:#x} and ends at {:#x}."
, multiboot_addr, multiboot_end);
let mut params = InitParams { kernel_base: kernel_begin
, kernel_top: kernel_end
, multiboot_start: Some(multiboot_addr)
, multiboot_end: Some(multiboot_end)
, heap_base: unsafe { PAddr::from(HEAP_BASE) }
, heap_top: unsafe { PAddr::from(HEAP_TOP) }
, stack_base: unsafe { PAddr::from(STACK_BASE) }
, stack_top: unsafe { PAddr::from(STACK_TOP) }
, elf_sections: Some(elf_sections_tag.sections())
,..Default::default()
};
// Extract the memory map tag from the multiboot info
let mem_map = boot_info.mem_map()
.expect("Memory map tag required!");
kinfoln!(dots: ". ", "Detected memory areas:");
for area in mem_map {
kinfoln!( dots: ".. ", "{}", area);
let a: mem::Area = area.into();
if a.is_usable == true { params.mem_map.push(a); }
}
//-- enable flags needed for paging ------------------------------------
unsafe {
// control_regs::cr0::enable_write_protect(true);
// kinfoln!(dots: ". ", "Page write protect ENABED" );
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
msr::write(msr::IA32_EFER, efer | (1 << 11));
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
kinfoln!(dots: ". ", "Page no execute bit ENABLED");
}
kinfoln!(dots: ". ", "Transferring to `kernel_init()`.");
::kernel_init(¶ms);
}
|
long_mode_init
|
identifier_name
|
mod.rs
|
//
|
// SOS: the Stupid Operating System
// by Eliza Weisman ([email protected])
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! `x86_64` architecture-specific implementation.
// pub mod cpu;
pub mod drivers;
pub mod interrupts;
#[path = "../x86_all/bda.rs"] pub mod bda;
#[path = "../x86_all/multiboot2.rs"] pub mod multiboot2;
pub const ARCH_BITS: u8 = 64;
extern {
// TODO: It would be really nice if there was a less ugly way of doing
// this... (read: after the Revolution when we add memory regions to the
// heap programmatically.)
#[link_name = "heap_base_addr"]
#[linkage = "external"]
pub static HEAP_BASE: *mut u8;
#[link_name = "heap_top_addr"]
#[linkage = "external"]
pub static HEAP_TOP: *mut u8;
// Of course, we will still need to export the kernel stack addresses like
// this, but it would be nice if they could be, i dont know, not mut u8s
// pointers, like God intended.
#[link_name = "stack_base"]
pub static STACK_BASE: *mut u8;
#[link_name = "stack_top"]
pub static STACK_TOP: *mut u8;
}
use memory::PAddr;
/// Trampoline to ensure we have a correct stack frame for calling [`arch_init`]
///
/// I have no idea why this works, but it does.
///
/// [`arch_init`]: fn.arch_init
#[naked]
#[no_mangle]
pub unsafe extern "C" fn long_mode_init() {
asm!("movabsq $$(stack_top), %rsp");
asm!("mov ax, 0
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
call arch_init"
:::: "intel");
}
/// Entry point for architecture-specific kernel init
///
/// This expects to be passed the address of a valid
/// Multiboot 2 info struct. It's the bootloader's responsibility to ensure
/// that this is passed in the correct register as expected by the calling
/// convention (`edi` on x86). If this isn't there, you can expect to have a
/// bad problem and not go to space today.
#[no_mangle]
pub extern "C" fn arch_init(multiboot_addr: PAddr) {
use cpu::{control_regs, msr};
use elf;
use params::{InitParams, mem};
kinfoln!(dots: ". ", "Beginning `arch_init()` for x86_64");
::io::term::CONSOLE.lock().clear();
::logger::initialize()
.expect("Could not initialize logger!");
// -- Unpack multiboot tag ------------------------------------------------
kinfoln!( dots: ". "
, "trying to unpack multiboot info at {:#p}"
, multiboot_addr);
// try to interpret the structure at the multiboot address as a multiboot
// info struct. if it's invalid, fail.
let boot_info
= unsafe { multiboot2::Info::from(multiboot_addr)
.expect("Could not unpack multiboot2 information!") };
// Extract ELF sections tag from the multiboot info
let elf_sections_tag
= boot_info.elf_sections()
.expect("ELF sections tag required!");
kinfoln!(dots: ". ", "Detecting kernel ELF sections:");
// Extract kernel ELF sections from multiboot info
let mut n_elf_sections = 0;
let kernel_begin
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| {
kinfoln!( dots: ".. ", "{}", s );
kinfoln!( dots: "... ", "flags: [ {:?} ]", s.flags());
s.address() })
.min()
.expect("Could not find kernel start section!\
\nSomething is deeply wrong.");
let kernel_end
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| { n_elf_sections += 1; s.end_address() })
.max()
.expect("Could not find kernel end section!\
\nSomething is deeply wrong.");
kinfoln!( dots: ". ", "Detected {} kernel ELF sections.", n_elf_sections);
kinfoln!( dots: ".. ", "Kernel begins at {:#p} and ends at {:#p}."
, kernel_begin, kernel_end );
let multiboot_end = multiboot_addr + boot_info.length as u64;
kinfoln!( dots: ".. ", "Multiboot info begins at {:#x} and ends at {:#x}."
, multiboot_addr, multiboot_end);
let mut params = InitParams { kernel_base: kernel_begin
, kernel_top: kernel_end
, multiboot_start: Some(multiboot_addr)
, multiboot_end: Some(multiboot_end)
, heap_base: unsafe { PAddr::from(HEAP_BASE) }
, heap_top: unsafe { PAddr::from(HEAP_TOP) }
, stack_base: unsafe { PAddr::from(STACK_BASE) }
, stack_top: unsafe { PAddr::from(STACK_TOP) }
, elf_sections: Some(elf_sections_tag.sections())
,..Default::default()
};
// Extract the memory map tag from the multiboot info
let mem_map = boot_info.mem_map()
.expect("Memory map tag required!");
kinfoln!(dots: ". ", "Detected memory areas:");
for area in mem_map {
kinfoln!( dots: ".. ", "{}", area);
let a: mem::Area = area.into();
if a.is_usable == true { params.mem_map.push(a); }
}
//-- enable flags needed for paging ------------------------------------
unsafe {
// control_regs::cr0::enable_write_protect(true);
// kinfoln!(dots: ". ", "Page write protect ENABED" );
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
msr::write(msr::IA32_EFER, efer | (1 << 11));
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
kinfoln!(dots: ". ", "Page no execute bit ENABLED");
}
kinfoln!(dots: ". ", "Transferring to `kernel_init()`.");
::kernel_init(¶ms);
}
|
random_line_split
|
|
mod.rs
|
//
// SOS: the Stupid Operating System
// by Eliza Weisman ([email protected])
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! `x86_64` architecture-specific implementation.
// pub mod cpu;
pub mod drivers;
pub mod interrupts;
#[path = "../x86_all/bda.rs"] pub mod bda;
#[path = "../x86_all/multiboot2.rs"] pub mod multiboot2;
pub const ARCH_BITS: u8 = 64;
extern {
// TODO: It would be really nice if there was a less ugly way of doing
// this... (read: after the Revolution when we add memory regions to the
// heap programmatically.)
#[link_name = "heap_base_addr"]
#[linkage = "external"]
pub static HEAP_BASE: *mut u8;
#[link_name = "heap_top_addr"]
#[linkage = "external"]
pub static HEAP_TOP: *mut u8;
// Of course, we will still need to export the kernel stack addresses like
// this, but it would be nice if they could be, i dont know, not mut u8s
// pointers, like God intended.
#[link_name = "stack_base"]
pub static STACK_BASE: *mut u8;
#[link_name = "stack_top"]
pub static STACK_TOP: *mut u8;
}
use memory::PAddr;
/// Trampoline to ensure we have a correct stack frame for calling [`arch_init`]
///
/// I have no idea why this works, but it does.
///
/// [`arch_init`]: fn.arch_init
#[naked]
#[no_mangle]
pub unsafe extern "C" fn long_mode_init() {
asm!("movabsq $$(stack_top), %rsp");
asm!("mov ax, 0
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
call arch_init"
:::: "intel");
}
/// Entry point for architecture-specific kernel init
///
/// This expects to be passed the address of a valid
/// Multiboot 2 info struct. It's the bootloader's responsibility to ensure
/// that this is passed in the correct register as expected by the calling
/// convention (`edi` on x86). If this isn't there, you can expect to have a
/// bad problem and not go to space today.
#[no_mangle]
pub extern "C" fn arch_init(multiboot_addr: PAddr) {
use cpu::{control_regs, msr};
use elf;
use params::{InitParams, mem};
kinfoln!(dots: ". ", "Beginning `arch_init()` for x86_64");
::io::term::CONSOLE.lock().clear();
::logger::initialize()
.expect("Could not initialize logger!");
// -- Unpack multiboot tag ------------------------------------------------
kinfoln!( dots: ". "
, "trying to unpack multiboot info at {:#p}"
, multiboot_addr);
// try to interpret the structure at the multiboot address as a multiboot
// info struct. if it's invalid, fail.
let boot_info
= unsafe { multiboot2::Info::from(multiboot_addr)
.expect("Could not unpack multiboot2 information!") };
// Extract ELF sections tag from the multiboot info
let elf_sections_tag
= boot_info.elf_sections()
.expect("ELF sections tag required!");
kinfoln!(dots: ". ", "Detecting kernel ELF sections:");
// Extract kernel ELF sections from multiboot info
let mut n_elf_sections = 0;
let kernel_begin
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| {
kinfoln!( dots: ".. ", "{}", s );
kinfoln!( dots: "... ", "flags: [ {:?} ]", s.flags());
s.address() })
.min()
.expect("Could not find kernel start section!\
\nSomething is deeply wrong.");
let kernel_end
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| { n_elf_sections += 1; s.end_address() })
.max()
.expect("Could not find kernel end section!\
\nSomething is deeply wrong.");
kinfoln!( dots: ". ", "Detected {} kernel ELF sections.", n_elf_sections);
kinfoln!( dots: ".. ", "Kernel begins at {:#p} and ends at {:#p}."
, kernel_begin, kernel_end );
let multiboot_end = multiboot_addr + boot_info.length as u64;
kinfoln!( dots: ".. ", "Multiboot info begins at {:#x} and ends at {:#x}."
, multiboot_addr, multiboot_end);
let mut params = InitParams { kernel_base: kernel_begin
, kernel_top: kernel_end
, multiboot_start: Some(multiboot_addr)
, multiboot_end: Some(multiboot_end)
, heap_base: unsafe { PAddr::from(HEAP_BASE) }
, heap_top: unsafe { PAddr::from(HEAP_TOP) }
, stack_base: unsafe { PAddr::from(STACK_BASE) }
, stack_top: unsafe { PAddr::from(STACK_TOP) }
, elf_sections: Some(elf_sections_tag.sections())
,..Default::default()
};
// Extract the memory map tag from the multiboot info
let mem_map = boot_info.mem_map()
.expect("Memory map tag required!");
kinfoln!(dots: ". ", "Detected memory areas:");
for area in mem_map {
kinfoln!( dots: ".. ", "{}", area);
let a: mem::Area = area.into();
if a.is_usable == true
|
}
//-- enable flags needed for paging ------------------------------------
unsafe {
// control_regs::cr0::enable_write_protect(true);
// kinfoln!(dots: ". ", "Page write protect ENABED" );
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
msr::write(msr::IA32_EFER, efer | (1 << 11));
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
kinfoln!(dots: ". ", "Page no execute bit ENABLED");
}
kinfoln!(dots: ". ", "Transferring to `kernel_init()`.");
::kernel_init(¶ms);
}
|
{ params.mem_map.push(a); }
|
conditional_block
|
mod.rs
|
//
// SOS: the Stupid Operating System
// by Eliza Weisman ([email protected])
//
// Copyright (c) 2015-2017 Eliza Weisman
// Released under the terms of the MIT license. See `LICENSE` in the root
// directory of this repository for more information.
//
//! `x86_64` architecture-specific implementation.
// pub mod cpu;
pub mod drivers;
pub mod interrupts;
#[path = "../x86_all/bda.rs"] pub mod bda;
#[path = "../x86_all/multiboot2.rs"] pub mod multiboot2;
pub const ARCH_BITS: u8 = 64;
extern {
// TODO: It would be really nice if there was a less ugly way of doing
// this... (read: after the Revolution when we add memory regions to the
// heap programmatically.)
#[link_name = "heap_base_addr"]
#[linkage = "external"]
pub static HEAP_BASE: *mut u8;
#[link_name = "heap_top_addr"]
#[linkage = "external"]
pub static HEAP_TOP: *mut u8;
// Of course, we will still need to export the kernel stack addresses like
// this, but it would be nice if they could be, i dont know, not mut u8s
// pointers, like God intended.
#[link_name = "stack_base"]
pub static STACK_BASE: *mut u8;
#[link_name = "stack_top"]
pub static STACK_TOP: *mut u8;
}
use memory::PAddr;
/// Trampoline to ensure we have a correct stack frame for calling [`arch_init`]
///
/// I have no idea why this works, but it does.
///
/// [`arch_init`]: fn.arch_init
#[naked]
#[no_mangle]
pub unsafe extern "C" fn long_mode_init()
|
/// Entry point for architecture-specific kernel init
///
/// This expects to be passed the address of a valid
/// Multiboot 2 info struct. It's the bootloader's responsibility to ensure
/// that this is passed in the correct register as expected by the calling
/// convention (`edi` on x86). If this isn't there, you can expect to have a
/// bad problem and not go to space today.
#[no_mangle]
pub extern "C" fn arch_init(multiboot_addr: PAddr) {
use cpu::{control_regs, msr};
use elf;
use params::{InitParams, mem};
kinfoln!(dots: ". ", "Beginning `arch_init()` for x86_64");
::io::term::CONSOLE.lock().clear();
::logger::initialize()
.expect("Could not initialize logger!");
// -- Unpack multiboot tag ------------------------------------------------
kinfoln!( dots: ". "
, "trying to unpack multiboot info at {:#p}"
, multiboot_addr);
// try to interpret the structure at the multiboot address as a multiboot
// info struct. if it's invalid, fail.
let boot_info
= unsafe { multiboot2::Info::from(multiboot_addr)
.expect("Could not unpack multiboot2 information!") };
// Extract ELF sections tag from the multiboot info
let elf_sections_tag
= boot_info.elf_sections()
.expect("ELF sections tag required!");
kinfoln!(dots: ". ", "Detecting kernel ELF sections:");
// Extract kernel ELF sections from multiboot info
let mut n_elf_sections = 0;
let kernel_begin
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| {
kinfoln!( dots: ".. ", "{}", s );
kinfoln!( dots: "... ", "flags: [ {:?} ]", s.flags());
s.address() })
.min()
.expect("Could not find kernel start section!\
\nSomething is deeply wrong.");
let kernel_end
= elf_sections_tag.sections()
//.filter(|s| s.is_allocated())
.map(|s| { n_elf_sections += 1; s.end_address() })
.max()
.expect("Could not find kernel end section!\
\nSomething is deeply wrong.");
kinfoln!( dots: ". ", "Detected {} kernel ELF sections.", n_elf_sections);
kinfoln!( dots: ".. ", "Kernel begins at {:#p} and ends at {:#p}."
, kernel_begin, kernel_end );
let multiboot_end = multiboot_addr + boot_info.length as u64;
kinfoln!( dots: ".. ", "Multiboot info begins at {:#x} and ends at {:#x}."
, multiboot_addr, multiboot_end);
let mut params = InitParams { kernel_base: kernel_begin
, kernel_top: kernel_end
, multiboot_start: Some(multiboot_addr)
, multiboot_end: Some(multiboot_end)
, heap_base: unsafe { PAddr::from(HEAP_BASE) }
, heap_top: unsafe { PAddr::from(HEAP_TOP) }
, stack_base: unsafe { PAddr::from(STACK_BASE) }
, stack_top: unsafe { PAddr::from(STACK_TOP) }
, elf_sections: Some(elf_sections_tag.sections())
,..Default::default()
};
// Extract the memory map tag from the multiboot info
let mem_map = boot_info.mem_map()
.expect("Memory map tag required!");
kinfoln!(dots: ". ", "Detected memory areas:");
for area in mem_map {
kinfoln!( dots: ".. ", "{}", area);
let a: mem::Area = area.into();
if a.is_usable == true { params.mem_map.push(a); }
}
//-- enable flags needed for paging ------------------------------------
unsafe {
// control_regs::cr0::enable_write_protect(true);
// kinfoln!(dots: ". ", "Page write protect ENABED" );
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
msr::write(msr::IA32_EFER, efer | (1 << 11));
let efer = msr::read(msr::IA32_EFER);
trace!("EFER = {:#x}", efer);
kinfoln!(dots: ". ", "Page no execute bit ENABLED");
}
kinfoln!(dots: ". ", "Transferring to `kernel_init()`.");
::kernel_init(¶ms);
}
|
{
asm!("movabsq $$(stack_top), %rsp");
asm!("mov ax, 0
mov ss, ax
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
call arch_init"
:::: "intel");
}
|
identifier_body
|
imports.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused)]
// Like other items, private imports can be imported and used non-lexically in paths.
mod a {
use a as foo;
use self::foo::foo as bar;
mod b {
use super::bar;
}
}
mod foo { pub fn
|
() {} }
mod bar { pub fn f() {} }
pub fn f() -> bool { true }
// Items and explicit imports shadow globs.
fn g() {
use foo::*;
use bar::*;
fn f() -> bool { true }
let _: bool = f();
}
fn h() {
use foo::*;
use bar::*;
use f;
let _: bool = f();
}
// Here, there appears to be shadowing but isn't because of namespaces.
mod b {
use foo::*; // This imports `f` in the value namespace.
use super::b as f; // This imports `f` only in the type namespace,
fn test() { self::f(); } // so the glob isn't shadowed.
}
// Here, there is shadowing in one namespace, but not the other.
mod c {
mod test {
pub fn f() {}
pub mod f {}
}
use self::test::*; // This glob-imports `f` in both namespaces.
mod f { pub fn f() {} } // This shadows the glob only in the value namespace.
fn test() {
self::f(); // Check that the glob-imported value isn't shadowed.
self::f::f(); // Check that the glob-imported module is shadowed.
}
}
// Unused names can be ambiguous.
mod d {
pub use foo::*; // This imports `f` in the value namespace.
pub use bar::*; // This also imports `f` in the value namespace.
}
mod e {
pub use d::*; // n.b. Since `e::f` is not used, this is not considered to be a use of `d::f`.
}
fn main() {}
|
f
|
identifier_name
|
imports.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused)]
// Like other items, private imports can be imported and used non-lexically in paths.
mod a {
use a as foo;
use self::foo::foo as bar;
mod b {
use super::bar;
}
}
mod foo { pub fn f() {} }
mod bar { pub fn f() {} }
pub fn f() -> bool { true }
// Items and explicit imports shadow globs.
fn g() {
use foo::*;
use bar::*;
fn f() -> bool
|
let _: bool = f();
}
fn h() {
use foo::*;
use bar::*;
use f;
let _: bool = f();
}
// Here, there appears to be shadowing but isn't because of namespaces.
mod b {
use foo::*; // This imports `f` in the value namespace.
use super::b as f; // This imports `f` only in the type namespace,
fn test() { self::f(); } // so the glob isn't shadowed.
}
// Here, there is shadowing in one namespace, but not the other.
mod c {
mod test {
pub fn f() {}
pub mod f {}
}
use self::test::*; // This glob-imports `f` in both namespaces.
mod f { pub fn f() {} } // This shadows the glob only in the value namespace.
fn test() {
self::f(); // Check that the glob-imported value isn't shadowed.
self::f::f(); // Check that the glob-imported module is shadowed.
}
}
// Unused names can be ambiguous.
mod d {
pub use foo::*; // This imports `f` in the value namespace.
pub use bar::*; // This also imports `f` in the value namespace.
}
mod e {
pub use d::*; // n.b. Since `e::f` is not used, this is not considered to be a use of `d::f`.
}
fn main() {}
|
{ true }
|
identifier_body
|
imports.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused)]
// Like other items, private imports can be imported and used non-lexically in paths.
mod a {
use a as foo;
use self::foo::foo as bar;
mod b {
use super::bar;
}
}
mod foo { pub fn f() {} }
mod bar { pub fn f() {} }
pub fn f() -> bool { true }
|
use foo::*;
use bar::*;
fn f() -> bool { true }
let _: bool = f();
}
fn h() {
use foo::*;
use bar::*;
use f;
let _: bool = f();
}
// Here, there appears to be shadowing but isn't because of namespaces.
mod b {
use foo::*; // This imports `f` in the value namespace.
use super::b as f; // This imports `f` only in the type namespace,
fn test() { self::f(); } // so the glob isn't shadowed.
}
// Here, there is shadowing in one namespace, but not the other.
mod c {
mod test {
pub fn f() {}
pub mod f {}
}
use self::test::*; // This glob-imports `f` in both namespaces.
mod f { pub fn f() {} } // This shadows the glob only in the value namespace.
fn test() {
self::f(); // Check that the glob-imported value isn't shadowed.
self::f::f(); // Check that the glob-imported module is shadowed.
}
}
// Unused names can be ambiguous.
mod d {
pub use foo::*; // This imports `f` in the value namespace.
pub use bar::*; // This also imports `f` in the value namespace.
}
mod e {
pub use d::*; // n.b. Since `e::f` is not used, this is not considered to be a use of `d::f`.
}
fn main() {}
|
// Items and explicit imports shadow globs.
fn g() {
|
random_line_split
|
cmd.rs
|
use clap::{App, Arg};
use generators::common::{GenConfig, GenMode};
use itertools::Itertools;
use std::str::FromStr;
#[derive(Clone, Debug)]
pub struct CommandLineArguments {
pub codegen_key: Option<String>,
pub demo_key: Option<String>,
pub bench_key: Option<String>,
pub generation_mode: GenMode,
pub config: GenConfig,
pub limit: usize,
pub out: String,
}
pub fn read_command_line_arguments(name: &str) -> CommandLineArguments {
let matches = App::new(name)
.version("0.1.0")
.author("Mikhail Hogrefe <[email protected]>")
.about("Runs demos and benchmarks for malachite-base functions.")
.arg(
Arg::with_name("generation_mode")
.short("m")
.long("generation_mode")
.help("May be 'exhaustive', 'random', or'special_random'.")
.takes_value(true),
)
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.help("e.g.'mean_run_length_n 4 mean_run_length_d 1'")
.takes_value(true),
)
.arg(
Arg::with_name("limit")
.short("l")
.long("limit")
.help("Specifies the maximum number of elements to generate")
.takes_value(true),
)
.arg(
Arg::with_name("out")
.short("o")
.long("out")
.help("Specifies the file name to write a benchmark to")
.takes_value(true),
)
.arg(
Arg::with_name("demo")
.short("d")
.long("demo")
.help("Specifies the demo name")
.takes_value(true),
)
.arg(
Arg::with_name("bench")
.short("b")
.long("bench")
.help("Specifies the benchmark name")
.takes_value(true),
)
.arg(
Arg::with_name("codegen")
.short("g")
.long("codegen")
.help("Specifies the code to generate")
.takes_value(true),
)
.get_matches();
let generation_mode = match matches.value_of("generation_mode").unwrap_or("exhaustive") {
"exhaustive" => GenMode::Exhaustive,
"random" => GenMode::Random,
|
let mut config = GenConfig::new();
if!config_string.is_empty() {
for mut chunk in &config_string.split(' ').chunks(2) {
let key = chunk.next().unwrap();
let value =
u64::from_str(chunk.next().expect("Bad config")).expect("Invalid config value");
config.insert(key, value);
}
}
let limit =
usize::from_str(matches.value_of("limit").unwrap_or("10000")).expect("Invalid limit");
let out = matches.value_of("out").unwrap_or("temp.gp").to_string();
let demo_key = matches.value_of("demo").map(ToString::to_string);
let bench_key = matches.value_of("bench").map(ToString::to_string);
let codegen_key = matches.value_of("codegen").map(ToString::to_string);
if demo_key.is_none() && bench_key.is_none() && codegen_key.is_none() {
panic!("Must specify demo, bench, or codegen");
}
CommandLineArguments {
codegen_key,
demo_key,
bench_key,
generation_mode,
config,
limit,
out,
}
}
|
"special_random" => GenMode::SpecialRandom,
_ => panic!("Invalid generation mode"),
};
let config_string = matches.value_of("config").unwrap_or("");
|
random_line_split
|
cmd.rs
|
use clap::{App, Arg};
use generators::common::{GenConfig, GenMode};
use itertools::Itertools;
use std::str::FromStr;
#[derive(Clone, Debug)]
pub struct CommandLineArguments {
pub codegen_key: Option<String>,
pub demo_key: Option<String>,
pub bench_key: Option<String>,
pub generation_mode: GenMode,
pub config: GenConfig,
pub limit: usize,
pub out: String,
}
pub fn read_command_line_arguments(name: &str) -> CommandLineArguments
|
Arg::with_name("limit")
.short("l")
.long("limit")
.help("Specifies the maximum number of elements to generate")
.takes_value(true),
)
.arg(
Arg::with_name("out")
.short("o")
.long("out")
.help("Specifies the file name to write a benchmark to")
.takes_value(true),
)
.arg(
Arg::with_name("demo")
.short("d")
.long("demo")
.help("Specifies the demo name")
.takes_value(true),
)
.arg(
Arg::with_name("bench")
.short("b")
.long("bench")
.help("Specifies the benchmark name")
.takes_value(true),
)
.arg(
Arg::with_name("codegen")
.short("g")
.long("codegen")
.help("Specifies the code to generate")
.takes_value(true),
)
.get_matches();
let generation_mode = match matches.value_of("generation_mode").unwrap_or("exhaustive") {
"exhaustive" => GenMode::Exhaustive,
"random" => GenMode::Random,
"special_random" => GenMode::SpecialRandom,
_ => panic!("Invalid generation mode"),
};
let config_string = matches.value_of("config").unwrap_or("");
let mut config = GenConfig::new();
if!config_string.is_empty() {
for mut chunk in &config_string.split(' ').chunks(2) {
let key = chunk.next().unwrap();
let value =
u64::from_str(chunk.next().expect("Bad config")).expect("Invalid config value");
config.insert(key, value);
}
}
let limit =
usize::from_str(matches.value_of("limit").unwrap_or("10000")).expect("Invalid limit");
let out = matches.value_of("out").unwrap_or("temp.gp").to_string();
let demo_key = matches.value_of("demo").map(ToString::to_string);
let bench_key = matches.value_of("bench").map(ToString::to_string);
let codegen_key = matches.value_of("codegen").map(ToString::to_string);
if demo_key.is_none() && bench_key.is_none() && codegen_key.is_none() {
panic!("Must specify demo, bench, or codegen");
}
CommandLineArguments {
codegen_key,
demo_key,
bench_key,
generation_mode,
config,
limit,
out,
}
}
|
{
let matches = App::new(name)
.version("0.1.0")
.author("Mikhail Hogrefe <[email protected]>")
.about("Runs demos and benchmarks for malachite-base functions.")
.arg(
Arg::with_name("generation_mode")
.short("m")
.long("generation_mode")
.help("May be 'exhaustive', 'random', or 'special_random'.")
.takes_value(true),
)
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.help("e.g. 'mean_run_length_n 4 mean_run_length_d 1'")
.takes_value(true),
)
.arg(
|
identifier_body
|
cmd.rs
|
use clap::{App, Arg};
use generators::common::{GenConfig, GenMode};
use itertools::Itertools;
use std::str::FromStr;
#[derive(Clone, Debug)]
pub struct
|
{
pub codegen_key: Option<String>,
pub demo_key: Option<String>,
pub bench_key: Option<String>,
pub generation_mode: GenMode,
pub config: GenConfig,
pub limit: usize,
pub out: String,
}
pub fn read_command_line_arguments(name: &str) -> CommandLineArguments {
let matches = App::new(name)
.version("0.1.0")
.author("Mikhail Hogrefe <[email protected]>")
.about("Runs demos and benchmarks for malachite-base functions.")
.arg(
Arg::with_name("generation_mode")
.short("m")
.long("generation_mode")
.help("May be 'exhaustive', 'random', or'special_random'.")
.takes_value(true),
)
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.help("e.g.'mean_run_length_n 4 mean_run_length_d 1'")
.takes_value(true),
)
.arg(
Arg::with_name("limit")
.short("l")
.long("limit")
.help("Specifies the maximum number of elements to generate")
.takes_value(true),
)
.arg(
Arg::with_name("out")
.short("o")
.long("out")
.help("Specifies the file name to write a benchmark to")
.takes_value(true),
)
.arg(
Arg::with_name("demo")
.short("d")
.long("demo")
.help("Specifies the demo name")
.takes_value(true),
)
.arg(
Arg::with_name("bench")
.short("b")
.long("bench")
.help("Specifies the benchmark name")
.takes_value(true),
)
.arg(
Arg::with_name("codegen")
.short("g")
.long("codegen")
.help("Specifies the code to generate")
.takes_value(true),
)
.get_matches();
let generation_mode = match matches.value_of("generation_mode").unwrap_or("exhaustive") {
"exhaustive" => GenMode::Exhaustive,
"random" => GenMode::Random,
"special_random" => GenMode::SpecialRandom,
_ => panic!("Invalid generation mode"),
};
let config_string = matches.value_of("config").unwrap_or("");
let mut config = GenConfig::new();
if!config_string.is_empty() {
for mut chunk in &config_string.split(' ').chunks(2) {
let key = chunk.next().unwrap();
let value =
u64::from_str(chunk.next().expect("Bad config")).expect("Invalid config value");
config.insert(key, value);
}
}
let limit =
usize::from_str(matches.value_of("limit").unwrap_or("10000")).expect("Invalid limit");
let out = matches.value_of("out").unwrap_or("temp.gp").to_string();
let demo_key = matches.value_of("demo").map(ToString::to_string);
let bench_key = matches.value_of("bench").map(ToString::to_string);
let codegen_key = matches.value_of("codegen").map(ToString::to_string);
if demo_key.is_none() && bench_key.is_none() && codegen_key.is_none() {
panic!("Must specify demo, bench, or codegen");
}
CommandLineArguments {
codegen_key,
demo_key,
bench_key,
generation_mode,
config,
limit,
out,
}
}
|
CommandLineArguments
|
identifier_name
|
cabi.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::Attribute;
use std::option;
use middle::trans::context::CrateContext;
use middle::trans::cabi_x86;
use middle::trans::cabi_x86_64;
use middle::trans::cabi_arm;
use middle::trans::cabi_mips;
use middle::trans::type_::Type;
use syntax::abi::{X86, X86_64, Arm, Mips};
#[deriving(Clone, Eq)]
pub enum ArgKind {
/// Pass the argument directly using the normal converted
/// LLVM type or by coercing to another specified type
Direct,
/// Pass the argument indirectly via a hidden pointer
Indirect
}
/// Information about how a specific C type
/// should be passed to or returned from a function
///
/// This is borrowed from clang's ABIInfo.h
#[deriving(Clone)]
pub struct ArgType {
kind: ArgKind,
/// Original LLVM type
ty: Type,
/// Coerced LLVM Type
cast: option::Option<Type>,
/// Dummy argument, which is emitted before the real argument
pad: option::Option<Type>,
/// LLVM attribute of argument
attr: option::Option<Attribute>
}
impl ArgType {
pub fn direct(ty: Type, cast: option::Option<Type>,
pad: option::Option<Type>,
attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Direct,
ty: ty,
cast: cast,
pad: pad,
attr: attr
}
}
pub fn
|
(ty: Type, attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Indirect,
ty: ty,
cast: option::None,
pad: option::None,
attr: attr
}
}
pub fn is_direct(&self) -> bool {
return self.kind == Direct;
}
pub fn is_indirect(&self) -> bool {
return self.kind == Indirect;
}
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
pub struct FnType {
/// The LLVM types of each argument.
arg_tys: ~[ArgType],
/// LLVM return type.
ret_ty: ArgType,
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
match ccx.sess.targ_cfg.arch {
X86 => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def),
X86_64 => cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def),
Arm => cabi_arm::compute_abi_info(ccx, atys, rty, ret_def),
Mips => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
}
}
|
indirect
|
identifier_name
|
cabi.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::Attribute;
use std::option;
use middle::trans::context::CrateContext;
use middle::trans::cabi_x86;
use middle::trans::cabi_x86_64;
use middle::trans::cabi_arm;
use middle::trans::cabi_mips;
use middle::trans::type_::Type;
use syntax::abi::{X86, X86_64, Arm, Mips};
#[deriving(Clone, Eq)]
pub enum ArgKind {
/// Pass the argument directly using the normal converted
/// LLVM type or by coercing to another specified type
Direct,
/// Pass the argument indirectly via a hidden pointer
Indirect
}
/// Information about how a specific C type
/// should be passed to or returned from a function
///
/// This is borrowed from clang's ABIInfo.h
#[deriving(Clone)]
pub struct ArgType {
kind: ArgKind,
/// Original LLVM type
ty: Type,
/// Coerced LLVM Type
cast: option::Option<Type>,
/// Dummy argument, which is emitted before the real argument
pad: option::Option<Type>,
/// LLVM attribute of argument
attr: option::Option<Attribute>
}
impl ArgType {
pub fn direct(ty: Type, cast: option::Option<Type>,
pad: option::Option<Type>,
attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Direct,
ty: ty,
cast: cast,
pad: pad,
attr: attr
}
}
pub fn indirect(ty: Type, attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Indirect,
ty: ty,
cast: option::None,
pad: option::None,
attr: attr
}
}
pub fn is_direct(&self) -> bool {
return self.kind == Direct;
}
pub fn is_indirect(&self) -> bool
|
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
pub struct FnType {
/// The LLVM types of each argument.
arg_tys: ~[ArgType],
/// LLVM return type.
ret_ty: ArgType,
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
match ccx.sess.targ_cfg.arch {
X86 => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def),
X86_64 => cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def),
Arm => cabi_arm::compute_abi_info(ccx, atys, rty, ret_def),
Mips => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
}
}
|
{
return self.kind == Indirect;
}
|
identifier_body
|
cabi.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use lib::llvm::Attribute;
use std::option;
use middle::trans::context::CrateContext;
use middle::trans::cabi_x86;
use middle::trans::cabi_x86_64;
use middle::trans::cabi_arm;
use middle::trans::cabi_mips;
use middle::trans::type_::Type;
use syntax::abi::{X86, X86_64, Arm, Mips};
#[deriving(Clone, Eq)]
pub enum ArgKind {
/// Pass the argument directly using the normal converted
/// LLVM type or by coercing to another specified type
Direct,
/// Pass the argument indirectly via a hidden pointer
Indirect
}
/// Information about how a specific C type
/// should be passed to or returned from a function
///
/// This is borrowed from clang's ABIInfo.h
#[deriving(Clone)]
pub struct ArgType {
kind: ArgKind,
/// Original LLVM type
ty: Type,
/// Coerced LLVM Type
cast: option::Option<Type>,
/// Dummy argument, which is emitted before the real argument
pad: option::Option<Type>,
/// LLVM attribute of argument
attr: option::Option<Attribute>
}
impl ArgType {
pub fn direct(ty: Type, cast: option::Option<Type>,
pad: option::Option<Type>,
attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Direct,
ty: ty,
cast: cast,
pad: pad,
attr: attr
}
}
pub fn indirect(ty: Type, attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Indirect,
ty: ty,
cast: option::None,
pad: option::None,
attr: attr
}
}
pub fn is_direct(&self) -> bool {
return self.kind == Direct;
}
pub fn is_indirect(&self) -> bool {
return self.kind == Indirect;
}
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
|
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
pub struct FnType {
/// The LLVM types of each argument.
arg_tys: ~[ArgType],
/// LLVM return type.
ret_ty: ArgType,
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
match ccx.sess.targ_cfg.arch {
X86 => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def),
X86_64 => cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def),
Arm => cabi_arm::compute_abi_info(ccx, atys, rty, ret_def),
Mips => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
}
}
|
random_line_split
|
|
lib.rs
|
//! [tui](https://github.com/fdehau/tui-rs) is a library used to build rich
//! terminal users interfaces and dashboards.
//!
//!
//!
//! # Get started
//!
//! ## Adding `tui` as a dependency
//!
//! ```toml
//! [dependencies]
//! tui = "0.17"
//! crossterm = "0.22"
//! ```
//!
//! The crate is using the `crossterm` backend by default that works on most platforms. But if for
//! example you want to use the `termion` backend instead. This can be done by changing your
//! dependencies specification to the following:
//!
//! ```toml
//! [dependencies]
//! termion = "1.5"
//! tui = { version = "0.17", default-features = false, features = ['termion'] }
|
//!
//! The same logic applies for all other available backends.
//!
//! ## Creating a `Terminal`
//!
//! Every application using `tui` should start by instantiating a `Terminal`. It is a light
//! abstraction over available backends that provides basic functionalities such as clearing the
//! screen, hiding the cursor, etc.
//!
//! ```rust,no_run
//! use std::io;
//! use tui::{backend::CrosstermBackend, Terminal};
//!
//! fn main() -> Result<(), io::Error> {
//! let stdout = io::stdout();
//! let backend = CrosstermBackend::new(stdout);
//! let mut terminal = Terminal::new(backend)?;
//! Ok(())
//! }
//! ```
//!
//! If you had previously chosen `termion` as a backend, the terminal can be created in a similar
//! way:
//!
//! ```rust,ignore
//! use std::io;
//! use tui::{backend::TermionBackend, Terminal};
//! use termion::raw::IntoRawMode;
//!
//! fn main() -> Result<(), io::Error> {
//! let stdout = io::stdout().into_raw_mode()?;
//! let backend = TermionBackend::new(stdout);
//! let mut terminal = Terminal::new(backend)?;
//! Ok(())
//! }
//! ```
//!
//! You may also refer to the examples to find out how to create a `Terminal` for each available
//! backend.
//!
//! ## Building a User Interface (UI)
//!
//! Every component of your interface will be implementing the `Widget` trait. The library comes
//! with a predefined set of widgets that should meet most of your use cases. You are also free to
//! implement your own.
//!
//! Each widget follows a builder pattern API providing a default configuration along with methods
//! to customize them. The widget is then rendered using [`Frame::render_widget`] which takes
//! your widget instance and an area to draw to.
//!
//! The following example renders a block of the size of the terminal:
//!
//! ```rust,no_run
//! use std::{io, thread, time::Duration};
//! use tui::{
//! backend::CrosstermBackend,
//! widgets::{Widget, Block, Borders},
//! layout::{Layout, Constraint, Direction},
//! Terminal
//! };
//! use crossterm::{
//! event::{self, DisableMouseCapture, EnableMouseCapture, Event, KeyCode},
//! execute,
//! terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
//! };
//!
//! fn main() -> Result<(), io::Error> {
//! // setup terminal
//! enable_raw_mode()?;
//! let mut stdout = io::stdout();
//! execute!(stdout, EnterAlternateScreen, EnableMouseCapture)?;
//! let backend = CrosstermBackend::new(stdout);
//! let mut terminal = Terminal::new(backend)?;
//!
//! terminal.draw(|f| {
//! let size = f.size();
//! let block = Block::default()
//! .title("Block")
//! .borders(Borders::ALL);
//! f.render_widget(block, size);
//! })?;
//!
//! thread::sleep(Duration::from_millis(5000));
//!
//! // restore terminal
//! disable_raw_mode()?;
//! execute!(
//! terminal.backend_mut(),
//! LeaveAlternateScreen,
//! DisableMouseCapture
//! )?;
//! terminal.show_cursor()?;
//!
//! Ok(())
//! }
//! ```
//!
//! ## Layout
//!
//! The library comes with a basic yet useful layout management object called `Layout`. As you may
//! see below and in the examples, the library makes heavy use of the builder pattern to provide
//! full customization. And `Layout` is no exception:
//!
//! ```rust,no_run
//! use tui::{
//! backend::Backend,
//! layout::{Constraint, Direction, Layout},
//! widgets::{Block, Borders},
//! Frame,
//! };
//! fn ui<B: Backend>(f: &mut Frame<B>) {
//! let chunks = Layout::default()
//! .direction(Direction::Vertical)
//! .margin(1)
//! .constraints(
//! [
//! Constraint::Percentage(10),
//! Constraint::Percentage(80),
//! Constraint::Percentage(10)
//! ].as_ref()
//! )
//! .split(f.size());
//! let block = Block::default()
//! .title("Block")
//! .borders(Borders::ALL);
//! f.render_widget(block, chunks[0]);
//! let block = Block::default()
//! .title("Block 2")
//! .borders(Borders::ALL);
//! f.render_widget(block, chunks[1]);
//! }
//! ```
//!
//! This let you describe responsive terminal UI by nesting layouts. You should note that by
//! default the computed layout tries to fill the available space completely. So if for any reason
//! you might need a blank space somewhere, try to pass an additional constraint and don't use the
//! corresponding area.
pub mod backend;
pub mod buffer;
pub mod layout;
pub mod style;
pub mod symbols;
pub mod terminal;
pub mod text;
pub mod widgets;
pub use self::terminal::{Frame, Terminal, TerminalOptions, Viewport};
|
//!
//! ```
|
random_line_split
|
file_dlg.rs
|
/* Copyright 2015 Jordan Miner
*
* Licensed under the MIT license <LICENSE or
* http://opensource.org/licenses/MIT>. This file may not be copied,
* modified, or distributed except according to those terms.
*/
use super::control_prelude::*;
use std::borrow::Cow;
use std::ffi::CStr;
use std::path::{PathBuf, Path};
use super::Popup;
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum FileDialogType {
Open,
Save,
Dir
}
impl FileDialogType {
fn from_str(s: &str) -> FileDialogType {
match s {
"OPEN" => FileDialogType::Open,
"SAVE" => FileDialogType::Save,
"DIR" => FileDialogType::Dir,
_ => panic!("unknown FileDialogType string"),
}
}
fn to_str(&self) -> &'static str {
match *self {
FileDialogType::Open => "OPEN\0",
FileDialogType::Save => "SAVE\0",
FileDialogType::Dir => "DIR\0",
}
}
}
pub struct FileExtFilter<'a, 'b, 'c: 'b> {
pub description: Cow<'a, str>,
pub filter: Cow<'b, [Cow<'c, str>]>,
}
impl<'a, 'b, 'c> FileExtFilter<'a, 'b, 'c> {
pub fn from_borrowed(desc: &'a str, filter: &'b [Cow<'c, str>]) -> FileExtFilter<'a, 'b, 'c> {
FileExtFilter {
description: Cow::Borrowed(desc),
filter: Cow::Borrowed(filter),
}
}
pub fn from_owned(desc: String, filter: Vec<String>) -> FileExtFilter<'a, 'b, 'c> {
FileExtFilter {
description: Cow::Owned(desc),
filter: Cow::Owned(filter.into_iter().map(|s| Cow::Owned(s)).collect()),
}
}
}
#[derive(Clone)]
pub struct FileDlg(HandleRc);
impl FileDlg {
pub fn
|
() -> FileDlg {
unsafe {
::iup_open();
let ih = IupFileDlg();
FileDlg(HandleRc::new(ih))
}
}
pub fn dialog_type(&self) -> FileDialogType {
unsafe {
let val = get_str_attribute_slice(self.handle(), "DIALOGTYPE\0");
FileDialogType::from_str(&*val)
}
}
pub fn set_dialog_type(&self, ty: FileDialogType) {
set_str_attribute(self.handle(), "DIALOGTYPE\0", ty.to_str());
}
pub fn directory(&self) -> String {
get_str_attribute(self.handle(), "DIRECTORY\0")
}
pub fn set_directory(&self, s: &str) {
set_str_attribute(self.handle(), "DIRECTORY\0", s);
}
pub fn ext_filter(&self) -> Vec<FileExtFilter> {
unsafe {
let val = get_str_attribute_slice(self.handle(), "EXTFILTER\0");
let mut filters = vec![];
let mut parts = val.split('|');
loop {
let desc = match parts.next() {
Some(p) => p,
None => break,
};
let filter = match parts.next() {
Some(p) => p,
None => break,
};
let filter = filter.split(';').map(|s| Cow::Owned(s.to_owned())).collect();
filters.push(FileExtFilter {
description: Cow::Owned(desc.to_owned()),
filter: Cow::Owned(filter),
});
}
filters
}
}
/// Sets file filters. It is recommended to always include an "All Files" filter.
///
/// # Examples
/// ```
/// # use clear_coat::{FileDlg, FileExtFilter};
/// let f = FileDlg::new();
/// f.set_ext_filter(&[
/// FileExtFilter::from_borrowed("All Files",
/// &["*.*".into()]),
/// FileExtFilter::from_borrowed("All Images",
/// &["*.jpg".into(), "*.jpeg".into(), "*.png".into()]),
/// FileExtFilter::from_borrowed("JPEG Images",
/// &["*.jpg".into(), "*.jpeg".into()]),
/// FileExtFilter::from_borrowed("PNG Images",
/// &["*.png".into()]),
/// ]);
/// f.set_filter_used(1);
/// ```
pub fn set_ext_filter(&self, ext_filter: &[FileExtFilter]) {
let mut s = String::with_capacity(ext_filter.len() * 25);
for f in ext_filter.iter() {
let semi_joined_filter = f.filter.join(";");
s.push_str(&f.description);
s.push_str(" (");
s.push_str(&semi_joined_filter);
s.push_str(")");
s.push_str("|");
s.push_str(&semi_joined_filter);
s.push_str("|");
}
s.push_str("\0");
set_str_attribute(self.handle(), "EXTFILTER\0", &s);
}
/// Sets the index of the filter to use starting at 0.
pub fn set_filter_used(&self, i: u32) -> &Self {
set_str_attribute(self.handle(), "FILTERUSED\0", &format!("{}\0", i + 1));
self
}
/// Gets the index of the filter to use. It returns the selection made by the user.
pub fn filter_used(&self) -> u32 {
unsafe {
let s = get_str_attribute_slice(self.handle(), "FILTERUSED\0");
s.parse::<u32>().expect("could not convert FILTERUSED to an integer") - 1
}
}
pub fn multiple_files(&self) -> bool {
unsafe {
get_str_attribute_slice(self.handle(), "MULTIPLEFILES\0") == "YES"
}
}
pub fn set_multiple_files(&self, multiple: bool) -> &Self {
set_str_attribute(self.handle(), "MULTIPLEFILES\0", if multiple { "YES\0" } else { "NO\0" });
self
}
pub fn value_single(&self) -> Option<PathBuf> {
assert!(!self.multiple_files());
unsafe {
let val = get_attribute_ptr(self.handle(), "VALUE\0");
if val.is_null() {
None
} else {
Some(PathBuf::from(&*CStr::from_ptr(val).to_string_lossy()))
}
}
}
pub fn value_multiple(&self) -> Option<Vec<PathBuf>> {
assert!(self.multiple_files());
unsafe {
let val = get_attribute_ptr(self.handle(), "VALUE\0");
if val.is_null() {
None
} else {
const PIPE: &'static [char] = &['|'];
let val = CStr::from_ptr(val).to_string_lossy();
let mut parts = val.split(PIPE);
let last_part = parts.next_back().expect("failed removing last part");
// if multiple files were selected, the string will end in a pipe
if last_part.is_empty() {
let dir = parts.next().expect("failed to get directory in value in \
FileDlg when multiple_files == true");
Some(parts.map(|p| Path::new(dir).join(p)).collect())
} else {
assert_eq!(parts.next(), None);
Some(vec![PathBuf::from(last_part)])
}
}
}
}
}
impl_control_traits!(FileDlg);
impl Popup for FileDlg {}
impl TitleAttribute for FileDlg {}
|
new
|
identifier_name
|
file_dlg.rs
|
/* Copyright 2015 Jordan Miner
*
* Licensed under the MIT license <LICENSE or
* http://opensource.org/licenses/MIT>. This file may not be copied,
* modified, or distributed except according to those terms.
*/
use super::control_prelude::*;
use std::borrow::Cow;
use std::ffi::CStr;
use std::path::{PathBuf, Path};
use super::Popup;
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum FileDialogType {
Open,
Save,
Dir
}
impl FileDialogType {
fn from_str(s: &str) -> FileDialogType {
match s {
"OPEN" => FileDialogType::Open,
"SAVE" => FileDialogType::Save,
"DIR" => FileDialogType::Dir,
_ => panic!("unknown FileDialogType string"),
}
}
fn to_str(&self) -> &'static str {
match *self {
FileDialogType::Open => "OPEN\0",
FileDialogType::Save => "SAVE\0",
FileDialogType::Dir => "DIR\0",
}
}
}
pub struct FileExtFilter<'a, 'b, 'c: 'b> {
pub description: Cow<'a, str>,
pub filter: Cow<'b, [Cow<'c, str>]>,
}
impl<'a, 'b, 'c> FileExtFilter<'a, 'b, 'c> {
pub fn from_borrowed(desc: &'a str, filter: &'b [Cow<'c, str>]) -> FileExtFilter<'a, 'b, 'c> {
FileExtFilter {
description: Cow::Borrowed(desc),
filter: Cow::Borrowed(filter),
}
}
pub fn from_owned(desc: String, filter: Vec<String>) -> FileExtFilter<'a, 'b, 'c> {
FileExtFilter {
description: Cow::Owned(desc),
filter: Cow::Owned(filter.into_iter().map(|s| Cow::Owned(s)).collect()),
}
}
}
#[derive(Clone)]
pub struct FileDlg(HandleRc);
impl FileDlg {
pub fn new() -> FileDlg {
unsafe {
::iup_open();
let ih = IupFileDlg();
FileDlg(HandleRc::new(ih))
}
}
pub fn dialog_type(&self) -> FileDialogType {
unsafe {
let val = get_str_attribute_slice(self.handle(), "DIALOGTYPE\0");
FileDialogType::from_str(&*val)
}
}
pub fn set_dialog_type(&self, ty: FileDialogType) {
set_str_attribute(self.handle(), "DIALOGTYPE\0", ty.to_str());
}
pub fn directory(&self) -> String {
get_str_attribute(self.handle(), "DIRECTORY\0")
}
pub fn set_directory(&self, s: &str) {
set_str_attribute(self.handle(), "DIRECTORY\0", s);
}
pub fn ext_filter(&self) -> Vec<FileExtFilter> {
unsafe {
let val = get_str_attribute_slice(self.handle(), "EXTFILTER\0");
let mut filters = vec![];
let mut parts = val.split('|');
loop {
let desc = match parts.next() {
Some(p) => p,
None => break,
};
let filter = match parts.next() {
Some(p) => p,
None => break,
};
let filter = filter.split(';').map(|s| Cow::Owned(s.to_owned())).collect();
filters.push(FileExtFilter {
description: Cow::Owned(desc.to_owned()),
filter: Cow::Owned(filter),
});
}
filters
}
}
/// Sets file filters. It is recommended to always include an "All Files" filter.
///
/// # Examples
/// ```
/// # use clear_coat::{FileDlg, FileExtFilter};
/// let f = FileDlg::new();
/// f.set_ext_filter(&[
/// FileExtFilter::from_borrowed("All Files",
/// &["*.*".into()]),
/// FileExtFilter::from_borrowed("All Images",
/// &["*.jpg".into(), "*.jpeg".into(), "*.png".into()]),
/// FileExtFilter::from_borrowed("JPEG Images",
/// &["*.jpg".into(), "*.jpeg".into()]),
/// FileExtFilter::from_borrowed("PNG Images",
/// &["*.png".into()]),
/// ]);
/// f.set_filter_used(1);
/// ```
pub fn set_ext_filter(&self, ext_filter: &[FileExtFilter]) {
let mut s = String::with_capacity(ext_filter.len() * 25);
for f in ext_filter.iter() {
let semi_joined_filter = f.filter.join(";");
s.push_str(&f.description);
s.push_str(" (");
s.push_str(&semi_joined_filter);
s.push_str(")");
s.push_str("|");
s.push_str(&semi_joined_filter);
s.push_str("|");
}
s.push_str("\0");
set_str_attribute(self.handle(), "EXTFILTER\0", &s);
}
/// Sets the index of the filter to use starting at 0.
pub fn set_filter_used(&self, i: u32) -> &Self {
set_str_attribute(self.handle(), "FILTERUSED\0", &format!("{}\0", i + 1));
self
}
/// Gets the index of the filter to use. It returns the selection made by the user.
pub fn filter_used(&self) -> u32 {
unsafe {
let s = get_str_attribute_slice(self.handle(), "FILTERUSED\0");
s.parse::<u32>().expect("could not convert FILTERUSED to an integer") - 1
}
}
pub fn multiple_files(&self) -> bool {
unsafe {
get_str_attribute_slice(self.handle(), "MULTIPLEFILES\0") == "YES"
}
}
pub fn set_multiple_files(&self, multiple: bool) -> &Self {
set_str_attribute(self.handle(), "MULTIPLEFILES\0", if multiple { "YES\0" } else { "NO\0" });
self
}
pub fn value_single(&self) -> Option<PathBuf> {
assert!(!self.multiple_files());
unsafe {
let val = get_attribute_ptr(self.handle(), "VALUE\0");
if val.is_null() {
|
}
}
pub fn value_multiple(&self) -> Option<Vec<PathBuf>> {
assert!(self.multiple_files());
unsafe {
let val = get_attribute_ptr(self.handle(), "VALUE\0");
if val.is_null() {
None
} else {
const PIPE: &'static [char] = &['|'];
let val = CStr::from_ptr(val).to_string_lossy();
let mut parts = val.split(PIPE);
let last_part = parts.next_back().expect("failed removing last part");
// if multiple files were selected, the string will end in a pipe
if last_part.is_empty() {
let dir = parts.next().expect("failed to get directory in value in \
FileDlg when multiple_files == true");
Some(parts.map(|p| Path::new(dir).join(p)).collect())
} else {
assert_eq!(parts.next(), None);
Some(vec![PathBuf::from(last_part)])
}
}
}
}
}
impl_control_traits!(FileDlg);
impl Popup for FileDlg {}
impl TitleAttribute for FileDlg {}
|
None
} else {
Some(PathBuf::from(&*CStr::from_ptr(val).to_string_lossy()))
}
|
random_line_split
|
msgsend-ring-mutex-arcs.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test creates a bunch of threads that simultaneously send to each
// other in a ring. The messages should all be basically
// independent.
// This is like msgsend-ring-pipes but adapted to use Arcs.
// This also serves as a pipes test, because Arcs are implemented with pipes.
// no-pretty-expanded FIXME #15189
#![feature(duration, duration_span, future)]
use std::env;
use std::sync::{Arc, Future, Mutex, Condvar};
use std::time::Duration;
// A poor man's pipe.
type pipe = Arc<(Mutex<Vec<usize>>, Condvar)>;
fn send(p: &pipe, msg: usize) {
let &(ref lock, ref cond) = &**p;
let mut arr = lock.lock().unwrap();
arr.push(msg);
cond.notify_one();
}
fn recv(p: &pipe) -> usize
|
fn init() -> (pipe,pipe) {
let m = Arc::new((Mutex::new(Vec::new()), Condvar::new()));
((&m).clone(), m)
}
fn thread_ring(i: usize, count: usize, num_chan: pipe, num_port: pipe) {
let mut num_chan = Some(num_chan);
let mut num_port = Some(num_port);
// Send/Receive lots of messages.
for j in 0..count {
//println!("thread %?, iter %?", i, j);
let num_chan2 = num_chan.take().unwrap();
let num_port2 = num_port.take().unwrap();
send(&num_chan2, i * j);
num_chan = Some(num_chan2);
let _n = recv(&num_port2);
//log(error, _n);
num_port = Some(num_port2);
};
}
fn main() {
let args = env::args();
let args = if env::var_os("RUST_BENCH").is_some() {
vec!("".to_string(), "100".to_string(), "10000".to_string())
} else if args.len() <= 1 {
vec!("".to_string(), "10".to_string(), "100".to_string())
} else {
args.collect()
};
let num_tasks = args[1].parse::<usize>().unwrap();
let msg_per_task = args[2].parse::<usize>().unwrap();
let (num_chan, num_port) = init();
let mut p = Some((num_chan, num_port));
let dur = Duration::span(|| {
let (mut num_chan, num_port) = p.take().unwrap();
// create the ring
let mut futures = Vec::new();
for i in 1..num_tasks {
//println!("spawning %?", i);
let (new_chan, num_port) = init();
let num_chan_2 = num_chan.clone();
let new_future = Future::spawn(move|| {
thread_ring(i, msg_per_task, num_chan_2, num_port)
});
futures.push(new_future);
num_chan = new_chan;
};
// do our iteration
thread_ring(0, msg_per_task, num_chan, num_port);
// synchronize
for f in &mut futures {
f.get()
}
});
// all done, report stats.
let num_msgs = num_tasks * msg_per_task;
let rate = (num_msgs as f64) / (dur.as_secs() as f64);
println!("Sent {} messages in {:?}", num_msgs, dur);
println!(" {} messages / second", rate);
println!(" {} μs / message", 1000000. / rate);
}
|
{
let &(ref lock, ref cond) = &**p;
let mut arr = lock.lock().unwrap();
while arr.is_empty() {
arr = cond.wait(arr).unwrap();
}
arr.pop().unwrap()
}
|
identifier_body
|
msgsend-ring-mutex-arcs.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test creates a bunch of threads that simultaneously send to each
// other in a ring. The messages should all be basically
// independent.
// This is like msgsend-ring-pipes but adapted to use Arcs.
// This also serves as a pipes test, because Arcs are implemented with pipes.
// no-pretty-expanded FIXME #15189
#![feature(duration, duration_span, future)]
use std::env;
use std::sync::{Arc, Future, Mutex, Condvar};
use std::time::Duration;
// A poor man's pipe.
type pipe = Arc<(Mutex<Vec<usize>>, Condvar)>;
fn send(p: &pipe, msg: usize) {
let &(ref lock, ref cond) = &**p;
let mut arr = lock.lock().unwrap();
arr.push(msg);
cond.notify_one();
}
fn recv(p: &pipe) -> usize {
let &(ref lock, ref cond) = &**p;
let mut arr = lock.lock().unwrap();
while arr.is_empty() {
arr = cond.wait(arr).unwrap();
}
arr.pop().unwrap()
}
fn init() -> (pipe,pipe) {
let m = Arc::new((Mutex::new(Vec::new()), Condvar::new()));
((&m).clone(), m)
}
fn
|
(i: usize, count: usize, num_chan: pipe, num_port: pipe) {
let mut num_chan = Some(num_chan);
let mut num_port = Some(num_port);
// Send/Receive lots of messages.
for j in 0..count {
//println!("thread %?, iter %?", i, j);
let num_chan2 = num_chan.take().unwrap();
let num_port2 = num_port.take().unwrap();
send(&num_chan2, i * j);
num_chan = Some(num_chan2);
let _n = recv(&num_port2);
//log(error, _n);
num_port = Some(num_port2);
};
}
fn main() {
let args = env::args();
let args = if env::var_os("RUST_BENCH").is_some() {
vec!("".to_string(), "100".to_string(), "10000".to_string())
} else if args.len() <= 1 {
vec!("".to_string(), "10".to_string(), "100".to_string())
} else {
args.collect()
};
let num_tasks = args[1].parse::<usize>().unwrap();
let msg_per_task = args[2].parse::<usize>().unwrap();
let (num_chan, num_port) = init();
let mut p = Some((num_chan, num_port));
let dur = Duration::span(|| {
let (mut num_chan, num_port) = p.take().unwrap();
// create the ring
let mut futures = Vec::new();
for i in 1..num_tasks {
//println!("spawning %?", i);
let (new_chan, num_port) = init();
let num_chan_2 = num_chan.clone();
let new_future = Future::spawn(move|| {
thread_ring(i, msg_per_task, num_chan_2, num_port)
});
futures.push(new_future);
num_chan = new_chan;
};
// do our iteration
thread_ring(0, msg_per_task, num_chan, num_port);
// synchronize
for f in &mut futures {
f.get()
}
});
// all done, report stats.
let num_msgs = num_tasks * msg_per_task;
let rate = (num_msgs as f64) / (dur.as_secs() as f64);
println!("Sent {} messages in {:?}", num_msgs, dur);
println!(" {} messages / second", rate);
println!(" {} μs / message", 1000000. / rate);
}
|
thread_ring
|
identifier_name
|
msgsend-ring-mutex-arcs.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This test creates a bunch of threads that simultaneously send to each
// other in a ring. The messages should all be basically
// independent.
// This is like msgsend-ring-pipes but adapted to use Arcs.
// This also serves as a pipes test, because Arcs are implemented with pipes.
// no-pretty-expanded FIXME #15189
#![feature(duration, duration_span, future)]
use std::env;
use std::sync::{Arc, Future, Mutex, Condvar};
use std::time::Duration;
// A poor man's pipe.
type pipe = Arc<(Mutex<Vec<usize>>, Condvar)>;
fn send(p: &pipe, msg: usize) {
let &(ref lock, ref cond) = &**p;
let mut arr = lock.lock().unwrap();
arr.push(msg);
cond.notify_one();
}
fn recv(p: &pipe) -> usize {
let &(ref lock, ref cond) = &**p;
let mut arr = lock.lock().unwrap();
while arr.is_empty() {
arr = cond.wait(arr).unwrap();
}
arr.pop().unwrap()
}
fn init() -> (pipe,pipe) {
let m = Arc::new((Mutex::new(Vec::new()), Condvar::new()));
((&m).clone(), m)
}
fn thread_ring(i: usize, count: usize, num_chan: pipe, num_port: pipe) {
let mut num_chan = Some(num_chan);
let mut num_port = Some(num_port);
// Send/Receive lots of messages.
for j in 0..count {
//println!("thread %?, iter %?", i, j);
let num_chan2 = num_chan.take().unwrap();
let num_port2 = num_port.take().unwrap();
send(&num_chan2, i * j);
num_chan = Some(num_chan2);
let _n = recv(&num_port2);
//log(error, _n);
num_port = Some(num_port2);
};
}
fn main() {
let args = env::args();
let args = if env::var_os("RUST_BENCH").is_some() {
vec!("".to_string(), "100".to_string(), "10000".to_string())
} else if args.len() <= 1 {
vec!("".to_string(), "10".to_string(), "100".to_string())
} else {
args.collect()
};
let num_tasks = args[1].parse::<usize>().unwrap();
let msg_per_task = args[2].parse::<usize>().unwrap();
let (num_chan, num_port) = init();
let mut p = Some((num_chan, num_port));
let dur = Duration::span(|| {
let (mut num_chan, num_port) = p.take().unwrap();
// create the ring
let mut futures = Vec::new();
for i in 1..num_tasks {
//println!("spawning %?", i);
let (new_chan, num_port) = init();
let num_chan_2 = num_chan.clone();
let new_future = Future::spawn(move|| {
thread_ring(i, msg_per_task, num_chan_2, num_port)
});
futures.push(new_future);
num_chan = new_chan;
};
// do our iteration
thread_ring(0, msg_per_task, num_chan, num_port);
// synchronize
for f in &mut futures {
f.get()
}
|
// all done, report stats.
let num_msgs = num_tasks * msg_per_task;
let rate = (num_msgs as f64) / (dur.as_secs() as f64);
println!("Sent {} messages in {:?}", num_msgs, dur);
println!(" {} messages / second", rate);
println!(" {} μs / message", 1000000. / rate);
}
|
});
|
random_line_split
|
decompose.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::{self, Write};
#[derive(Clone)]
enum DecompositionType {
Canonical,
Compatible
}
/// External iterator for a string decomposition's characters.
#[derive(Clone)]
pub struct Decompositions<I> {
kind: DecompositionType,
iter: I,
done: bool,
// This buffer stores pairs of (canonical combining class, character),
// pushed onto the end in text order.
//
// It's split into two contiguous regions by the `ready` offset. The first
// `ready` pairs are sorted and ready to emit on demand. The "pending"
// suffix afterwards still needs more characters for us to be able to sort
// in canonical order and is not safe to emit.
buffer: Vec<(u8, char)>,
ready: usize,
}
#[inline]
pub fn new_canonical<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Canonical,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
#[inline]
pub fn new_compatible<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Compatible,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
impl<I> Decompositions<I> {
#[inline]
fn push_back(&mut self, ch: char) {
let class = super::char::canonical_combining_class(ch);
if class == 0 {
self.sort_pending();
}
self.buffer.push((class, ch));
}
#[inline]
fn sort_pending(&mut self) {
if self.ready == 0 && self.buffer.is_empty() {
return;
}
// NB: `sort_by_key` is stable, so it will preserve the original text's
// order within a combining class.
self.buffer[self.ready..].sort_by_key(|k| k.0);
self.ready = self.buffer.len();
}
#[inline]
fn pop_front(&mut self) -> Option<char>
|
}
impl<I: Iterator<Item=char>> Iterator for Decompositions<I> {
type Item = char;
#[inline]
fn next(&mut self) -> Option<char> {
while self.ready == 0 &&!self.done {
match (self.iter.next(), &self.kind) {
(Some(ch), &DecompositionType::Canonical) => {
super::char::decompose_canonical(ch, |d| self.push_back(d));
},
(Some(ch), &DecompositionType::Compatible) => {
super::char::decompose_compatible(ch, |d| self.push_back(d));
},
(None, _) => {
self.sort_pending();
self.done = true;
},
}
}
self.pop_front()
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, _) = self.iter.size_hint();
(lower, None)
}
}
impl<I: Iterator<Item=char> + Clone> fmt::Display for Decompositions<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for c in self.clone() {
f.write_char(c)?;
}
Ok(())
}
}
|
{
if self.ready == 0 {
None
} else {
self.ready -= 1;
Some(self.buffer.remove(0).1)
}
}
|
identifier_body
|
decompose.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::{self, Write};
#[derive(Clone)]
enum DecompositionType {
Canonical,
Compatible
}
/// External iterator for a string decomposition's characters.
#[derive(Clone)]
pub struct Decompositions<I> {
kind: DecompositionType,
iter: I,
done: bool,
// This buffer stores pairs of (canonical combining class, character),
// pushed onto the end in text order.
//
// It's split into two contiguous regions by the `ready` offset. The first
// `ready` pairs are sorted and ready to emit on demand. The "pending"
// suffix afterwards still needs more characters for us to be able to sort
// in canonical order and is not safe to emit.
buffer: Vec<(u8, char)>,
ready: usize,
}
#[inline]
pub fn new_canonical<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Canonical,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
#[inline]
pub fn new_compatible<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Compatible,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
impl<I> Decompositions<I> {
#[inline]
fn push_back(&mut self, ch: char) {
let class = super::char::canonical_combining_class(ch);
if class == 0 {
self.sort_pending();
}
self.buffer.push((class, ch));
}
#[inline]
fn sort_pending(&mut self) {
if self.ready == 0 && self.buffer.is_empty() {
return;
}
// NB: `sort_by_key` is stable, so it will preserve the original text's
// order within a combining class.
self.buffer[self.ready..].sort_by_key(|k| k.0);
self.ready = self.buffer.len();
}
#[inline]
fn pop_front(&mut self) -> Option<char> {
if self.ready == 0 {
None
} else {
self.ready -= 1;
Some(self.buffer.remove(0).1)
}
}
}
impl<I: Iterator<Item=char>> Iterator for Decompositions<I> {
type Item = char;
#[inline]
fn next(&mut self) -> Option<char> {
while self.ready == 0 &&!self.done {
match (self.iter.next(), &self.kind) {
(Some(ch), &DecompositionType::Canonical) => {
super::char::decompose_canonical(ch, |d| self.push_back(d));
},
(Some(ch), &DecompositionType::Compatible) =>
|
,
(None, _) => {
self.sort_pending();
self.done = true;
},
}
}
self.pop_front()
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, _) = self.iter.size_hint();
(lower, None)
}
}
impl<I: Iterator<Item=char> + Clone> fmt::Display for Decompositions<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for c in self.clone() {
f.write_char(c)?;
}
Ok(())
}
}
|
{
super::char::decompose_compatible(ch, |d| self.push_back(d));
}
|
conditional_block
|
decompose.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::{self, Write};
#[derive(Clone)]
enum DecompositionType {
Canonical,
Compatible
}
/// External iterator for a string decomposition's characters.
#[derive(Clone)]
pub struct Decompositions<I> {
kind: DecompositionType,
iter: I,
done: bool,
// This buffer stores pairs of (canonical combining class, character),
// pushed onto the end in text order.
//
// It's split into two contiguous regions by the `ready` offset. The first
// `ready` pairs are sorted and ready to emit on demand. The "pending"
// suffix afterwards still needs more characters for us to be able to sort
// in canonical order and is not safe to emit.
buffer: Vec<(u8, char)>,
ready: usize,
}
#[inline]
pub fn new_canonical<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Canonical,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
#[inline]
pub fn new_compatible<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Compatible,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
impl<I> Decompositions<I> {
#[inline]
fn push_back(&mut self, ch: char) {
let class = super::char::canonical_combining_class(ch);
if class == 0 {
self.sort_pending();
}
self.buffer.push((class, ch));
}
#[inline]
fn sort_pending(&mut self) {
if self.ready == 0 && self.buffer.is_empty() {
return;
}
// NB: `sort_by_key` is stable, so it will preserve the original text's
// order within a combining class.
self.buffer[self.ready..].sort_by_key(|k| k.0);
self.ready = self.buffer.len();
}
#[inline]
fn pop_front(&mut self) -> Option<char> {
if self.ready == 0 {
None
} else {
self.ready -= 1;
Some(self.buffer.remove(0).1)
}
}
}
impl<I: Iterator<Item=char>> Iterator for Decompositions<I> {
|
#[inline]
fn next(&mut self) -> Option<char> {
while self.ready == 0 &&!self.done {
match (self.iter.next(), &self.kind) {
(Some(ch), &DecompositionType::Canonical) => {
super::char::decompose_canonical(ch, |d| self.push_back(d));
},
(Some(ch), &DecompositionType::Compatible) => {
super::char::decompose_compatible(ch, |d| self.push_back(d));
},
(None, _) => {
self.sort_pending();
self.done = true;
},
}
}
self.pop_front()
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, _) = self.iter.size_hint();
(lower, None)
}
}
impl<I: Iterator<Item=char> + Clone> fmt::Display for Decompositions<I> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for c in self.clone() {
f.write_char(c)?;
}
Ok(())
}
}
|
type Item = char;
|
random_line_split
|
decompose.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::fmt::{self, Write};
#[derive(Clone)]
enum DecompositionType {
Canonical,
Compatible
}
/// External iterator for a string decomposition's characters.
#[derive(Clone)]
pub struct Decompositions<I> {
kind: DecompositionType,
iter: I,
done: bool,
// This buffer stores pairs of (canonical combining class, character),
// pushed onto the end in text order.
//
// It's split into two contiguous regions by the `ready` offset. The first
// `ready` pairs are sorted and ready to emit on demand. The "pending"
// suffix afterwards still needs more characters for us to be able to sort
// in canonical order and is not safe to emit.
buffer: Vec<(u8, char)>,
ready: usize,
}
#[inline]
pub fn new_canonical<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Canonical,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
#[inline]
pub fn new_compatible<I: Iterator<Item=char>>(iter: I) -> Decompositions<I> {
Decompositions {
kind: self::DecompositionType::Compatible,
iter: iter,
done: false,
buffer: Vec::new(),
ready: 0,
}
}
impl<I> Decompositions<I> {
#[inline]
fn push_back(&mut self, ch: char) {
let class = super::char::canonical_combining_class(ch);
if class == 0 {
self.sort_pending();
}
self.buffer.push((class, ch));
}
#[inline]
fn sort_pending(&mut self) {
if self.ready == 0 && self.buffer.is_empty() {
return;
}
// NB: `sort_by_key` is stable, so it will preserve the original text's
// order within a combining class.
self.buffer[self.ready..].sort_by_key(|k| k.0);
self.ready = self.buffer.len();
}
#[inline]
fn pop_front(&mut self) -> Option<char> {
if self.ready == 0 {
None
} else {
self.ready -= 1;
Some(self.buffer.remove(0).1)
}
}
}
impl<I: Iterator<Item=char>> Iterator for Decompositions<I> {
type Item = char;
#[inline]
fn next(&mut self) -> Option<char> {
while self.ready == 0 &&!self.done {
match (self.iter.next(), &self.kind) {
(Some(ch), &DecompositionType::Canonical) => {
super::char::decompose_canonical(ch, |d| self.push_back(d));
},
(Some(ch), &DecompositionType::Compatible) => {
super::char::decompose_compatible(ch, |d| self.push_back(d));
},
(None, _) => {
self.sort_pending();
self.done = true;
},
}
}
self.pop_front()
}
fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, _) = self.iter.size_hint();
(lower, None)
}
}
impl<I: Iterator<Item=char> + Clone> fmt::Display for Decompositions<I> {
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
for c in self.clone() {
f.write_char(c)?;
}
Ok(())
}
}
|
fmt
|
identifier_name
|
scrobctl.rs
|
#![deny(
missing_copy_implementations,
missing_debug_implementations,
// missing_docs, temp disable
clippy::all,
clippy::pedantic,
clippy::cargo,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications,
unused_extern_crates,
variant_size_differences
)]
use clap::{App, Arg, ArgMatches};
fn get_arguments() -> ArgMatches<'static> {
App::new("scrobc")
.version(env!("CARGO_PKG_VERSION"))
.author("Dom Rodriguez <[email protected]>")
.about("Client for controlling scrob")
.arg(
Arg::with_name("v")
.short("v")
.multiple(true)
.required(false)
.help("Sets the level of logging verbosity."),
)
.get_matches()
}
fn
|
() {
let _args = get_arguments();
unimplemented!();
}
|
main
|
identifier_name
|
scrobctl.rs
|
#![deny(
missing_copy_implementations,
missing_debug_implementations,
// missing_docs, temp disable
clippy::all,
clippy::pedantic,
|
unused_qualifications,
unused_extern_crates,
variant_size_differences
)]
use clap::{App, Arg, ArgMatches};
fn get_arguments() -> ArgMatches<'static> {
App::new("scrobc")
.version(env!("CARGO_PKG_VERSION"))
.author("Dom Rodriguez <[email protected]>")
.about("Client for controlling scrob")
.arg(
Arg::with_name("v")
.short("v")
.multiple(true)
.required(false)
.help("Sets the level of logging verbosity."),
)
.get_matches()
}
fn main() {
let _args = get_arguments();
unimplemented!();
}
|
clippy::cargo,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
|
random_line_split
|
scrobctl.rs
|
#![deny(
missing_copy_implementations,
missing_debug_implementations,
// missing_docs, temp disable
clippy::all,
clippy::pedantic,
clippy::cargo,
trivial_casts,
trivial_numeric_casts,
unsafe_code,
unused_import_braces,
unused_qualifications,
unused_extern_crates,
variant_size_differences
)]
use clap::{App, Arg, ArgMatches};
fn get_arguments() -> ArgMatches<'static> {
App::new("scrobc")
.version(env!("CARGO_PKG_VERSION"))
.author("Dom Rodriguez <[email protected]>")
.about("Client for controlling scrob")
.arg(
Arg::with_name("v")
.short("v")
.multiple(true)
.required(false)
.help("Sets the level of logging verbosity."),
)
.get_matches()
}
fn main()
|
{
let _args = get_arguments();
unimplemented!();
}
|
identifier_body
|
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Fetching
use std::{io, fmt, time};
use std::sync::Arc;
use std::sync::atomic::{self, AtomicBool};
use futures::{self, BoxFuture, Future};
use futures_cpupool::{CpuPool, CpuFuture};
use mime::{self, Mime};
use parking_lot::RwLock;
use reqwest;
/// Fetch abort control
#[derive(Default, Debug, Clone)]
pub struct Abort(Arc<AtomicBool>);
impl Abort {
/// Returns `true` if request is aborted.
pub fn is_aborted(&self) -> bool {
self.0.load(atomic::Ordering::SeqCst)
}
}
impl From<Arc<AtomicBool>> for Abort {
fn from(a: Arc<AtomicBool>) -> Self {
Abort(a)
}
}
/// Fetch
pub trait Fetch: Clone + Send + Sync +'static {
/// Result type
type Result: Future<Item=Response, Error=Error> + Send +'static;
/// Creates new Fetch object.
fn new() -> Result<Self, Error> where Self: Sized;
/// Spawn the future in context of this `Fetch` thread pool.
/// Implementation is optional.
fn process<F, I, E>(&self, f: F) -> BoxFuture<I, E> where
F: Future<Item=I, Error=E> + Send +'static,
I: Send +'static,
E: Send +'static,
{
f.boxed()
}
/// Fetch URL and get a future for the result.
/// Supports aborting the request in the middle of execution.
fn fetch_with_abort(&self, url: &str, abort: Abort) -> Self::Result;
/// Fetch URL and get a future for the result.
fn fetch(&self, url: &str) -> Self::Result {
self.fetch_with_abort(url, Default::default())
}
/// Fetch URL and get the result synchronously.
fn fetch_sync(&self, url: &str) -> Result<Response, Error> {
self.fetch(url).wait()
}
/// Closes this client
fn close(self) where Self: Sized {}
}
const CLIENT_TIMEOUT_SECONDS: u64 = 5;
/// Fetch client
pub struct Client {
client: RwLock<(time::Instant, Arc<reqwest::Client>)>,
pool: CpuPool,
limit: Option<usize>,
}
impl Clone for Client {
fn clone(&self) -> Self {
let (ref time, ref client) = *self.client.read();
Client {
client: RwLock::new((time.clone(), client.clone())),
pool: self.pool.clone(),
limit: self.limit.clone(),
}
}
}
impl Client {
fn new_client() -> Result<Arc<reqwest::Client>, Error> {
let mut client = reqwest::Client::new()?;
client.redirect(reqwest::RedirectPolicy::limited(5));
Ok(Arc::new(client))
}
fn with_limit(limit: Option<usize>) -> Result<Self, Error> {
Ok(Client {
client: RwLock::new((time::Instant::now(), Self::new_client()?)),
pool: CpuPool::new(4),
limit: limit,
})
}
fn client(&self) -> Result<Arc<reqwest::Client>, Error> {
{
let (ref time, ref client) = *self.client.read();
if time.elapsed() < time::Duration::from_secs(CLIENT_TIMEOUT_SECONDS) {
return Ok(client.clone());
}
}
let client = Self::new_client()?;
*self.client.write() = (time::Instant::now(), client.clone());
Ok(client)
}
}
impl Fetch for Client {
type Result = CpuFuture<Response, Error>;
fn new() -> Result<Self, Error> {
// Max 50MB will be downloaded.
Self::with_limit(Some(50*1024*1024))
}
fn process<F, I, E>(&self, f: F) -> BoxFuture<I, E> where
F: Future<Item=I, Error=E> + Send +'static,
I: Send +'static,
E: Send +'static,
{
self.pool.spawn(f).boxed()
}
fn fetch_with_abort(&self, url: &str, abort: Abort) -> Self::Result {
debug!(target: "fetch", "Fetching from: {:?}", url);
match self.client() {
Ok(client) => {
self.pool.spawn(FetchTask {
url: url.into(),
client: client,
limit: self.limit,
abort: abort,
})
},
Err(err) => {
self.pool.spawn(futures::future::err(err))
},
}
}
}
struct FetchTask {
url: String,
client: Arc<reqwest::Client>,
limit: Option<usize>,
abort: Abort,
}
impl Future for FetchTask {
// TODO [ToDr] timeouts handling?
type Item = Response;
type Error = Error;
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
if self.abort.is_aborted() {
trace!(target: "fetch", "Fetch of {:?} aborted.", self.url);
return Err(Error::Aborted);
}
trace!(target: "fetch", "Starting fetch task: {:?}", self.url);
let result = self.client.get(&self.url)
.header(reqwest::header::UserAgent("Parity Fetch".into()))
.send()?;
Ok(futures::Async::Ready(Response {
inner: ResponseInner::Response(result),
abort: self.abort.clone(),
limit: self.limit,
read: 0,
}))
}
}
/// Fetch Error
#[derive(Debug)]
pub enum Error {
/// Internal fetch error
Fetch(reqwest::Error),
/// Request aborted
Aborted,
}
impl From<reqwest::Error> for Error {
fn from(error: reqwest::Error) -> Self {
Error::Fetch(error)
}
}
enum ResponseInner {
Response(reqwest::Response),
Reader(Box<io::Read + Send>),
NotFound,
}
impl fmt::Debug for ResponseInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ResponseInner::Response(ref response) => response.fmt(f),
ResponseInner::NotFound => write!(f, "Not found"),
ResponseInner::Reader(_) => write!(f, "io Reader"),
}
}
}
/// A fetch response type.
#[derive(Debug)]
pub struct Response {
inner: ResponseInner,
abort: Abort,
limit: Option<usize>,
read: usize,
}
impl Response {
/// Creates new successfuly response reading from a file.
pub fn from_reader<R: io::Read + Send +'static>(reader: R) -> Self {
Response {
inner: ResponseInner::Reader(Box::new(reader)),
abort: Abort::default(),
limit: None,
read: 0,
}
}
/// Creates 404 response (useful for tests)
pub fn not_found() -> Self {
Response {
inner: ResponseInner::NotFound,
abort: Abort::default(),
limit: None,
read: 0,
}
}
/// Returns status code of this response.
pub fn status(&self) -> reqwest::StatusCode
|
/// Returns `true` if response status code is successful.
pub fn is_success(&self) -> bool {
self.status() == reqwest::StatusCode::Ok
}
/// Returns `true` if content type of this response is `text/html`
pub fn is_html(&self) -> bool {
match self.content_type() {
Some(Mime(mime::TopLevel::Text, mime::SubLevel::Html, _)) => true,
_ => false,
}
}
/// Returns content type of this response (if present)
pub fn content_type(&self) -> Option<Mime> {
match self.inner {
ResponseInner::Response(ref r) => {
let content_type = r.headers().get::<reqwest::header::ContentType>();
content_type.map(|mime| mime.0.clone())
},
_ => None,
}
}
}
impl io::Read for Response {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.abort.is_aborted() {
return Err(io::Error::new(io::ErrorKind::ConnectionAborted, "Fetch aborted."));
}
let res = match self.inner {
ResponseInner::Response(ref mut response) => response.read(buf),
ResponseInner::NotFound => return Ok(0),
ResponseInner::Reader(ref mut reader) => reader.read(buf),
};
// increase bytes read
if let Ok(read) = res {
self.read += read;
}
// check limit
match self.limit {
Some(limit) if limit < self.read => {
return Err(io::Error::new(io::ErrorKind::PermissionDenied, "Size limit reached."));
},
_ => {},
}
res
}
}
|
{
match self.inner {
ResponseInner::Response(ref r) => *r.status(),
ResponseInner::NotFound => reqwest::StatusCode::NotFound,
_ => reqwest::StatusCode::Ok,
}
}
|
identifier_body
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Fetching
use std::{io, fmt, time};
use std::sync::Arc;
use std::sync::atomic::{self, AtomicBool};
use futures::{self, BoxFuture, Future};
use futures_cpupool::{CpuPool, CpuFuture};
use mime::{self, Mime};
use parking_lot::RwLock;
use reqwest;
/// Fetch abort control
#[derive(Default, Debug, Clone)]
pub struct Abort(Arc<AtomicBool>);
impl Abort {
/// Returns `true` if request is aborted.
pub fn is_aborted(&self) -> bool {
self.0.load(atomic::Ordering::SeqCst)
}
}
impl From<Arc<AtomicBool>> for Abort {
fn from(a: Arc<AtomicBool>) -> Self {
Abort(a)
}
}
/// Fetch
pub trait Fetch: Clone + Send + Sync +'static {
/// Result type
type Result: Future<Item=Response, Error=Error> + Send +'static;
/// Creates new Fetch object.
fn new() -> Result<Self, Error> where Self: Sized;
/// Spawn the future in context of this `Fetch` thread pool.
/// Implementation is optional.
fn process<F, I, E>(&self, f: F) -> BoxFuture<I, E> where
F: Future<Item=I, Error=E> + Send +'static,
I: Send +'static,
E: Send +'static,
{
f.boxed()
}
/// Fetch URL and get a future for the result.
/// Supports aborting the request in the middle of execution.
fn fetch_with_abort(&self, url: &str, abort: Abort) -> Self::Result;
/// Fetch URL and get a future for the result.
fn fetch(&self, url: &str) -> Self::Result {
self.fetch_with_abort(url, Default::default())
}
/// Fetch URL and get the result synchronously.
fn fetch_sync(&self, url: &str) -> Result<Response, Error> {
self.fetch(url).wait()
}
/// Closes this client
fn close(self) where Self: Sized {}
}
const CLIENT_TIMEOUT_SECONDS: u64 = 5;
/// Fetch client
pub struct Client {
client: RwLock<(time::Instant, Arc<reqwest::Client>)>,
pool: CpuPool,
limit: Option<usize>,
}
impl Clone for Client {
fn clone(&self) -> Self {
let (ref time, ref client) = *self.client.read();
Client {
client: RwLock::new((time.clone(), client.clone())),
pool: self.pool.clone(),
limit: self.limit.clone(),
}
}
}
impl Client {
fn new_client() -> Result<Arc<reqwest::Client>, Error> {
let mut client = reqwest::Client::new()?;
client.redirect(reqwest::RedirectPolicy::limited(5));
Ok(Arc::new(client))
}
fn with_limit(limit: Option<usize>) -> Result<Self, Error> {
Ok(Client {
client: RwLock::new((time::Instant::now(), Self::new_client()?)),
pool: CpuPool::new(4),
limit: limit,
})
}
fn client(&self) -> Result<Arc<reqwest::Client>, Error> {
{
let (ref time, ref client) = *self.client.read();
if time.elapsed() < time::Duration::from_secs(CLIENT_TIMEOUT_SECONDS) {
return Ok(client.clone());
}
}
let client = Self::new_client()?;
*self.client.write() = (time::Instant::now(), client.clone());
Ok(client)
}
}
impl Fetch for Client {
type Result = CpuFuture<Response, Error>;
fn new() -> Result<Self, Error> {
// Max 50MB will be downloaded.
Self::with_limit(Some(50*1024*1024))
}
fn process<F, I, E>(&self, f: F) -> BoxFuture<I, E> where
F: Future<Item=I, Error=E> + Send +'static,
I: Send +'static,
E: Send +'static,
{
self.pool.spawn(f).boxed()
}
fn fetch_with_abort(&self, url: &str, abort: Abort) -> Self::Result {
debug!(target: "fetch", "Fetching from: {:?}", url);
match self.client() {
Ok(client) => {
self.pool.spawn(FetchTask {
url: url.into(),
client: client,
limit: self.limit,
abort: abort,
})
},
Err(err) => {
self.pool.spawn(futures::future::err(err))
},
}
}
}
struct FetchTask {
url: String,
client: Arc<reqwest::Client>,
limit: Option<usize>,
abort: Abort,
}
impl Future for FetchTask {
// TODO [ToDr] timeouts handling?
type Item = Response;
type Error = Error;
fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
if self.abort.is_aborted() {
trace!(target: "fetch", "Fetch of {:?} aborted.", self.url);
return Err(Error::Aborted);
}
trace!(target: "fetch", "Starting fetch task: {:?}", self.url);
let result = self.client.get(&self.url)
.header(reqwest::header::UserAgent("Parity Fetch".into()))
.send()?;
Ok(futures::Async::Ready(Response {
inner: ResponseInner::Response(result),
abort: self.abort.clone(),
limit: self.limit,
read: 0,
}))
}
}
/// Fetch Error
#[derive(Debug)]
pub enum Error {
/// Internal fetch error
Fetch(reqwest::Error),
/// Request aborted
Aborted,
}
impl From<reqwest::Error> for Error {
fn from(error: reqwest::Error) -> Self {
Error::Fetch(error)
}
}
enum ResponseInner {
Response(reqwest::Response),
Reader(Box<io::Read + Send>),
NotFound,
}
impl fmt::Debug for ResponseInner {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ResponseInner::Response(ref response) => response.fmt(f),
ResponseInner::NotFound => write!(f, "Not found"),
ResponseInner::Reader(_) => write!(f, "io Reader"),
}
}
}
/// A fetch response type.
#[derive(Debug)]
pub struct Response {
inner: ResponseInner,
abort: Abort,
limit: Option<usize>,
read: usize,
}
impl Response {
/// Creates new successfuly response reading from a file.
pub fn from_reader<R: io::Read + Send +'static>(reader: R) -> Self {
Response {
inner: ResponseInner::Reader(Box::new(reader)),
abort: Abort::default(),
limit: None,
read: 0,
}
}
/// Creates 404 response (useful for tests)
pub fn not_found() -> Self {
Response {
inner: ResponseInner::NotFound,
abort: Abort::default(),
limit: None,
read: 0,
}
}
/// Returns status code of this response.
pub fn status(&self) -> reqwest::StatusCode {
match self.inner {
ResponseInner::Response(ref r) => *r.status(),
ResponseInner::NotFound => reqwest::StatusCode::NotFound,
_ => reqwest::StatusCode::Ok,
}
}
/// Returns `true` if response status code is successful.
pub fn is_success(&self) -> bool {
self.status() == reqwest::StatusCode::Ok
}
/// Returns `true` if content type of this response is `text/html`
pub fn is_html(&self) -> bool {
match self.content_type() {
Some(Mime(mime::TopLevel::Text, mime::SubLevel::Html, _)) => true,
_ => false,
}
}
/// Returns content type of this response (if present)
pub fn content_type(&self) -> Option<Mime> {
match self.inner {
ResponseInner::Response(ref r) => {
let content_type = r.headers().get::<reqwest::header::ContentType>();
content_type.map(|mime| mime.0.clone())
},
_ => None,
}
}
}
impl io::Read for Response {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
if self.abort.is_aborted() {
return Err(io::Error::new(io::ErrorKind::ConnectionAborted, "Fetch aborted."));
|
}
let res = match self.inner {
ResponseInner::Response(ref mut response) => response.read(buf),
ResponseInner::NotFound => return Ok(0),
ResponseInner::Reader(ref mut reader) => reader.read(buf),
};
// increase bytes read
if let Ok(read) = res {
self.read += read;
}
// check limit
match self.limit {
Some(limit) if limit < self.read => {
return Err(io::Error::new(io::ErrorKind::PermissionDenied, "Size limit reached."));
},
_ => {},
}
res
}
}
|
random_line_split
|
|
client.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Fetching
use std::{io, fmt, time};
use std::sync::Arc;
use std::sync::atomic::{self, AtomicBool};
use futures::{self, BoxFuture, Future};
use futures_cpupool::{CpuPool, CpuFuture};
use mime::{self, Mime};
use parking_lot::RwLock;
use reqwest;
/// Fetch abort control
#[derive(Default, Debug, Clone)]
pub struct Abort(Arc<AtomicBool>);

impl Abort {
    /// Reports whether the abort flag for this request has been raised.
    pub fn is_aborted(&self) -> bool {
        let Abort(ref flag) = *self;
        flag.load(atomic::Ordering::SeqCst)
    }
}
// Allows an externally owned shared flag (e.g. one also held by another
// subsystem) to be used directly as an abort handle.
impl From<Arc<AtomicBool>> for Abort {
    fn from(a: Arc<AtomicBool>) -> Self {
        Abort(a)
    }
}
/// Fetch
pub trait Fetch: Clone + Send + Sync +'static {
/// Result type
type Result: Future<Item=Response, Error=Error> + Send +'static;
/// Creates new Fetch object.
fn new() -> Result<Self, Error> where Self: Sized;
/// Spawn the future in context of this `Fetch` thread pool.
/// Implementation is optional.
fn process<F, I, E>(&self, f: F) -> BoxFuture<I, E> where
F: Future<Item=I, Error=E> + Send +'static,
I: Send +'static,
E: Send +'static,
{
f.boxed()
}
/// Fetch URL and get a future for the result.
/// Supports aborting the request in the middle of execution.
fn fetch_with_abort(&self, url: &str, abort: Abort) -> Self::Result;
/// Fetch URL and get a future for the result.
fn fetch(&self, url: &str) -> Self::Result {
self.fetch_with_abort(url, Default::default())
}
/// Fetch URL and get the result synchronously.
fn fetch_sync(&self, url: &str) -> Result<Response, Error> {
self.fetch(url).wait()
}
/// Closes this client
fn
|
(self) where Self: Sized {}
}
const CLIENT_TIMEOUT_SECONDS: u64 = 5;
/// Fetch client
pub struct Client {
client: RwLock<(time::Instant, Arc<reqwest::Client>)>,
pool: CpuPool,
limit: Option<usize>,
}
// Manual `Clone`: `RwLock` is not `Clone`, so take a read lock, copy the
// refresh timestamp and bump the `Arc` refcount on the inner HTTP client,
// then wrap both in a fresh lock. The clone shares the same underlying
// `reqwest::Client` until either copy refreshes it.
impl Clone for Client {
    fn clone(&self) -> Self {
        let (ref time, ref client) = *self.client.read();
        Client {
            client: RwLock::new((time.clone(), client.clone())),
            pool: self.pool.clone(),
            limit: self.limit.clone(),
        }
    }
}
impl Client {
    // Builds a fresh reqwest client that follows at most 5 redirects.
    fn new_client() -> Result<Arc<reqwest::Client>, Error> {
        let mut client = reqwest::Client::new()?;
        client.redirect(reqwest::RedirectPolicy::limited(5));
        Ok(Arc::new(client))
    }

    // Constructs a `Client` with an optional download size limit in bytes
    // (`None` = unlimited) and a 4-thread pool for fetch tasks.
    fn with_limit(limit: Option<usize>) -> Result<Self, Error> {
        Ok(Client {
            client: RwLock::new((time::Instant::now(), Self::new_client()?)),
            pool: CpuPool::new(4),
            limit: limit,
        })
    }

    // Returns the shared HTTP client, rebuilding it once it is older than
    // CLIENT_TIMEOUT_SECONDS. The read lock is confined to the inner scope
    // so it is released before the write lock is taken below.
    fn client(&self) -> Result<Arc<reqwest::Client>, Error> {
        {
            let (ref time, ref client) = *self.client.read();
            if time.elapsed() < time::Duration::from_secs(CLIENT_TIMEOUT_SECONDS) {
                return Ok(client.clone());
            }
        }
        let client = Self::new_client()?;
        *self.client.write() = (time::Instant::now(), client.clone());
        Ok(client)
    }
}
impl Fetch for Client {
    type Result = CpuFuture<Response, Error>;

    fn new() -> Result<Self, Error> {
        // Max 50MB will be downloaded.
        Self::with_limit(Some(50*1024*1024))
    }

    // Overrides the trait default: runs the future on this client's own
    // CpuPool instead of boxing it inline.
    fn process<F, I, E>(&self, f: F) -> BoxFuture<I, E> where
        F: Future<Item=I, Error=E> + Send + 'static,
        I: Send + 'static,
        E: Send + 'static,
    {
        self.pool.spawn(f).boxed()
    }

    // Spawns the fetch on the thread pool. If obtaining an HTTP client
    // fails, the error is surfaced through the returned future rather than
    // panicking here.
    fn fetch_with_abort(&self, url: &str, abort: Abort) -> Self::Result {
        debug!(target: "fetch", "Fetching from: {:?}", url);
        match self.client() {
            Ok(client) => {
                self.pool.spawn(FetchTask {
                    url: url.into(),
                    client: client,
                    limit: self.limit,
                    abort: abort,
                })
            },
            Err(err) => {
                self.pool.spawn(futures::future::err(err))
            },
        }
    }
}
// A single in-flight fetch. The HTTP request itself happens inside
// `Future::poll`, so this type is meant to be driven on the `Client`'s
// CpuPool (see `Client::fetch_with_abort`).
struct FetchTask {
    /// Target URL to fetch.
    url: String,
    /// Shared HTTP client used for the request.
    client: Arc<reqwest::Client>,
    /// Optional maximum number of bytes the response body may yield.
    limit: Option<usize>,
    /// Cooperative cancellation flag, checked before the request starts.
    abort: Abort,
}

impl Future for FetchTask {
    // TODO [ToDr] timeouts handling?
    type Item = Response;
    type Error = Error;

    fn poll(&mut self) -> futures::Poll<Self::Item, Self::Error> {
        // Bail out early if the caller aborted before the request started.
        if self.abort.is_aborted() {
            trace!(target: "fetch", "Fetch of {:?} aborted.", self.url);
            return Err(Error::Aborted);
        }
        trace!(target: "fetch", "Starting fetch task: {:?}", self.url);
        // NOTE(review): `send()` here blocks until the response headers
        // arrive; this appears acceptable only because the future runs on
        // the CpuPool — confirm it is never polled on an event-loop thread.
        let result = self.client.get(&self.url)
            .header(reqwest::header::UserAgent("Parity Fetch".into()))
            .send()?;
        // Body reading is deferred: `Response` streams it via `io::Read`,
        // enforcing `limit` and `abort` as it goes.
        Ok(futures::Async::Ready(Response {
            inner: ResponseInner::Response(result),
            abort: self.abort.clone(),
            limit: self.limit,
            read: 0,
        }))
    }
}
/// Fetch Error
#[derive(Debug)]
pub enum Error {
/// Internal fetch error
Fetch(reqwest::Error),
/// Request aborted
Aborted,
}
impl From<reqwest::Error> for Error {
fn from(error: reqwest::Error) -> Self {
Error::Fetch(error)
}
}
enum ResponseInner {
Response(reqwest::Response),
Reader(Box<io::Read + Send>),
NotFound,
}
impl fmt::Debug for ResponseInner {
    /// Debug formatting: delegates to the inner HTTP response where one
    /// exists, otherwise prints a short fixed tag for the variant.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ResponseInner::Response(ref response) => response.fmt(f),
            ResponseInner::Reader(_) => f.write_str("io Reader"),
            ResponseInner::NotFound => f.write_str("Not found"),
        }
    }
}
/// A fetch response type.
#[derive(Debug)]
pub struct Response {
inner: ResponseInner,
abort: Abort,
limit: Option<usize>,
read: usize,
}
impl Response {
    /// Creates a new successful response streaming from the given reader
    /// (no abort flag, no size limit).
    pub fn from_reader<R: io::Read + Send + 'static>(reader: R) -> Self {
        Response {
            inner: ResponseInner::Reader(Box::new(reader)),
            abort: Abort::default(),
            limit: None,
            read: 0,
        }
    }

    /// Creates 404 response (useful for tests).
    pub fn not_found() -> Self {
        Response {
            inner: ResponseInner::NotFound,
            abort: Abort::default(),
            limit: None,
            read: 0,
        }
    }

    /// Returns status code of this response.
    /// Reader-backed responses always report 200 OK (the `_` arm).
    pub fn status(&self) -> reqwest::StatusCode {
        match self.inner {
            ResponseInner::Response(ref r) => *r.status(),
            ResponseInner::NotFound => reqwest::StatusCode::NotFound,
            _ => reqwest::StatusCode::Ok,
        }
    }

    /// Returns `true` if response status code is exactly 200 OK.
    pub fn is_success(&self) -> bool {
        self.status() == reqwest::StatusCode::Ok
    }

    /// Returns `true` if content type of this response is `text/html`.
    pub fn is_html(&self) -> bool {
        match self.content_type() {
            Some(Mime(mime::TopLevel::Text, mime::SubLevel::Html, _)) => true,
            _ => false,
        }
    }

    /// Returns content type of this response (if present).
    /// Only HTTP-backed responses carry one; readers and 404s yield `None`.
    pub fn content_type(&self) -> Option<Mime> {
        match self.inner {
            ResponseInner::Response(ref r) => {
                let content_type = r.headers().get::<reqwest::header::ContentType>();
                content_type.map(|mime| mime.0.clone())
            },
            _ => None,
        }
    }
}
impl io::Read for Response {
    // Streams the body while enforcing the abort flag and the byte limit.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        // The abort flag is re-checked on every read so a long download can
        // be cancelled mid-stream.
        if self.abort.is_aborted() {
            return Err(io::Error::new(io::ErrorKind::ConnectionAborted, "Fetch aborted."));
        }
        let res = match self.inner {
            ResponseInner::Response(ref mut response) => response.read(buf),
            // 404 responses have no body.
            ResponseInner::NotFound => return Ok(0),
            ResponseInner::Reader(ref mut reader) => reader.read(buf),
        };
        // increase bytes read
        if let Ok(read) = res {
            self.read += read;
        }
        // check limit; this runs after the read, so the final read may
        // overshoot the limit by up to one buffer before the error fires
        match self.limit {
            Some(limit) if limit < self.read => {
                return Err(io::Error::new(io::ErrorKind::PermissionDenied, "Size limit reached."));
            },
            _ => {},
        }
        res
    }
}
|
close
|
identifier_name
|
defs.rs
|
//-
// Copyright (c) 2016, 2017, Jason Lingle
//
// This file is part of Ensync.
//
// Ensync is free software: you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// Ensync is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
// details.
//
// You should have received a copy of the GNU General Public License along with
// Ensync. If not, see <http://www.gnu.org/licenses/>.
use std::ffi::{OsStr, OsString};
use std::fmt;
/// Type for content hashes of regular files and for blob identifiers on the
/// server.
///
/// In practise, this is a 256-bit SHA-3 sum.
pub type HashId = [u8; 32];
/// The sentinal hash value indicating an uncomputed hash.
///
/// One does not compare hashes against this, since the hashes on files can be
/// out-of-date anyway and must be computed when the file is uploaded in any
/// case.
pub const UNKNOWN_HASH: HashId = [0; 32];
/// The name of the directory which is a sibling to the configuration and which
/// is the root of Ensync's private data.
pub const PRIVATE_DIR_NAME: &'static str = "internal.ensync";
/// Prefix of invasive temporary files (i.e., those created implicitly by the
/// sync process).
pub const INVASIVE_TMP_PREFIX: &'static str = "ensync_tmp_";
/// Wraps a `HashId` to display it in hexadecimal format.
#[derive(Clone, Copy)]
pub struct DisplayHash(pub HashId);
impl fmt::Display for DisplayHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}\
{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}\
{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}\
{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}",
self.0[0],
self.0[1],
self.0[2],
self.0[3],
self.0[4],
self.0[5],
self.0[6],
self.0[7],
self.0[8],
self.0[9],
self.0[10],
self.0[11],
self.0[12],
self.0[13],
self.0[14],
self.0[15],
self.0[16],
self.0[17],
self.0[18],
self.0[19],
self.0[20],
self.0[21],
self.0[22],
self.0[23],
self.0[24],
self.0[25],
self.0[26],
self.0[27],
self.0[28],
self.0[29],
self.0[30],
self.0[31]
)
}
}
// These were originally defined to `mode_t`, `off_t`, `time_t`, and `ino_t`
// when we planned to use the POSIX API directly.
pub type FileMode = u32;
pub type FileSize = u64;
pub type FileTime = i64;
pub type FileInode = u64;
/// Shallow data about a file in the sync process, excluding its name.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum FileData {
/// A directory. The only immediate data is its mode. In a file stream, the
/// receiver must either push the new directory or request it to be
/// discarded.
Directory(FileMode),
/// A regular file. Data is mode, size in bytes, last modified, content
/// hash. Note that the content hash may be incorrect, and always will be
/// for files freshly streamed off the client filesystem.
Regular(FileMode, FileSize, FileTime, HashId),
/// A symbolic link. The only data is its actual content.
Symlink(OsString),
/// Any other type of non-regular file.
Special,
}
impl FileData {
/// If both `self` and `other` have a `FileMode`, set `self`'s mode to
/// `other`'s.
pub fn transrich_unix_mode(&mut self, other: &FileData) {
match *self {
FileData::Directory(ref mut dst)
| FileData::Regular(ref mut dst, _, _, _) => match *other {
FileData::Directory(src) | FileData::Regular(src, _, _, _) => {
*dst = src
}
_ => (),
},
_ => (),
}
}
/// Returns whether this `FileData` is a directory.
pub fn is_dir(&self) -> bool {
match *self {
FileData::Directory(_) => true,
_ => false,
}
}
/// Returns whether both `self` and `other` are regular files and `self`'s
/// modification time is greater than `other`'s.
pub fn
|
(&self, other: &Self) -> bool {
match (self, other) {
(
&FileData::Regular(_, _, tself, _),
&FileData::Regular(_, _, tother, _),
) => tself > tother,
_ => false,
}
}
/// Returns whether this file object and another one represent the same
/// content.
///
/// This is slightly less strict than a full equality test, ignoring some
/// of the fields for regular files.
pub fn matches(&self, that: &FileData) -> bool {
use self::FileData::*;
match (self, that) {
(&Directory(m1), &Directory(m2)) => m1 == m2,
(&Regular(m1, _, t1, ref h1), &Regular(m2, _, t2, ref h2)) => {
m1 == m2 && t1 == t2 && *h1 == *h2
}
(&Symlink(ref t1), &Symlink(ref t2)) => *t1 == *t2,
(&Special, &Special) => true,
_ => false,
}
}
/// Returns whether non-metadata about this file and another one match.
pub fn matches_data(&self, that: &FileData) -> bool {
use self::FileData::*;
match (self, that) {
(&Directory(m1), &Directory(m2)) => m1 == m2,
(&Regular(m1, _, _, ref h1), &Regular(m2, _, _, ref h2)) => {
m1 == m2 && *h1 == *h2
}
(&Symlink(ref t1), &Symlink(ref t2)) => *t1 == *t2,
(&Special, &Special) => true,
_ => false,
}
}
/// Returns whether this file object and another one have the same content
/// except for file mode.
pub fn matches_content(&self, that: &FileData) -> bool {
use self::FileData::*;
match (self, that) {
(&Directory(_), &Directory(_)) => true,
(&Regular(_, _, _, ref h1), &Regular(_, _, _, ref h2)) => {
*h1 == *h2
}
(&Symlink(ref t1), &Symlink(ref t2)) => *t1 == *t2,
(&Special, &Special) => true,
_ => false,
}
}
}
/// Convenience for passing a file name and data together.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct File<'a>(pub &'a OsStr, pub &'a FileData);
pub fn is_dir(fd: Option<&FileData>) -> bool {
match fd {
Some(&FileData::Directory(_)) => true,
_ => false,
}
}
#[cfg(test)]
pub mod test_helpers {
use std::ffi::{OsStr, OsString};
pub fn oss(s: &str) -> OsString {
OsStr::new(s).to_owned()
}
}
#[cfg(test)]
mod test {
use super::test_helpers::*;
use super::*;
#[test]
fn file_newer_than() {
let older = FileData::Regular(0o777, 0, 42, [1; 32]);
let newer = FileData::Regular(0o666, 0, 56, [2; 32]);
assert!(newer.newer_than(&older));
assert!(!older.newer_than(&newer));
assert!(!FileData::Special.newer_than(&older));
assert!(!newer.newer_than(&FileData::Special));
}
#[test]
fn file_matches() {
let f1 = FileData::Regular(0o777, 0, 42, [1; 32]);
let f2 = FileData::Regular(0o666, 0, 56, [1; 32]);
let f3 = FileData::Regular(0o777, 0, 42, [2; 32]);
let f4 = FileData::Regular(0o777, 0, 42, [1; 32]);
let d1 = FileData::Directory(0o777);
let d2 = FileData::Directory(0o666);
let s1 = FileData::Symlink(oss("foo"));
let s2 = FileData::Symlink(oss("bar"));
let s3 = FileData::Symlink(oss("foo"));
let special = FileData::Special;
assert!(f1.matches(&f1));
assert!(f1.matches(&f4));
assert!(!f1.matches(&f2));
assert!(!f1.matches(&f3));
assert!(!f1.matches(&d1));
assert!(!f1.matches(&s1));
assert!(!f1.matches(&special));
assert!(d1.matches(&d1));
assert!(!d1.matches(&d2));
assert!(!d1.matches(&f1));
assert!(s1.matches(&s1));
assert!(s1.matches(&s3));
assert!(!s1.matches(&s2));
assert!(!s1.matches(&special));
assert!(special.matches(&special));
assert!(!special.matches(&f1));
assert!(f1.matches_content(&f1));
assert!(f1.matches_content(&f4));
assert!(f1.matches_content(&f2));
assert!(!f1.matches_content(&f3));
assert!(!f1.matches_content(&d1));
assert!(!f1.matches_content(&s1));
assert!(!f1.matches_content(&special));
assert!(d1.matches_content(&d1));
assert!(d1.matches_content(&d2));
assert!(!d1.matches_content(&f1));
assert!(s1.matches_content(&s1));
assert!(s1.matches_content(&s3));
assert!(!s1.matches_content(&s2));
assert!(!s1.matches_content(&special));
assert!(special.matches_content(&special));
assert!(!special.matches_content(&f1));
}
}
|
newer_than
|
identifier_name
|
defs.rs
|
//-
// Copyright (c) 2016, 2017, Jason Lingle
//
// This file is part of Ensync.
//
// Ensync is free software: you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// Ensync is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
// details.
|
use std::ffi::{OsStr, OsString};
use std::fmt;
/// Type for content hashes of regular files and for blob identifiers on the
/// server.
///
/// In practice, this is a 256-bit SHA-3 sum.
pub type HashId = [u8; 32];
/// The sentinel hash value indicating an uncomputed hash.
///
/// One does not compare hashes against this, since the hashes on files can be
/// out-of-date anyway and must be computed when the file is uploaded in any
/// case.
pub const UNKNOWN_HASH: HashId = [0; 32];
/// The name of the directory which is a sibling to the configuration and which
/// is the root of Ensync's private data.
pub const PRIVATE_DIR_NAME: &'static str = "internal.ensync";
/// Prefix of invasive temporary files (i.e., those created implicitly by the
/// sync process).
pub const INVASIVE_TMP_PREFIX: &'static str = "ensync_tmp_";
/// Wraps a `HashId` to display it in hexadecimal format.
#[derive(Clone, Copy)]
pub struct DisplayHash(pub HashId);

impl fmt::Display for DisplayHash {
    /// Writes all 32 bytes as lower-case, zero-padded hex (64 characters),
    /// producing the same output as the previous hand-unrolled 32-argument
    /// `write!` call.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for byte in self.0.iter() {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}
// These were originally defined to `mode_t`, `off_t`, `time_t`, and `ino_t`
// when we planned to use the POSIX API directly.
pub type FileMode = u32;
pub type FileSize = u64;
pub type FileTime = i64;
pub type FileInode = u64;
/// Shallow data about a file in the sync process, excluding its name.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum FileData {
/// A directory. The only immediate data is its mode. In a file stream, the
/// receiver must either push the new directory or request it to be
/// discarded.
Directory(FileMode),
/// A regular file. Data is mode, size in bytes, last modified, content
/// hash. Note that the content hash may be incorrect, and always will be
/// for files freshly streamed off the client filesystem.
Regular(FileMode, FileSize, FileTime, HashId),
/// A symbolic link. The only data is its actual content.
Symlink(OsString),
/// Any other type of non-regular file.
Special,
}
impl FileData {
    /// If both `self` and `other` have a `FileMode`, set `self`'s mode to
    /// `other`'s.
    ///
    /// The mode is copied across any Directory/Regular combination, in
    /// either direction; other variants are left untouched.
    pub fn transrich_unix_mode(&mut self, other: &FileData) {
        match *self {
            FileData::Directory(ref mut dst)
            | FileData::Regular(ref mut dst, _, _, _) => match *other {
                FileData::Directory(src) | FileData::Regular(src, _, _, _) => {
                    *dst = src
                }
                _ => (),
            },
            _ => (),
        }
    }

    /// Returns whether this `FileData` is a directory.
    pub fn is_dir(&self) -> bool {
        match *self {
            FileData::Directory(_) => true,
            _ => false,
        }
    }

    /// Returns whether both `self` and `other` are regular files and `self`'s
    /// modification time is greater than `other`'s.
    pub fn newer_than(&self, other: &Self) -> bool {
        match (self, other) {
            (
                &FileData::Regular(_, _, tself, _),
                &FileData::Regular(_, _, tother, _),
            ) => tself > tother,
            _ => false,
        }
    }

    /// Returns whether this file object and another one represent the same
    /// content.
    ///
    /// This is slightly less strict than a full equality test, ignoring some
    /// of the fields for regular files (size, and for `matches_data` below,
    /// also the timestamp).
    pub fn matches(&self, that: &FileData) -> bool {
        use self::FileData::*;
        match (self, that) {
            (&Directory(m1), &Directory(m2)) => m1 == m2,
            (&Regular(m1, _, t1, ref h1), &Regular(m2, _, t2, ref h2)) => {
                m1 == m2 && t1 == t2 && *h1 == *h2
            }
            (&Symlink(ref t1), &Symlink(ref t2)) => *t1 == *t2,
            (&Special, &Special) => true,
            _ => false,
        }
    }

    /// Returns whether non-metadata about this file and another one match
    /// (like `matches`, but additionally ignoring the modification time).
    pub fn matches_data(&self, that: &FileData) -> bool {
        use self::FileData::*;
        match (self, that) {
            (&Directory(m1), &Directory(m2)) => m1 == m2,
            (&Regular(m1, _, _, ref h1), &Regular(m2, _, _, ref h2)) => {
                m1 == m2 && *h1 == *h2
            }
            (&Symlink(ref t1), &Symlink(ref t2)) => *t1 == *t2,
            (&Special, &Special) => true,
            _ => false,
        }
    }

    /// Returns whether this file object and another one have the same content
    /// except for file mode.
    pub fn matches_content(&self, that: &FileData) -> bool {
        use self::FileData::*;
        match (self, that) {
            (&Directory(_), &Directory(_)) => true,
            (&Regular(_, _, _, ref h1), &Regular(_, _, _, ref h2)) => {
                *h1 == *h2
            }
            (&Symlink(ref t1), &Symlink(ref t2)) => *t1 == *t2,
            (&Special, &Special) => true,
            _ => false,
        }
    }
}
/// Convenience for passing a file name and data together.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct File<'a>(pub &'a OsStr, pub &'a FileData);
/// Free-function convenience: `true` iff `fd` is present and is a directory.
pub fn is_dir(fd: Option<&FileData>) -> bool {
    if let Some(&FileData::Directory(_)) = fd {
        true
    } else {
        false
    }
}
#[cfg(test)]
pub mod test_helpers {
    use std::ffi::{OsStr, OsString};

    /// Shorthand used by the tests: builds an owned `OsString` from a `&str`.
    pub fn oss(s: &str) -> OsString {
        let raw: &OsStr = OsStr::new(s);
        OsString::from(raw)
    }
}
#[cfg(test)]
mod test {
use super::test_helpers::*;
use super::*;
#[test]
fn file_newer_than() {
let older = FileData::Regular(0o777, 0, 42, [1; 32]);
let newer = FileData::Regular(0o666, 0, 56, [2; 32]);
assert!(newer.newer_than(&older));
assert!(!older.newer_than(&newer));
assert!(!FileData::Special.newer_than(&older));
assert!(!newer.newer_than(&FileData::Special));
}
#[test]
fn file_matches() {
let f1 = FileData::Regular(0o777, 0, 42, [1; 32]);
let f2 = FileData::Regular(0o666, 0, 56, [1; 32]);
let f3 = FileData::Regular(0o777, 0, 42, [2; 32]);
let f4 = FileData::Regular(0o777, 0, 42, [1; 32]);
let d1 = FileData::Directory(0o777);
let d2 = FileData::Directory(0o666);
let s1 = FileData::Symlink(oss("foo"));
let s2 = FileData::Symlink(oss("bar"));
let s3 = FileData::Symlink(oss("foo"));
let special = FileData::Special;
assert!(f1.matches(&f1));
assert!(f1.matches(&f4));
assert!(!f1.matches(&f2));
assert!(!f1.matches(&f3));
assert!(!f1.matches(&d1));
assert!(!f1.matches(&s1));
assert!(!f1.matches(&special));
assert!(d1.matches(&d1));
assert!(!d1.matches(&d2));
assert!(!d1.matches(&f1));
assert!(s1.matches(&s1));
assert!(s1.matches(&s3));
assert!(!s1.matches(&s2));
assert!(!s1.matches(&special));
assert!(special.matches(&special));
assert!(!special.matches(&f1));
assert!(f1.matches_content(&f1));
assert!(f1.matches_content(&f4));
assert!(f1.matches_content(&f2));
assert!(!f1.matches_content(&f3));
assert!(!f1.matches_content(&d1));
assert!(!f1.matches_content(&s1));
assert!(!f1.matches_content(&special));
assert!(d1.matches_content(&d1));
assert!(d1.matches_content(&d2));
assert!(!d1.matches_content(&f1));
assert!(s1.matches_content(&s1));
assert!(s1.matches_content(&s3));
assert!(!s1.matches_content(&s2));
assert!(!s1.matches_content(&special));
assert!(special.matches_content(&special));
assert!(!special.matches_content(&f1));
}
}
|
//
// You should have received a copy of the GNU General Public License along with
// Ensync. If not, see <http://www.gnu.org/licenses/>.
|
random_line_split
|
get_backup_keys.rs
|
//! `GET /_matrix/client/*/room_keys/keys`
//!
//! Retrieve all keys from a backup version.
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3room_keyskeys
use std::collections::BTreeMap;
use ruma_common::{api::ruma_api, RoomId};
use crate::backup::RoomKeyBackup;
ruma_api! {
metadata: {
description: "Retrieve all keys from a backup version.",
method: GET,
name: "get_backup_keys",
unstable_path: "/_matrix/client/unstable/room_keys/keys",
r0_path: "/_matrix/client/r0/room_keys/keys",
stable_path: "/_matrix/client/v3/room_keys/keys",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The backup version to retrieve keys from.
#[ruma_api(query)]
pub version: &'a str,
}
response: {
/// A map from room IDs to session IDs to key data.
pub rooms: BTreeMap<Box<RoomId>, RoomKeyBackup>,
}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given version.
pub fn new(version: &'a str) -> Self {
Self { version }
}
}
impl Response {
/// Creates a new `Response` with the given room key backups.
pub fn
|
(rooms: BTreeMap<Box<RoomId>, RoomKeyBackup>) -> Self {
Self { rooms }
}
}
}
|
new
|
identifier_name
|
get_backup_keys.rs
|
//! `GET /_matrix/client/*/room_keys/keys`
//!
//! Retrieve all keys from a backup version.
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3room_keyskeys
use std::collections::BTreeMap;
use ruma_common::{api::ruma_api, RoomId};
use crate::backup::RoomKeyBackup;
// (A stray dataset-residue `|` token inside the request block has been
// removed; the macro input is otherwise unchanged.)
ruma_api! {
    metadata: {
        description: "Retrieve all keys from a backup version.",
        method: GET,
        name: "get_backup_keys",
        unstable_path: "/_matrix/client/unstable/room_keys/keys",
        r0_path: "/_matrix/client/r0/room_keys/keys",
        stable_path: "/_matrix/client/v3/room_keys/keys",
        rate_limited: true,
        authentication: AccessToken,
        added: 1.0,
    }

    request: {
        /// The backup version to retrieve keys from.
        #[ruma_api(query)]
        pub version: &'a str,
    }

    response: {
        /// A map from room IDs to session IDs to key data.
        pub rooms: BTreeMap<Box<RoomId>, RoomKeyBackup>,
    }

    error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given version.
pub fn new(version: &'a str) -> Self {
Self { version }
}
}
impl Response {
/// Creates a new `Response` with the given room key backups.
pub fn new(rooms: BTreeMap<Box<RoomId>, RoomKeyBackup>) -> Self {
Self { rooms }
}
}
}
|
random_line_split
|
|
get_backup_keys.rs
|
//! `GET /_matrix/client/*/room_keys/keys`
//!
//! Retrieve all keys from a backup version.
pub mod v3 {
//! `/v3/` ([spec])
//!
//! [spec]: https://spec.matrix.org/v1.2/client-server-api/#get_matrixclientv3room_keyskeys
use std::collections::BTreeMap;
use ruma_common::{api::ruma_api, RoomId};
use crate::backup::RoomKeyBackup;
ruma_api! {
metadata: {
description: "Retrieve all keys from a backup version.",
method: GET,
name: "get_backup_keys",
unstable_path: "/_matrix/client/unstable/room_keys/keys",
r0_path: "/_matrix/client/r0/room_keys/keys",
stable_path: "/_matrix/client/v3/room_keys/keys",
rate_limited: true,
authentication: AccessToken,
added: 1.0,
}
request: {
/// The backup version to retrieve keys from.
#[ruma_api(query)]
pub version: &'a str,
}
response: {
/// A map from room IDs to session IDs to key data.
pub rooms: BTreeMap<Box<RoomId>, RoomKeyBackup>,
}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given version.
pub fn new(version: &'a str) -> Self {
Self { version }
}
}
impl Response {
    /// Creates a new `Response` with the given room key backups.
    // NOTE: the method body had been displaced to the end of the chunk by
    // dataset extraction; it is restored in place here.
    pub fn new(rooms: BTreeMap<Box<RoomId>, RoomKeyBackup>) -> Self {
        Self { rooms }
    }
}
}
|
identifier_body
|
json_parser.rs
|
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains bindings to convert from JSON to C++ base::Value objects.
// The code related to `base::Value` can be found in 'values.rs'
// and 'values_deserialization.rs'.
use crate::values::ValueSlotRef;
use crate::values_deserialization::ValueVisitor;
use cxx::CxxString;
use serde::de::Deserializer;
use serde_jsonrc::de::SliceRead;
use std::pin::Pin;
// UTF8 byte order mark.
const UTF8_BOM: [u8; 3] = [0xef, 0xbb, 0xbf];
#[cxx::bridge(namespace=base::rs_glue)]
mod ffi {
/// Options for parsing JSON inputs. Largely a mirror of the C++ `base::JSONParserOptions`
/// bitflags, represented as a friendlier struct-of-bools instead.
#[namespace=base::ffi::json::json_parser]
struct JsonOptions {
/// Allows commas to exist after the last element in structures.
allow_trailing_commas: bool,
/// If set the parser replaces invalid code points (i.e. lone surrogates) with the Unicode
/// replacement character (U+FFFD). If not set, invalid code points trigger a hard error and
/// parsing fails.
replace_invalid_characters: bool,
/// Allows both C (/* */) and C++ (//) style comments.
allow_comments: bool,
/// Permits unescaped ASCII control characters (such as unescaped \r and \n) in the range
/// [0x00,0x1F].
allow_control_chars: bool,
/// Permits \\v vertical tab escapes.
allow_vert_tab: bool,
/// Permits \\xNN escapes as described above.
allow_x_escapes: bool,
/// The maximum recursion depth to walk while parsing nested JSON objects. JSON beyond the
/// specified depth will be ignored.
max_depth: usize,
}
unsafe extern "C++" {
include!("base/rs_glue/values_glue.h");
type ValueSlot = crate::rs_glue::ffi::ValueSlot;
}
extern "Rust" {
#[namespace=base::ffi::json::json_parser]
pub fn decode_json_from_cpp(
json: &[u8],
options: JsonOptions,
value_slot: Pin<&mut ValueSlot>,
error_line: &mut i32,
error_column: &mut i32,
error_message: Pin<&mut CxxString>,
) -> bool;
}
}
// Rust-side alias for the options struct shared with C++ via the cxx bridge.
pub type JsonOptions = ffi::JsonOptions;

impl JsonOptions {
    /// Construct a JsonOptions with common Chromium extensions.
    ///
    /// Per base::JSONParserOptions::JSON_PARSE_CHROMIUM_EXTENSIONS:
    ///
    /// This parser historically accepted, without configuration flags,
    /// non-standard JSON extensions. This enables that traditional parsing
    /// behavior.
    pub fn with_chromium_extensions(max_depth: usize) -> JsonOptions {
        JsonOptions {
            // Kept strict (off) even in "extensions" mode:
            allow_trailing_commas: false,
            replace_invalid_characters: false,
            // The traditional Chromium-only extensions:
            allow_comments: true,
            allow_control_chars: true,
            allow_vert_tab: true,
            allow_x_escapes: true,
            max_depth,
        }
    }
}
/// Decode some JSON into C++ base::Value object tree.
///
/// This function takes and returns normal Rust types. For an equivalent which
/// can be called from C++, see `decode_json_from_cpp`.
///
/// # Args:
///
/// * `json`: the JSON. Note that this is a slice of bytes rather than a string,
/// which in Rust terms means it hasn't yet been validated to be
/// legitimate UTF8. The JSON decoding will do that.
/// * `options`: configuration options for non-standard JSON extensions
/// * `value_slot`: a space into which to construct a base::Value
///
/// It always strips a UTF8 BOM from the start of the string, if one is found.
///
/// Return: a serde_jsonrc::Error or Ok.
///
/// It is be desirable in future to expose this API to other Rust code inside
/// and outside //base. TODO(crbug/1287209): work out API norms and then add
/// 'pub' to do this.
pub fn decode_json(
json: &[u8],
options: JsonOptions,
value_slot: ValueSlotRef,
) -> Result<(), serde_jsonrc::Error> {
let mut to_parse = json;
if to_parse.len() >= 3 && to_parse[0..3] == UTF8_BOM {
to_parse = &to_parse[3..];
}
let mut deserializer = serde_jsonrc::Deserializer::new(SliceRead::new(
&to_parse,
options.replace_invalid_characters,
options.allow_control_chars,
options.allow_vert_tab,
options.allow_x_escapes,
));
// By default serde_json[rc] has a recursion limit of 128.
// As we want different recursion limits in different circumstances,
// we disable its own recursion tracking and use our own.
deserializer.disable_recursion_limit();
deserializer.set_ignore_trailing_commas(options.allow_trailing_commas);
deserializer.set_allow_comments(options.allow_comments);
// The C++ parser starts counting nesting levels from the first item
// inside the outermost dict. We start counting from the
// absl::optional<base::Value> and also count the outermost dict,
// therefore we start with -2 to match C++ behavior.
let result =
deserializer.deserialize_any(ValueVisitor::new(value_slot, options.max_depth - 2))?;
deserializer.end()?;
Ok(result)
}
/// Decode some JSON into a `base::Value`; for calling by C++.
///
/// See `decode_json` for an equivalent which takes and returns idiomatic Rust
/// types, and a little bit more information about the implementation.
///
/// # Args
///
/// * `json`: a slice of input JSON unsigned characters.
/// * `options`: configuration options for non-standard JSON extensions
/// * `value_slot`: a space into which to construct a base::Value
/// * `error_line`/`error_column`/`error_message`: populated with details of
/// any decode error.
///
/// # Returns
///
/// A Boolean indicating whether the decode succeeded.
fn decode_json_from_cpp(
json: &[u8],
options: ffi::JsonOptions,
value_slot: Pin<&mut ffi::ValueSlot>,
error_line: &mut i32,
error_column: &mut i32,
mut error_message: Pin<&mut CxxString>,
) -> bool {
let value_slot = ValueSlotRef::from(value_slot);
match decode_json(json, options, value_slot) {
Err(err) =>
|
Ok(_) => true,
}
}
|
{
*error_line = err.line().try_into().unwrap_or(-1);
*error_column = err.column().try_into().unwrap_or(-1);
error_message.as_mut().clear();
// The following line pulls in a lot of binary bloat, due to all the formatter
// implementations required to stringify error messages. This error message is used in
// only a couple of places outside unit tests so we could consider trying
// to eliminate.
error_message.as_mut().push_str(&err.to_string());
false
}
|
conditional_block
|
json_parser.rs
|
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains bindings to convert from JSON to C++ base::Value objects.
// The code related to `base::Value` can be found in 'values.rs'
// and 'values_deserialization.rs'.
use crate::values::ValueSlotRef;
use crate::values_deserialization::ValueVisitor;
use cxx::CxxString;
use serde::de::Deserializer;
use serde_jsonrc::de::SliceRead;
use std::pin::Pin;
// UTF8 byte order mark.
const UTF8_BOM: [u8; 3] = [0xef, 0xbb, 0xbf];
#[cxx::bridge(namespace=base::rs_glue)]
mod ffi {
/// Options for parsing JSON inputs. Largely a mirror of the C++ `base::JSONParserOptions`
/// bitflags, represented as a friendlier struct-of-bools instead.
#[namespace=base::ffi::json::json_parser]
struct JsonOptions {
/// Allows commas to exist after the last element in structures.
allow_trailing_commas: bool,
/// If set the parser replaces invalid code points (i.e. lone surrogates) with the Unicode
/// replacement character (U+FFFD). If not set, invalid code points trigger a hard error and
/// parsing fails.
replace_invalid_characters: bool,
/// Allows both C (/* */) and C++ (//) style comments.
allow_comments: bool,
/// Permits unescaped ASCII control characters (such as unescaped \r and \n) in the range
/// [0x00,0x1F].
allow_control_chars: bool,
/// Permits \\v vertical tab escapes.
allow_vert_tab: bool,
/// Permits \\xNN escapes as described above.
allow_x_escapes: bool,
/// The maximum recursion depth to walk while parsing nested JSON objects. JSON beyond the
/// specified depth will be ignored.
max_depth: usize,
}
unsafe extern "C++" {
include!("base/rs_glue/values_glue.h");
type ValueSlot = crate::rs_glue::ffi::ValueSlot;
}
extern "Rust" {
#[namespace=base::ffi::json::json_parser]
pub fn decode_json_from_cpp(
json: &[u8],
options: JsonOptions,
value_slot: Pin<&mut ValueSlot>,
error_line: &mut i32,
error_column: &mut i32,
error_message: Pin<&mut CxxString>,
) -> bool;
}
}
pub type JsonOptions = ffi::JsonOptions;
impl JsonOptions {
/// Construct a JsonOptions with common Chromium extensions.
///
/// Per base::JSONParserOptions::JSON_PARSE_CHROMIUM_EXTENSIONS:
///
/// This parser historically accepted, without configuration flags,
/// non-standard JSON extensions. This enables that traditional parsing
/// behavior.
pub fn with_chromium_extensions(max_depth: usize) -> JsonOptions {
JsonOptions {
allow_trailing_commas: false,
replace_invalid_characters: false,
allow_comments: true,
allow_control_chars: true,
allow_vert_tab: true,
allow_x_escapes: true,
max_depth,
}
}
}
/// Decode some JSON into C++ base::Value object tree.
///
/// This function takes and returns normal Rust types. For an equivalent which
/// can be called from C++, see `decode_json_from_cpp`.
///
/// # Args:
///
/// * `json`: the JSON. Note that this is a slice of bytes rather than a string,
/// which in Rust terms means it hasn't yet been validated to be
/// legitimate UTF8. The JSON decoding will do that.
/// * `options`: configuration options for non-standard JSON extensions
/// * `value_slot`: a space into which to construct a base::Value
///
/// It always strips a UTF8 BOM from the start of the string, if one is found.
///
/// Return: a serde_jsonrc::Error or Ok.
///
/// It is be desirable in future to expose this API to other Rust code inside
/// and outside //base. TODO(crbug/1287209): work out API norms and then add
/// 'pub' to do this.
pub fn decode_json(
json: &[u8],
options: JsonOptions,
value_slot: ValueSlotRef,
) -> Result<(), serde_jsonrc::Error> {
let mut to_parse = json;
if to_parse.len() >= 3 && to_parse[0..3] == UTF8_BOM {
to_parse = &to_parse[3..];
}
let mut deserializer = serde_jsonrc::Deserializer::new(SliceRead::new(
&to_parse,
options.replace_invalid_characters,
options.allow_control_chars,
options.allow_vert_tab,
options.allow_x_escapes,
));
// By default serde_json[rc] has a recursion limit of 128.
// As we want different recursion limits in different circumstances,
// we disable its own recursion tracking and use our own.
deserializer.disable_recursion_limit();
deserializer.set_ignore_trailing_commas(options.allow_trailing_commas);
deserializer.set_allow_comments(options.allow_comments);
// The C++ parser starts counting nesting levels from the first item
// inside the outermost dict. We start counting from the
// absl::optional<base::Value> and also count the outermost dict,
// therefore we start with -2 to match C++ behavior.
let result =
deserializer.deserialize_any(ValueVisitor::new(value_slot, options.max_depth - 2))?;
deserializer.end()?;
Ok(result)
}
/// Decode some JSON into a `base::Value`; for calling by C++.
///
/// See `decode_json` for an equivalent which takes and returns idiomatic Rust
/// types, and a little bit more information about the implementation.
///
/// # Args
///
/// * `json`: a slice of input JSON unsigned characters.
/// * `options`: configuration options for non-standard JSON extensions
/// * `value_slot`: a space into which to construct a base::Value
/// * `error_line`/`error_column`/`error_message`: populated with details of
/// any decode error.
///
/// # Returns
///
/// A Boolean indicating whether the decode succeeded.
fn decode_json_from_cpp(
json: &[u8],
options: ffi::JsonOptions,
value_slot: Pin<&mut ffi::ValueSlot>,
error_line: &mut i32,
error_column: &mut i32,
mut error_message: Pin<&mut CxxString>,
) -> bool {
let value_slot = ValueSlotRef::from(value_slot);
match decode_json(json, options, value_slot) {
Err(err) => {
*error_line = err.line().try_into().unwrap_or(-1);
*error_column = err.column().try_into().unwrap_or(-1);
error_message.as_mut().clear();
// The following line pulls in a lot of binary bloat, due to all the formatter
// implementations required to stringify error messages. This error message is used in
// only a couple of places outside unit tests so we could consider trying
// to eliminate.
error_message.as_mut().push_str(&err.to_string());
false
}
Ok(_) => true,
}
|
}
|
random_line_split
|
|
json_parser.rs
|
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains bindings to convert from JSON to C++ base::Value objects.
// The code related to `base::Value` can be found in 'values.rs'
// and 'values_deserialization.rs'.
use crate::values::ValueSlotRef;
use crate::values_deserialization::ValueVisitor;
use cxx::CxxString;
use serde::de::Deserializer;
use serde_jsonrc::de::SliceRead;
use std::pin::Pin;
// UTF8 byte order mark.
const UTF8_BOM: [u8; 3] = [0xef, 0xbb, 0xbf];
#[cxx::bridge(namespace=base::rs_glue)]
mod ffi {
/// Options for parsing JSON inputs. Largely a mirror of the C++ `base::JSONParserOptions`
/// bitflags, represented as a friendlier struct-of-bools instead.
#[namespace=base::ffi::json::json_parser]
struct JsonOptions {
/// Allows commas to exist after the last element in structures.
allow_trailing_commas: bool,
/// If set the parser replaces invalid code points (i.e. lone surrogates) with the Unicode
/// replacement character (U+FFFD). If not set, invalid code points trigger a hard error and
/// parsing fails.
replace_invalid_characters: bool,
/// Allows both C (/* */) and C++ (//) style comments.
allow_comments: bool,
/// Permits unescaped ASCII control characters (such as unescaped \r and \n) in the range
/// [0x00,0x1F].
allow_control_chars: bool,
/// Permits \\v vertical tab escapes.
allow_vert_tab: bool,
/// Permits \\xNN escapes as described above.
allow_x_escapes: bool,
/// The maximum recursion depth to walk while parsing nested JSON objects. JSON beyond the
/// specified depth will be ignored.
max_depth: usize,
}
unsafe extern "C++" {
include!("base/rs_glue/values_glue.h");
type ValueSlot = crate::rs_glue::ffi::ValueSlot;
}
extern "Rust" {
#[namespace=base::ffi::json::json_parser]
pub fn decode_json_from_cpp(
json: &[u8],
options: JsonOptions,
value_slot: Pin<&mut ValueSlot>,
error_line: &mut i32,
error_column: &mut i32,
error_message: Pin<&mut CxxString>,
) -> bool;
}
}
pub type JsonOptions = ffi::JsonOptions;
impl JsonOptions {
/// Construct a JsonOptions with common Chromium extensions.
///
/// Per base::JSONParserOptions::JSON_PARSE_CHROMIUM_EXTENSIONS:
///
/// This parser historically accepted, without configuration flags,
/// non-standard JSON extensions. This enables that traditional parsing
/// behavior.
pub fn with_chromium_extensions(max_depth: usize) -> JsonOptions {
JsonOptions {
allow_trailing_commas: false,
replace_invalid_characters: false,
allow_comments: true,
allow_control_chars: true,
allow_vert_tab: true,
allow_x_escapes: true,
max_depth,
}
}
}
/// Decode some JSON into C++ base::Value object tree.
///
/// This function takes and returns normal Rust types. For an equivalent which
/// can be called from C++, see `decode_json_from_cpp`.
///
/// # Args:
///
/// * `json`: the JSON. Note that this is a slice of bytes rather than a string,
/// which in Rust terms means it hasn't yet been validated to be
/// legitimate UTF8. The JSON decoding will do that.
/// * `options`: configuration options for non-standard JSON extensions
/// * `value_slot`: a space into which to construct a base::Value
///
/// It always strips a UTF8 BOM from the start of the string, if one is found.
///
/// Return: a serde_jsonrc::Error or Ok.
///
/// It is be desirable in future to expose this API to other Rust code inside
/// and outside //base. TODO(crbug/1287209): work out API norms and then add
/// 'pub' to do this.
pub fn
|
(
json: &[u8],
options: JsonOptions,
value_slot: ValueSlotRef,
) -> Result<(), serde_jsonrc::Error> {
let mut to_parse = json;
if to_parse.len() >= 3 && to_parse[0..3] == UTF8_BOM {
to_parse = &to_parse[3..];
}
let mut deserializer = serde_jsonrc::Deserializer::new(SliceRead::new(
&to_parse,
options.replace_invalid_characters,
options.allow_control_chars,
options.allow_vert_tab,
options.allow_x_escapes,
));
// By default serde_json[rc] has a recursion limit of 128.
// As we want different recursion limits in different circumstances,
// we disable its own recursion tracking and use our own.
deserializer.disable_recursion_limit();
deserializer.set_ignore_trailing_commas(options.allow_trailing_commas);
deserializer.set_allow_comments(options.allow_comments);
// The C++ parser starts counting nesting levels from the first item
// inside the outermost dict. We start counting from the
// absl::optional<base::Value> and also count the outermost dict,
// therefore we start with -2 to match C++ behavior.
let result =
deserializer.deserialize_any(ValueVisitor::new(value_slot, options.max_depth - 2))?;
deserializer.end()?;
Ok(result)
}
/// Decode some JSON into a `base::Value`; for calling by C++.
///
/// See `decode_json` for an equivalent which takes and returns idiomatic Rust
/// types, and a little bit more information about the implementation.
///
/// # Args
///
/// * `json`: a slice of input JSON unsigned characters.
/// * `options`: configuration options for non-standard JSON extensions
/// * `value_slot`: a space into which to construct a base::Value
/// * `error_line`/`error_column`/`error_message`: populated with details of
/// any decode error.
///
/// # Returns
///
/// A Boolean indicating whether the decode succeeded.
fn decode_json_from_cpp(
json: &[u8],
options: ffi::JsonOptions,
value_slot: Pin<&mut ffi::ValueSlot>,
error_line: &mut i32,
error_column: &mut i32,
mut error_message: Pin<&mut CxxString>,
) -> bool {
let value_slot = ValueSlotRef::from(value_slot);
match decode_json(json, options, value_slot) {
Err(err) => {
*error_line = err.line().try_into().unwrap_or(-1);
*error_column = err.column().try_into().unwrap_or(-1);
error_message.as_mut().clear();
// The following line pulls in a lot of binary bloat, due to all the formatter
// implementations required to stringify error messages. This error message is used in
// only a couple of places outside unit tests so we could consider trying
// to eliminate.
error_message.as_mut().push_str(&err.to_string());
false
}
Ok(_) => true,
}
}
|
decode_json
|
identifier_name
|
rand_utils.rs
|
//! Utility functions for random functionality.
//!
//! This module provides sampling and shuffling which are used
//! within the learning modules.
use rand::{Rng, thread_rng};
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// let mut pool = &mut [1,2,3,4];
/// let sample = rand_utils::reservoir_sample(pool, 3);
///
/// println!("{:?}", sample);
/// ```
pub fn
|
<T: Copy>(pool: &[T], reservoir_size: usize) -> Vec<T> {
assert!(pool.len() >= reservoir_size,
"Sample size is greater than total.");
let mut pool_mut = &pool[..];
let mut res = pool_mut[..reservoir_size].to_vec();
pool_mut = &pool_mut[reservoir_size..];
let mut ele_seen = reservoir_size;
let mut rng = thread_rng();
while!pool_mut.is_empty() {
ele_seen += 1;
let r = rng.gen_range(0, ele_seen);
let p_0 = pool_mut[0];
pool_mut = &pool_mut[1..];
if r < reservoir_size {
res[r] = p_0;
}
}
res
}
/// The inside out Fisher-Yates algorithm.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let a = (0..5).collect::<Vec<_>>();
///
/// // Perform a Fisher-Yates shuffle to get a random permutation
/// let permutation = rand_utils::fisher_yates(&a);
/// ```
pub fn fisher_yates<T: Copy>(arr: &[T]) -> Vec<T> {
let n = arr.len();
let mut rng = thread_rng();
let mut shuffled_arr = Vec::with_capacity(n);
unsafe {
// We set the length here
// We only access data which has been initialized in the algorithm
shuffled_arr.set_len(n);
}
for i in 0..n {
let j = rng.gen_range(0, i + 1);
// If j isn't the last point in the active shuffled array
if j!= i {
// Copy value at position j to the end of the shuffled array
// This is safe as we only read initialized data (j < i)
let x = shuffled_arr[j];
shuffled_arr[i] = x;
}
// Place value at end of active array into shuffled array
shuffled_arr[j] = arr[i];
}
shuffled_arr
}
/// The in place Fisher-Yates shuffle.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let mut a = (0..5).collect::<Vec<_>>();
///
/// // Permute the values in place with Fisher-Yates
/// rand_utils::in_place_fisher_yates(&mut a);
/// ```
pub fn in_place_fisher_yates<T>(arr: &mut [T]) {
let n = arr.len();
let mut rng = thread_rng();
for i in 0..n {
// Swap i with a random point after it
let j = rng.gen_range(0, n - i);
arr.swap(i, i + j);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_reservoir_sample() {
let a = vec![1, 2, 3, 4, 5, 6, 7];
let b = reservoir_sample(&a, 3);
assert_eq!(b.len(), 3);
}
#[test]
fn test_fisher_yates() {
let a = (0..10).collect::<Vec<_>>();
let b = fisher_yates(&a);
for val in a.iter() {
assert!(b.contains(val));
}
}
#[test]
fn test_in_place_fisher_yates() {
let mut a = (0..10).collect::<Vec<_>>();
in_place_fisher_yates(&mut a);
for val in 0..10 {
assert!(a.contains(&val));
}
}
}
|
reservoir_sample
|
identifier_name
|
rand_utils.rs
|
//! Utility functions for random functionality.
//!
//! This module provides sampling and shuffling which are used
//! within the learning modules.
use rand::{Rng, thread_rng};
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// let mut pool = &mut [1,2,3,4];
/// let sample = rand_utils::reservoir_sample(pool, 3);
///
/// println!("{:?}", sample);
/// ```
pub fn reservoir_sample<T: Copy>(pool: &[T], reservoir_size: usize) -> Vec<T> {
assert!(pool.len() >= reservoir_size,
"Sample size is greater than total.");
let mut pool_mut = &pool[..];
let mut res = pool_mut[..reservoir_size].to_vec();
pool_mut = &pool_mut[reservoir_size..];
let mut ele_seen = reservoir_size;
let mut rng = thread_rng();
while!pool_mut.is_empty() {
ele_seen += 1;
let r = rng.gen_range(0, ele_seen);
let p_0 = pool_mut[0];
pool_mut = &pool_mut[1..];
if r < reservoir_size {
res[r] = p_0;
}
}
res
}
/// The inside out Fisher-Yates algorithm.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let a = (0..5).collect::<Vec<_>>();
///
/// // Perform a Fisher-Yates shuffle to get a random permutation
/// let permutation = rand_utils::fisher_yates(&a);
/// ```
pub fn fisher_yates<T: Copy>(arr: &[T]) -> Vec<T> {
let n = arr.len();
let mut rng = thread_rng();
let mut shuffled_arr = Vec::with_capacity(n);
unsafe {
// We set the length here
// We only access data which has been initialized in the algorithm
shuffled_arr.set_len(n);
}
for i in 0..n {
let j = rng.gen_range(0, i + 1);
// If j isn't the last point in the active shuffled array
if j!= i {
// Copy value at position j to the end of the shuffled array
// This is safe as we only read initialized data (j < i)
let x = shuffled_arr[j];
shuffled_arr[i] = x;
}
// Place value at end of active array into shuffled array
shuffled_arr[j] = arr[i];
}
shuffled_arr
}
/// The in place Fisher-Yates shuffle.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let mut a = (0..5).collect::<Vec<_>>();
///
/// // Permute the values in place with Fisher-Yates
/// rand_utils::in_place_fisher_yates(&mut a);
/// ```
pub fn in_place_fisher_yates<T>(arr: &mut [T]) {
let n = arr.len();
let mut rng = thread_rng();
for i in 0..n {
// Swap i with a random point after it
let j = rng.gen_range(0, n - i);
arr.swap(i, i + j);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_reservoir_sample() {
let a = vec![1, 2, 3, 4, 5, 6, 7];
let b = reservoir_sample(&a, 3);
assert_eq!(b.len(), 3);
}
|
let a = (0..10).collect::<Vec<_>>();
let b = fisher_yates(&a);
for val in a.iter() {
assert!(b.contains(val));
}
}
#[test]
fn test_in_place_fisher_yates() {
let mut a = (0..10).collect::<Vec<_>>();
in_place_fisher_yates(&mut a);
for val in 0..10 {
assert!(a.contains(&val));
}
}
}
|
#[test]
fn test_fisher_yates() {
|
random_line_split
|
rand_utils.rs
|
//! Utility functions for random functionality.
//!
//! This module provides sampling and shuffling which are used
//! within the learning modules.
use rand::{Rng, thread_rng};
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// let mut pool = &mut [1,2,3,4];
/// let sample = rand_utils::reservoir_sample(pool, 3);
///
/// println!("{:?}", sample);
/// ```
pub fn reservoir_sample<T: Copy>(pool: &[T], reservoir_size: usize) -> Vec<T> {
assert!(pool.len() >= reservoir_size,
"Sample size is greater than total.");
let mut pool_mut = &pool[..];
let mut res = pool_mut[..reservoir_size].to_vec();
pool_mut = &pool_mut[reservoir_size..];
let mut ele_seen = reservoir_size;
let mut rng = thread_rng();
while!pool_mut.is_empty() {
ele_seen += 1;
let r = rng.gen_range(0, ele_seen);
let p_0 = pool_mut[0];
pool_mut = &pool_mut[1..];
if r < reservoir_size
|
}
res
}
/// The inside out Fisher-Yates algorithm.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let a = (0..5).collect::<Vec<_>>();
///
/// // Perform a Fisher-Yates shuffle to get a random permutation
/// let permutation = rand_utils::fisher_yates(&a);
/// ```
pub fn fisher_yates<T: Copy>(arr: &[T]) -> Vec<T> {
let n = arr.len();
let mut rng = thread_rng();
let mut shuffled_arr = Vec::with_capacity(n);
unsafe {
// We set the length here
// We only access data which has been initialized in the algorithm
shuffled_arr.set_len(n);
}
for i in 0..n {
let j = rng.gen_range(0, i + 1);
// If j isn't the last point in the active shuffled array
if j!= i {
// Copy value at position j to the end of the shuffled array
// This is safe as we only read initialized data (j < i)
let x = shuffled_arr[j];
shuffled_arr[i] = x;
}
// Place value at end of active array into shuffled array
shuffled_arr[j] = arr[i];
}
shuffled_arr
}
/// The in place Fisher-Yates shuffle.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let mut a = (0..5).collect::<Vec<_>>();
///
/// // Permute the values in place with Fisher-Yates
/// rand_utils::in_place_fisher_yates(&mut a);
/// ```
pub fn in_place_fisher_yates<T>(arr: &mut [T]) {
let n = arr.len();
let mut rng = thread_rng();
for i in 0..n {
// Swap i with a random point after it
let j = rng.gen_range(0, n - i);
arr.swap(i, i + j);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_reservoir_sample() {
let a = vec![1, 2, 3, 4, 5, 6, 7];
let b = reservoir_sample(&a, 3);
assert_eq!(b.len(), 3);
}
#[test]
fn test_fisher_yates() {
let a = (0..10).collect::<Vec<_>>();
let b = fisher_yates(&a);
for val in a.iter() {
assert!(b.contains(val));
}
}
#[test]
fn test_in_place_fisher_yates() {
let mut a = (0..10).collect::<Vec<_>>();
in_place_fisher_yates(&mut a);
for val in 0..10 {
assert!(a.contains(&val));
}
}
}
|
{
res[r] = p_0;
}
|
conditional_block
|
rand_utils.rs
|
//! Utility functions for random functionality.
//!
//! This module provides sampling and shuffling which are used
//! within the learning modules.
use rand::{Rng, thread_rng};
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// let mut pool = &mut [1,2,3,4];
/// let sample = rand_utils::reservoir_sample(pool, 3);
///
/// println!("{:?}", sample);
/// ```
pub fn reservoir_sample<T: Copy>(pool: &[T], reservoir_size: usize) -> Vec<T> {
assert!(pool.len() >= reservoir_size,
"Sample size is greater than total.");
let mut pool_mut = &pool[..];
let mut res = pool_mut[..reservoir_size].to_vec();
pool_mut = &pool_mut[reservoir_size..];
let mut ele_seen = reservoir_size;
let mut rng = thread_rng();
while!pool_mut.is_empty() {
ele_seen += 1;
let r = rng.gen_range(0, ele_seen);
let p_0 = pool_mut[0];
pool_mut = &pool_mut[1..];
if r < reservoir_size {
res[r] = p_0;
}
}
res
}
/// The inside out Fisher-Yates algorithm.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let a = (0..5).collect::<Vec<_>>();
///
/// // Perform a Fisher-Yates shuffle to get a random permutation
/// let permutation = rand_utils::fisher_yates(&a);
/// ```
pub fn fisher_yates<T: Copy>(arr: &[T]) -> Vec<T> {
let n = arr.len();
let mut rng = thread_rng();
let mut shuffled_arr = Vec::with_capacity(n);
unsafe {
// We set the length here
// We only access data which has been initialized in the algorithm
shuffled_arr.set_len(n);
}
for i in 0..n {
let j = rng.gen_range(0, i + 1);
// If j isn't the last point in the active shuffled array
if j!= i {
// Copy value at position j to the end of the shuffled array
// This is safe as we only read initialized data (j < i)
let x = shuffled_arr[j];
shuffled_arr[i] = x;
}
// Place value at end of active array into shuffled array
shuffled_arr[j] = arr[i];
}
shuffled_arr
}
/// The in place Fisher-Yates shuffle.
///
/// # Examples
///
/// ```
/// use rusty_machine::learning::toolkit::rand_utils;
///
/// // Collect the numbers 0..5
/// let mut a = (0..5).collect::<Vec<_>>();
///
/// // Permute the values in place with Fisher-Yates
/// rand_utils::in_place_fisher_yates(&mut a);
/// ```
pub fn in_place_fisher_yates<T>(arr: &mut [T]) {
let n = arr.len();
let mut rng = thread_rng();
for i in 0..n {
// Swap i with a random point after it
let j = rng.gen_range(0, n - i);
arr.swap(i, i + j);
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_reservoir_sample() {
let a = vec![1, 2, 3, 4, 5, 6, 7];
let b = reservoir_sample(&a, 3);
assert_eq!(b.len(), 3);
}
#[test]
fn test_fisher_yates() {
let a = (0..10).collect::<Vec<_>>();
let b = fisher_yates(&a);
for val in a.iter() {
assert!(b.contains(val));
}
}
#[test]
fn test_in_place_fisher_yates()
|
}
|
{
let mut a = (0..10).collect::<Vec<_>>();
in_place_fisher_yates(&mut a);
for val in 0..10 {
assert!(a.contains(&val));
}
}
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.