file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
htmlolistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLOListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOListElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLOListElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLOListElement {
pub htmlelement: HTMLElement,
}
impl HTMLOListElementDerived for EventTarget {
fn is_htmlolistelement(&self) -> bool {
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLOListElementTypeId))
}
}
impl HTMLOListElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLOListElement {
HTMLOListElement {
htmlelement: HTMLElement::new_inherited(HTMLOListElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLOListElement> {
let element = HTMLOListElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLOListElementBinding::Wrap)
}
}
pub trait HTMLOListElementMethods {
}
impl Reflectable for HTMLOListElement {
fn
|
<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
reflector
|
identifier_name
|
htmlolistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLOListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOListElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLOListElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct HTMLOListElement {
pub htmlelement: HTMLElement,
}
impl HTMLOListElementDerived for EventTarget {
fn is_htmlolistelement(&self) -> bool
|
}
impl HTMLOListElement {
pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLOListElement {
HTMLOListElement {
htmlelement: HTMLElement::new_inherited(HTMLOListElementTypeId, localName, document)
}
}
pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLOListElement> {
let element = HTMLOListElement::new_inherited(localName, document);
Node::reflect_node(box element, document, HTMLOListElementBinding::Wrap)
}
}
pub trait HTMLOListElementMethods {
}
impl Reflectable for HTMLOListElement {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.htmlelement.reflector()
}
}
|
{
self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLOListElementTypeId))
}
|
identifier_body
|
network_settings.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Structure to hold network settings configured from CLI
/// Networking & RPC settings
#[derive(Debug, PartialEq, Clone)]
pub struct NetworkSettings {
/// Node name
pub name: String,
/// Name of the chain we are connected to
pub chain: String,
/// Networking port
pub network_port: u16,
/// Is JSON-RPC server enabled?
pub rpc_enabled: bool,
/// Interface that JSON-RPC listens on
pub rpc_interface: String,
/// Port for JSON-RPC server
pub rpc_port: u16,
}
impl Default for NetworkSettings {
fn default() -> Self
|
}
|
{
NetworkSettings {
name: "".into(),
chain: "foundation".into(),
network_port: 30303,
rpc_enabled: true,
rpc_interface: "127.0.0.1".into(),
rpc_port: 8545
}
}
|
identifier_body
|
network_settings.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Structure to hold network settings configured from CLI
/// Networking & RPC settings
#[derive(Debug, PartialEq, Clone)]
pub struct NetworkSettings {
/// Node name
pub name: String,
/// Name of the chain we are connected to
pub chain: String,
/// Networking port
pub network_port: u16,
/// Is JSON-RPC server enabled?
pub rpc_enabled: bool,
/// Interface that JSON-RPC listens on
pub rpc_interface: String,
/// Port for JSON-RPC server
pub rpc_port: u16,
}
impl Default for NetworkSettings {
fn
|
() -> Self {
NetworkSettings {
name: "".into(),
chain: "foundation".into(),
network_port: 30303,
rpc_enabled: true,
rpc_interface: "127.0.0.1".into(),
rpc_port: 8545
}
}
}
|
default
|
identifier_name
|
network_settings.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
|
#[derive(Debug, PartialEq, Clone)]
pub struct NetworkSettings {
/// Node name
pub name: String,
/// Name of the chain we are connected to
pub chain: String,
/// Networking port
pub network_port: u16,
/// Is JSON-RPC server enabled?
pub rpc_enabled: bool,
/// Interface that JSON-RPC listens on
pub rpc_interface: String,
/// Port for JSON-RPC server
pub rpc_port: u16,
}
impl Default for NetworkSettings {
fn default() -> Self {
NetworkSettings {
name: "".into(),
chain: "foundation".into(),
network_port: 30303,
rpc_enabled: true,
rpc_interface: "127.0.0.1".into(),
rpc_port: 8545
}
}
}
|
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Structure to hold network settings configured from CLI
/// Networking & RPC settings
|
random_line_split
|
genimplsource0.rs
|
struct S; // On déclare le type concret `S`.
struct GenericVal<T>(T); // On déclare le type générique `GenericVal`.
// Implémentation de GenericVal où nous précisons que cette méthode doit être
// implémentée uniquement pour le type `f32`.
impl GenericVal<f32> {
fn say_hello_f32(&self) -> (){
println!("I'm a float! :D");
}
} // On spécifie `f32`
impl GenericVal<S> {
fn say_hello_s(&self) -> (){
println!("I'm a S object! :D");
}
} // On spécifie le type `S` pour les mêmes raisons qu'au-dessus.
// `<T>` doit précéder le type pour le rendre générique.
impl <T> GenericVal<T> {
fn say_hello(&self) -> (){
println!("I'm a generic object! :D");
}
}
struct Val {
val: f64
}
struct GenVal<T>{
gen_val: T
}
// Implémentation de Val.
impl Val {
fn value(&self) -> &f64 { &self.val }
}
// Implémentation de GenVal pour le type générique `T`.
impl <T> GenVal<T> {
fn value(&self) -> &T { &
|
gen_val }
}
fn main() {
let x = Val { val: 3.0 };
let y = GenVal { gen_val: 3i32 };
println!("{}, {}", x.value(), y.value());
GenericVal(1.0).say_hello_f32();
GenericVal(S).say_hello_s();
GenericVal("prout").say_hello();
}
|
self.
|
identifier_name
|
genimplsource0.rs
|
struct S; // On déclare le type concret `S`.
struct GenericVal<T>(T); // On déclare le type générique `GenericVal`.
// Implémentation de GenericVal où nous précisons que cette méthode doit être
// implémentée uniquement pour le type `f32`.
impl GenericVal<f32> {
fn say_hello_f32(&self) -> (){
println!("I'm a float! :D");
}
} // On spécifie `f32`
impl GenericVal<S> {
fn say_hello_s(&self) -> (){
println!("I'm a S object! :D");
}
} // On spécifie le type `S` pour les mêmes raisons qu'au-dessus.
// `<T>` doit précéder le type pour le rendre générique.
impl <T> GenericVal<T> {
fn say_hello(&self) -> (){
println!("I'm a generic object! :D");
}
}
struct Val {
val: f64
}
struct GenVal<T>{
gen_val: T
}
// Implémentation de Val.
impl Val {
fn value(&self) -> &f64 { &self.val }
}
// Implémentation de GenVal pour le type générique `T`.
impl <T> GenVal<T> {
fn value(&self) -> &T { &self.gen_val }
}
fn main() {
let x = Val { val: 3.0 };
let y = GenVal { gen_val: 3i32 };
println!("{}, {}", x.value(), y.value());
|
GenericVal("prout").say_hello();
}
|
GenericVal(1.0).say_hello_f32();
GenericVal(S).say_hello_s();
|
random_line_split
|
ast.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::fmt;
pub type OffsetSpan = (usize, usize);
#[derive(Debug, Clone, PartialEq)]
pub struct Spanned<T> {
pub pos: OffsetSpan,
pub data: T,
}
#[derive(Debug, Clone, PartialEq)]
pub enum BinOp {
Add,
Sub,
Mul,
Div,
Mod,
Lt,
Lte,
Gt,
Gte,
Eq,
}
#[derive(Debug, Clone, PartialEq)]
pub enum LogicalBinOp {
And,
Or,
}
#[derive(Debug, Clone, PartialEq)]
pub enum UnOp {
Neg,
}
#[derive(Debug, Clone, PartialEq)]
pub enum LogicalUnOp {
Not,
}
impl fmt::Display for BinOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
BinOp::Add => write!(f, "+"),
BinOp::Sub => write!(f, "-"),
BinOp::Mul => write!(f, "*"),
BinOp::Div => write!(f, "/"),
BinOp::Mod => write!(f, "%"),
BinOp::Lt => write!(f, "<"),
BinOp::Lte => write!(f, "<="),
BinOp::Gt => write!(f, ">"),
BinOp::Gte => write!(f, ">="),
BinOp::Eq => write!(f, "=="),
}
}
}
impl fmt::Display for LogicalBinOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
LogicalBinOp::And => write!(f, "and"),
LogicalBinOp::Or => write!(f, "or"),
}
}
}
impl fmt::Display for UnOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UnOp::Neg => write!(f, "-"),
}
}
}
impl fmt::Display for LogicalUnOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
LogicalUnOp::Not => write!(f, "not"),
}
}
}
#[derive(Debug, Clone)]
pub enum Literal {
Integer(i64),
Float(f64),
Bool(bool),
String(String),
}
pub type LiteralNode = Spanned<Literal>;
#[derive(Debug, Clone)]
pub enum LhsExpr {
Identifier(String),
}
pub type LhsExprNode = Spanned<LhsExpr>;
#[derive(Debug, Clone)]
pub enum Variable {
Identifier(BindingType, String),
}
#[derive(Debug, Clone)]
pub enum BindingType {
Mutable,
}
#[derive(Debug, Clone)]
pub struct
|
{
pub maybe_id: Option<String>,
pub params: Vec<String>,
pub body: Box<StmtNode>,
}
#[derive(Debug, Clone)]
pub enum Expr {
Literal(LiteralNode),
Identifier(String),
Binary(Box<ExprNode>, BinOp, Box<ExprNode>),
BinaryLogical(Box<ExprNode>, LogicalBinOp, Box<ExprNode>),
Unary(UnOp, Box<ExprNode>),
UnaryLogical(LogicalUnOp, Box<ExprNode>),
FnDef(FnDefExpr),
FnCall(Box<ExprNode>, Vec<ExprNode>),
Tuple(Vec<ExprNode>),
MemberByIdx(Box<ExprNode>, Box<ExprNode>),
}
// Only for parser convenience
pub enum ExprSuffix {
ListInParens(Vec<ExprNode>),
InSquareBrackets(ExprNode),
}
pub type ExprNode = Spanned<Expr>;
#[derive(Debug, Clone)]
pub struct IfThenStmt {
pub cond: ExprNode,
pub then_block: Box<StmtNode>,
pub maybe_else_block: Option<Box<StmtNode>>,
}
#[derive(Debug, Clone)]
pub enum Stmt {
Assign(LhsExprNode, ExprNode),
AssignOp(LhsExprNode, BinOp, ExprNode),
VarDecl(Variable, ExprNode),
Expr(ExprNode),
Block(Vec<StmtNode>),
IfThen(IfThenStmt),
Loop(Box<StmtNode>),
Return(Option<ExprNode>),
Break,
Continue,
Empty,
}
pub type StmtNode = Spanned<Stmt>;
|
FnDefExpr
|
identifier_name
|
ast.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::fmt;
pub type OffsetSpan = (usize, usize);
#[derive(Debug, Clone, PartialEq)]
pub struct Spanned<T> {
pub pos: OffsetSpan,
pub data: T,
}
#[derive(Debug, Clone, PartialEq)]
pub enum BinOp {
Add,
Sub,
Mul,
Div,
Mod,
Lt,
Lte,
Gt,
Gte,
Eq,
}
#[derive(Debug, Clone, PartialEq)]
pub enum LogicalBinOp {
And,
Or,
}
#[derive(Debug, Clone, PartialEq)]
pub enum UnOp {
Neg,
}
#[derive(Debug, Clone, PartialEq)]
pub enum LogicalUnOp {
Not,
}
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
BinOp::Add => write!(f, "+"),
BinOp::Sub => write!(f, "-"),
BinOp::Mul => write!(f, "*"),
BinOp::Div => write!(f, "/"),
BinOp::Mod => write!(f, "%"),
BinOp::Lt => write!(f, "<"),
BinOp::Lte => write!(f, "<="),
BinOp::Gt => write!(f, ">"),
BinOp::Gte => write!(f, ">="),
BinOp::Eq => write!(f, "=="),
}
}
}
impl fmt::Display for LogicalBinOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
LogicalBinOp::And => write!(f, "and"),
LogicalBinOp::Or => write!(f, "or"),
}
}
}
impl fmt::Display for UnOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UnOp::Neg => write!(f, "-"),
}
}
}
impl fmt::Display for LogicalUnOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
LogicalUnOp::Not => write!(f, "not"),
}
}
}
#[derive(Debug, Clone)]
pub enum Literal {
Integer(i64),
Float(f64),
Bool(bool),
String(String),
}
pub type LiteralNode = Spanned<Literal>;
#[derive(Debug, Clone)]
pub enum LhsExpr {
Identifier(String),
}
pub type LhsExprNode = Spanned<LhsExpr>;
#[derive(Debug, Clone)]
pub enum Variable {
Identifier(BindingType, String),
}
#[derive(Debug, Clone)]
pub enum BindingType {
Mutable,
}
#[derive(Debug, Clone)]
pub struct FnDefExpr {
pub maybe_id: Option<String>,
pub params: Vec<String>,
pub body: Box<StmtNode>,
}
#[derive(Debug, Clone)]
pub enum Expr {
Literal(LiteralNode),
Identifier(String),
Binary(Box<ExprNode>, BinOp, Box<ExprNode>),
BinaryLogical(Box<ExprNode>, LogicalBinOp, Box<ExprNode>),
Unary(UnOp, Box<ExprNode>),
UnaryLogical(LogicalUnOp, Box<ExprNode>),
FnDef(FnDefExpr),
FnCall(Box<ExprNode>, Vec<ExprNode>),
Tuple(Vec<ExprNode>),
MemberByIdx(Box<ExprNode>, Box<ExprNode>),
}
// Only for parser convenience
pub enum ExprSuffix {
ListInParens(Vec<ExprNode>),
InSquareBrackets(ExprNode),
}
pub type ExprNode = Spanned<Expr>;
#[derive(Debug, Clone)]
pub struct IfThenStmt {
pub cond: ExprNode,
pub then_block: Box<StmtNode>,
pub maybe_else_block: Option<Box<StmtNode>>,
}
#[derive(Debug, Clone)]
pub enum Stmt {
Assign(LhsExprNode, ExprNode),
AssignOp(LhsExprNode, BinOp, ExprNode),
VarDecl(Variable, ExprNode),
Expr(ExprNode),
Block(Vec<StmtNode>),
IfThen(IfThenStmt),
Loop(Box<StmtNode>),
Return(Option<ExprNode>),
Break,
Continue,
Empty,
}
pub type StmtNode = Spanned<Stmt>;
|
impl fmt::Display for BinOp {
|
random_line_split
|
mod.rs
|
pub mod multi;
pub mod threshold;
use std::cmp::min;
use crate::{Levenshtein, Metric};
impl Default for Levenshtein {
fn default() -> Self {
Levenshtein {}
}
}
impl<T: Eq, R: AsRef<[T]>> Metric<T, R> for Levenshtein {
fn distance(a: R, b: R) -> usize
|
}
fn distance<T>(a: &[T], b: &[T]) -> usize
where
T: Eq,
{
let a_length = a.len();
let b_length = b.len();
if a_length == 0 {
return b_length;
} else if b_length == 0 {
return a_length;
}
let (row_items, column_items) = if a_length < b_length { (b, a) } else { (a, b) };
let mut buffer = first_row(row_items, column_items);
other_rows(row_items, column_items, &mut buffer);
*buffer.last().unwrap()
}
fn first_row<T>(row_items: &[T], column_items: &[T]) -> Vec<usize>
where
T: Eq,
{
let columns = column_items.len();
let mut buffer = Vec::with_capacity(columns);
let mut column_iter = column_items.iter().enumerate();
let (_, column_item0) = column_iter.next().unwrap();
let row_item0 = &row_items[0];
// Row 1, Column 1
buffer.push(if row_item0 == column_item0 { 0 } else { 1 });
// Row 1, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = column;
if row_item0 == column_item {
diag
} else {
let left = buffer[column - 1];
min(diag, left) + 1
}
};
buffer.push(value);
}
buffer
}
fn other_rows<T>(row_items: &[T], column_items: &[T], buffer: &mut Vec<usize>)
where
T: Eq,
{
let row_iter = row_items.iter().enumerate();
for (row, row_item) in row_iter {
let mut last_up = buffer[0];
let mut column_iter = column_items.iter().enumerate();
// Row 2+, Column 1
let (_, column_item0) = column_iter.next().unwrap();
buffer[0] = {
let diag = row;
if row_item == column_item0 {
diag
} else {
let up = buffer[0];
min(diag, up) + 1
}
};
// Row 2+, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = last_up;
if row_item == column_item {
diag
} else {
let left = buffer[column - 1];
let up = buffer[column];
min(min(diag, left), up) + 1
}
};
last_up = buffer[column];
buffer[column] = value;
}
}
}
|
{
distance(a.as_ref(), b.as_ref())
}
|
identifier_body
|
mod.rs
|
pub mod multi;
pub mod threshold;
use std::cmp::min;
use crate::{Levenshtein, Metric};
impl Default for Levenshtein {
fn default() -> Self {
Levenshtein {}
}
}
impl<T: Eq, R: AsRef<[T]>> Metric<T, R> for Levenshtein {
fn distance(a: R, b: R) -> usize {
distance(a.as_ref(), b.as_ref())
}
}
fn distance<T>(a: &[T], b: &[T]) -> usize
where
T: Eq,
{
let a_length = a.len();
let b_length = b.len();
if a_length == 0 {
return b_length;
} else if b_length == 0 {
return a_length;
}
let (row_items, column_items) = if a_length < b_length { (b, a) } else { (a, b) };
let mut buffer = first_row(row_items, column_items);
other_rows(row_items, column_items, &mut buffer);
*buffer.last().unwrap()
}
fn first_row<T>(row_items: &[T], column_items: &[T]) -> Vec<usize>
where
T: Eq,
{
let columns = column_items.len();
let mut buffer = Vec::with_capacity(columns);
let mut column_iter = column_items.iter().enumerate();
let (_, column_item0) = column_iter.next().unwrap();
let row_item0 = &row_items[0];
// Row 1, Column 1
buffer.push(if row_item0 == column_item0 { 0 } else { 1 });
// Row 1, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = column;
if row_item0 == column_item {
diag
} else {
let left = buffer[column - 1];
min(diag, left) + 1
}
};
buffer.push(value);
}
buffer
}
fn other_rows<T>(row_items: &[T], column_items: &[T], buffer: &mut Vec<usize>)
where
T: Eq,
{
let row_iter = row_items.iter().enumerate();
for (row, row_item) in row_iter {
let mut last_up = buffer[0];
|
let (_, column_item0) = column_iter.next().unwrap();
buffer[0] = {
let diag = row;
if row_item == column_item0 {
diag
} else {
let up = buffer[0];
min(diag, up) + 1
}
};
// Row 2+, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = last_up;
if row_item == column_item {
diag
} else {
let left = buffer[column - 1];
let up = buffer[column];
min(min(diag, left), up) + 1
}
};
last_up = buffer[column];
buffer[column] = value;
}
}
}
|
let mut column_iter = column_items.iter().enumerate();
// Row 2+, Column 1
|
random_line_split
|
mod.rs
|
pub mod multi;
pub mod threshold;
use std::cmp::min;
use crate::{Levenshtein, Metric};
impl Default for Levenshtein {
fn default() -> Self {
Levenshtein {}
}
}
impl<T: Eq, R: AsRef<[T]>> Metric<T, R> for Levenshtein {
fn distance(a: R, b: R) -> usize {
distance(a.as_ref(), b.as_ref())
}
}
fn distance<T>(a: &[T], b: &[T]) -> usize
where
T: Eq,
{
let a_length = a.len();
let b_length = b.len();
if a_length == 0 {
return b_length;
} else if b_length == 0 {
return a_length;
}
let (row_items, column_items) = if a_length < b_length { (b, a) } else { (a, b) };
let mut buffer = first_row(row_items, column_items);
other_rows(row_items, column_items, &mut buffer);
*buffer.last().unwrap()
}
fn first_row<T>(row_items: &[T], column_items: &[T]) -> Vec<usize>
where
T: Eq,
{
let columns = column_items.len();
let mut buffer = Vec::with_capacity(columns);
let mut column_iter = column_items.iter().enumerate();
let (_, column_item0) = column_iter.next().unwrap();
let row_item0 = &row_items[0];
// Row 1, Column 1
buffer.push(if row_item0 == column_item0 { 0 } else { 1 });
// Row 1, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = column;
if row_item0 == column_item {
diag
} else {
let left = buffer[column - 1];
min(diag, left) + 1
}
};
buffer.push(value);
}
buffer
}
fn
|
<T>(row_items: &[T], column_items: &[T], buffer: &mut Vec<usize>)
where
T: Eq,
{
let row_iter = row_items.iter().enumerate();
for (row, row_item) in row_iter {
let mut last_up = buffer[0];
let mut column_iter = column_items.iter().enumerate();
// Row 2+, Column 1
let (_, column_item0) = column_iter.next().unwrap();
buffer[0] = {
let diag = row;
if row_item == column_item0 {
diag
} else {
let up = buffer[0];
min(diag, up) + 1
}
};
// Row 2+, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = last_up;
if row_item == column_item {
diag
} else {
let left = buffer[column - 1];
let up = buffer[column];
min(min(diag, left), up) + 1
}
};
last_up = buffer[column];
buffer[column] = value;
}
}
}
|
other_rows
|
identifier_name
|
mod.rs
|
pub mod multi;
pub mod threshold;
use std::cmp::min;
use crate::{Levenshtein, Metric};
impl Default for Levenshtein {
fn default() -> Self {
Levenshtein {}
}
}
impl<T: Eq, R: AsRef<[T]>> Metric<T, R> for Levenshtein {
fn distance(a: R, b: R) -> usize {
distance(a.as_ref(), b.as_ref())
}
}
fn distance<T>(a: &[T], b: &[T]) -> usize
where
T: Eq,
{
let a_length = a.len();
let b_length = b.len();
if a_length == 0 {
return b_length;
} else if b_length == 0 {
return a_length;
}
let (row_items, column_items) = if a_length < b_length
|
else { (a, b) };
let mut buffer = first_row(row_items, column_items);
other_rows(row_items, column_items, &mut buffer);
*buffer.last().unwrap()
}
fn first_row<T>(row_items: &[T], column_items: &[T]) -> Vec<usize>
where
T: Eq,
{
let columns = column_items.len();
let mut buffer = Vec::with_capacity(columns);
let mut column_iter = column_items.iter().enumerate();
let (_, column_item0) = column_iter.next().unwrap();
let row_item0 = &row_items[0];
// Row 1, Column 1
buffer.push(if row_item0 == column_item0 { 0 } else { 1 });
// Row 1, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = column;
if row_item0 == column_item {
diag
} else {
let left = buffer[column - 1];
min(diag, left) + 1
}
};
buffer.push(value);
}
buffer
}
fn other_rows<T>(row_items: &[T], column_items: &[T], buffer: &mut Vec<usize>)
where
T: Eq,
{
let row_iter = row_items.iter().enumerate();
for (row, row_item) in row_iter {
let mut last_up = buffer[0];
let mut column_iter = column_items.iter().enumerate();
// Row 2+, Column 1
let (_, column_item0) = column_iter.next().unwrap();
buffer[0] = {
let diag = row;
if row_item == column_item0 {
diag
} else {
let up = buffer[0];
min(diag, up) + 1
}
};
// Row 2+, Column 2+
for (column, column_item) in column_iter {
let value = {
let diag = last_up;
if row_item == column_item {
diag
} else {
let left = buffer[column - 1];
let up = buffer[column];
min(min(diag, left), up) + 1
}
};
last_up = buffer[column];
buffer[column] = value;
}
}
}
|
{ (b, a) }
|
conditional_block
|
status.rs
|
/*
* libgit2 "status" example - shows how to use the status APIs
*
* Written by the libgit2 contributors
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#![deny(warnings)]
extern crate git2;
extern crate docopt;
extern crate rustc_serialize;
use std::str;
use docopt::Docopt;
use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore};
#[derive(RustcDecodable)]
struct Args {
arg_spec: Vec<String>,
flag_short: bool,
flag_long: bool,
flag_porcelain: bool,
flag_branch: bool,
flag_z: bool,
flag_ignored: bool,
flag_untracked_files: Option<String>,
flag_ignore_submodules: Option<String>,
flag_git_dir: Option<String>,
flag_repeat: bool,
flag_list_submodules: bool,
}
#[derive(Eq, PartialEq)]
enum Format { Long, Short, Porcelain }
fn run(args: &Args) -> Result<(), Error> {
let path = args.flag_git_dir.clone().unwrap_or(".".to_string());
let repo = try!(Repository::open(&path));
if repo.is_bare() {
return Err(Error::from_str("cannot report status on bare repository"))
}
let mut opts = StatusOptions::new();
opts.include_ignored(args.flag_ignored);
match args.flag_untracked_files.as_ref().map(|s| &s[..]) {
Some("no") => { opts.include_untracked(false); }
Some("normal") => { opts.include_untracked(true); }
Some("all") => {
opts.include_untracked(true).recurse_untracked_dirs(true);
}
Some(_) => return Err(Error::from_str("invalid untracked-files value")),
None => {}
}
match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) {
Some("all") => { opts.exclude_submodules(true); }
Some(_) => return Err(Error::from_str("invalid ignore-submodules value")),
None => {}
}
opts.include_untracked(!args.flag_ignored);
for spec in args.arg_spec.iter() {
opts.pathspec(spec);
}
loop {
if args.flag_repeat {
println!("\u{1b}[H\u{1b}[2J");
}
let statuses = try!(repo.statuses(Some(&mut opts)));
if args.flag_branch {
try!(show_branch(&repo, args.format()));
}
if args.flag_list_submodules {
try!(print_submodules(&repo));
}
if args.format() == Format::Long {
print_long(statuses);
} else {
print_short(&repo, statuses);
}
if args.flag_repeat {
std::thread::sleep_ms(10000);
} else {
return Ok(())
}
}
}
fn show_branch(repo: &Repository, format: Format) -> Result<(), Error> {
let head = match repo.head() {
Ok(head) => Some(head),
Err(ref e) if e.code() == ErrorCode::UnbornBranch ||
e.code() == ErrorCode::NotFound => None,
Err(e) => return Err(e),
};
let head = head.as_ref().and_then(|h| h.shorthand());
if format == Format::Long {
println!("# On branch {}",
head.unwrap_or("Not currently on any branch"));
} else {
println!("## {}", head.unwrap_or("HEAD (no branch)"));
}
Ok(())
}
fn print_submodules(repo: &Repository) -> Result<(), Error> {
let modules = try!(repo.submodules());
println!("# Submodules");
for sm in modules.iter() {
println!("# - submodule '{}' at {}", sm.name().unwrap(),
sm.path().display());
}
Ok(())
}
// This function print out an output similar to git's status command in long
// form, including the command-line hints.
fn print_long(statuses: git2::Statuses) {
let mut header = false;
let mut rm_in_workdir = false;
let mut changes_in_index = false;
let mut changed_in_workdir = false;
// Print index changes
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
if entry.status().contains(git2::STATUS_WT_DELETED) {
rm_in_workdir = true;
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => "new file: ",
s if s.contains(git2::STATUS_INDEX_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_INDEX_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_INDEX_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes to be committed:
# (use \"git reset HEAD <file>...\" to unstage)
#");
header = true;
}
let old_path = entry.head_to_index().unwrap().old_file().path();
let new_path = entry.head_to_index().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changes_in_index = true;
println!("#");
}
header = false;
// Print workdir changes to tracked files
for entry in statuses.iter() {
// With `STATUS_OPT_INCLUDE_UNMODIFIED` (not used in this example)
// `index_to_workdir` may not be `None` even if there are no differences,
// in which case it will be a `Delta::Unmodified`.
if entry.status() == git2::STATUS_CURRENT ||
entry.index_to_workdir().is_none() {
continue
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_WT_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_WT_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_WT_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_WT_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes not staged for commit:
# (use \"git add{} <file>...\" to update what will be committed)
# (use \"git checkout -- <file>...\" to discard changes in working directory)
#\
", if rm_in_workdir {"/rm"} else {""});
header = true;
}
let old_path = entry.index_to_workdir().unwrap().old_file().path();
let new_path = entry.index_to_workdir().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header
|
header = false;
// Print untracked files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
if!header {
println!("\
# Untracked files
# (use \"git add <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
header = false;
// Print ignored files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_IGNORED) {
if!header {
println!("\
# Ignored files
# (use \"git add -f <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
if!changes_in_index && changed_in_workdir {
println!("no changes added to commit (use \"git add\" and/or \
\"git commit -a\")");
}
}
// This version of the output prefixes each path with two status columns and
// shows submodule status information.
fn print_short(repo: &Repository, statuses: git2::Statuses) {
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
let mut istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => 'A',
s if s.contains(git2::STATUS_INDEX_MODIFIED) => 'M',
s if s.contains(git2::STATUS_INDEX_DELETED) => 'D',
s if s.contains(git2::STATUS_INDEX_RENAMED) => 'R',
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => 'T',
_ =>'',
};
let mut wstatus = match entry.status() {
s if s.contains(git2::STATUS_WT_NEW) => {
if istatus =='' { istatus = '?'; } '?'
}
s if s.contains(git2::STATUS_WT_MODIFIED) => 'M',
s if s.contains(git2::STATUS_WT_DELETED) => 'D',
s if s.contains(git2::STATUS_WT_RENAMED) => 'R',
s if s.contains(git2::STATUS_WT_TYPECHANGE) => 'T',
_ =>'',
};
if entry.status().contains(git2::STATUS_IGNORED) {
istatus = '!';
wstatus = '!';
}
if istatus == '?' && wstatus == '?' { continue }
let mut extra = "";
// A commit in a tree is how submodules are stored, so let's go take a
// look at its status.
//
// TODO: check for GIT_FILEMODE_COMMIT
let status = entry.index_to_workdir().and_then(|diff| {
let ignore = SubmoduleIgnore::Unspecified;
diff.new_file().path_bytes()
.and_then(|s| str::from_utf8(s).ok())
.and_then(|name| repo.submodule_status(name, ignore).ok())
});
if let Some(status) = status {
if status.contains(git2::SUBMODULE_STATUS_WD_MODIFIED) {
extra = " (new commits)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_INDEX_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_WD_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_UNTRACKED) {
extra = " (untracked content)";
}
}
let (mut a, mut b, mut c) = (None, None, None);
if let Some(diff) = entry.head_to_index() {
a = diff.old_file().path();
b = diff.new_file().path();
}
if let Some(diff) = entry.index_to_workdir() {
a = a.or(diff.old_file().path());
b = b.or(diff.old_file().path());
c = diff.new_file().path();
}
match (istatus, wstatus) {
('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(),
b.unwrap().display(), c.unwrap().display(),
extra),
('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(),
b.unwrap().display(), extra),
(i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(),
c.unwrap().display(), extra),
(i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra),
}
}
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
println!("?? {}", entry.index_to_workdir().unwrap().old_file()
.path().unwrap().display());
}
}
impl Args {
fn format(&self) -> Format {
if self.flag_short { Format::Short }
else if self.flag_long { Format::Long }
else if self.flag_porcelain { Format::Porcelain }
else if self.flag_z { Format::Porcelain }
else { Format::Long }
}
}
fn main() {
const USAGE: &'static str = "
usage: status [options] [--] [<spec>..]
Options:
-s, --short show short statuses
--long show longer statuses (default)
--porcelain ??
-b, --branch show branch information
-z ??
--ignored show ignored files as well
--untracked-files <opt> setting for showing untracked files [no|normal|all]
--ignore-submodules <opt> setting for ignoring submodules [all]
--git-dir <dir> git directory to analyze
--repeat repeatedly show status, sleeping inbetween
--list-submodules show submodules
-h, --help show this message
";
let args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
match run(&args) {
Ok(()) => {}
Err(e) => println!("error: {}", e),
}
}
|
{
changed_in_workdir = true;
println!("#");
}
|
conditional_block
|
status.rs
|
/*
* libgit2 "status" example - shows how to use the status APIs
*
* Written by the libgit2 contributors
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#![deny(warnings)]
extern crate git2;
extern crate docopt;
extern crate rustc_serialize;
use std::str;
use docopt::Docopt;
use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore};
#[derive(RustcDecodable)]
struct Args {
arg_spec: Vec<String>,
flag_short: bool,
flag_long: bool,
flag_porcelain: bool,
flag_branch: bool,
flag_z: bool,
flag_ignored: bool,
flag_untracked_files: Option<String>,
flag_ignore_submodules: Option<String>,
flag_git_dir: Option<String>,
flag_repeat: bool,
flag_list_submodules: bool,
}
#[derive(Eq, PartialEq)]
enum Format { Long, Short, Porcelain }
fn run(args: &Args) -> Result<(), Error> {
let path = args.flag_git_dir.clone().unwrap_or(".".to_string());
let repo = try!(Repository::open(&path));
if repo.is_bare() {
return Err(Error::from_str("cannot report status on bare repository"))
}
let mut opts = StatusOptions::new();
opts.include_ignored(args.flag_ignored);
match args.flag_untracked_files.as_ref().map(|s| &s[..]) {
Some("no") => { opts.include_untracked(false); }
Some("normal") => { opts.include_untracked(true); }
Some("all") => {
opts.include_untracked(true).recurse_untracked_dirs(true);
}
Some(_) => return Err(Error::from_str("invalid untracked-files value")),
None => {}
}
match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) {
Some("all") => { opts.exclude_submodules(true); }
Some(_) => return Err(Error::from_str("invalid ignore-submodules value")),
None => {}
}
opts.include_untracked(!args.flag_ignored);
for spec in args.arg_spec.iter() {
opts.pathspec(spec);
}
loop {
if args.flag_repeat {
println!("\u{1b}[H\u{1b}[2J");
}
let statuses = try!(repo.statuses(Some(&mut opts)));
if args.flag_branch {
try!(show_branch(&repo, args.format()));
}
if args.flag_list_submodules {
try!(print_submodules(&repo));
}
if args.format() == Format::Long {
print_long(statuses);
} else {
print_short(&repo, statuses);
}
if args.flag_repeat {
std::thread::sleep_ms(10000);
} else {
return Ok(())
}
}
}
fn show_branch(repo: &Repository, format: Format) -> Result<(), Error> {
let head = match repo.head() {
Ok(head) => Some(head),
Err(ref e) if e.code() == ErrorCode::UnbornBranch ||
e.code() == ErrorCode::NotFound => None,
Err(e) => return Err(e),
};
let head = head.as_ref().and_then(|h| h.shorthand());
if format == Format::Long {
println!("# On branch {}",
head.unwrap_or("Not currently on any branch"));
} else {
println!("## {}", head.unwrap_or("HEAD (no branch)"));
}
Ok(())
}
fn print_submodules(repo: &Repository) -> Result<(), Error> {
let modules = try!(repo.submodules());
println!("# Submodules");
for sm in modules.iter() {
println!("# - submodule '{}' at {}", sm.name().unwrap(),
sm.path().display());
}
Ok(())
}
// This function print out an output similar to git's status command in long
// form, including the command-line hints.
fn print_long(statuses: git2::Statuses) {
let mut header = false;
let mut rm_in_workdir = false;
let mut changes_in_index = false;
let mut changed_in_workdir = false;
// Print index changes
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
if entry.status().contains(git2::STATUS_WT_DELETED) {
rm_in_workdir = true;
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => "new file: ",
s if s.contains(git2::STATUS_INDEX_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_INDEX_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_INDEX_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes to be committed:
# (use \"git reset HEAD <file>...\" to unstage)
#");
header = true;
}
let old_path = entry.head_to_index().unwrap().old_file().path();
let new_path = entry.head_to_index().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changes_in_index = true;
println!("#");
}
header = false;
// Print workdir changes to tracked files
for entry in statuses.iter() {
// With `STATUS_OPT_INCLUDE_UNMODIFIED` (not used in this example)
// `index_to_workdir` may not be `None` even if there are no differences,
// in which case it will be a `Delta::Unmodified`.
if entry.status() == git2::STATUS_CURRENT ||
entry.index_to_workdir().is_none() {
continue
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_WT_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_WT_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_WT_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_WT_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes not staged for commit:
# (use \"git add{} <file>...\" to update what will be committed)
# (use \"git checkout -- <file>...\" to discard changes in working directory)
#\
", if rm_in_workdir {"/rm"} else {""});
header = true;
}
let old_path = entry.index_to_workdir().unwrap().old_file().path();
let new_path = entry.index_to_workdir().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changed_in_workdir = true;
println!("#");
}
header = false;
// Print untracked files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
if!header {
println!("\
# Untracked files
# (use \"git add <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
header = false;
// Print ignored files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_IGNORED) {
if!header {
println!("\
# Ignored files
# (use \"git add -f <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
if!changes_in_index && changed_in_workdir {
println!("no changes added to commit (use \"git add\" and/or \
\"git commit -a\")");
}
}
// This version of the output prefixes each path with two status columns and
// shows submodule status information.
fn print_short(repo: &Repository, statuses: git2::Statuses) {
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
let mut istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => 'A',
s if s.contains(git2::STATUS_INDEX_MODIFIED) => 'M',
s if s.contains(git2::STATUS_INDEX_DELETED) => 'D',
s if s.contains(git2::STATUS_INDEX_RENAMED) => 'R',
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => 'T',
_ =>'',
};
let mut wstatus = match entry.status() {
s if s.contains(git2::STATUS_WT_NEW) => {
if istatus =='' { istatus = '?'; } '?'
}
s if s.contains(git2::STATUS_WT_MODIFIED) => 'M',
s if s.contains(git2::STATUS_WT_DELETED) => 'D',
s if s.contains(git2::STATUS_WT_RENAMED) => 'R',
s if s.contains(git2::STATUS_WT_TYPECHANGE) => 'T',
_ =>'',
};
if entry.status().contains(git2::STATUS_IGNORED) {
istatus = '!';
wstatus = '!';
}
if istatus == '?' && wstatus == '?' { continue }
let mut extra = "";
// A commit in a tree is how submodules are stored, so let's go take a
// look at its status.
//
// TODO: check for GIT_FILEMODE_COMMIT
let status = entry.index_to_workdir().and_then(|diff| {
let ignore = SubmoduleIgnore::Unspecified;
diff.new_file().path_bytes()
.and_then(|s| str::from_utf8(s).ok())
.and_then(|name| repo.submodule_status(name, ignore).ok())
});
if let Some(status) = status {
if status.contains(git2::SUBMODULE_STATUS_WD_MODIFIED) {
extra = " (new commits)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_INDEX_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_WD_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_UNTRACKED) {
extra = " (untracked content)";
}
}
let (mut a, mut b, mut c) = (None, None, None);
if let Some(diff) = entry.head_to_index() {
a = diff.old_file().path();
b = diff.new_file().path();
}
if let Some(diff) = entry.index_to_workdir() {
a = a.or(diff.old_file().path());
b = b.or(diff.old_file().path());
c = diff.new_file().path();
}
match (istatus, wstatus) {
('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(),
b.unwrap().display(), c.unwrap().display(),
extra),
('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(),
b.unwrap().display(), extra),
(i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(),
c.unwrap().display(), extra),
(i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra),
}
}
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
println!("?? {}", entry.index_to_workdir().unwrap().old_file()
.path().unwrap().display());
}
}
impl Args {
fn format(&self) -> Format {
if self.flag_short { Format::Short }
else if self.flag_long { Format::Long }
else if self.flag_porcelain { Format::Porcelain }
else if self.flag_z { Format::Porcelain }
else { Format::Long }
}
}
fn
|
() {
const USAGE: &'static str = "
usage: status [options] [--] [<spec>..]
Options:
-s, --short show short statuses
--long show longer statuses (default)
--porcelain ??
-b, --branch show branch information
-z ??
--ignored show ignored files as well
--untracked-files <opt> setting for showing untracked files [no|normal|all]
--ignore-submodules <opt> setting for ignoring submodules [all]
--git-dir <dir> git directory to analyze
--repeat repeatedly show status, sleeping inbetween
--list-submodules show submodules
-h, --help show this message
";
let args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
match run(&args) {
Ok(()) => {}
Err(e) => println!("error: {}", e),
}
}
|
main
|
identifier_name
|
status.rs
|
/*
* libgit2 "status" example - shows how to use the status APIs
*
* Written by the libgit2 contributors
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#![deny(warnings)]
extern crate git2;
extern crate docopt;
extern crate rustc_serialize;
use std::str;
use docopt::Docopt;
use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore};
#[derive(RustcDecodable)]
struct Args {
arg_spec: Vec<String>,
flag_short: bool,
flag_long: bool,
flag_porcelain: bool,
flag_branch: bool,
flag_z: bool,
flag_ignored: bool,
flag_untracked_files: Option<String>,
flag_ignore_submodules: Option<String>,
flag_git_dir: Option<String>,
flag_repeat: bool,
flag_list_submodules: bool,
}
#[derive(Eq, PartialEq)]
enum Format { Long, Short, Porcelain }
fn run(args: &Args) -> Result<(), Error> {
let path = args.flag_git_dir.clone().unwrap_or(".".to_string());
let repo = try!(Repository::open(&path));
if repo.is_bare() {
return Err(Error::from_str("cannot report status on bare repository"))
}
let mut opts = StatusOptions::new();
opts.include_ignored(args.flag_ignored);
match args.flag_untracked_files.as_ref().map(|s| &s[..]) {
Some("no") => { opts.include_untracked(false); }
Some("normal") => { opts.include_untracked(true); }
Some("all") => {
opts.include_untracked(true).recurse_untracked_dirs(true);
}
Some(_) => return Err(Error::from_str("invalid untracked-files value")),
None => {}
}
match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) {
Some("all") => { opts.exclude_submodules(true); }
Some(_) => return Err(Error::from_str("invalid ignore-submodules value")),
None => {}
}
opts.include_untracked(!args.flag_ignored);
for spec in args.arg_spec.iter() {
opts.pathspec(spec);
}
loop {
if args.flag_repeat {
println!("\u{1b}[H\u{1b}[2J");
}
let statuses = try!(repo.statuses(Some(&mut opts)));
if args.flag_branch {
try!(show_branch(&repo, args.format()));
}
if args.flag_list_submodules {
try!(print_submodules(&repo));
}
if args.format() == Format::Long {
print_long(statuses);
} else {
print_short(&repo, statuses);
}
if args.flag_repeat {
std::thread::sleep_ms(10000);
} else {
return Ok(())
}
}
}
fn show_branch(repo: &Repository, format: Format) -> Result<(), Error> {
let head = match repo.head() {
Ok(head) => Some(head),
Err(ref e) if e.code() == ErrorCode::UnbornBranch ||
e.code() == ErrorCode::NotFound => None,
Err(e) => return Err(e),
};
let head = head.as_ref().and_then(|h| h.shorthand());
if format == Format::Long {
println!("# On branch {}",
head.unwrap_or("Not currently on any branch"));
} else {
println!("## {}", head.unwrap_or("HEAD (no branch)"));
}
Ok(())
}
fn print_submodules(repo: &Repository) -> Result<(), Error> {
let modules = try!(repo.submodules());
println!("# Submodules");
for sm in modules.iter() {
println!("# - submodule '{}' at {}", sm.name().unwrap(),
sm.path().display());
}
Ok(())
}
// This function print out an output similar to git's status command in long
// form, including the command-line hints.
fn print_long(statuses: git2::Statuses) {
let mut header = false;
let mut rm_in_workdir = false;
let mut changes_in_index = false;
let mut changed_in_workdir = false;
// Print index changes
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
if entry.status().contains(git2::STATUS_WT_DELETED) {
rm_in_workdir = true;
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => "new file: ",
s if s.contains(git2::STATUS_INDEX_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_INDEX_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_INDEX_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes to be committed:
# (use \"git reset HEAD <file>...\" to unstage)
#");
header = true;
}
let old_path = entry.head_to_index().unwrap().old_file().path();
let new_path = entry.head_to_index().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changes_in_index = true;
println!("#");
}
header = false;
// Print workdir changes to tracked files
for entry in statuses.iter() {
// With `STATUS_OPT_INCLUDE_UNMODIFIED` (not used in this example)
// `index_to_workdir` may not be `None` even if there are no differences,
// in which case it will be a `Delta::Unmodified`.
if entry.status() == git2::STATUS_CURRENT ||
entry.index_to_workdir().is_none() {
continue
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_WT_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_WT_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_WT_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_WT_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes not staged for commit:
# (use \"git add{} <file>...\" to update what will be committed)
# (use \"git checkout -- <file>...\" to discard changes in working directory)
#\
", if rm_in_workdir {"/rm"} else {""});
header = true;
}
let old_path = entry.index_to_workdir().unwrap().old_file().path();
let new_path = entry.index_to_workdir().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changed_in_workdir = true;
println!("#");
}
header = false;
// Print untracked files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
if!header {
println!("\
# Untracked files
# (use \"git add <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
header = false;
// Print ignored files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_IGNORED) {
if!header {
println!("\
# Ignored files
# (use \"git add -f <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
if!changes_in_index && changed_in_workdir {
println!("no changes added to commit (use \"git add\" and/or \
\"git commit -a\")");
}
}
// This version of the output prefixes each path with two status columns and
// shows submodule status information.
fn print_short(repo: &Repository, statuses: git2::Statuses) {
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
let mut istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => 'A',
s if s.contains(git2::STATUS_INDEX_MODIFIED) => 'M',
s if s.contains(git2::STATUS_INDEX_DELETED) => 'D',
s if s.contains(git2::STATUS_INDEX_RENAMED) => 'R',
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => 'T',
_ =>'',
};
let mut wstatus = match entry.status() {
s if s.contains(git2::STATUS_WT_NEW) => {
if istatus =='' { istatus = '?'; } '?'
}
s if s.contains(git2::STATUS_WT_MODIFIED) => 'M',
s if s.contains(git2::STATUS_WT_DELETED) => 'D',
s if s.contains(git2::STATUS_WT_RENAMED) => 'R',
s if s.contains(git2::STATUS_WT_TYPECHANGE) => 'T',
_ =>'',
};
if entry.status().contains(git2::STATUS_IGNORED) {
istatus = '!';
wstatus = '!';
}
if istatus == '?' && wstatus == '?' { continue }
let mut extra = "";
// A commit in a tree is how submodules are stored, so let's go take a
// look at its status.
//
// TODO: check for GIT_FILEMODE_COMMIT
let status = entry.index_to_workdir().and_then(|diff| {
let ignore = SubmoduleIgnore::Unspecified;
diff.new_file().path_bytes()
.and_then(|s| str::from_utf8(s).ok())
.and_then(|name| repo.submodule_status(name, ignore).ok())
});
if let Some(status) = status {
if status.contains(git2::SUBMODULE_STATUS_WD_MODIFIED) {
extra = " (new commits)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_INDEX_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_WD_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_UNTRACKED) {
extra = " (untracked content)";
}
}
let (mut a, mut b, mut c) = (None, None, None);
if let Some(diff) = entry.head_to_index() {
a = diff.old_file().path();
b = diff.new_file().path();
}
if let Some(diff) = entry.index_to_workdir() {
a = a.or(diff.old_file().path());
b = b.or(diff.old_file().path());
c = diff.new_file().path();
}
match (istatus, wstatus) {
('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(),
b.unwrap().display(), c.unwrap().display(),
extra),
('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(),
b.unwrap().display(), extra),
(i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(),
c.unwrap().display(), extra),
(i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra),
}
}
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
println!("?? {}", entry.index_to_workdir().unwrap().old_file()
.path().unwrap().display());
}
}
impl Args {
fn format(&self) -> Format {
if self.flag_short { Format::Short }
else if self.flag_long { Format::Long }
else if self.flag_porcelain { Format::Porcelain }
else if self.flag_z { Format::Porcelain }
else { Format::Long }
}
}
fn main() {
const USAGE: &'static str = "
usage: status [options] [--] [<spec>..]
Options:
-s, --short show short statuses
--long show longer statuses (default)
--porcelain ??
-b, --branch show branch information
-z ??
--ignored show ignored files as well
--untracked-files <opt> setting for showing untracked files [no|normal|all]
--ignore-submodules <opt> setting for ignoring submodules [all]
--git-dir <dir> git directory to analyze
--repeat repeatedly show status, sleeping inbetween
--list-submodules show submodules
-h, --help show this message
";
let args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
match run(&args) {
|
Ok(()) => {}
Err(e) => println!("error: {}", e),
}
}
|
random_line_split
|
|
status.rs
|
/*
* libgit2 "status" example - shows how to use the status APIs
*
* Written by the libgit2 contributors
*
* To the extent possible under law, the author(s) have dedicated all copyright
* and related and neighboring rights to this software to the public domain
* worldwide. This software is distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along
* with this software. If not, see
* <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#![deny(warnings)]
extern crate git2;
extern crate docopt;
extern crate rustc_serialize;
use std::str;
use docopt::Docopt;
use git2::{Repository, Error, StatusOptions, ErrorCode, SubmoduleIgnore};
#[derive(RustcDecodable)]
struct Args {
arg_spec: Vec<String>,
flag_short: bool,
flag_long: bool,
flag_porcelain: bool,
flag_branch: bool,
flag_z: bool,
flag_ignored: bool,
flag_untracked_files: Option<String>,
flag_ignore_submodules: Option<String>,
flag_git_dir: Option<String>,
flag_repeat: bool,
flag_list_submodules: bool,
}
#[derive(Eq, PartialEq)]
enum Format { Long, Short, Porcelain }
fn run(args: &Args) -> Result<(), Error> {
let path = args.flag_git_dir.clone().unwrap_or(".".to_string());
let repo = try!(Repository::open(&path));
if repo.is_bare() {
return Err(Error::from_str("cannot report status on bare repository"))
}
let mut opts = StatusOptions::new();
opts.include_ignored(args.flag_ignored);
match args.flag_untracked_files.as_ref().map(|s| &s[..]) {
Some("no") => { opts.include_untracked(false); }
Some("normal") => { opts.include_untracked(true); }
Some("all") => {
opts.include_untracked(true).recurse_untracked_dirs(true);
}
Some(_) => return Err(Error::from_str("invalid untracked-files value")),
None => {}
}
match args.flag_ignore_submodules.as_ref().map(|s| &s[..]) {
Some("all") => { opts.exclude_submodules(true); }
Some(_) => return Err(Error::from_str("invalid ignore-submodules value")),
None => {}
}
opts.include_untracked(!args.flag_ignored);
for spec in args.arg_spec.iter() {
opts.pathspec(spec);
}
loop {
if args.flag_repeat {
println!("\u{1b}[H\u{1b}[2J");
}
let statuses = try!(repo.statuses(Some(&mut opts)));
if args.flag_branch {
try!(show_branch(&repo, args.format()));
}
if args.flag_list_submodules {
try!(print_submodules(&repo));
}
if args.format() == Format::Long {
print_long(statuses);
} else {
print_short(&repo, statuses);
}
if args.flag_repeat {
std::thread::sleep_ms(10000);
} else {
return Ok(())
}
}
}
fn show_branch(repo: &Repository, format: Format) -> Result<(), Error> {
let head = match repo.head() {
Ok(head) => Some(head),
Err(ref e) if e.code() == ErrorCode::UnbornBranch ||
e.code() == ErrorCode::NotFound => None,
Err(e) => return Err(e),
};
let head = head.as_ref().and_then(|h| h.shorthand());
if format == Format::Long {
println!("# On branch {}",
head.unwrap_or("Not currently on any branch"));
} else {
println!("## {}", head.unwrap_or("HEAD (no branch)"));
}
Ok(())
}
fn print_submodules(repo: &Repository) -> Result<(), Error> {
let modules = try!(repo.submodules());
println!("# Submodules");
for sm in modules.iter() {
println!("# - submodule '{}' at {}", sm.name().unwrap(),
sm.path().display());
}
Ok(())
}
// This function print out an output similar to git's status command in long
// form, including the command-line hints.
fn print_long(statuses: git2::Statuses) {
let mut header = false;
let mut rm_in_workdir = false;
let mut changes_in_index = false;
let mut changed_in_workdir = false;
// Print index changes
for entry in statuses.iter().filter(|e| e.status()!= git2::STATUS_CURRENT) {
if entry.status().contains(git2::STATUS_WT_DELETED) {
rm_in_workdir = true;
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => "new file: ",
s if s.contains(git2::STATUS_INDEX_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_INDEX_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_INDEX_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes to be committed:
# (use \"git reset HEAD <file>...\" to unstage)
#");
header = true;
}
let old_path = entry.head_to_index().unwrap().old_file().path();
let new_path = entry.head_to_index().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changes_in_index = true;
println!("#");
}
header = false;
// Print workdir changes to tracked files
for entry in statuses.iter() {
// With `STATUS_OPT_INCLUDE_UNMODIFIED` (not used in this example)
// `index_to_workdir` may not be `None` even if there are no differences,
// in which case it will be a `Delta::Unmodified`.
if entry.status() == git2::STATUS_CURRENT ||
entry.index_to_workdir().is_none() {
continue
}
let istatus = match entry.status() {
s if s.contains(git2::STATUS_WT_MODIFIED) => "modified: ",
s if s.contains(git2::STATUS_WT_DELETED) => "deleted: ",
s if s.contains(git2::STATUS_WT_RENAMED) => "renamed: ",
s if s.contains(git2::STATUS_WT_TYPECHANGE) => "typechange:",
_ => continue,
};
if!header {
println!("\
# Changes not staged for commit:
# (use \"git add{} <file>...\" to update what will be committed)
# (use \"git checkout -- <file>...\" to discard changes in working directory)
#\
", if rm_in_workdir {"/rm"} else {""});
header = true;
}
let old_path = entry.index_to_workdir().unwrap().old_file().path();
let new_path = entry.index_to_workdir().unwrap().new_file().path();
match (old_path, new_path) {
(Some(ref old), Some(ref new)) if old!= new => {
println!("#\t{} {} -> {}", istatus, old.display(),
new.display());
}
(old, new) => {
println!("#\t{} {}", istatus, old.or(new).unwrap().display());
}
}
}
if header {
changed_in_workdir = true;
println!("#");
}
header = false;
// Print untracked files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
if!header {
println!("\
# Untracked files
# (use \"git add <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
header = false;
// Print ignored files
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_IGNORED) {
if!header {
println!("\
# Ignored files
# (use \"git add -f <file>...\" to include in what will be committed)
#");
header = true;
}
let file = entry.index_to_workdir().unwrap().old_file().path().unwrap();
println!("#\t{}", file.display());
}
if!changes_in_index && changed_in_workdir {
println!("no changes added to commit (use \"git add\" and/or \
\"git commit -a\")");
}
}
// This version of the output prefixes each path with two status columns and
// shows submodule status information.
fn print_short(repo: &Repository, statuses: git2::Statuses)
|
if entry.status().contains(git2::STATUS_IGNORED) {
istatus = '!';
wstatus = '!';
}
if istatus == '?' && wstatus == '?' { continue }
let mut extra = "";
// A commit in a tree is how submodules are stored, so let's go take a
// look at its status.
//
// TODO: check for GIT_FILEMODE_COMMIT
let status = entry.index_to_workdir().and_then(|diff| {
let ignore = SubmoduleIgnore::Unspecified;
diff.new_file().path_bytes()
.and_then(|s| str::from_utf8(s).ok())
.and_then(|name| repo.submodule_status(name, ignore).ok())
});
if let Some(status) = status {
if status.contains(git2::SUBMODULE_STATUS_WD_MODIFIED) {
extra = " (new commits)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_INDEX_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_WD_MODIFIED) {
extra = " (modified content)";
} else if status.contains(git2::SUBMODULE_STATUS_WD_UNTRACKED) {
extra = " (untracked content)";
}
}
let (mut a, mut b, mut c) = (None, None, None);
if let Some(diff) = entry.head_to_index() {
a = diff.old_file().path();
b = diff.new_file().path();
}
if let Some(diff) = entry.index_to_workdir() {
a = a.or(diff.old_file().path());
b = b.or(diff.old_file().path());
c = diff.new_file().path();
}
match (istatus, wstatus) {
('R', 'R') => println!("RR {} {} {}{}", a.unwrap().display(),
b.unwrap().display(), c.unwrap().display(),
extra),
('R', w) => println!("R{} {} {}{}", w, a.unwrap().display(),
b.unwrap().display(), extra),
(i, 'R') => println!("{}R {} {}{}", i, a.unwrap().display(),
c.unwrap().display(), extra),
(i, w) => println!("{}{} {}{}", i, w, a.unwrap().display(), extra),
}
}
for entry in statuses.iter().filter(|e| e.status() == git2::STATUS_WT_NEW) {
println!("?? {}", entry.index_to_workdir().unwrap().old_file()
.path().unwrap().display());
}
}
impl Args {
fn format(&self) -> Format {
if self.flag_short { Format::Short }
else if self.flag_long { Format::Long }
else if self.flag_porcelain { Format::Porcelain }
else if self.flag_z { Format::Porcelain }
else { Format::Long }
}
}
fn main() {
const USAGE: &'static str = "
usage: status [options] [--] [<spec>..]
Options:
-s, --short show short statuses
--long show longer statuses (default)
--porcelain ??
-b, --branch show branch information
-z ??
--ignored show ignored files as well
--untracked-files <opt> setting for showing untracked files [no|normal|all]
--ignore-submodules <opt> setting for ignoring submodules [all]
--git-dir <dir> git directory to analyze
--repeat repeatedly show status, sleeping inbetween
--list-submodules show submodules
-h, --help show this message
";
let args = Docopt::new(USAGE).and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
match run(&args) {
Ok(()) => {}
Err(e) => println!("error: {}", e),
}
}
|
{
for entry in statuses.iter().filter(|e| e.status() != git2::STATUS_CURRENT) {
let mut istatus = match entry.status() {
s if s.contains(git2::STATUS_INDEX_NEW) => 'A',
s if s.contains(git2::STATUS_INDEX_MODIFIED) => 'M',
s if s.contains(git2::STATUS_INDEX_DELETED) => 'D',
s if s.contains(git2::STATUS_INDEX_RENAMED) => 'R',
s if s.contains(git2::STATUS_INDEX_TYPECHANGE) => 'T',
_ => ' ',
};
let mut wstatus = match entry.status() {
s if s.contains(git2::STATUS_WT_NEW) => {
if istatus == ' ' { istatus = '?'; } '?'
}
s if s.contains(git2::STATUS_WT_MODIFIED) => 'M',
s if s.contains(git2::STATUS_WT_DELETED) => 'D',
s if s.contains(git2::STATUS_WT_RENAMED) => 'R',
s if s.contains(git2::STATUS_WT_TYPECHANGE) => 'T',
_ => ' ',
};
|
identifier_body
|
slice-pat-type-mismatches.rs
|
fn main() {
match "foo".to_string() {
['f', 'o',..] => {}
//~^ ERROR expected an array or slice, found `String`
_ => { }
};
// Note that this one works with default binding modes.
match &[0, 1, 2] {
[..] => {}
};
match &[0, 1, 2] {
&[..] =>
|
// ok
};
match [0, 1, 2] {
[0] => {}, //~ ERROR pattern requires
[0, 1, x @..] => {
let a: [_; 1] = x;
}
[0, 1, 2, 3, x @..] => {} //~ ERROR pattern requires
};
match does_not_exist { //~ ERROR cannot find value `does_not_exist` in this scope
[] => {}
};
}
fn another_fn_to_avoid_suppression() {
match Default::default()
{
[] => {} //~ ERROR type annotations needed
};
}
|
{}
|
conditional_block
|
slice-pat-type-mismatches.rs
|
fn main() {
match "foo".to_string() {
['f', 'o',..] => {}
//~^ ERROR expected an array or slice, found `String`
_ => { }
};
// Note that this one works with default binding modes.
match &[0, 1, 2] {
[..] => {}
};
match &[0, 1, 2] {
&[..] => {} // ok
};
|
[0, 1, x @..] => {
let a: [_; 1] = x;
}
[0, 1, 2, 3, x @..] => {} //~ ERROR pattern requires
};
match does_not_exist { //~ ERROR cannot find value `does_not_exist` in this scope
[] => {}
};
}
fn another_fn_to_avoid_suppression() {
match Default::default()
{
[] => {} //~ ERROR type annotations needed
};
}
|
match [0, 1, 2] {
[0] => {}, //~ ERROR pattern requires
|
random_line_split
|
slice-pat-type-mismatches.rs
|
fn main() {
match "foo".to_string() {
['f', 'o',..] => {}
//~^ ERROR expected an array or slice, found `String`
_ => { }
};
// Note that this one works with default binding modes.
match &[0, 1, 2] {
[..] => {}
};
match &[0, 1, 2] {
&[..] => {} // ok
};
match [0, 1, 2] {
[0] => {}, //~ ERROR pattern requires
[0, 1, x @..] => {
let a: [_; 1] = x;
}
[0, 1, 2, 3, x @..] => {} //~ ERROR pattern requires
};
match does_not_exist { //~ ERROR cannot find value `does_not_exist` in this scope
[] => {}
};
}
fn another_fn_to_avoid_suppression()
|
{
match Default::default()
{
[] => {} //~ ERROR type annotations needed
};
}
|
identifier_body
|
|
slice-pat-type-mismatches.rs
|
fn
|
() {
match "foo".to_string() {
['f', 'o',..] => {}
//~^ ERROR expected an array or slice, found `String`
_ => { }
};
// Note that this one works with default binding modes.
match &[0, 1, 2] {
[..] => {}
};
match &[0, 1, 2] {
&[..] => {} // ok
};
match [0, 1, 2] {
[0] => {}, //~ ERROR pattern requires
[0, 1, x @..] => {
let a: [_; 1] = x;
}
[0, 1, 2, 3, x @..] => {} //~ ERROR pattern requires
};
match does_not_exist { //~ ERROR cannot find value `does_not_exist` in this scope
[] => {}
};
}
fn another_fn_to_avoid_suppression() {
match Default::default()
{
[] => {} //~ ERROR type annotations needed
};
}
|
main
|
identifier_name
|
utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::front::map as ast_map;
use rustc::lint::{LateContext, LintContext};
use rustc::middle::def;
use rustc::middle::def_id::DefId;
use rustc_front::hir;
use syntax::ast;
use syntax::attr::mark_used;
use syntax::codemap::{ExpnFormat, Span};
use syntax::ptr::P;
/// Matches a type with a provided string, and returns its type parameters if successful
///
/// Try not to use this for types defined in crates you own, use match_lang_ty instead (for lint passes)
pub fn match_ty_unwrap<'a>(ty: &'a ast::Ty, segments: &[&str]) -> Option<&'a [P<ast::Ty>]> {
match ty.node {
ast::TyPath(_, ast::Path { segments: ref seg,.. }) => {
// So hir::Path isn't the full path, just the tokens that were provided.
// I could muck around with the maps and find the full path
// however the more efficient way is to simply reverse the iterators and zip them
// which will compare them in reverse until one of them runs out of segments
if seg.iter().rev().zip(segments.iter().rev()).all(|(a, b)| a.identifier.name.as_str() == *b) {
match seg.last() {
Some(&ast::PathSegment { parameters: ast::AngleBracketedParameters(ref a),.. }) => {
Some(&a.types)
}
_ => None
}
} else {
None
}
},
_ => None
}
}
/// Checks if a type has a #[servo_lang = "str"] attribute
pub fn match_lang_ty(cx: &LateContext, ty: &hir::Ty, value: &str) -> bool {
match ty.node {
hir::TyPath(..) => {},
_ => return false,
}
let def_id = match cx.tcx.def_map.borrow().get(&ty.id) {
Some(&def::PathResolution { base_def: def::DefTy(def_id, _),.. }) => def_id,
_ => return false,
};
match_lang_did(cx, def_id, value)
}
pub fn match_lang_did(cx: &LateContext, did: DefId, value: &str) -> bool {
cx.tcx.get_attrs(did).iter().any(|attr| {
match attr.node.value.node {
ast::MetaNameValue(ref name, ref val) if &**name == "servo_lang" => {
match val.node {
ast::LitStr(ref v, _) if &**v == value => {
mark_used(attr);
true
},
_ => false,
}
}
_ => false,
}
})
}
// Determines if a block is in an unsafe context so that an unhelpful
// lint can be aborted.
pub fn
|
(map: &ast_map::Map, id: ast::NodeId) -> bool {
match map.find(map.get_parent(id)) {
Some(ast_map::NodeImplItem(itm)) => {
match itm.node {
hir::ImplItemKind::Method(ref sig, _) => sig.unsafety == hir::Unsafety::Unsafe,
_ => false
}
},
Some(ast_map::NodeItem(itm)) => {
match itm.node {
hir::ItemFn(_, style, _, _, _, _) => match style {
hir::Unsafety::Unsafe => true,
_ => false,
},
_ => false,
}
}
_ => false // There are probably a couple of other unsafe cases we don't care to lint, those will need
// to be added.
}
}
/// check if a DefId's path matches the given absolute type path
/// usage e.g. with
/// `match_def_path(cx, id, &["core", "option", "Option"])`
pub fn match_def_path(cx: &LateContext, def_id: DefId, path: &[&str]) -> bool {
cx.tcx.with_path(def_id, |iter| iter.map(|elem| elem.name())
.zip(path.iter()).all(|(nm, p)| &nm.as_str() == p))
}
pub fn in_derive_expn(cx: &LateContext, span: Span) -> bool {
cx.sess().codemap().with_expn_info(span.expn_id,
|info| {
if let Some(i) = info {
if let ExpnFormat::MacroAttribute(n) = i.callee.format {
if n.as_str().contains("derive") {
true
} else { false }
} else { false }
} else { false }
})
}
|
unsafe_context
|
identifier_name
|
utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::front::map as ast_map;
use rustc::lint::{LateContext, LintContext};
use rustc::middle::def;
use rustc::middle::def_id::DefId;
use rustc_front::hir;
use syntax::ast;
use syntax::attr::mark_used;
use syntax::codemap::{ExpnFormat, Span};
use syntax::ptr::P;
/// Matches a type with a provided string, and returns its type parameters if successful
///
/// Try not to use this for types defined in crates you own, use match_lang_ty instead (for lint passes)
pub fn match_ty_unwrap<'a>(ty: &'a ast::Ty, segments: &[&str]) -> Option<&'a [P<ast::Ty>]> {
match ty.node {
ast::TyPath(_, ast::Path { segments: ref seg,.. }) => {
// So hir::Path isn't the full path, just the tokens that were provided.
// I could muck around with the maps and find the full path
// however the more efficient way is to simply reverse the iterators and zip them
// which will compare them in reverse until one of them runs out of segments
if seg.iter().rev().zip(segments.iter().rev()).all(|(a, b)| a.identifier.name.as_str() == *b) {
match seg.last() {
Some(&ast::PathSegment { parameters: ast::AngleBracketedParameters(ref a),.. }) => {
Some(&a.types)
}
_ => None
}
} else {
None
}
},
_ => None
}
}
/// Checks if a type has a #[servo_lang = "str"] attribute
pub fn match_lang_ty(cx: &LateContext, ty: &hir::Ty, value: &str) -> bool {
match ty.node {
hir::TyPath(..) => {},
_ => return false,
}
let def_id = match cx.tcx.def_map.borrow().get(&ty.id) {
Some(&def::PathResolution { base_def: def::DefTy(def_id, _),.. }) => def_id,
_ => return false,
};
match_lang_did(cx, def_id, value)
}
pub fn match_lang_did(cx: &LateContext, did: DefId, value: &str) -> bool {
cx.tcx.get_attrs(did).iter().any(|attr| {
match attr.node.value.node {
ast::MetaNameValue(ref name, ref val) if &**name == "servo_lang" => {
match val.node {
ast::LitStr(ref v, _) if &**v == value => {
mark_used(attr);
true
},
_ => false,
}
}
_ => false,
}
})
}
// Determines if a block is in an unsafe context so that an unhelpful
// lint can be aborted.
pub fn unsafe_context(map: &ast_map::Map, id: ast::NodeId) -> bool {
match map.find(map.get_parent(id)) {
Some(ast_map::NodeImplItem(itm)) => {
match itm.node {
hir::ImplItemKind::Method(ref sig, _) => sig.unsafety == hir::Unsafety::Unsafe,
_ => false
}
},
Some(ast_map::NodeItem(itm)) => {
match itm.node {
hir::ItemFn(_, style, _, _, _, _) => match style {
hir::Unsafety::Unsafe => true,
_ => false,
},
_ => false,
}
}
_ => false // There are probably a couple of other unsafe cases we don't care to lint, those will need
// to be added.
}
}
/// check if a DefId's path matches the given absolute type path
/// usage e.g. with
/// `match_def_path(cx, id, &["core", "option", "Option"])`
pub fn match_def_path(cx: &LateContext, def_id: DefId, path: &[&str]) -> bool {
cx.tcx.with_path(def_id, |iter| iter.map(|elem| elem.name())
.zip(path.iter()).all(|(nm, p)| &nm.as_str() == p))
}
pub fn in_derive_expn(cx: &LateContext, span: Span) -> bool {
cx.sess().codemap().with_expn_info(span.expn_id,
|info| {
if let Some(i) = info {
if let ExpnFormat::MacroAttribute(n) = i.callee.format
|
else { false }
} else { false }
})
}
|
{
if n.as_str().contains("derive") {
true
} else { false }
}
|
conditional_block
|
utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::front::map as ast_map;
use rustc::lint::{LateContext, LintContext};
use rustc::middle::def;
use rustc::middle::def_id::DefId;
use rustc_front::hir;
use syntax::ast;
use syntax::attr::mark_used;
use syntax::codemap::{ExpnFormat, Span};
use syntax::ptr::P;
/// Matches a type with a provided string, and returns its type parameters if successful
///
/// Try not to use this for types defined in crates you own, use match_lang_ty instead (for lint passes)
pub fn match_ty_unwrap<'a>(ty: &'a ast::Ty, segments: &[&str]) -> Option<&'a [P<ast::Ty>]> {
match ty.node {
ast::TyPath(_, ast::Path { segments: ref seg,.. }) => {
// So hir::Path isn't the full path, just the tokens that were provided.
// I could muck around with the maps and find the full path
// however the more efficient way is to simply reverse the iterators and zip them
// which will compare them in reverse until one of them runs out of segments
if seg.iter().rev().zip(segments.iter().rev()).all(|(a, b)| a.identifier.name.as_str() == *b) {
match seg.last() {
Some(&ast::PathSegment { parameters: ast::AngleBracketedParameters(ref a),.. }) => {
Some(&a.types)
}
_ => None
}
} else {
None
}
},
_ => None
}
}
/// Checks if a type has a #[servo_lang = "str"] attribute
pub fn match_lang_ty(cx: &LateContext, ty: &hir::Ty, value: &str) -> bool {
match ty.node {
hir::TyPath(..) => {},
_ => return false,
}
let def_id = match cx.tcx.def_map.borrow().get(&ty.id) {
Some(&def::PathResolution { base_def: def::DefTy(def_id, _),.. }) => def_id,
_ => return false,
};
match_lang_did(cx, def_id, value)
}
pub fn match_lang_did(cx: &LateContext, did: DefId, value: &str) -> bool {
cx.tcx.get_attrs(did).iter().any(|attr| {
match attr.node.value.node {
ast::MetaNameValue(ref name, ref val) if &**name == "servo_lang" => {
match val.node {
ast::LitStr(ref v, _) if &**v == value => {
mark_used(attr);
true
},
_ => false,
}
}
_ => false,
}
})
}
// Determines if a block is in an unsafe context so that an unhelpful
|
pub fn unsafe_context(map: &ast_map::Map, id: ast::NodeId) -> bool {
match map.find(map.get_parent(id)) {
Some(ast_map::NodeImplItem(itm)) => {
match itm.node {
hir::ImplItemKind::Method(ref sig, _) => sig.unsafety == hir::Unsafety::Unsafe,
_ => false
}
},
Some(ast_map::NodeItem(itm)) => {
match itm.node {
hir::ItemFn(_, style, _, _, _, _) => match style {
hir::Unsafety::Unsafe => true,
_ => false,
},
_ => false,
}
}
_ => false // There are probably a couple of other unsafe cases we don't care to lint, those will need
// to be added.
}
}
/// check if a DefId's path matches the given absolute type path
/// usage e.g. with
/// `match_def_path(cx, id, &["core", "option", "Option"])`
pub fn match_def_path(cx: &LateContext, def_id: DefId, path: &[&str]) -> bool {
cx.tcx.with_path(def_id, |iter| iter.map(|elem| elem.name())
.zip(path.iter()).all(|(nm, p)| &nm.as_str() == p))
}
pub fn in_derive_expn(cx: &LateContext, span: Span) -> bool {
cx.sess().codemap().with_expn_info(span.expn_id,
|info| {
if let Some(i) = info {
if let ExpnFormat::MacroAttribute(n) = i.callee.format {
if n.as_str().contains("derive") {
true
} else { false }
} else { false }
} else { false }
})
}
|
// lint can be aborted.
|
random_line_split
|
utils.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rustc::front::map as ast_map;
use rustc::lint::{LateContext, LintContext};
use rustc::middle::def;
use rustc::middle::def_id::DefId;
use rustc_front::hir;
use syntax::ast;
use syntax::attr::mark_used;
use syntax::codemap::{ExpnFormat, Span};
use syntax::ptr::P;
/// Matches a type with a provided string, and returns its type parameters if successful
///
/// Try not to use this for types defined in crates you own, use match_lang_ty instead (for lint passes)
pub fn match_ty_unwrap<'a>(ty: &'a ast::Ty, segments: &[&str]) -> Option<&'a [P<ast::Ty>]> {
match ty.node {
ast::TyPath(_, ast::Path { segments: ref seg,.. }) => {
// So hir::Path isn't the full path, just the tokens that were provided.
// I could muck around with the maps and find the full path
// however the more efficient way is to simply reverse the iterators and zip them
// which will compare them in reverse until one of them runs out of segments
if seg.iter().rev().zip(segments.iter().rev()).all(|(a, b)| a.identifier.name.as_str() == *b) {
match seg.last() {
Some(&ast::PathSegment { parameters: ast::AngleBracketedParameters(ref a),.. }) => {
Some(&a.types)
}
_ => None
}
} else {
None
}
},
_ => None
}
}
/// Checks if a type has a #[servo_lang = "str"] attribute
pub fn match_lang_ty(cx: &LateContext, ty: &hir::Ty, value: &str) -> bool {
match ty.node {
hir::TyPath(..) => {},
_ => return false,
}
let def_id = match cx.tcx.def_map.borrow().get(&ty.id) {
Some(&def::PathResolution { base_def: def::DefTy(def_id, _),.. }) => def_id,
_ => return false,
};
match_lang_did(cx, def_id, value)
}
pub fn match_lang_did(cx: &LateContext, did: DefId, value: &str) -> bool {
cx.tcx.get_attrs(did).iter().any(|attr| {
match attr.node.value.node {
ast::MetaNameValue(ref name, ref val) if &**name == "servo_lang" => {
match val.node {
ast::LitStr(ref v, _) if &**v == value => {
mark_used(attr);
true
},
_ => false,
}
}
_ => false,
}
})
}
// Determines if a block is in an unsafe context so that an unhelpful
// lint can be aborted.
pub fn unsafe_context(map: &ast_map::Map, id: ast::NodeId) -> bool {
match map.find(map.get_parent(id)) {
Some(ast_map::NodeImplItem(itm)) => {
match itm.node {
hir::ImplItemKind::Method(ref sig, _) => sig.unsafety == hir::Unsafety::Unsafe,
_ => false
}
},
Some(ast_map::NodeItem(itm)) => {
match itm.node {
hir::ItemFn(_, style, _, _, _, _) => match style {
hir::Unsafety::Unsafe => true,
_ => false,
},
_ => false,
}
}
_ => false // There are probably a couple of other unsafe cases we don't care to lint, those will need
// to be added.
}
}
/// check if a DefId's path matches the given absolute type path
/// usage e.g. with
/// `match_def_path(cx, id, &["core", "option", "Option"])`
pub fn match_def_path(cx: &LateContext, def_id: DefId, path: &[&str]) -> bool
|
pub fn in_derive_expn(cx: &LateContext, span: Span) -> bool {
cx.sess().codemap().with_expn_info(span.expn_id,
|info| {
if let Some(i) = info {
if let ExpnFormat::MacroAttribute(n) = i.callee.format {
if n.as_str().contains("derive") {
true
} else { false }
} else { false }
} else { false }
})
}
|
{
cx.tcx.with_path(def_id, |iter| iter.map(|elem| elem.name())
.zip(path.iter()).all(|(nm, p)| &nm.as_str() == p))
}
|
identifier_body
|
wallet_address.rs
|
//! Fractal Global Wallet Address
//!
//! This module holds the Fractal Global wallet address format, along with its parsing error
//! representing struct.
use std::convert::From;
use std::result::Result;
use std::error::Error;
use std::{fmt, str};
use std::str::FromStr;
use rust_base58::{ToBase58, FromBase58};
use rust_base58::base58::FromBase58Error;
#[cfg(feature = "json-types")]
use rustc_serialize::json;
/// The wallet address size.
///
/// This is the length, in bytes of the wallet addresses. It can be used to create arrays to store
/// complete addresses. Note: an address stored as a `[u8, WALLET_ADDRESS_LEN]` won't have any sort
/// of checksum verification, and as such, it should be used with extreme care, never using is as
/// an input or output mechanism, and only as an internal representation of the wallet address.
pub const WALLET_ADDRESS_LEN: usize = 7;
/// The object representation of a wallet address.
///
/// Wallet addresses are structs that act as as an easy manipulation object for wallet addresses.
/// Addresses that come from user input can be verified, and made sure they are correct.
///
/// Address can be used as strings or displayed using the `Display` trait:
///
/// ```
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let addr = WalletAddress::from_data([0u8; WALLET_ADDRESS_LEN]);
/// let addr_str = format!("{}", addr);
/// assert_eq!(addr_str, "fr111111111");
/// ```
///
/// All Fractal wallet addresses start with `fr`, and then they have a base-58 encoded string
/// representing `WALLET_ADDRESS_LEN+2` bytes. The first byte will be `0x00`, that the rest bytes
/// until `WALLET_ADDRESS_LEN` will compose the actual address, while the other two are the
/// checksum. That way addresses coming from user input can be verified:
///
/// ```
/// use std::str::FromStr;
/// use std::result::Result;
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let wallet: Result<WalletAddress, _> = "fr111111111".parse();
/// assert!(wallet.is_ok());
///
/// let wallet: Result<WalletAddress, _> = "fr111111112".parse();
/// assert!(wallet.is_err());
/// ```
///
/// The checksums are calculated by doing the `XOR` operation in all the bytes of the wallet address
/// and doing `XOR` of the checksum's first byte with the second one for each byte:
///
/// ```
/// # use fractal_utils::WALLET_ADDRESS_LEN;
/// #
/// let check_addr = [0x00, 0x11, 0x2A, 0x44, 0xCD, 0xFF, 0xE0];
/// # assert_eq!(check_addr.len(), WALLET_ADDRESS_LEN);
/// let mut checksum = [0u8; 2];
///
/// for b in &check_addr {
|
/// checksum[0] ^= *b;
/// checksum[1] ^= checksum[0];
/// }
///
/// assert_eq!(checksum, [0xAD, 0x07]);
/// ```
#[derive(Eq, PartialEq, PartialOrd, Ord, Debug, Clone, Copy, RustcEncodable, RustcDecodable)]
pub struct WalletAddress {
address: [u8; WALLET_ADDRESS_LEN],
}
impl WalletAddress {
/// Creates a new wallet address from raw data.
///
/// This should only be used if the raw input data is verified to be correct, ir it could lead
/// o a false address.
///
/// It will panic if the address does not start with byte `0x00`.
pub fn from_data(addr: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
assert_eq!(addr[0],
0x00,
"the provided address is not a correct Fractal Global wallet address, its \
first byt should be 0x00");
WalletAddress { address: addr }
}
/// Returns the wallet address bytes.
///
/// This could be useful to store the bytes in databases where space can be an issue, or where
/// fast search is required. It does not contain checksums nor any other verification mechanism.
pub fn get_raw(&self) -> &[u8] {
&self.address
}
}
impl From<[u8; WALLET_ADDRESS_LEN]> for WalletAddress {
fn from(other: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
WalletAddress { address: other }
}
}
impl FromStr for WalletAddress {
type Err = WalletAddressParseError;
fn from_str(s: &str) -> Result<WalletAddress, WalletAddressParseError> {
if &s[0..2]!= "fr" {
return Err(WalletAddressParseError::new(s,
"the address does not start with \"fr\"",
None));
}
let bytes = match s[2..].from_base58() {
Ok(b) => b,
Err(FromBase58Error::InvalidBase58Byte(c, i)) => {
let new_error = FromBase58Error::InvalidBase58Byte(c, i + 2);
return Err(WalletAddressParseError::new(s,
&format!("the address is not a valid \
base-58 encoded string: {}",
new_error),
Some(new_error)));
}
};
if bytes[0]!= 0x00 {
return Err(WalletAddressParseError::new(s,
"the first byte of the address is not 0x00",
None));
}
let mut checksum = [0u8; 2];
for byte in &bytes[..WALLET_ADDRESS_LEN] {
checksum[0] ^= *byte;
checksum[1] ^= checksum[0];
}
if checksum[0]!= bytes[WALLET_ADDRESS_LEN] ||
checksum[1]!= bytes[WALLET_ADDRESS_LEN + 1] {
Err(WalletAddressParseError::new(s, "checksum fail", None))
} else {
let mut address = [0u8; WALLET_ADDRESS_LEN];
address.clone_from_slice(&bytes[..WALLET_ADDRESS_LEN]);
Ok(WalletAddress::from_data(address))
}
}
}
#[cfg(feature = "json-types")]
/// The `WalletAddress` type can easily be converted to json, using its `to_json()` method. Note
/// that this will return a `Json::String` with the wallet address as a string in it.
impl json::ToJson for WalletAddress {
fn to_json(&self) -> json::Json {
json::Json::String(format!("{}", self))
}
}
impl fmt::Display for WalletAddress {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut arr = [0u8; WALLET_ADDRESS_LEN + 2];
arr[0..WALLET_ADDRESS_LEN].clone_from_slice(&self.address);
for byte in &self.address {
arr[WALLET_ADDRESS_LEN] ^= *byte;
arr[WALLET_ADDRESS_LEN + 1] ^= arr[WALLET_ADDRESS_LEN];
}
write!(f, "fr{}", arr.to_base58())
}
}
/// Wallet address parsing error.
///
/// This struct represents a wallet address parsing error. It can be used to check the validity of
/// wallet address strings, and implements common `Error` and `Display` traits.
#[derive(Debug)]
pub struct WalletAddressParseError {
description: String,
cause: Option<FromBase58Error>,
}
impl WalletAddressParseError {
fn new<S: AsRef<str>>(wallet_address: S,
error: S,
cause: Option<FromBase58Error>)
-> WalletAddressParseError {
WalletAddressParseError {
description: format!("the wallet address {:?} is not a valid Fractal Global wallet \
address, {}",
wallet_address.as_ref(),
error.as_ref()),
cause: cause,
}
}
}
impl fmt::Display for WalletAddressParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description)
}
}
impl Error for WalletAddressParseError {
fn description(&self) -> &str {
&self.description
}
fn cause(&self) -> Option<&Error> {
None
}
}
|
random_line_split
|
|
wallet_address.rs
|
//! Fractal Global Wallet Address
//!
//! This module holds the Fractal Global wallet address format, along with its parsing error
//! representing struct.
use std::convert::From;
use std::result::Result;
use std::error::Error;
use std::{fmt, str};
use std::str::FromStr;
use rust_base58::{ToBase58, FromBase58};
use rust_base58::base58::FromBase58Error;
#[cfg(feature = "json-types")]
use rustc_serialize::json;
/// The wallet address size.
///
/// This is the length, in bytes of the wallet addresses. It can be used to create arrays to store
/// complete addresses. Note: an address stored as a `[u8, WALLET_ADDRESS_LEN]` won't have any sort
/// of checksum verification, and as such, it should be used with extreme care, never using is as
/// an input or output mechanism, and only as an internal representation of the wallet address.
pub const WALLET_ADDRESS_LEN: usize = 7;
/// The object representation of a wallet address.
///
/// Wallet addresses are structs that act as as an easy manipulation object for wallet addresses.
/// Addresses that come from user input can be verified, and made sure they are correct.
///
/// Address can be used as strings or displayed using the `Display` trait:
///
/// ```
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let addr = WalletAddress::from_data([0u8; WALLET_ADDRESS_LEN]);
/// let addr_str = format!("{}", addr);
/// assert_eq!(addr_str, "fr111111111");
/// ```
///
/// All Fractal wallet addresses start with `fr`, and then they have a base-58 encoded string
/// representing `WALLET_ADDRESS_LEN+2` bytes. The first byte will be `0x00`, that the rest bytes
/// until `WALLET_ADDRESS_LEN` will compose the actual address, while the other two are the
/// checksum. That way addresses coming from user input can be verified:
///
/// ```
/// use std::str::FromStr;
/// use std::result::Result;
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let wallet: Result<WalletAddress, _> = "fr111111111".parse();
/// assert!(wallet.is_ok());
///
/// let wallet: Result<WalletAddress, _> = "fr111111112".parse();
/// assert!(wallet.is_err());
/// ```
///
/// The checksums are calculated by doing the `XOR` operation in all the bytes of the wallet address
/// and doing `XOR` of the checksum's first byte with the second one for each byte:
///
/// ```
/// # use fractal_utils::WALLET_ADDRESS_LEN;
/// #
/// let check_addr = [0x00, 0x11, 0x2A, 0x44, 0xCD, 0xFF, 0xE0];
/// # assert_eq!(check_addr.len(), WALLET_ADDRESS_LEN);
/// let mut checksum = [0u8; 2];
///
/// for b in &check_addr {
/// checksum[0] ^= *b;
/// checksum[1] ^= checksum[0];
/// }
///
/// assert_eq!(checksum, [0xAD, 0x07]);
/// ```
#[derive(Eq, PartialEq, PartialOrd, Ord, Debug, Clone, Copy, RustcEncodable, RustcDecodable)]
pub struct WalletAddress {
address: [u8; WALLET_ADDRESS_LEN],
}
impl WalletAddress {
/// Creates a new wallet address from raw data.
///
/// This should only be used if the raw input data is verified to be correct, ir it could lead
/// o a false address.
///
/// It will panic if the address does not start with byte `0x00`.
pub fn from_data(addr: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
assert_eq!(addr[0],
0x00,
"the provided address is not a correct Fractal Global wallet address, its \
first byt should be 0x00");
WalletAddress { address: addr }
}
/// Returns the wallet address bytes.
///
/// This could be useful to store the bytes in databases where space can be an issue, or where
/// fast search is required. It does not contain checksums nor any other verification mechanism.
pub fn get_raw(&self) -> &[u8] {
&self.address
}
}
impl From<[u8; WALLET_ADDRESS_LEN]> for WalletAddress {
fn from(other: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
WalletAddress { address: other }
}
}
impl FromStr for WalletAddress {
type Err = WalletAddressParseError;
fn from_str(s: &str) -> Result<WalletAddress, WalletAddressParseError> {
if &s[0..2]!= "fr" {
return Err(WalletAddressParseError::new(s,
"the address does not start with \"fr\"",
None));
}
let bytes = match s[2..].from_base58() {
Ok(b) => b,
Err(FromBase58Error::InvalidBase58Byte(c, i)) => {
let new_error = FromBase58Error::InvalidBase58Byte(c, i + 2);
return Err(WalletAddressParseError::new(s,
&format!("the address is not a valid \
base-58 encoded string: {}",
new_error),
Some(new_error)));
}
};
if bytes[0]!= 0x00
|
let mut checksum = [0u8; 2];
for byte in &bytes[..WALLET_ADDRESS_LEN] {
checksum[0] ^= *byte;
checksum[1] ^= checksum[0];
}
if checksum[0]!= bytes[WALLET_ADDRESS_LEN] ||
checksum[1]!= bytes[WALLET_ADDRESS_LEN + 1] {
Err(WalletAddressParseError::new(s, "checksum fail", None))
} else {
let mut address = [0u8; WALLET_ADDRESS_LEN];
address.clone_from_slice(&bytes[..WALLET_ADDRESS_LEN]);
Ok(WalletAddress::from_data(address))
}
}
}
#[cfg(feature = "json-types")]
/// The `WalletAddress` type can easily be converted to json, using its `to_json()` method. Note
/// that this will return a `Json::String` with the wallet address as a string in it.
impl json::ToJson for WalletAddress {
fn to_json(&self) -> json::Json {
json::Json::String(format!("{}", self))
}
}
impl fmt::Display for WalletAddress {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut arr = [0u8; WALLET_ADDRESS_LEN + 2];
arr[0..WALLET_ADDRESS_LEN].clone_from_slice(&self.address);
for byte in &self.address {
arr[WALLET_ADDRESS_LEN] ^= *byte;
arr[WALLET_ADDRESS_LEN + 1] ^= arr[WALLET_ADDRESS_LEN];
}
write!(f, "fr{}", arr.to_base58())
}
}
/// Wallet address parsing error.
///
/// This struct represents a wallet address parsing error. It can be used to check the validity of
/// wallet address strings, and implements common `Error` and `Display` traits.
#[derive(Debug)]
pub struct WalletAddressParseError {
description: String,
cause: Option<FromBase58Error>,
}
impl WalletAddressParseError {
fn new<S: AsRef<str>>(wallet_address: S,
error: S,
cause: Option<FromBase58Error>)
-> WalletAddressParseError {
WalletAddressParseError {
description: format!("the wallet address {:?} is not a valid Fractal Global wallet \
address, {}",
wallet_address.as_ref(),
error.as_ref()),
cause: cause,
}
}
}
impl fmt::Display for WalletAddressParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description)
}
}
impl Error for WalletAddressParseError {
fn description(&self) -> &str {
&self.description
}
fn cause(&self) -> Option<&Error> {
None
}
}
|
{
return Err(WalletAddressParseError::new(s,
"the first byte of the address is not 0x00",
None));
}
|
conditional_block
|
wallet_address.rs
|
//! Fractal Global Wallet Address
//!
//! This module holds the Fractal Global wallet address format, along with its parsing error
//! representing struct.
use std::convert::From;
use std::result::Result;
use std::error::Error;
use std::{fmt, str};
use std::str::FromStr;
use rust_base58::{ToBase58, FromBase58};
use rust_base58::base58::FromBase58Error;
#[cfg(feature = "json-types")]
use rustc_serialize::json;
/// The wallet address size.
///
/// This is the length, in bytes of the wallet addresses. It can be used to create arrays to store
/// complete addresses. Note: an address stored as a `[u8, WALLET_ADDRESS_LEN]` won't have any sort
/// of checksum verification, and as such, it should be used with extreme care, never using is as
/// an input or output mechanism, and only as an internal representation of the wallet address.
pub const WALLET_ADDRESS_LEN: usize = 7;
/// The object representation of a wallet address.
///
/// Wallet addresses are structs that act as as an easy manipulation object for wallet addresses.
/// Addresses that come from user input can be verified, and made sure they are correct.
///
/// Address can be used as strings or displayed using the `Display` trait:
///
/// ```
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let addr = WalletAddress::from_data([0u8; WALLET_ADDRESS_LEN]);
/// let addr_str = format!("{}", addr);
/// assert_eq!(addr_str, "fr111111111");
/// ```
///
/// All Fractal wallet addresses start with `fr`, and then they have a base-58 encoded string
/// representing `WALLET_ADDRESS_LEN+2` bytes. The first byte will be `0x00`, that the rest bytes
/// until `WALLET_ADDRESS_LEN` will compose the actual address, while the other two are the
/// checksum. That way addresses coming from user input can be verified:
///
/// ```
/// use std::str::FromStr;
/// use std::result::Result;
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let wallet: Result<WalletAddress, _> = "fr111111111".parse();
/// assert!(wallet.is_ok());
///
/// let wallet: Result<WalletAddress, _> = "fr111111112".parse();
/// assert!(wallet.is_err());
/// ```
///
/// The checksums are calculated by doing the `XOR` operation in all the bytes of the wallet address
/// and doing `XOR` of the checksum's first byte with the second one for each byte:
///
/// ```
/// # use fractal_utils::WALLET_ADDRESS_LEN;
/// #
/// let check_addr = [0x00, 0x11, 0x2A, 0x44, 0xCD, 0xFF, 0xE0];
/// # assert_eq!(check_addr.len(), WALLET_ADDRESS_LEN);
/// let mut checksum = [0u8; 2];
///
/// for b in &check_addr {
/// checksum[0] ^= *b;
/// checksum[1] ^= checksum[0];
/// }
///
/// assert_eq!(checksum, [0xAD, 0x07]);
/// ```
#[derive(Eq, PartialEq, PartialOrd, Ord, Debug, Clone, Copy, RustcEncodable, RustcDecodable)]
pub struct WalletAddress {
address: [u8; WALLET_ADDRESS_LEN],
}
impl WalletAddress {
/// Creates a new wallet address from raw data.
///
/// This should only be used if the raw input data is verified to be correct, ir it could lead
/// o a false address.
///
/// It will panic if the address does not start with byte `0x00`.
pub fn from_data(addr: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
assert_eq!(addr[0],
0x00,
"the provided address is not a correct Fractal Global wallet address, its \
first byt should be 0x00");
WalletAddress { address: addr }
}
/// Returns the wallet address bytes.
///
/// This could be useful to store the bytes in databases where space can be an issue, or where
/// fast search is required. It does not contain checksums nor any other verification mechanism.
pub fn get_raw(&self) -> &[u8] {
&self.address
}
}
impl From<[u8; WALLET_ADDRESS_LEN]> for WalletAddress {
fn from(other: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
WalletAddress { address: other }
}
}
impl FromStr for WalletAddress {
type Err = WalletAddressParseError;
fn from_str(s: &str) -> Result<WalletAddress, WalletAddressParseError> {
if &s[0..2]!= "fr" {
return Err(WalletAddressParseError::new(s,
"the address does not start with \"fr\"",
None));
}
let bytes = match s[2..].from_base58() {
Ok(b) => b,
Err(FromBase58Error::InvalidBase58Byte(c, i)) => {
let new_error = FromBase58Error::InvalidBase58Byte(c, i + 2);
return Err(WalletAddressParseError::new(s,
&format!("the address is not a valid \
base-58 encoded string: {}",
new_error),
Some(new_error)));
}
};
if bytes[0]!= 0x00 {
return Err(WalletAddressParseError::new(s,
"the first byte of the address is not 0x00",
None));
}
let mut checksum = [0u8; 2];
for byte in &bytes[..WALLET_ADDRESS_LEN] {
checksum[0] ^= *byte;
checksum[1] ^= checksum[0];
}
if checksum[0]!= bytes[WALLET_ADDRESS_LEN] ||
checksum[1]!= bytes[WALLET_ADDRESS_LEN + 1] {
Err(WalletAddressParseError::new(s, "checksum fail", None))
} else {
let mut address = [0u8; WALLET_ADDRESS_LEN];
address.clone_from_slice(&bytes[..WALLET_ADDRESS_LEN]);
Ok(WalletAddress::from_data(address))
}
}
}
#[cfg(feature = "json-types")]
/// The `WalletAddress` type can easily be converted to json, using its `to_json()` method. Note
/// that this will return a `Json::String` with the wallet address as a string in it.
impl json::ToJson for WalletAddress {
fn to_json(&self) -> json::Json {
json::Json::String(format!("{}", self))
}
}
impl fmt::Display for WalletAddress {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut arr = [0u8; WALLET_ADDRESS_LEN + 2];
arr[0..WALLET_ADDRESS_LEN].clone_from_slice(&self.address);
for byte in &self.address {
arr[WALLET_ADDRESS_LEN] ^= *byte;
arr[WALLET_ADDRESS_LEN + 1] ^= arr[WALLET_ADDRESS_LEN];
}
write!(f, "fr{}", arr.to_base58())
}
}
/// Wallet address parsing error.
///
/// This struct represents a wallet address parsing error. It can be used to check the validity of
/// wallet address strings, and implements common `Error` and `Display` traits.
#[derive(Debug)]
pub struct WalletAddressParseError {
description: String,
cause: Option<FromBase58Error>,
}
impl WalletAddressParseError {
fn new<S: AsRef<str>>(wallet_address: S,
error: S,
cause: Option<FromBase58Error>)
-> WalletAddressParseError {
WalletAddressParseError {
description: format!("the wallet address {:?} is not a valid Fractal Global wallet \
address, {}",
wallet_address.as_ref(),
error.as_ref()),
cause: cause,
}
}
}
impl fmt::Display for WalletAddressParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description)
}
}
impl Error for WalletAddressParseError {
fn description(&self) -> &str {
&self.description
}
fn cause(&self) -> Option<&Error>
|
}
|
{
None
}
|
identifier_body
|
wallet_address.rs
|
//! Fractal Global Wallet Address
//!
//! This module holds the Fractal Global wallet address format, along with its parsing error
//! representing struct.
use std::convert::From;
use std::result::Result;
use std::error::Error;
use std::{fmt, str};
use std::str::FromStr;
use rust_base58::{ToBase58, FromBase58};
use rust_base58::base58::FromBase58Error;
#[cfg(feature = "json-types")]
use rustc_serialize::json;
/// The wallet address size.
///
/// This is the length, in bytes of the wallet addresses. It can be used to create arrays to store
/// complete addresses. Note: an address stored as a `[u8, WALLET_ADDRESS_LEN]` won't have any sort
/// of checksum verification, and as such, it should be used with extreme care, never using is as
/// an input or output mechanism, and only as an internal representation of the wallet address.
pub const WALLET_ADDRESS_LEN: usize = 7;
/// The object representation of a wallet address.
///
/// Wallet addresses are structs that act as as an easy manipulation object for wallet addresses.
/// Addresses that come from user input can be verified, and made sure they are correct.
///
/// Address can be used as strings or displayed using the `Display` trait:
///
/// ```
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let addr = WalletAddress::from_data([0u8; WALLET_ADDRESS_LEN]);
/// let addr_str = format!("{}", addr);
/// assert_eq!(addr_str, "fr111111111");
/// ```
///
/// All Fractal wallet addresses start with `fr`, and then they have a base-58 encoded string
/// representing `WALLET_ADDRESS_LEN+2` bytes. The first byte will be `0x00`, that the rest bytes
/// until `WALLET_ADDRESS_LEN` will compose the actual address, while the other two are the
/// checksum. That way addresses coming from user input can be verified:
///
/// ```
/// use std::str::FromStr;
/// use std::result::Result;
/// use fractal_utils::{WalletAddress, WALLET_ADDRESS_LEN};
///
/// let wallet: Result<WalletAddress, _> = "fr111111111".parse();
/// assert!(wallet.is_ok());
///
/// let wallet: Result<WalletAddress, _> = "fr111111112".parse();
/// assert!(wallet.is_err());
/// ```
///
/// The checksums are calculated by doing the `XOR` operation in all the bytes of the wallet address
/// and doing `XOR` of the checksum's first byte with the second one for each byte:
///
/// ```
/// # use fractal_utils::WALLET_ADDRESS_LEN;
/// #
/// let check_addr = [0x00, 0x11, 0x2A, 0x44, 0xCD, 0xFF, 0xE0];
/// # assert_eq!(check_addr.len(), WALLET_ADDRESS_LEN);
/// let mut checksum = [0u8; 2];
///
/// for b in &check_addr {
/// checksum[0] ^= *b;
/// checksum[1] ^= checksum[0];
/// }
///
/// assert_eq!(checksum, [0xAD, 0x07]);
/// ```
#[derive(Eq, PartialEq, PartialOrd, Ord, Debug, Clone, Copy, RustcEncodable, RustcDecodable)]
pub struct WalletAddress {
address: [u8; WALLET_ADDRESS_LEN],
}
impl WalletAddress {
/// Creates a new wallet address from raw data.
///
/// This should only be used if the raw input data is verified to be correct, ir it could lead
/// o a false address.
///
/// It will panic if the address does not start with byte `0x00`.
pub fn from_data(addr: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
assert_eq!(addr[0],
0x00,
"the provided address is not a correct Fractal Global wallet address, its \
first byt should be 0x00");
WalletAddress { address: addr }
}
/// Returns the wallet address bytes.
///
/// This could be useful to store the bytes in databases where space can be an issue, or where
/// fast search is required. It does not contain checksums nor any other verification mechanism.
pub fn get_raw(&self) -> &[u8] {
&self.address
}
}
impl From<[u8; WALLET_ADDRESS_LEN]> for WalletAddress {
fn from(other: [u8; WALLET_ADDRESS_LEN]) -> WalletAddress {
WalletAddress { address: other }
}
}
impl FromStr for WalletAddress {
type Err = WalletAddressParseError;
fn from_str(s: &str) -> Result<WalletAddress, WalletAddressParseError> {
if &s[0..2]!= "fr" {
return Err(WalletAddressParseError::new(s,
"the address does not start with \"fr\"",
None));
}
let bytes = match s[2..].from_base58() {
Ok(b) => b,
Err(FromBase58Error::InvalidBase58Byte(c, i)) => {
let new_error = FromBase58Error::InvalidBase58Byte(c, i + 2);
return Err(WalletAddressParseError::new(s,
&format!("the address is not a valid \
base-58 encoded string: {}",
new_error),
Some(new_error)));
}
};
if bytes[0]!= 0x00 {
return Err(WalletAddressParseError::new(s,
"the first byte of the address is not 0x00",
None));
}
let mut checksum = [0u8; 2];
for byte in &bytes[..WALLET_ADDRESS_LEN] {
checksum[0] ^= *byte;
checksum[1] ^= checksum[0];
}
if checksum[0]!= bytes[WALLET_ADDRESS_LEN] ||
checksum[1]!= bytes[WALLET_ADDRESS_LEN + 1] {
Err(WalletAddressParseError::new(s, "checksum fail", None))
} else {
let mut address = [0u8; WALLET_ADDRESS_LEN];
address.clone_from_slice(&bytes[..WALLET_ADDRESS_LEN]);
Ok(WalletAddress::from_data(address))
}
}
}
#[cfg(feature = "json-types")]
/// The `WalletAddress` type can easily be converted to json, using its `to_json()` method. Note
/// that this will return a `Json::String` with the wallet address as a string in it.
impl json::ToJson for WalletAddress {
fn to_json(&self) -> json::Json {
json::Json::String(format!("{}", self))
}
}
impl fmt::Display for WalletAddress {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut arr = [0u8; WALLET_ADDRESS_LEN + 2];
arr[0..WALLET_ADDRESS_LEN].clone_from_slice(&self.address);
for byte in &self.address {
arr[WALLET_ADDRESS_LEN] ^= *byte;
arr[WALLET_ADDRESS_LEN + 1] ^= arr[WALLET_ADDRESS_LEN];
}
write!(f, "fr{}", arr.to_base58())
}
}
/// Wallet address parsing error.
///
/// This struct represents a wallet address parsing error. It can be used to check the validity of
/// wallet address strings, and implements common `Error` and `Display` traits.
#[derive(Debug)]
pub struct WalletAddressParseError {
description: String,
cause: Option<FromBase58Error>,
}
impl WalletAddressParseError {
fn
|
<S: AsRef<str>>(wallet_address: S,
error: S,
cause: Option<FromBase58Error>)
-> WalletAddressParseError {
WalletAddressParseError {
description: format!("the wallet address {:?} is not a valid Fractal Global wallet \
address, {}",
wallet_address.as_ref(),
error.as_ref()),
cause: cause,
}
}
}
impl fmt::Display for WalletAddressParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description)
}
}
impl Error for WalletAddressParseError {
fn description(&self) -> &str {
&self.description
}
fn cause(&self) -> Option<&Error> {
None
}
}
|
new
|
identifier_name
|
lib.rs
|
//! MLCR: Machine-Learning-based Cache Replacement
//!
//! MLCR trains a neural network to "guess" how long time will pass before the cache block is
//! accessed again. In other words, it provides a qualified guess to approximate the ideal Bélády's
//! algorithm without a time machine.
//!
//! MLCR is slow, because it needs to train a neural network, but in many cases, the added
//! precision pays off by greatly reducing the number of cache misses. As such, it should only be
//! used when the cached medium is significantly slower than training the network (e.g. hard disks or
//! internet downloads).
extern crate crossbeam;
extern crate nn;
extern crate parking_lot;
use crossbeam::sync::SegQueue;
use nn::NN;
use parking_lot::{Mutex, MutexGuard};
use std::{cmp, f64};
use std::collections::{BinaryHeap, HashMap};
/// A clock tick count.
///
/// Every touch (i.e. read) increments the _clock_ yielding a new _tick_. This tick is roughly used
/// as a measure for the time passed (the actual time is irrelevant as it doesn't change the state
/// of the cache).
///
/// This tick count is used in the neural network model for the next hit prediction.
type Tick = u32;
/// The ID of a cache block.
///
/// The ID uniquely identifies a particular cache block inhabitant. It is used in the prediction
/// model and should thus be chosen carefully as representing the inner data (e.g. the disk
/// address) in order to achieve least cache misses.
pub type Id = u64;
/// A cache block.
///
/// This represents the state of a particular cache block.
struct Block {
/// The two last times the block was used.
last_used: [Tick; 2],
/// The tick where the block was added.
instated: Tick,
/// The number of times the block has been touched.
times_used: u32,
}
impl Block {
/// Convert the block data into a vector.
fn as_vec(&self, id: Id) -> Vec<f64> {
vec![id as f64, self.instated as f64, self.last_used[0] as f64, self.last_used[1] as f64,
self.times_used as f64]
}
}
/// A next usage prediction.
///
/// This contains a prediction produced by the neural network, estimating when is the next tick,
/// the block will be touched.
#[derive(PartialEq)]
struct Prediction {
/// The ID of the block we're predicting.
id: Id,
/// The prediction produced by the neural network.
///
/// Note that this does not represent a tick, but rather a monotone function thereof.
prediction: f64,
}
impl cmp::Ord for Prediction {
fn cmp(&self, other: &Prediction) -> cmp::Ordering {
if self.prediction < other.prediction {
cmp::Ordering::Less
} else {
|
}
}
impl cmp::PartialOrd for Prediction {
fn partial_cmp(&self, other: &Prediction) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl cmp::Eq for Prediction {}
/// An iterator over the coldest (best candidates for replacement) to hotter cache objects.
///
/// This iterators from the objects predicted to be used in the farthest future to the nearest
/// future.
///
/// In other words, this goes over the best to worse candidates for replacement, trimming, or
/// clearing.
pub struct ColdIter {
/// A binary heap over the predictions ordered by distance into the future.
heap: BinaryHeap<Prediction>,
}
impl Iterator for ColdIter {
type Item = Id;
fn next(&mut self) -> Option<Id> {
self.heap.pop().map(|Prediction { id,.. }| id)
}
}
/// A learning cache tracker.
///
/// This keeps track of cache blocks.
///
/// A cache block represents some data, which is not managed by the cache tracker. The cache block
/// is said to be _touched_ when this data is used in some way.
///
/// The _ideal replacement_ is the block which is used in the most distant future. As this is not
/// possible to know in advance, we make a prediction or a _approximate ideal replacement_, which
/// is based around various data points of the block such as the time of the last uses, or the
/// number of touches.
///
/// The aim of the cache tracker is to provided _approximate ideal replacements_. Numerous
/// algorithms for making these predictions exists (examples are LRU, PLRU, LFU, MFU, MRU, ARC,
/// etc.), but MLCR uses an approach which is radically different: It feeds the data points into a
/// neural network and lets this estimate the tick of the next touch.
pub struct Cache {
/// The blocks in this cache tracker.
blocks: HashMap<Id, Block>,
/// The neural network mapping blocks to the ticks of next touch.
nn: NN,
/// The clock.
///
/// This increments on every touch.
clock: Tick,
}
impl Cache {
/// Tick the clock.
fn tick(&mut self) {
self.clock += 1;
}
/// Create a new cache tracker.
pub fn new() -> Cache {
Cache {
blocks: HashMap::new(),
nn: NN::new(&[5, 6, 1]),
clock: 0,
}
}
/// Touch a cache block.
///
/// This should be called whenever the object `id` represents is used (read, written, etc.).
///
/// This will train the neural network with the new data.
pub fn touch(&mut self, id: Id) {
{
// Get the block we need.
let block = self.blocks.get_mut(&id).unwrap();
// Apply a bijective map from the clock to a float on the range (0,1), which can be
// fed to the network.
let goal = (self.clock as f64 * 0.01).tanh();
// Train the neural network with the existing data against the clock.
self.nn.train(&[(block.as_vec(id), vec![goal])]);
// Update the block with last used data.
block.last_used[0] = block.last_used[1];
block.last_used[1] = self.clock;
// Increment the frequency counter.
block.times_used += 1;
}
// Tick the clock.
self.tick();
}
/// Insert a new cache block into the cache tracker.
pub fn insert(&mut self, id: Id) {
self.blocks.insert(id, Block {
last_used: [!0; 2],
instated: self.clock,
times_used: 0,
});
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.blocks.remove(&id);
}
/// Get an iterator over blocks from cold to hot.
pub fn cold(&mut self) -> ColdIter {
// Build a heap over the predictions.
let mut heap = BinaryHeap::new();
for (&id, block) in self.blocks.iter() {
// Predict the next use.
let prediction = self.nn.run(&block.as_vec(id))[0];
// Push the prediction to the heap.
heap.push(Prediction {
id: id,
prediction: prediction,
});
}
ColdIter {
heap: heap,
}
}
/// Get at iterator over blocks to remove to trim the cache tracker to `to`.
///
/// Note that this won't remove the blocks, and this should be handled manually with the
/// `remove` method.
pub fn trim(&mut self, to: usize) -> ::std::iter::Take<ColdIter> {
self.cold().take(self.blocks.len() - to)
}
}
/// A cache operation.
enum CacheOperation {
/// Create a new cache block with some ID.
Insert(Id),
/// Remove a cache block.
Remove(Id),
/// Touch some block.
Touch(Id),
}
/// A concurrent cache tracker.
///
/// This has two parts to it:
///
/// - A normal cache tracker, protected by a lock.
/// - A queue of cache operations that will be executed when the lock is acquired.
pub struct ConcurrentCache {
/// The inner cache tracker, protected by a lock.
inner: Mutex<Cache>,
/// The cache tracker operation queue.
///
/// In order to avoid excessively locking and unlocking the cache tracker, we buffer the
/// operations, which will then be executed in one go, when needed.
queue: SegQueue<CacheOperation>,
}
impl ConcurrentCache {
/// Create a new concurrent cache tracker.
pub fn new() -> ConcurrentCache {
ConcurrentCache {
inner: Mutex::new(Cache::new()),
queue: SegQueue::new(),
}
}
/// Lock the inner cache.
pub fn lock(&self) -> MutexGuard<Cache> {
// Lock the cache tracker.
let mut lock = self.inner.lock();
// Commit the buffered operations to the tracker.
while let Some(op) = self.queue.try_pop() {
match op {
CacheOperation::Insert(id) => lock.insert(id),
CacheOperation::Remove(id) => lock.remove(id),
CacheOperation::Touch(id) => lock.touch(id),
}
}
lock
}
/// Insert a new cache block.
pub fn insert(&mut self, id: Id) {
self.queue.push(CacheOperation::Insert(id));
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.queue.push(CacheOperation::Remove(id));
}
/// Touch a cache block.
pub fn touch(&mut self, id: Id) {
self.queue.push(CacheOperation::Touch(id));
}
}
|
cmp::Ordering::Greater
}
|
conditional_block
|
lib.rs
|
//! MLCR: Machine-Learning-based Cache Replacement
//!
//! MLCR trains a neural network to "guess" how long time will pass before the cache block is
//! accessed again. In other words, it provides a qualified guess to approximate the ideal Bélády's
//! algorithm without a time machine.
//!
//! MLCR is slow, because it needs to train a neural network, but in many cases, the added
//! precision pays off by greatly reducing the number of cache misses. As such, it should only be
//! used when the cached medium is significantly slower than training the network (e.g. hard disks or
//! internet downloads).
extern crate crossbeam;
extern crate nn;
extern crate parking_lot;
use crossbeam::sync::SegQueue;
use nn::NN;
use parking_lot::{Mutex, MutexGuard};
use std::{cmp, f64};
use std::collections::{BinaryHeap, HashMap};
/// A clock tick count.
///
/// Every touch (i.e. read) increments the _clock_ yielding a new _tick_. This tick is roughly used
/// as a measure for the time passed (the actual time is irrelevant as it doesn't change the state
/// of the cache).
///
/// This tick count is used in the neural network model for the next hit prediction.
type Tick = u32;
/// The ID of a cache block.
///
/// The ID uniquely identifies a particular cache block inhabitant. It is used in the prediction
/// model and should thus be chosen carefully as representing the inner data (e.g. the disk
/// address) in order to achieve least cache misses.
pub type Id = u64;
/// A cache block.
///
/// This represents the state of a particular cache block.
struct Block {
/// The two last times the block was used.
last_used: [Tick; 2],
/// The tick where the block was added.
instated: Tick,
/// The number of times the block has been touched.
times_used: u32,
}
impl Block {
/// Convert the block data into a vector.
fn as_vec(&self, id: Id) -> Vec<f64> {
vec![id as f64, self.instated as f64, self.last_used[0] as f64, self.last_used[1] as f64,
self.times_used as f64]
}
}
/// A next usage prediction.
///
/// This contains a prediction produced by the neural network, estimating when is the next tick,
/// the block will be touched.
#[derive(PartialEq)]
struct Prediction {
/// The ID of the block we're predicting.
id: Id,
/// The prediction produced by the neural network.
///
/// Note that this does not represent a tick, but rather a monotone function thereof.
prediction: f64,
}
impl cmp::Ord for Prediction {
fn cmp(&self, other: &Prediction) -> cmp::Ordering {
if self.prediction < other.prediction {
cmp::Ordering::Less
} else {
cmp::Ordering::Greater
}
}
}
impl cmp::PartialOrd for Prediction {
fn partial_cmp(&self, other: &Prediction) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl cmp::Eq for Prediction {}
/// An iterator over the coldest (best candidates for replacement) to hotter cache objects.
///
/// This iterators from the objects predicted to be used in the farthest future to the nearest
/// future.
///
/// In other words, this goes over the best to worse candidates for replacement, trimming, or
/// clearing.
pub struct ColdIter {
/// A binary heap over the predictions ordered by distance into the future.
heap: BinaryHeap<Prediction>,
}
impl Iterator for ColdIter {
type Item = Id;
fn next(&mut self) -> Option<Id> {
self.heap.pop().map(|Prediction { id,.. }| id)
}
}
/// A learning cache tracker.
///
/// This keeps track of cache blocks.
///
/// A cache block represents some data, which is not managed by the cache tracker. The cache block
/// is said to be _touched_ when this data is used in some way.
///
/// The _ideal replacement_ is the block which is used in the most distant future. As this is not
/// possible to know in advance, we make a prediction or a _approximate ideal replacement_, which
/// is based around various data points of the block such as the time of the last uses, or the
/// number of touches.
///
/// The aim of the cache tracker is to provided _approximate ideal replacements_. Numerous
/// algorithms for making these predictions exists (examples are LRU, PLRU, LFU, MFU, MRU, ARC,
/// etc.), but MLCR uses an approach which is radically different: It feeds the data points into a
/// neural network and lets this estimate the tick of the next touch.
pub struct Cache {
/// The blocks in this cache tracker.
blocks: HashMap<Id, Block>,
/// The neural network mapping blocks to the ticks of next touch.
nn: NN,
/// The clock.
///
/// This increments on every touch.
clock: Tick,
}
impl Cache {
/// Tick the clock.
fn tick(&mut self) {
self.clock += 1;
}
/// Create a new cache tracker.
pub fn new() -> Cache {
Cache {
blocks: HashMap::new(),
nn: NN::new(&[5, 6, 1]),
clock: 0,
}
}
/// Touch a cache block.
///
/// This should be called whenever the object `id` represents is used (read, written, etc.).
///
/// This will train the neural network with the new data.
pub fn touch(&mut self, id: Id) {
{
// Get the block we need.
let block = self.blocks.get_mut(&id).unwrap();
// Apply a bijective map from the clock to a float on the range (0,1), which can be
// fed to the network.
let goal = (self.clock as f64 * 0.01).tanh();
// Train the neural network with the existing data against the clock.
self.nn.train(&[(block.as_vec(id), vec![goal])]);
// Update the block with last used data.
block.last_used[0] = block.last_used[1];
block.last_used[1] = self.clock;
// Increment the frequency counter.
block.times_used += 1;
}
// Tick the clock.
self.tick();
}
/// Insert a new cache block into the cache tracker.
pub fn insert(&mut self, id: Id) {
self.blocks.insert(id, Block {
last_used: [!0; 2],
instated: self.clock,
times_used: 0,
});
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.blocks.remove(&id);
}
/// Get an iterator over blocks from cold to hot.
pub fn cold(&mut self) -> ColdIter {
// Build a heap over the predictions.
let mut heap = BinaryHeap::new();
for (&id, block) in self.blocks.iter() {
// Predict the next use.
let prediction = self.nn.run(&block.as_vec(id))[0];
// Push the prediction to the heap.
heap.push(Prediction {
id: id,
prediction: prediction,
});
}
ColdIter {
heap: heap,
}
}
/// Get at iterator over blocks to remove to trim the cache tracker to `to`.
///
/// Note that this won't remove the blocks, and this should be handled manually with the
/// `remove` method.
pub fn trim(&mut self, to: usize) -> ::std::iter::Take<ColdIter> {
self.cold().take(self.blocks.len() - to)
}
}
/// A cache operation.
enum CacheOperation {
/// Create a new cache block with some ID.
Insert(Id),
/// Remove a cache block.
Remove(Id),
/// Touch some block.
Touch(Id),
}
/// A concurrent cache tracker.
///
/// This has two parts to it:
///
/// - A normal cache tracker, protected by a lock.
/// - A queue of cache operations that will be executed when the lock is acquired.
pub struct ConcurrentCache {
/// The inner cache tracker, protected by a lock.
inner: Mutex<Cache>,
/// The cache tracker operation queue.
///
/// In order to avoid excessively locking and unlocking the cache tracker, we buffer the
/// operations, which will then be executed in one go, when needed.
queue: SegQueue<CacheOperation>,
}
impl ConcurrentCache {
/// Create a new concurrent cache tracker.
pub fn ne
|
-> ConcurrentCache {
ConcurrentCache {
inner: Mutex::new(Cache::new()),
queue: SegQueue::new(),
}
}
/// Lock the inner cache.
pub fn lock(&self) -> MutexGuard<Cache> {
// Lock the cache tracker.
let mut lock = self.inner.lock();
// Commit the buffered operations to the tracker.
while let Some(op) = self.queue.try_pop() {
match op {
CacheOperation::Insert(id) => lock.insert(id),
CacheOperation::Remove(id) => lock.remove(id),
CacheOperation::Touch(id) => lock.touch(id),
}
}
lock
}
/// Insert a new cache block.
pub fn insert(&mut self, id: Id) {
self.queue.push(CacheOperation::Insert(id));
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.queue.push(CacheOperation::Remove(id));
}
/// Touch a cache block.
pub fn touch(&mut self, id: Id) {
self.queue.push(CacheOperation::Touch(id));
}
}
|
w()
|
identifier_name
|
lib.rs
|
//! MLCR: Machine-Learning-based Cache Replacement
//!
//! MLCR trains a neural network to "guess" how long time will pass before the cache block is
//! accessed again. In other words, it provides a qualified guess to approximate the ideal Bélády's
//! algorithm without a time machine.
//!
//! MLCR is slow, because it needs to train a neural network, but in many cases, the added
//! precision pays off by greatly reducing the number of cache misses. As such, it should only be
//! used when the cached medium is significantly slower than training the network (e.g. hard disks or
//! internet downloads).
extern crate crossbeam;
extern crate nn;
extern crate parking_lot;
use crossbeam::sync::SegQueue;
use nn::NN;
use parking_lot::{Mutex, MutexGuard};
use std::{cmp, f64};
use std::collections::{BinaryHeap, HashMap};
/// A clock tick count.
///
/// Every touch (i.e. read) increments the _clock_ yielding a new _tick_. This tick is roughly used
/// as a measure for the time passed (the actual time is irrelevant as it doesn't change the state
/// of the cache).
///
/// This tick count is used in the neural network model for the next hit prediction.
type Tick = u32;
/// The ID of a cache block.
///
/// The ID uniquely identifies a particular cache block inhabitant. It is used in the prediction
/// model and should thus be chosen carefully as representing the inner data (e.g. the disk
/// address) in order to achieve least cache misses.
pub type Id = u64;
/// A cache block.
///
/// This represents the state of a particular cache block.
struct Block {
/// The two last times the block was used.
last_used: [Tick; 2],
/// The tick where the block was added.
instated: Tick,
/// The number of times the block has been touched.
times_used: u32,
}
impl Block {
/// Convert the block data into a vector.
fn as_vec(&self, id: Id) -> Vec<f64> {
vec![id as f64, self.instated as f64, self.last_used[0] as f64, self.last_used[1] as f64,
self.times_used as f64]
}
}
/// A next usage prediction.
///
/// This contains a prediction produced by the neural network, estimating when is the next tick,
/// the block will be touched.
#[derive(PartialEq)]
struct Prediction {
/// The ID of the block we're predicting.
id: Id,
/// The prediction produced by the neural network.
///
/// Note that this does not represent a tick, but rather a monotone function thereof.
prediction: f64,
}
impl cmp::Ord for Prediction {
fn cmp(&self, other: &Prediction) -> cmp::Ordering {
if self.prediction < other.prediction {
cmp::Ordering::Less
} else {
cmp::Ordering::Greater
}
}
}
impl cmp::PartialOrd for Prediction {
fn partial_cmp(&self, other: &Prediction) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl cmp::Eq for Prediction {}
/// An iterator over the coldest (best candidates for replacement) to hotter cache objects.
///
/// This iterators from the objects predicted to be used in the farthest future to the nearest
/// future.
///
/// In other words, this goes over the best to worse candidates for replacement, trimming, or
/// clearing.
pub struct ColdIter {
/// A binary heap over the predictions ordered by distance into the future.
heap: BinaryHeap<Prediction>,
}
impl Iterator for ColdIter {
type Item = Id;
fn next(&mut self) -> Option<Id> {
self.heap.pop().map(|Prediction { id,.. }| id)
}
}
/// A learning cache tracker.
///
/// This keeps track of cache blocks.
///
/// A cache block represents some data, which is not managed by the cache tracker. The cache block
/// is said to be _touched_ when this data is used in some way.
///
/// The _ideal replacement_ is the block which is used in the most distant future. As this is not
/// possible to know in advance, we make a prediction or a _approximate ideal replacement_, which
/// is based around various data points of the block such as the time of the last uses, or the
/// number of touches.
///
/// The aim of the cache tracker is to provided _approximate ideal replacements_. Numerous
/// algorithms for making these predictions exists (examples are LRU, PLRU, LFU, MFU, MRU, ARC,
/// etc.), but MLCR uses an approach which is radically different: It feeds the data points into a
/// neural network and lets this estimate the tick of the next touch.
pub struct Cache {
/// The blocks in this cache tracker.
blocks: HashMap<Id, Block>,
/// The neural network mapping blocks to the ticks of next touch.
nn: NN,
/// The clock.
///
/// This increments on every touch.
clock: Tick,
}
impl Cache {
/// Tick the clock.
fn tick(&mut self) {
self.clock += 1;
}
/// Create a new cache tracker.
pub fn new() -> Cache {
Cache {
|
}
/// Touch a cache block.
///
/// This should be called whenever the object `id` represents is used (read, written, etc.).
///
/// This will train the neural network with the new data.
pub fn touch(&mut self, id: Id) {
{
// Get the block we need.
let block = self.blocks.get_mut(&id).unwrap();
// Apply a bijective map from the clock to a float on the range (0,1), which can be
// fed to the network.
let goal = (self.clock as f64 * 0.01).tanh();
// Train the neural network with the existing data against the clock.
self.nn.train(&[(block.as_vec(id), vec![goal])]);
// Update the block with last used data.
block.last_used[0] = block.last_used[1];
block.last_used[1] = self.clock;
// Increment the frequency counter.
block.times_used += 1;
}
// Tick the clock.
self.tick();
}
/// Insert a new cache block into the cache tracker.
pub fn insert(&mut self, id: Id) {
self.blocks.insert(id, Block {
last_used: [!0; 2],
instated: self.clock,
times_used: 0,
});
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.blocks.remove(&id);
}
/// Get an iterator over blocks from cold to hot.
pub fn cold(&mut self) -> ColdIter {
// Build a heap over the predictions.
let mut heap = BinaryHeap::new();
for (&id, block) in self.blocks.iter() {
// Predict the next use.
let prediction = self.nn.run(&block.as_vec(id))[0];
// Push the prediction to the heap.
heap.push(Prediction {
id: id,
prediction: prediction,
});
}
ColdIter {
heap: heap,
}
}
/// Get at iterator over blocks to remove to trim the cache tracker to `to`.
///
/// Note that this won't remove the blocks, and this should be handled manually with the
/// `remove` method.
pub fn trim(&mut self, to: usize) -> ::std::iter::Take<ColdIter> {
self.cold().take(self.blocks.len() - to)
}
}
/// A cache operation.
enum CacheOperation {
/// Create a new cache block with some ID.
Insert(Id),
/// Remove a cache block.
Remove(Id),
/// Touch some block.
Touch(Id),
}
/// A concurrent cache tracker.
///
/// This has two parts to it:
///
/// - A normal cache tracker, protected by a lock.
/// - A queue of cache operations that will be executed when the lock is acquired.
pub struct ConcurrentCache {
/// The inner cache tracker, protected by a lock.
inner: Mutex<Cache>,
/// The cache tracker operation queue.
///
/// In order to avoid excessively locking and unlocking the cache tracker, we buffer the
/// operations, which will then be executed in one go, when needed.
queue: SegQueue<CacheOperation>,
}
impl ConcurrentCache {
/// Create a new concurrent cache tracker.
pub fn new() -> ConcurrentCache {
ConcurrentCache {
inner: Mutex::new(Cache::new()),
queue: SegQueue::new(),
}
}
/// Lock the inner cache.
pub fn lock(&self) -> MutexGuard<Cache> {
// Lock the cache tracker.
let mut lock = self.inner.lock();
// Commit the buffered operations to the tracker.
while let Some(op) = self.queue.try_pop() {
match op {
CacheOperation::Insert(id) => lock.insert(id),
CacheOperation::Remove(id) => lock.remove(id),
CacheOperation::Touch(id) => lock.touch(id),
}
}
lock
}
/// Insert a new cache block.
pub fn insert(&mut self, id: Id) {
self.queue.push(CacheOperation::Insert(id));
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.queue.push(CacheOperation::Remove(id));
}
/// Touch a cache block.
pub fn touch(&mut self, id: Id) {
self.queue.push(CacheOperation::Touch(id));
}
}
|
blocks: HashMap::new(),
nn: NN::new(&[5, 6, 1]),
clock: 0,
}
|
random_line_split
|
lib.rs
|
//! MLCR: Machine-Learning-based Cache Replacement
//!
//! MLCR trains a neural network to "guess" how long time will pass before the cache block is
//! accessed again. In other words, it provides a qualified guess to approximate the ideal Bélády's
//! algorithm without a time machine.
//!
//! MLCR is slow, because it needs to train a neural network, but in many cases, the added
//! precision pays off by greatly reducing the number of cache misses. As such, it should only be
//! used when the cached medium is significantly slower than training the network (e.g. hard disks or
//! internet downloads).
extern crate crossbeam;
extern crate nn;
extern crate parking_lot;
use crossbeam::sync::SegQueue;
use nn::NN;
use parking_lot::{Mutex, MutexGuard};
use std::{cmp, f64};
use std::collections::{BinaryHeap, HashMap};
/// A clock tick count.
///
/// Every touch (i.e. read) increments the _clock_ yielding a new _tick_. This tick is roughly used
/// as a measure for the time passed (the actual time is irrelevant as it doesn't change the state
/// of the cache).
///
/// This tick count is used in the neural network model for the next hit prediction.
type Tick = u32;
/// The ID of a cache block.
///
/// The ID uniquely identifies a particular cache block inhabitant. It is used in the prediction
/// model and should thus be chosen carefully as representing the inner data (e.g. the disk
/// address) in order to achieve least cache misses.
pub type Id = u64;
/// A cache block.
///
/// This represents the state of a particular cache block.
struct Block {
/// The two last times the block was used.
last_used: [Tick; 2],
/// The tick where the block was added.
instated: Tick,
/// The number of times the block has been touched.
times_used: u32,
}
impl Block {
/// Convert the block data into a vector.
fn as_vec(&self, id: Id) -> Vec<f64> {
vec![id as f64, self.instated as f64, self.last_used[0] as f64, self.last_used[1] as f64,
self.times_used as f64]
}
}
/// A next usage prediction.
///
/// This contains a prediction produced by the neural network, estimating when is the next tick,
/// the block will be touched.
#[derive(PartialEq)]
struct Prediction {
/// The ID of the block we're predicting.
id: Id,
/// The prediction produced by the neural network.
///
/// Note that this does not represent a tick, but rather a monotone function thereof.
prediction: f64,
}
impl cmp::Ord for Prediction {
fn cmp(&self, other: &Prediction) -> cmp::Ordering {
if self.prediction < other.prediction {
cmp::Ordering::Less
} else {
cmp::Ordering::Greater
}
}
}
impl cmp::PartialOrd for Prediction {
fn partial_cmp(&self, other: &Prediction) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl cmp::Eq for Prediction {}
/// An iterator over the coldest (best candidates for replacement) to hotter cache objects.
///
/// This iterators from the objects predicted to be used in the farthest future to the nearest
/// future.
///
/// In other words, this goes over the best to worse candidates for replacement, trimming, or
/// clearing.
pub struct ColdIter {
/// A binary heap over the predictions ordered by distance into the future.
heap: BinaryHeap<Prediction>,
}
impl Iterator for ColdIter {
type Item = Id;
fn next(&mut self) -> Option<Id> {
self.heap.pop().map(|Prediction { id,.. }| id)
}
}
/// A learning cache tracker.
///
/// This keeps track of cache blocks.
///
/// A cache block represents some data, which is not managed by the cache tracker. The cache block
/// is said to be _touched_ when this data is used in some way.
///
/// The _ideal replacement_ is the block which is used in the most distant future. As this is not
/// possible to know in advance, we make a prediction or a _approximate ideal replacement_, which
/// is based around various data points of the block such as the time of the last uses, or the
/// number of touches.
///
/// The aim of the cache tracker is to provided _approximate ideal replacements_. Numerous
/// algorithms for making these predictions exists (examples are LRU, PLRU, LFU, MFU, MRU, ARC,
/// etc.), but MLCR uses an approach which is radically different: It feeds the data points into a
/// neural network and lets this estimate the tick of the next touch.
pub struct Cache {
/// The blocks in this cache tracker.
blocks: HashMap<Id, Block>,
/// The neural network mapping blocks to the ticks of next touch.
nn: NN,
/// The clock.
///
/// This increments on every touch.
clock: Tick,
}
impl Cache {
/// Tick the clock.
fn tick(&mut self) {
self.clock += 1;
}
/// Create a new cache tracker.
pub fn new() -> Cache {
Cache {
blocks: HashMap::new(),
nn: NN::new(&[5, 6, 1]),
clock: 0,
}
}
/// Touch a cache block.
///
/// This should be called whenever the object `id` represents is used (read, written, etc.).
///
/// This will train the neural network with the new data.
pub fn touch(&mut self, id: Id) {
{
// Get the block we need.
let block = self.blocks.get_mut(&id).unwrap();
// Apply a bijective map from the clock to a float on the range (0,1), which can be
// fed to the network.
let goal = (self.clock as f64 * 0.01).tanh();
// Train the neural network with the existing data against the clock.
self.nn.train(&[(block.as_vec(id), vec![goal])]);
// Update the block with last used data.
block.last_used[0] = block.last_used[1];
block.last_used[1] = self.clock;
// Increment the frequency counter.
block.times_used += 1;
}
// Tick the clock.
self.tick();
}
/// Insert a new cache block into the cache tracker.
pub fn insert(&mut self, id: Id) {
self.blocks.insert(id, Block {
last_used: [!0; 2],
instated: self.clock,
times_used: 0,
});
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.blocks.remove(&id);
}
/// Get an iterator over blocks from cold to hot.
pub fn cold(&mut self) -> ColdIter {
// Build a heap over the predictions.
let mut heap = BinaryHeap::new();
for (&id, block) in self.blocks.iter() {
// Predict the next use.
let prediction = self.nn.run(&block.as_vec(id))[0];
// Push the prediction to the heap.
heap.push(Prediction {
id: id,
prediction: prediction,
});
}
ColdIter {
heap: heap,
}
}
/// Get at iterator over blocks to remove to trim the cache tracker to `to`.
///
/// Note that this won't remove the blocks, and this should be handled manually with the
/// `remove` method.
pub fn trim(&mut self, to: usize) -> ::std::iter::Take<ColdIter> {
|
/// A cache operation.
enum CacheOperation {
/// Create a new cache block with some ID.
Insert(Id),
/// Remove a cache block.
Remove(Id),
/// Touch some block.
Touch(Id),
}
/// A concurrent cache tracker.
///
/// This has two parts to it:
///
/// - A normal cache tracker, protected by a lock.
/// - A queue of cache operations that will be executed when the lock is acquired.
pub struct ConcurrentCache {
/// The inner cache tracker, protected by a lock.
inner: Mutex<Cache>,
/// The cache tracker operation queue.
///
/// In order to avoid excessively locking and unlocking the cache tracker, we buffer the
/// operations, which will then be executed in one go, when needed.
queue: SegQueue<CacheOperation>,
}
impl ConcurrentCache {
/// Create a new concurrent cache tracker.
pub fn new() -> ConcurrentCache {
ConcurrentCache {
inner: Mutex::new(Cache::new()),
queue: SegQueue::new(),
}
}
/// Lock the inner cache.
pub fn lock(&self) -> MutexGuard<Cache> {
// Lock the cache tracker.
let mut lock = self.inner.lock();
// Commit the buffered operations to the tracker.
while let Some(op) = self.queue.try_pop() {
match op {
CacheOperation::Insert(id) => lock.insert(id),
CacheOperation::Remove(id) => lock.remove(id),
CacheOperation::Touch(id) => lock.touch(id),
}
}
lock
}
/// Insert a new cache block.
pub fn insert(&mut self, id: Id) {
self.queue.push(CacheOperation::Insert(id));
}
/// Remove a cache block.
pub fn remove(&mut self, id: Id) {
self.queue.push(CacheOperation::Remove(id));
}
/// Touch a cache block.
pub fn touch(&mut self, id: Id) {
self.queue.push(CacheOperation::Touch(id));
}
}
|
self.cold().take(self.blocks.len() - to)
}
}
|
identifier_body
|
default_author.rs
|
/*
* Copyright 2013 Brandon Sanderson
*
* This file is part of Evict-BT.
*
* Evict-BT is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evict-BT is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Evict-BT. If not, see <http://www.gnu.org/licenses/>.
*/
use config;
use std::error::Error;
pub fn default_author(mut args:Vec<String>) -> isize {
if args.len() > 1 {
println!("default-author usage: evict default-author [new-author]");
1
}else{
let config = config::Config::load();
if args.len() == 0 {
match config.author {
Some(author) => println!("{}", author),
None => println!("No author set")
};
0
}else{
//How do we get values out of a Vec nicely? Can't move when indexing...
let save_result = config::Config{author:Some(args.swap_remove(0)),.. config}.save();
|
_ => {}
}
0
}
}
}
|
match save_result {
Err(e) => println!("Failed to save config: {}", e.description()),
|
random_line_split
|
default_author.rs
|
/*
* Copyright 2013 Brandon Sanderson
*
* This file is part of Evict-BT.
*
* Evict-BT is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evict-BT is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Evict-BT. If not, see <http://www.gnu.org/licenses/>.
*/
use config;
use std::error::Error;
pub fn default_author(mut args:Vec<String>) -> isize
|
}
}
}
|
{
if args.len() > 1 {
println!("default-author usage: evict default-author [new-author]");
1
}else{
let config = config::Config::load();
if args.len() == 0 {
match config.author {
Some(author) => println!("{}", author),
None => println!("No author set")
};
0
}else{
//How do we get values out of a Vec nicely? Can't move when indexing...
let save_result = config::Config{author:Some(args.swap_remove(0)), .. config}.save();
match save_result {
Err(e) => println!("Failed to save config: {}", e.description()),
_ => {}
}
0
|
identifier_body
|
default_author.rs
|
/*
* Copyright 2013 Brandon Sanderson
*
* This file is part of Evict-BT.
*
* Evict-BT is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Evict-BT is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Evict-BT. If not, see <http://www.gnu.org/licenses/>.
*/
use config;
use std::error::Error;
pub fn
|
(mut args:Vec<String>) -> isize {
if args.len() > 1 {
println!("default-author usage: evict default-author [new-author]");
1
}else{
let config = config::Config::load();
if args.len() == 0 {
match config.author {
Some(author) => println!("{}", author),
None => println!("No author set")
};
0
}else{
//How do we get values out of a Vec nicely? Can't move when indexing...
let save_result = config::Config{author:Some(args.swap_remove(0)),.. config}.save();
match save_result {
Err(e) => println!("Failed to save config: {}", e.description()),
_ => {}
}
0
}
}
}
|
default_author
|
identifier_name
|
mm.rs
|
use core::option::Option;
use arch::PAGE_SIZE;
use World;
use titanium::hw;
extern {
static _pt_start: u8;
static _pt_end: u8;
}
pub struct PageArena {
pub start : usize,
pub end : usize,
pub current : usize,
}
impl PageArena {
pub fn new(start : usize, end : usize) -> PageArena {
// TODO: check alignment and bug if wrong
PageArena {
start: start,
end: end,
current: start,
}
}
pub fn get(&mut self) -> Option<usize> {
if self.current < self.end {
let ret = self.current;
self.current = ret + PAGE_SIZE;
Option::Some(ret)
} else
|
}
}
static mut pool : PageArena = PageArena {
start: 0,
end: 0,
current: 0,
};
pub fn preinit() -> &'static mut PageArena {
let start : usize = &_pt_start as *const u8 as usize;
let end : usize = &_pt_end as *const u8 as usize;
unsafe {
pool.start = start;
pool.end = end;
pool.current = start;
&mut pool
}
}
pub fn init(world : &mut World<hw::Real>) {
//let _table = PageTableRoot::new(world);
}
|
{
Option::None
}
|
conditional_block
|
mm.rs
|
use core::option::Option;
use arch::PAGE_SIZE;
use World;
use titanium::hw;
extern {
static _pt_start: u8;
static _pt_end: u8;
}
pub struct PageArena {
pub start : usize,
pub end : usize,
pub current : usize,
}
impl PageArena {
pub fn new(start : usize, end : usize) -> PageArena {
// TODO: check alignment and bug if wrong
PageArena {
start: start,
end: end,
current: start,
}
}
pub fn
|
(&mut self) -> Option<usize> {
if self.current < self.end {
let ret = self.current;
self.current = ret + PAGE_SIZE;
Option::Some(ret)
} else {
Option::None
}
}
}
static mut pool : PageArena = PageArena {
start: 0,
end: 0,
current: 0,
};
pub fn preinit() -> &'static mut PageArena {
let start : usize = &_pt_start as *const u8 as usize;
let end : usize = &_pt_end as *const u8 as usize;
unsafe {
pool.start = start;
pool.end = end;
pool.current = start;
&mut pool
}
}
pub fn init(world : &mut World<hw::Real>) {
//let _table = PageTableRoot::new(world);
}
|
get
|
identifier_name
|
mm.rs
|
use core::option::Option;
use arch::PAGE_SIZE;
use World;
use titanium::hw;
extern {
static _pt_start: u8;
static _pt_end: u8;
}
pub struct PageArena {
pub start : usize,
pub end : usize,
pub current : usize,
}
impl PageArena {
pub fn new(start : usize, end : usize) -> PageArena {
// TODO: check alignment and bug if wrong
PageArena {
start: start,
end: end,
current: start,
}
}
pub fn get(&mut self) -> Option<usize> {
if self.current < self.end {
let ret = self.current;
self.current = ret + PAGE_SIZE;
Option::Some(ret)
} else {
Option::None
}
}
}
static mut pool : PageArena = PageArena {
start: 0,
end: 0,
current: 0,
};
|
let start : usize = &_pt_start as *const u8 as usize;
let end : usize = &_pt_end as *const u8 as usize;
unsafe {
pool.start = start;
pool.end = end;
pool.current = start;
&mut pool
}
}
pub fn init(world : &mut World<hw::Real>) {
//let _table = PageTableRoot::new(world);
}
|
pub fn preinit() -> &'static mut PageArena {
|
random_line_split
|
csv.rs
|
extern crate nalgebra;
use nalgebra::*;
use om::koe::*;
use om::cb::*;
use tick::*;
use push::*;
use std::rc::*;
/// #Cartesian State Vectors
/// This structure represents an orbit using a
/// radius vector and a velocity vector.
/// It holds a reference to the central body.
#[derive(Clone)]
pub struct CSV {
|
pub cb: Rc<CB>,
}
impl Tick for CSV {
fn tick(&self, dt: f64) -> Self {
CSV {
r: self.r + self.v * dt,
cb: self.cb.clone(),
..*self
}
}
}
impl Push for CSV {
fn push(&self, dv: Vec3<f64>) -> Self {
CSV {
v: self.v + dv,
cb: self.cb.clone(),
..*self
}
}
}
impl CSV {
/// Construct CSV from position and velocity.
pub fn new(r: Vec3<f64>, v: Vec3<f64>, cb: Rc<CB>) -> CSV {
CSV {
r: r,
v: v,
cb: cb,
}
}
/// Construct CSV from KOE.
pub fn from_koe(koe: KOE) -> CSV {
// Mean anomaly
let m0 = koe.m0;
// Number of iterations for newton_raphson
let iterations = 10;
// Eccentric anomaly
let ea = CSV::newton_raphson(&m0, &koe.e, &iterations);
// True anomaly
let ta = 2.0*((1.0+koe.e).sqrt()*(ea/2.0).sin())
.atan2((1.0-koe.e).sqrt()*(ea/2.0).cos());
// Distance to the center of the central body
let dist = koe.a*(1.0-koe.e*ea.cos());
// Radius vector in i, j plane
let mut r = (koe.cb.i*ta.cos() + koe.cb.j*ta.sin()) * dist;
// Velocity in i, j plane
let mut v = (koe.cb.i*(-ea.sin()) +
koe.cb.j*((1.0-koe.e.powf(2.0)).sqrt()*ea.cos())) * ((koe.cb.mu*koe.a).sqrt()/dist);
// Radius vector in orbital plane
r = koe.rot.transform(&r);
// Velocity in orbital plane
v = koe.rot.transform(&v);
CSV::new(r, v, koe.cb.clone())
}
// Function that numerically solves Kepler's equation
fn newton_raphson(m0: &f64, e: &f64, iterations: &i32) -> f64 {
let mut ea = m0.clone();
for _ in 0..*iterations {
ea = ea - (ea - e*ea.sin() - m0)/(1.0 - e*ea.cos());
}
ea
}
}
|
/// Radius vector.
pub r: Vec3<f64>,
/// Velocity.
pub v: Vec3<f64>,
/// Reference to the central body.
|
random_line_split
|
csv.rs
|
extern crate nalgebra;
use nalgebra::*;
use om::koe::*;
use om::cb::*;
use tick::*;
use push::*;
use std::rc::*;
/// #Cartesian State Vectors
/// This structure represents an orbit using a
/// radius vector and a velocity vector.
/// It holds a reference to the central body.
#[derive(Clone)]
pub struct CSV {
/// Radius vector.
pub r: Vec3<f64>,
/// Velocity.
pub v: Vec3<f64>,
/// Reference to the central body.
pub cb: Rc<CB>,
}
impl Tick for CSV {
fn
|
(&self, dt: f64) -> Self {
CSV {
r: self.r + self.v * dt,
cb: self.cb.clone(),
..*self
}
}
}
impl Push for CSV {
fn push(&self, dv: Vec3<f64>) -> Self {
CSV {
v: self.v + dv,
cb: self.cb.clone(),
..*self
}
}
}
impl CSV {
/// Construct CSV from position and velocity.
pub fn new(r: Vec3<f64>, v: Vec3<f64>, cb: Rc<CB>) -> CSV {
CSV {
r: r,
v: v,
cb: cb,
}
}
/// Construct CSV from KOE.
pub fn from_koe(koe: KOE) -> CSV {
// Mean anomaly
let m0 = koe.m0;
// Number of iterations for newton_raphson
let iterations = 10;
// Eccentric anomaly
let ea = CSV::newton_raphson(&m0, &koe.e, &iterations);
// True anomaly
let ta = 2.0*((1.0+koe.e).sqrt()*(ea/2.0).sin())
.atan2((1.0-koe.e).sqrt()*(ea/2.0).cos());
// Distance to the center of the central body
let dist = koe.a*(1.0-koe.e*ea.cos());
// Radius vector in i, j plane
let mut r = (koe.cb.i*ta.cos() + koe.cb.j*ta.sin()) * dist;
// Velocity in i, j plane
let mut v = (koe.cb.i*(-ea.sin()) +
koe.cb.j*((1.0-koe.e.powf(2.0)).sqrt()*ea.cos())) * ((koe.cb.mu*koe.a).sqrt()/dist);
// Radius vector in orbital plane
r = koe.rot.transform(&r);
// Velocity in orbital plane
v = koe.rot.transform(&v);
CSV::new(r, v, koe.cb.clone())
}
// Function that numerically solves Kepler's equation
fn newton_raphson(m0: &f64, e: &f64, iterations: &i32) -> f64 {
let mut ea = m0.clone();
for _ in 0..*iterations {
ea = ea - (ea - e*ea.sin() - m0)/(1.0 - e*ea.cos());
}
ea
}
}
|
tick
|
identifier_name
|
csv.rs
|
extern crate nalgebra;
use nalgebra::*;
use om::koe::*;
use om::cb::*;
use tick::*;
use push::*;
use std::rc::*;
/// #Cartesian State Vectors
/// This structure represents an orbit using a
/// radius vector and a velocity vector.
/// It holds a reference to the central body.
#[derive(Clone)]
pub struct CSV {
/// Radius vector.
pub r: Vec3<f64>,
/// Velocity.
pub v: Vec3<f64>,
/// Reference to the central body.
pub cb: Rc<CB>,
}
impl Tick for CSV {
fn tick(&self, dt: f64) -> Self {
CSV {
r: self.r + self.v * dt,
cb: self.cb.clone(),
..*self
}
}
}
impl Push for CSV {
fn push(&self, dv: Vec3<f64>) -> Self {
CSV {
v: self.v + dv,
cb: self.cb.clone(),
..*self
}
}
}
impl CSV {
/// Construct CSV from position and velocity.
pub fn new(r: Vec3<f64>, v: Vec3<f64>, cb: Rc<CB>) -> CSV
|
/// Construct CSV from KOE.
pub fn from_koe(koe: KOE) -> CSV {
// Mean anomaly
let m0 = koe.m0;
// Number of iterations for newton_raphson
let iterations = 10;
// Eccentric anomaly
let ea = CSV::newton_raphson(&m0, &koe.e, &iterations);
// True anomaly
let ta = 2.0*((1.0+koe.e).sqrt()*(ea/2.0).sin())
.atan2((1.0-koe.e).sqrt()*(ea/2.0).cos());
// Distance to the center of the central body
let dist = koe.a*(1.0-koe.e*ea.cos());
// Radius vector in i, j plane
let mut r = (koe.cb.i*ta.cos() + koe.cb.j*ta.sin()) * dist;
// Velocity in i, j plane
let mut v = (koe.cb.i*(-ea.sin()) +
koe.cb.j*((1.0-koe.e.powf(2.0)).sqrt()*ea.cos())) * ((koe.cb.mu*koe.a).sqrt()/dist);
// Radius vector in orbital plane
r = koe.rot.transform(&r);
// Velocity in orbital plane
v = koe.rot.transform(&v);
CSV::new(r, v, koe.cb.clone())
}
// Function that numerically solves Kepler's equation
fn newton_raphson(m0: &f64, e: &f64, iterations: &i32) -> f64 {
let mut ea = m0.clone();
for _ in 0..*iterations {
ea = ea - (ea - e*ea.sin() - m0)/(1.0 - e*ea.cos());
}
ea
}
}
|
{
CSV {
r: r,
v: v,
cb: cb,
}
}
|
identifier_body
|
dot.rs
|
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
pub trait DotAttributes {
/// Write this thing's attributes to the given output. Each attribute must
/// be its own `<tr>...</tr>`.
fn dot_attributes<W>(
&self,
ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write;
}
/// Write a graphviz dot file containing our IR.
pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
P: AsRef<Path>,
|
ctx,
&mut |sub_id: ItemId, edge_kind| {
if err.is_some() {
return;
}
match writeln!(
&mut dot_file,
"{} -> {} [label={:?}, color={}];",
id.as_usize(),
sub_id.as_usize(),
edge_kind,
if is_allowlisted { "black" } else { "gray" }
) {
Ok(_) => {}
Err(e) => err = Some(Err(e)),
}
},
&(),
);
if let Some(err) = err {
return err;
}
if let Some(module) = item.as_module() {
for child in module.children() {
writeln!(
&mut dot_file,
"{} -> {} [style=dotted, color=gray]",
item.id().as_usize(),
child.as_usize()
)?;
}
}
}
writeln!(&mut dot_file, "}}")?;
Ok(())
}
|
{
let file = File::create(path)?;
let mut dot_file = io::BufWriter::new(file);
writeln!(&mut dot_file, "digraph {{")?;
let mut err: Option<io::Result<_>> = None;
for (id, item) in ctx.items() {
let is_allowlisted = ctx.allowlisted_items().contains(&id);
writeln!(
&mut dot_file,
r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
id.as_usize(),
if is_allowlisted { "black" } else { "gray" }
)?;
item.dot_attributes(ctx, &mut dot_file)?;
writeln!(&mut dot_file, r#"</table> >];"#)?;
item.trace(
|
identifier_body
|
dot.rs
|
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
pub trait DotAttributes {
/// Write this thing's attributes to the given output. Each attribute must
/// be its own `<tr>...</tr>`.
fn dot_attributes<W>(
&self,
ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write;
}
/// Write a graphviz dot file containing our IR.
pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
P: AsRef<Path>,
{
let file = File::create(path)?;
let mut dot_file = io::BufWriter::new(file);
writeln!(&mut dot_file, "digraph {{")?;
let mut err: Option<io::Result<_>> = None;
for (id, item) in ctx.items() {
let is_allowlisted = ctx.allowlisted_items().contains(&id);
writeln!(
&mut dot_file,
r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
id.as_usize(),
if is_allowlisted { "black" } else { "gray" }
)?;
item.dot_attributes(ctx, &mut dot_file)?;
writeln!(&mut dot_file, r#"</table> >];"#)?;
item.trace(
ctx,
&mut |sub_id: ItemId, edge_kind| {
if err.is_some() {
return;
}
match writeln!(
&mut dot_file,
"{} -> {} [label={:?}, color={}];",
id.as_usize(),
sub_id.as_usize(),
edge_kind,
if is_allowlisted { "black" } else { "gray" }
) {
Ok(_) =>
|
Err(e) => err = Some(Err(e)),
}
},
&(),
);
if let Some(err) = err {
return err;
}
if let Some(module) = item.as_module() {
for child in module.children() {
writeln!(
&mut dot_file,
"{} -> {} [style=dotted, color=gray]",
item.id().as_usize(),
child.as_usize()
)?;
}
}
}
writeln!(&mut dot_file, "}}")?;
Ok(())
}
|
{}
|
conditional_block
|
dot.rs
|
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
pub trait DotAttributes {
/// Write this thing's attributes to the given output. Each attribute must
/// be its own `<tr>...</tr>`.
fn dot_attributes<W>(
&self,
ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write;
}
/// Write a graphviz dot file containing our IR.
pub fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
P: AsRef<Path>,
{
let file = File::create(path)?;
let mut dot_file = io::BufWriter::new(file);
writeln!(&mut dot_file, "digraph {{")?;
let mut err: Option<io::Result<_>> = None;
for (id, item) in ctx.items() {
let is_allowlisted = ctx.allowlisted_items().contains(&id);
writeln!(
&mut dot_file,
r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
id.as_usize(),
if is_allowlisted { "black" } else { "gray" }
)?;
item.dot_attributes(ctx, &mut dot_file)?;
writeln!(&mut dot_file, r#"</table> >];"#)?;
item.trace(
ctx,
&mut |sub_id: ItemId, edge_kind| {
if err.is_some() {
return;
}
match writeln!(
&mut dot_file,
"{} -> {} [label={:?}, color={}];",
id.as_usize(),
sub_id.as_usize(),
edge_kind,
if is_allowlisted { "black" } else { "gray" }
) {
Ok(_) => {}
Err(e) => err = Some(Err(e)),
}
},
&(),
);
if let Some(err) = err {
return err;
}
if let Some(module) = item.as_module() {
for child in module.children() {
writeln!(
&mut dot_file,
|
}
}
writeln!(&mut dot_file, "}}")?;
Ok(())
}
|
"{} -> {} [style=dotted, color=gray]",
item.id().as_usize(),
child.as_usize()
)?;
}
|
random_line_split
|
dot.rs
|
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
pub trait DotAttributes {
/// Write this thing's attributes to the given output. Each attribute must
/// be its own `<tr>...</tr>`.
fn dot_attributes<W>(
&self,
ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write;
}
/// Write a graphviz dot file containing our IR.
pub fn
|
<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
P: AsRef<Path>,
{
let file = File::create(path)?;
let mut dot_file = io::BufWriter::new(file);
writeln!(&mut dot_file, "digraph {{")?;
let mut err: Option<io::Result<_>> = None;
for (id, item) in ctx.items() {
let is_allowlisted = ctx.allowlisted_items().contains(&id);
writeln!(
&mut dot_file,
r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
id.as_usize(),
if is_allowlisted { "black" } else { "gray" }
)?;
item.dot_attributes(ctx, &mut dot_file)?;
writeln!(&mut dot_file, r#"</table> >];"#)?;
item.trace(
ctx,
&mut |sub_id: ItemId, edge_kind| {
if err.is_some() {
return;
}
match writeln!(
&mut dot_file,
"{} -> {} [label={:?}, color={}];",
id.as_usize(),
sub_id.as_usize(),
edge_kind,
if is_allowlisted { "black" } else { "gray" }
) {
Ok(_) => {}
Err(e) => err = Some(Err(e)),
}
},
&(),
);
if let Some(err) = err {
return err;
}
if let Some(module) = item.as_module() {
for child in module.children() {
writeln!(
&mut dot_file,
"{} -> {} [style=dotted, color=gray]",
item.id().as_usize(),
child.as_usize()
)?;
}
}
}
writeln!(&mut dot_file, "}}")?;
Ok(())
}
|
write_dot_file
|
identifier_name
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, LayoutJS, MutNullableHeap, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableHeap<JS<HTMLCollection>>,
}
impl HTMLTableRowElement {
fn new_inherited(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(window.r(), self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(atom!("td"), None, node.owner_doc().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
if!parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>()
|
else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match *local_name {
atom!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
{
table_section.Rows()
}
|
conditional_block
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, LayoutJS, MutNullableHeap, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableHeap<JS<HTMLCollection>>,
}
impl HTMLTableRowElement {
fn new_inherited(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection>
|
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(atom!("td"), None, node.owner_doc().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
if!parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match *local_name {
atom!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
{
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(window.r(), self.upcast(), filter)
})
}
|
identifier_body
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, LayoutJS, MutNullableHeap, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableHeap<JS<HTMLCollection>>,
}
impl HTMLTableRowElement {
fn new_inherited(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn Cells(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(window.r(), self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(atom!("td"), None, node.owner_doc().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
if!parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
|
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match *local_name {
atom!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
|
random_line_split
|
htmltablerowelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::RGBA;
use dom::bindings::codegen::Bindings::HTMLTableElementBinding::HTMLTableElementMethods;
use dom::bindings::codegen::Bindings::HTMLTableRowElementBinding::{self, HTMLTableRowElementMethods};
use dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::HTMLTableSectionElementMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use dom::bindings::error::{ErrorResult, Fallible};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{JS, LayoutJS, MutNullableHeap, Root, RootedReference};
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::element::{Element, RawLayoutElementHelpers};
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmlelement::HTMLElement;
use dom::htmltabledatacellelement::HTMLTableDataCellElement;
use dom::htmltableelement::HTMLTableElement;
use dom::htmltableheadercellelement::HTMLTableHeaderCellElement;
use dom::htmltablesectionelement::HTMLTableSectionElement;
use dom::node::{Node, window_from_node};
use dom::virtualmethods::VirtualMethods;
use string_cache::Atom;
use style::attr::AttrValue;
#[derive(JSTraceable)]
struct CellsFilter;
impl CollectionFilter for CellsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
(elem.is::<HTMLTableHeaderCellElement>() || elem.is::<HTMLTableDataCellElement>()) &&
elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
#[dom_struct]
pub struct HTMLTableRowElement {
htmlelement: HTMLElement,
cells: MutNullableHeap<JS<HTMLCollection>>,
}
impl HTMLTableRowElement {
fn new_inherited(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> HTMLTableRowElement {
HTMLTableRowElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
cells: Default::default(),
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: Atom, prefix: Option<DOMString>, document: &Document)
-> Root<HTMLTableRowElement> {
Node::reflect_node(box HTMLTableRowElement::new_inherited(local_name, prefix, document),
document,
HTMLTableRowElementBinding::Wrap)
}
/// Determine the index for this `HTMLTableRowElement` within the given
/// `HTMLCollection`. Returns `-1` if not found within collection.
fn row_index(&self, collection: Root<HTMLCollection>) -> i32 {
collection.elements_iter()
.position(|elem| (&elem as &Element) == self.upcast())
.map_or(-1, |i| i as i32)
}
}
impl HTMLTableRowElementMethods for HTMLTableRowElement {
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_getter!(BgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-bgcolor
make_legacy_color_setter!(SetBgColor, "bgcolor");
// https://html.spec.whatwg.org/multipage/#dom-tr-cells
fn
|
(&self) -> Root<HTMLCollection> {
self.cells.or_init(|| {
let window = window_from_node(self);
let filter = box CellsFilter;
HTMLCollection::create(window.r(), self.upcast(), filter)
})
}
// https://html.spec.whatwg.org/multipage/#dom-tr-insertcell
fn InsertCell(&self, index: i32) -> Fallible<Root<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Cells(),
|| HTMLTableDataCellElement::new(atom!("td"), None, node.owner_doc().r()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-deletecell
fn DeleteCell(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(
index,
|| self.Cells(),
|n| n.is::<HTMLTableDataCellElement>())
}
// https://html.spec.whatwg.org/multipage/#dom-tr-rowindex
fn RowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
if let Some(table) = parent.downcast::<HTMLTableElement>() {
return self.row_index(table.Rows());
}
if!parent.is::<HTMLTableSectionElement>() {
return -1;
}
let grandparent = match parent.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
grandparent.downcast::<HTMLTableElement>()
.map_or(-1, |table| self.row_index(table.Rows()))
}
// https://html.spec.whatwg.org/multipage/#dom-tr-sectionrowindex
fn SectionRowIndex(&self) -> i32 {
let parent = match self.upcast::<Node>().GetParentNode() {
Some(parent) => parent,
None => return -1,
};
let collection = if let Some(table) = parent.downcast::<HTMLTableElement>() {
table.Rows()
} else if let Some(table_section) = parent.downcast::<HTMLTableSectionElement>() {
table_section.Rows()
} else {
return -1;
};
self.row_index(collection)
}
}
pub trait HTMLTableRowElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableRowElementLayoutHelpers for LayoutJS<HTMLTableRowElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &atom!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableRowElement {
fn super_type(&self) -> Option<&VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &Atom, value: DOMString) -> AttrValue {
match *local_name {
atom!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self.super_type().unwrap().parse_plain_attribute(local_name, value),
}
}
}
|
Cells
|
identifier_name
|
try_fold.rs
|
use core::fmt;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future, TryFuture};
use futures_core::ready;
use futures_core::stream::Stream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_fold`](super::TryStreamExt::try_fold) method.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryFold<St, Fut, T, F> {
#[pin]
stream: St,
f: F,
accum: Option<T>,
#[pin]
future: Option<Fut>,
}
}
impl<St, Fut, T, F> fmt::Debug for TryFold<St, Fut, T, F>
where
St: fmt::Debug,
Fut: fmt::Debug,
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFold")
.field("stream", &self.stream)
.field("accum", &self.accum)
.field("future", &self.future)
.finish()
}
}
impl<St, Fut, T, F> TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
pub(super) fn new(stream: St, f: F, t: T) -> Self
|
}
impl<St, Fut, T, F> FusedFuture for TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
fn is_terminated(&self) -> bool {
self.accum.is_none() && self.future.is_none()
}
}
impl<St, Fut, T, F> Future for TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
type Output = Result<T, Fut::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.future.as_mut().as_pin_mut() {
// we're currently processing a future to produce a new accum value
let res = ready!(fut.try_poll(cx));
this.future.set(None);
match res {
Ok(a) => *this.accum = Some(a),
Err(e) => break Err(e),
}
} else if this.accum.is_some() {
// we're waiting on a new item from the stream
let res = ready!(this.stream.as_mut().poll_next(cx));
let a = this.accum.take().unwrap();
match res {
Some(item) => this.future.set(Some((this.f)(a, item))),
None => break Ok(a),
}
} else {
panic!("Fold polled after completion")
}
})
}
}
|
{
Self { stream, f, accum: Some(t), future: None }
}
|
identifier_body
|
try_fold.rs
|
use core::fmt;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future, TryFuture};
use futures_core::ready;
use futures_core::stream::Stream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_fold`](super::TryStreamExt::try_fold) method.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryFold<St, Fut, T, F> {
#[pin]
stream: St,
f: F,
accum: Option<T>,
#[pin]
future: Option<Fut>,
}
}
impl<St, Fut, T, F> fmt::Debug for TryFold<St, Fut, T, F>
where
St: fmt::Debug,
Fut: fmt::Debug,
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFold")
.field("stream", &self.stream)
.field("accum", &self.accum)
.field("future", &self.future)
.finish()
}
}
impl<St, Fut, T, F> TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
pub(super) fn new(stream: St, f: F, t: T) -> Self {
Self { stream, f, accum: Some(t), future: None }
}
}
impl<St, Fut, T, F> FusedFuture for TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
fn is_terminated(&self) -> bool {
self.accum.is_none() && self.future.is_none()
}
}
impl<St, Fut, T, F> Future for TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
type Output = Result<T, Fut::Error>;
fn
|
(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.future.as_mut().as_pin_mut() {
// we're currently processing a future to produce a new accum value
let res = ready!(fut.try_poll(cx));
this.future.set(None);
match res {
Ok(a) => *this.accum = Some(a),
Err(e) => break Err(e),
}
} else if this.accum.is_some() {
// we're waiting on a new item from the stream
let res = ready!(this.stream.as_mut().poll_next(cx));
let a = this.accum.take().unwrap();
match res {
Some(item) => this.future.set(Some((this.f)(a, item))),
None => break Ok(a),
}
} else {
panic!("Fold polled after completion")
}
})
}
}
|
poll
|
identifier_name
|
try_fold.rs
|
use core::fmt;
use core::pin::Pin;
use futures_core::future::{FusedFuture, Future, TryFuture};
use futures_core::ready;
use futures_core::stream::Stream;
use futures_core::task::{Context, Poll};
use pin_project_lite::pin_project;
pin_project! {
/// Future for the [`try_fold`](super::TryStreamExt::try_fold) method.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct TryFold<St, Fut, T, F> {
#[pin]
stream: St,
|
f: F,
accum: Option<T>,
#[pin]
future: Option<Fut>,
}
}
impl<St, Fut, T, F> fmt::Debug for TryFold<St, Fut, T, F>
where
St: fmt::Debug,
Fut: fmt::Debug,
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TryFold")
.field("stream", &self.stream)
.field("accum", &self.accum)
.field("future", &self.future)
.finish()
}
}
impl<St, Fut, T, F> TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
pub(super) fn new(stream: St, f: F, t: T) -> Self {
Self { stream, f, accum: Some(t), future: None }
}
}
impl<St, Fut, T, F> FusedFuture for TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
fn is_terminated(&self) -> bool {
self.accum.is_none() && self.future.is_none()
}
}
impl<St, Fut, T, F> Future for TryFold<St, Fut, T, F>
where
St: Stream,
F: FnMut(T, St::Item) -> Fut,
Fut: TryFuture<Ok = T>,
{
type Output = Result<T, Fut::Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.future.as_mut().as_pin_mut() {
// we're currently processing a future to produce a new accum value
let res = ready!(fut.try_poll(cx));
this.future.set(None);
match res {
Ok(a) => *this.accum = Some(a),
Err(e) => break Err(e),
}
} else if this.accum.is_some() {
// we're waiting on a new item from the stream
let res = ready!(this.stream.as_mut().poll_next(cx));
let a = this.accum.take().unwrap();
match res {
Some(item) => this.future.set(Some((this.f)(a, item))),
None => break Ok(a),
}
} else {
panic!("Fold polled after completion")
}
})
}
}
|
random_line_split
|
|
interpret.rs
|
use parser::{Token, TokenType};
use std::ops::Index;
use std::iter;
use std::iter::FromIterator;
/*
This mod contains the interpreter part of Assembunny+. The abbreviated terminology for this mod is "ASMBI", for "ASseMBunny+ Interpreter".
One of the functions here is meant to be called directly from main.rs so that file can focus on command line handling.
*/
pub struct AsmbiState {
/// Register map (with its own type)
pub regs: RegisterMap,
/// Instruction Pointer, declared as u32 for ability to run more than 4 billion lines of ASMB.
/// (I don't anticipate any combined ASMB program to have more than 4 billion lines!)
pub ip: u32,
}
/// This struct/impl wraps the Register Vec in order to reduce boilerplate and redundancy on certain functions; It also makes code more readable.
pub struct RegisterMap {
pub vec: Vec<i32>,
}
impl RegisterMap {
pub fn index_set(&mut self, regindex: usize, val: i32) -> bool {
if self.vec.len() <= regindex {
return false;
}
self.vec[regindex] = val;
true
}
pub fn set(&mut self, regtok: &Token, newval: i32) -> bool {
self.index_set(regtok.val as usize, newval)
}
pub fn get(&self, index: usize) -> Option<&i32> {
if self.vec.len() <= index {
None
} else {
Some(self.vec.index(index))
}
}
#[allow(unused_assignments)]
pub fn index_modify<F>(&mut self, index: usize, modifier: F) -> bool
where F: Fn(i32) -> i32 {
let mut optval: i32 = 0;
{
match self.get(index) {
Some(val) => optval = *val,
None => return false
}
}
self.index_set(index, modifier(optval))
}
pub fn modify<F>(&mut self, regtok: &Token, modifier: F) -> bool
where F: Fn(i32) -> i32 {
self.index_modify(regtok.val as usize, modifier)
}
pub fn parse_token(&self, tok: &Token) -> i32 {
match tok.type_ {
TokenType::LITERAL => tok.val,
TokenType::REGISTER => *self.get(tok.val as usize).unwrap(),
_ => panic!("parse_token does not parse keyword tokens.")
}
}
pub fn new(capacity: usize) -> Self {
RegisterMap {
vec: Vec::from_iter(iter::repeat(0).take(capacity)),
}
}
}
/// Syntactic sugar for all return values in exec.
type Response = Result<(), String>;
/// Module consisting of executors for each keyword.
/// Each function has two arguments: mutable reference to AsmbiState and Vec<&str> tokens from the parser.
/// The tokens are expected to be passed by parser::line_valid. If an error that was supposed to be caught in that function is encountered here, the program will panic!, reminding the developer that parser::line_valid is not working properly.
mod exec {
use std::char;
use interpret::{AsmbiState, Response};
use parser::Token;
macro_rules! try_do {
( $fun:expr, $err:expr ) => (if $fun {
Ok(())
} else {
Err($err)
})
}
macro_rules! try_set {
( $fun:expr ) => (try_do!($fun, "Failed to set register value".to_owned()))
}
pub fn def(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: def <new register index> <new value>
// NOTE: All the separate `let` statements were adopted in order to prevent compiler errors regarding simultaneous mutable/immutable borrowing of state
let newval = state.regs.parse_token(&toks[2]);
try_set!(state.regs.set(&toks[1], newval))
}
pub fn inc(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: inc <register index>
try_set!(state.regs.modify(&toks[1], |v| v + 1))
}
pub fn inct(state: &mut AsmbiState, toks: &Vec<Token>) -> Response
|
pub fn dec(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: dec <register name>
try_set!(state.regs.modify(&toks[1], |v| v - 1))
}
pub fn dect(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: dect <register name> <value to be eval'd>
let subtractor = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v - subtractor))
}
pub fn mul(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: mul <register name> <eval-ue>
let multiplier = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v * multiplier))
}
pub fn div(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: div <register name> <eval-ue>
// Note: floor the result
let quotient = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v / quotient))
}
pub fn cpy(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: cpy <eval-ue> <register name>
let newval = state.regs.parse_token(&toks[1]);
try_set!(state.regs.set(&toks[2], newval))
}
pub fn jnz(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: cpy <eval-ue> <literal>
// Since IP is incremented after each line, go to relative line **minus 1** so the program works properly.
if state.regs.parse_token(&toks[1])!= 0 {
// TODO: add under/overflow checks
// Ugly hack for u32 adding i32; hope this will be supported in future versions of Rust.
let diff = state.regs.parse_token(&toks[2]) - 1;
if diff < 0 {
state.ip -= (-diff) as u32
} else {
state.ip += diff as u32
}
}
Ok(())
}
pub fn out(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: out <eval-ue>
print!("{} ", state.regs.parse_token(&toks[1]));
Ok(())
}
pub fn outn(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: outn <eval-ue>
println!("{}", state.regs.parse_token(&toks[1]));
Ok(())
}
pub fn outc(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: outc <eval-ue>
let val = state.regs.parse_token(&toks[1]);
if val < 0 {
return Err(format!("Char code ({}) should not be less than zero", val));
}
match char::from_u32(val as u32) {
Some(v) => print!("{}", v),
_ => return Err(format!("Char code ({}) is invalid", val))
}
Ok(())
}
pub const INDEX: [fn(&mut AsmbiState, &Vec<Token>) -> Response; 12] = [def, inc, inct, dec, dect, mul, div, cpy, jnz, out, outn, outc];
}
pub fn execute(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
assert_eq!(toks[0].type_, TokenType::KEYWORD);
exec::INDEX[toks[0].val as usize](state, toks)
}
pub fn new_state(capacity: usize) -> AsmbiState {
AsmbiState {
regs: RegisterMap::new(capacity),
ip: 0
}
}
|
{
// Syntax: inct <register index> <value to add>
let adder = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v + adder))
}
|
identifier_body
|
interpret.rs
|
use parser::{Token, TokenType};
use std::ops::Index;
use std::iter;
use std::iter::FromIterator;
/*
This mod contains the interpreter part of Assembunny+. The abbreviated terminology for this mod is "ASMBI", for "ASseMBunny+ Interpreter".
One of the functions here is meant to be called directly from main.rs so that file can focus on command line handling.
*/
pub struct AsmbiState {
/// Register map (with its own type)
pub regs: RegisterMap,
/// Instruction Pointer, declared as u32 for ability to run more than 4 billion lines of ASMB.
/// (I don't anticipate any combined ASMB program to have more than 4 billion lines!)
pub ip: u32,
}
/// This struct/impl wraps the Register Vec in order to reduce boilerplate and redundancy on certain functions; It also makes code more readable.
pub struct RegisterMap {
pub vec: Vec<i32>,
}
impl RegisterMap {
pub fn index_set(&mut self, regindex: usize, val: i32) -> bool {
if self.vec.len() <= regindex {
return false;
}
self.vec[regindex] = val;
true
}
pub fn set(&mut self, regtok: &Token, newval: i32) -> bool {
self.index_set(regtok.val as usize, newval)
}
pub fn get(&self, index: usize) -> Option<&i32> {
if self.vec.len() <= index {
None
} else {
Some(self.vec.index(index))
}
}
#[allow(unused_assignments)]
pub fn index_modify<F>(&mut self, index: usize, modifier: F) -> bool
where F: Fn(i32) -> i32 {
let mut optval: i32 = 0;
{
match self.get(index) {
Some(val) => optval = *val,
None => return false
}
}
self.index_set(index, modifier(optval))
}
pub fn modify<F>(&mut self, regtok: &Token, modifier: F) -> bool
where F: Fn(i32) -> i32 {
self.index_modify(regtok.val as usize, modifier)
}
pub fn parse_token(&self, tok: &Token) -> i32 {
match tok.type_ {
TokenType::LITERAL => tok.val,
TokenType::REGISTER => *self.get(tok.val as usize).unwrap(),
_ => panic!("parse_token does not parse keyword tokens.")
}
}
pub fn new(capacity: usize) -> Self {
RegisterMap {
vec: Vec::from_iter(iter::repeat(0).take(capacity)),
}
}
}
/// Syntactic sugar for all return values in exec.
type Response = Result<(), String>;
/// Module consisting of executors for each keyword.
/// Each function has two arguments: mutable reference to AsmbiState and Vec<&str> tokens from the parser.
/// The tokens are expected to be passed by parser::line_valid. If an error that was supposed to be caught in that function is encountered here, the program will panic!, reminding the developer that parser::line_valid is not working properly.
mod exec {
use std::char;
use interpret::{AsmbiState, Response};
use parser::Token;
macro_rules! try_do {
( $fun:expr, $err:expr ) => (if $fun {
Ok(())
} else {
Err($err)
})
}
macro_rules! try_set {
( $fun:expr ) => (try_do!($fun, "Failed to set register value".to_owned()))
}
pub fn def(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: def <new register index> <new value>
// NOTE: All the separate `let` statements were adopted in order to prevent compiler errors regarding simultaneous mutable/immutable borrowing of state
let newval = state.regs.parse_token(&toks[2]);
try_set!(state.regs.set(&toks[1], newval))
}
pub fn inc(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: inc <register index>
try_set!(state.regs.modify(&toks[1], |v| v + 1))
}
pub fn inct(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: inct <register index> <value to add>
let adder = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v + adder))
}
pub fn dec(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: dec <register name>
try_set!(state.regs.modify(&toks[1], |v| v - 1))
}
pub fn dect(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: dect <register name> <value to be eval'd>
let subtractor = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v - subtractor))
}
pub fn mul(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: mul <register name> <eval-ue>
let multiplier = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v * multiplier))
}
pub fn div(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: div <register name> <eval-ue>
// Note: floor the result
let quotient = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v / quotient))
}
pub fn cpy(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: cpy <eval-ue> <register name>
let newval = state.regs.parse_token(&toks[1]);
try_set!(state.regs.set(&toks[2], newval))
}
pub fn jnz(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: cpy <eval-ue> <literal>
// Since IP is incremented after each line, go to relative line **minus 1** so the program works properly.
if state.regs.parse_token(&toks[1])!= 0 {
// TODO: add under/overflow checks
// Ugly hack for u32 adding i32; hope this will be supported in future versions of Rust.
let diff = state.regs.parse_token(&toks[2]) - 1;
if diff < 0 {
state.ip -= (-diff) as u32
} else {
state.ip += diff as u32
}
}
Ok(())
}
pub fn out(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: out <eval-ue>
print!("{} ", state.regs.parse_token(&toks[1]));
Ok(())
}
pub fn outn(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: outn <eval-ue>
println!("{}", state.regs.parse_token(&toks[1]));
Ok(())
}
pub fn outc(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: outc <eval-ue>
let val = state.regs.parse_token(&toks[1]);
if val < 0 {
return Err(format!("Char code ({}) should not be less than zero", val));
}
match char::from_u32(val as u32) {
Some(v) => print!("{}", v),
_ => return Err(format!("Char code ({}) is invalid", val))
}
Ok(())
}
pub const INDEX: [fn(&mut AsmbiState, &Vec<Token>) -> Response; 12] = [def, inc, inct, dec, dect, mul, div, cpy, jnz, out, outn, outc];
}
pub fn execute(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
assert_eq!(toks[0].type_, TokenType::KEYWORD);
exec::INDEX[toks[0].val as usize](state, toks)
}
pub fn
|
(capacity: usize) -> AsmbiState {
AsmbiState {
regs: RegisterMap::new(capacity),
ip: 0
}
}
|
new_state
|
identifier_name
|
interpret.rs
|
use parser::{Token, TokenType};
use std::ops::Index;
use std::iter;
use std::iter::FromIterator;
/*
This mod contains the interpreter part of Assembunny+. The abbreviated terminology for this mod is "ASMBI", for "ASseMBunny+ Interpreter".
One of the functions here is meant to be called directly from main.rs so that file can focus on command line handling.
*/
pub struct AsmbiState {
/// Register map (with its own type)
pub regs: RegisterMap,
/// Instruction Pointer, declared as u32 for ability to run more than 4 billion lines of ASMB.
/// (I don't anticipate any combined ASMB program to have more than 4 billion lines!)
pub ip: u32,
}
/// This struct/impl wraps the Register Vec in order to reduce boilerplate and redundancy on certain functions; It also makes code more readable.
pub struct RegisterMap {
pub vec: Vec<i32>,
}
impl RegisterMap {
pub fn index_set(&mut self, regindex: usize, val: i32) -> bool {
if self.vec.len() <= regindex {
return false;
}
self.vec[regindex] = val;
true
}
pub fn set(&mut self, regtok: &Token, newval: i32) -> bool {
self.index_set(regtok.val as usize, newval)
}
pub fn get(&self, index: usize) -> Option<&i32> {
if self.vec.len() <= index {
None
} else {
Some(self.vec.index(index))
}
}
#[allow(unused_assignments)]
pub fn index_modify<F>(&mut self, index: usize, modifier: F) -> bool
where F: Fn(i32) -> i32 {
let mut optval: i32 = 0;
{
match self.get(index) {
Some(val) => optval = *val,
None => return false
}
}
self.index_set(index, modifier(optval))
}
pub fn modify<F>(&mut self, regtok: &Token, modifier: F) -> bool
where F: Fn(i32) -> i32 {
self.index_modify(regtok.val as usize, modifier)
}
pub fn parse_token(&self, tok: &Token) -> i32 {
match tok.type_ {
TokenType::LITERAL => tok.val,
TokenType::REGISTER => *self.get(tok.val as usize).unwrap(),
_ => panic!("parse_token does not parse keyword tokens.")
}
}
pub fn new(capacity: usize) -> Self {
RegisterMap {
vec: Vec::from_iter(iter::repeat(0).take(capacity)),
}
}
}
/// Syntactic sugar for all return values in exec.
type Response = Result<(), String>;
/// Module consisting of executors for each keyword.
/// Each function has two arguments: mutable reference to AsmbiState and Vec<&str> tokens from the parser.
/// The tokens are expected to be passed by parser::line_valid. If an error that was supposed to be caught in that function is encountered here, the program will panic!, reminding the developer that parser::line_valid is not working properly.
mod exec {
use std::char;
use interpret::{AsmbiState, Response};
use parser::Token;
macro_rules! try_do {
( $fun:expr, $err:expr ) => (if $fun {
Ok(())
} else {
Err($err)
})
}
macro_rules! try_set {
( $fun:expr ) => (try_do!($fun, "Failed to set register value".to_owned()))
}
pub fn def(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: def <new register index> <new value>
// NOTE: All the separate `let` statements were adopted in order to prevent compiler errors regarding simultaneous mutable/immutable borrowing of state
let newval = state.regs.parse_token(&toks[2]);
try_set!(state.regs.set(&toks[1], newval))
}
pub fn inc(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: inc <register index>
try_set!(state.regs.modify(&toks[1], |v| v + 1))
}
pub fn inct(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: inct <register index> <value to add>
let adder = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v + adder))
}
pub fn dec(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: dec <register name>
try_set!(state.regs.modify(&toks[1], |v| v - 1))
}
pub fn dect(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: dect <register name> <value to be eval'd>
let subtractor = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v - subtractor))
|
}
pub fn mul(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: mul <register name> <eval-ue>
let multiplier = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v * multiplier))
}
pub fn div(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: div <register name> <eval-ue>
// Note: floor the result
let quotient = state.regs.parse_token(&toks[2]);
try_set!(state.regs.modify(&toks[1], |v| v / quotient))
}
pub fn cpy(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: cpy <eval-ue> <register name>
let newval = state.regs.parse_token(&toks[1]);
try_set!(state.regs.set(&toks[2], newval))
}
pub fn jnz(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: cpy <eval-ue> <literal>
// Since IP is incremented after each line, go to relative line **minus 1** so the program works properly.
if state.regs.parse_token(&toks[1])!= 0 {
// TODO: add under/overflow checks
// Ugly hack for u32 adding i32; hope this will be supported in future versions of Rust.
let diff = state.regs.parse_token(&toks[2]) - 1;
if diff < 0 {
state.ip -= (-diff) as u32
} else {
state.ip += diff as u32
}
}
Ok(())
}
pub fn out(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: out <eval-ue>
print!("{} ", state.regs.parse_token(&toks[1]));
Ok(())
}
pub fn outn(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: outn <eval-ue>
println!("{}", state.regs.parse_token(&toks[1]));
Ok(())
}
pub fn outc(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
// Syntax: outc <eval-ue>
let val = state.regs.parse_token(&toks[1]);
if val < 0 {
return Err(format!("Char code ({}) should not be less than zero", val));
}
match char::from_u32(val as u32) {
Some(v) => print!("{}", v),
_ => return Err(format!("Char code ({}) is invalid", val))
}
Ok(())
}
pub const INDEX: [fn(&mut AsmbiState, &Vec<Token>) -> Response; 12] = [def, inc, inct, dec, dect, mul, div, cpy, jnz, out, outn, outc];
}
pub fn execute(state: &mut AsmbiState, toks: &Vec<Token>) -> Response {
assert_eq!(toks[0].type_, TokenType::KEYWORD);
exec::INDEX[toks[0].val as usize](state, toks)
}
pub fn new_state(capacity: usize) -> AsmbiState {
AsmbiState {
regs: RegisterMap::new(capacity),
ip: 0
}
}
|
random_line_split
|
|
transaction_request.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! `TransactionRequest` type
use v1::types::{Bytes, H160, U256, BlockNumber};
use v1::helpers;
use util::log::Colour;
use std::fmt;
/// Transaction request coming from RPC
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct TransactionRequest {
/// Sender
pub from: H160,
/// Recipient
pub to: Option<H160>,
/// Gas Price
#[serde(rename="gasPrice")]
pub gas_price: Option<U256>,
/// Gas
pub gas: Option<U256>,
/// Value of transaction in wei
pub value: Option<U256>,
/// Additional data sent with transaction
pub data: Option<Bytes>,
/// Transaction's nonce
pub nonce: Option<U256>,
/// Delay until this block if specified.
#[serde(rename="minBlock")]
pub min_block: Option<BlockNumber>,
}
pub fn format_ether(i: U256) -> String {
let mut string = format!("{}", i);
let idx = string.len() as isize - 18;
if idx <= 0 {
let mut prefix = String::from("0.");
for _ in 0..idx.abs() {
prefix.push('0');
}
string = prefix + &string;
} else {
string.insert(idx as usize, '.');
}
String::from(string.trim_right_matches('0')
.trim_right_matches('.'))
}
impl fmt::Display for TransactionRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let eth = self.value.unwrap_or(U256::from(0));
match self.to {
Some(ref to) => write!(
f,
"{} ETH from {} to 0x{:?}",
Colour::White.bold().paint(format_ether(eth)),
Colour::White.bold().paint(format!("0x{:?}", self.from)),
to
),
None => write!(
f,
"{} ETH from {} for contract creation",
Colour::White.bold().paint(format_ether(eth)),
Colour::White.bold().paint(format!("0x{:?}", self.from)),
),
}
}
}
impl From<helpers::TransactionRequest> for TransactionRequest {
fn from(r: helpers::TransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: r.gas_price.map(Into::into),
gas: r.gas.map(Into::into),
value: r.value.map(Into::into),
data: r.data.map(Into::into),
nonce: r.nonce.map(Into::into),
min_block: r.min_block.map(|b| BlockNumber::Num(b)),
}
}
}
impl From<helpers::FilledTransactionRequest> for TransactionRequest {
fn from(r: helpers::FilledTransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: Some(r.gas_price.into()),
gas: Some(r.gas.into()),
value: Some(r.value.into()),
data: Some(r.data.into()),
nonce: r.nonce.map(Into::into),
min_block: r.min_block.map(|b| BlockNumber::Num(b)),
}
}
}
impl Into<helpers::TransactionRequest> for TransactionRequest {
fn into(self) -> helpers::TransactionRequest
|
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use rustc_serialize::hex::FromHex;
use serde_json;
use v1::types::{U256, H160, BlockNumber};
use super::*;
#[test]
fn transaction_request_deserialize() {
let s = r#"{
"from":"0x0000000000000000000000000000000000000001",
"to":"0x0000000000000000000000000000000000000002",
"gasPrice":"0x1",
"gas":"0x2",
"value":"0x3",
"data":"0x123456",
"nonce":"0x4",
"minBlock":"0x13"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from(1),
to: Some(H160::from(2)),
gas_price: Some(U256::from(1)),
gas: Some(U256::from(2)),
value: Some(U256::from(3)),
data: Some(vec![0x12, 0x34, 0x56].into()),
nonce: Some(U256::from(4)),
min_block: Some(BlockNumber::Num(0x13)),
});
}
#[test]
fn transaction_request_deserialize2() {
let s = r#"{
"from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a",
"data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(),
to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
gas_price: Some(U256::from_str("9184e72a000").unwrap()),
gas: Some(U256::from_str("76c0").unwrap()),
value: Some(U256::from_str("9184e72a").unwrap()),
data: Some("d46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675".from_hex().unwrap().into()),
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_empty() {
let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from(1).into(),
to: None,
gas_price: None,
gas: None,
value: None,
data: None,
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_test() {
let s = r#"{
"from":"0xb5f7502a2807cb23615c7456055e1d65b2508625",
"to":"0x895d32f2db7d01ebb50053f9e48aacf26584fe40",
"data":"0x8595bab1",
"gas":"0x2fd618",
"gasPrice":"0x0ba43b7400"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from_str("b5f7502a2807cb23615c7456055e1d65b2508625").unwrap(),
to: Some(H160::from_str("895d32f2db7d01ebb50053f9e48aacf26584fe40").unwrap()),
gas_price: Some(U256::from_str("0ba43b7400").unwrap()),
gas: Some(U256::from_str("2fd618").unwrap()),
value: None,
data: Some(vec![0x85, 0x95, 0xba, 0xb1].into()),
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_error() {
let s = r#"{
"from":"0xb5f7502a2807cb23615c7456055e1d65b2508625",
"to":"",
"data":"0x8595bab1",
"gas":"0x2fd618",
"gasPrice":"0x0ba43b7400"
}"#;
let deserialized = serde_json::from_str::<TransactionRequest>(s);
assert!(deserialized.is_err(), "Should be error because to is empty");
}
#[test]
fn test_format_ether() {
assert_eq!(&format_ether(U256::from(1000000000000000000u64)), "1");
assert_eq!(&format_ether(U256::from(500000000000000000u64)), "0.5");
assert_eq!(&format_ether(U256::from(50000000000000000u64)), "0.05");
assert_eq!(&format_ether(U256::from(5000000000000000u64)), "0.005");
assert_eq!(&format_ether(U256::from(2000000000000000000u64)), "2");
assert_eq!(&format_ether(U256::from(2500000000000000000u64)), "2.5");
assert_eq!(&format_ether(U256::from(10000000000000000000u64)), "10");
}
}
|
{
helpers::TransactionRequest {
from: self.from.into(),
to: self.to.map(Into::into),
gas_price: self.gas_price.map(Into::into),
gas: self.gas.map(Into::into),
value: self.value.map(Into::into),
data: self.data.map(Into::into),
nonce: self.nonce.map(Into::into),
min_block: self.min_block.and_then(|b| b.to_min_block_num()),
}
}
|
identifier_body
|
transaction_request.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! `TransactionRequest` type
use v1::types::{Bytes, H160, U256, BlockNumber};
use v1::helpers;
use util::log::Colour;
|
use std::fmt;
/// Transaction request coming from RPC
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct TransactionRequest {
/// Sender
pub from: H160,
/// Recipient
pub to: Option<H160>,
/// Gas Price
#[serde(rename="gasPrice")]
pub gas_price: Option<U256>,
/// Gas
pub gas: Option<U256>,
/// Value of transaction in wei
pub value: Option<U256>,
/// Additional data sent with transaction
pub data: Option<Bytes>,
/// Transaction's nonce
pub nonce: Option<U256>,
/// Delay until this block if specified.
#[serde(rename="minBlock")]
pub min_block: Option<BlockNumber>,
}
pub fn format_ether(i: U256) -> String {
let mut string = format!("{}", i);
let idx = string.len() as isize - 18;
if idx <= 0 {
let mut prefix = String::from("0.");
for _ in 0..idx.abs() {
prefix.push('0');
}
string = prefix + &string;
} else {
string.insert(idx as usize, '.');
}
String::from(string.trim_right_matches('0')
.trim_right_matches('.'))
}
impl fmt::Display for TransactionRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let eth = self.value.unwrap_or(U256::from(0));
match self.to {
Some(ref to) => write!(
f,
"{} ETH from {} to 0x{:?}",
Colour::White.bold().paint(format_ether(eth)),
Colour::White.bold().paint(format!("0x{:?}", self.from)),
to
),
None => write!(
f,
"{} ETH from {} for contract creation",
Colour::White.bold().paint(format_ether(eth)),
Colour::White.bold().paint(format!("0x{:?}", self.from)),
),
}
}
}
impl From<helpers::TransactionRequest> for TransactionRequest {
fn from(r: helpers::TransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: r.gas_price.map(Into::into),
gas: r.gas.map(Into::into),
value: r.value.map(Into::into),
data: r.data.map(Into::into),
nonce: r.nonce.map(Into::into),
min_block: r.min_block.map(|b| BlockNumber::Num(b)),
}
}
}
impl From<helpers::FilledTransactionRequest> for TransactionRequest {
fn from(r: helpers::FilledTransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: Some(r.gas_price.into()),
gas: Some(r.gas.into()),
value: Some(r.value.into()),
data: Some(r.data.into()),
nonce: r.nonce.map(Into::into),
min_block: r.min_block.map(|b| BlockNumber::Num(b)),
}
}
}
impl Into<helpers::TransactionRequest> for TransactionRequest {
fn into(self) -> helpers::TransactionRequest {
helpers::TransactionRequest {
from: self.from.into(),
to: self.to.map(Into::into),
gas_price: self.gas_price.map(Into::into),
gas: self.gas.map(Into::into),
value: self.value.map(Into::into),
data: self.data.map(Into::into),
nonce: self.nonce.map(Into::into),
min_block: self.min_block.and_then(|b| b.to_min_block_num()),
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use rustc_serialize::hex::FromHex;
use serde_json;
use v1::types::{U256, H160, BlockNumber};
use super::*;
#[test]
fn transaction_request_deserialize() {
let s = r#"{
"from":"0x0000000000000000000000000000000000000001",
"to":"0x0000000000000000000000000000000000000002",
"gasPrice":"0x1",
"gas":"0x2",
"value":"0x3",
"data":"0x123456",
"nonce":"0x4",
"minBlock":"0x13"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from(1),
to: Some(H160::from(2)),
gas_price: Some(U256::from(1)),
gas: Some(U256::from(2)),
value: Some(U256::from(3)),
data: Some(vec![0x12, 0x34, 0x56].into()),
nonce: Some(U256::from(4)),
min_block: Some(BlockNumber::Num(0x13)),
});
}
#[test]
fn transaction_request_deserialize2() {
let s = r#"{
"from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a",
"data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(),
to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
gas_price: Some(U256::from_str("9184e72a000").unwrap()),
gas: Some(U256::from_str("76c0").unwrap()),
value: Some(U256::from_str("9184e72a").unwrap()),
data: Some("d46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675".from_hex().unwrap().into()),
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_empty() {
let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from(1).into(),
to: None,
gas_price: None,
gas: None,
value: None,
data: None,
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_test() {
let s = r#"{
"from":"0xb5f7502a2807cb23615c7456055e1d65b2508625",
"to":"0x895d32f2db7d01ebb50053f9e48aacf26584fe40",
"data":"0x8595bab1",
"gas":"0x2fd618",
"gasPrice":"0x0ba43b7400"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from_str("b5f7502a2807cb23615c7456055e1d65b2508625").unwrap(),
to: Some(H160::from_str("895d32f2db7d01ebb50053f9e48aacf26584fe40").unwrap()),
gas_price: Some(U256::from_str("0ba43b7400").unwrap()),
gas: Some(U256::from_str("2fd618").unwrap()),
value: None,
data: Some(vec![0x85, 0x95, 0xba, 0xb1].into()),
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_error() {
let s = r#"{
"from":"0xb5f7502a2807cb23615c7456055e1d65b2508625",
"to":"",
"data":"0x8595bab1",
"gas":"0x2fd618",
"gasPrice":"0x0ba43b7400"
}"#;
let deserialized = serde_json::from_str::<TransactionRequest>(s);
assert!(deserialized.is_err(), "Should be error because to is empty");
}
#[test]
fn test_format_ether() {
assert_eq!(&format_ether(U256::from(1000000000000000000u64)), "1");
assert_eq!(&format_ether(U256::from(500000000000000000u64)), "0.5");
assert_eq!(&format_ether(U256::from(50000000000000000u64)), "0.05");
assert_eq!(&format_ether(U256::from(5000000000000000u64)), "0.005");
assert_eq!(&format_ether(U256::from(2000000000000000000u64)), "2");
assert_eq!(&format_ether(U256::from(2500000000000000000u64)), "2.5");
assert_eq!(&format_ether(U256::from(10000000000000000000u64)), "10");
}
}
|
random_line_split
|
|
transaction_request.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! `TransactionRequest` type
use v1::types::{Bytes, H160, U256, BlockNumber};
use v1::helpers;
use util::log::Colour;
use std::fmt;
/// Transaction request coming from RPC
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct TransactionRequest {
/// Sender
pub from: H160,
/// Recipient
pub to: Option<H160>,
/// Gas Price
#[serde(rename="gasPrice")]
pub gas_price: Option<U256>,
/// Gas
pub gas: Option<U256>,
/// Value of transaction in wei
pub value: Option<U256>,
/// Additional data sent with transaction
pub data: Option<Bytes>,
/// Transaction's nonce
pub nonce: Option<U256>,
/// Delay until this block if specified.
#[serde(rename="minBlock")]
pub min_block: Option<BlockNumber>,
}
pub fn format_ether(i: U256) -> String {
let mut string = format!("{}", i);
let idx = string.len() as isize - 18;
if idx <= 0 {
let mut prefix = String::from("0.");
for _ in 0..idx.abs() {
prefix.push('0');
}
string = prefix + &string;
} else {
string.insert(idx as usize, '.');
}
String::from(string.trim_right_matches('0')
.trim_right_matches('.'))
}
impl fmt::Display for TransactionRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let eth = self.value.unwrap_or(U256::from(0));
match self.to {
Some(ref to) => write!(
f,
"{} ETH from {} to 0x{:?}",
Colour::White.bold().paint(format_ether(eth)),
Colour::White.bold().paint(format!("0x{:?}", self.from)),
to
),
None => write!(
f,
"{} ETH from {} for contract creation",
Colour::White.bold().paint(format_ether(eth)),
Colour::White.bold().paint(format!("0x{:?}", self.from)),
),
}
}
}
impl From<helpers::TransactionRequest> for TransactionRequest {
fn from(r: helpers::TransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: r.gas_price.map(Into::into),
gas: r.gas.map(Into::into),
value: r.value.map(Into::into),
data: r.data.map(Into::into),
nonce: r.nonce.map(Into::into),
min_block: r.min_block.map(|b| BlockNumber::Num(b)),
}
}
}
impl From<helpers::FilledTransactionRequest> for TransactionRequest {
fn from(r: helpers::FilledTransactionRequest) -> Self {
TransactionRequest {
from: r.from.into(),
to: r.to.map(Into::into),
gas_price: Some(r.gas_price.into()),
gas: Some(r.gas.into()),
value: Some(r.value.into()),
data: Some(r.data.into()),
nonce: r.nonce.map(Into::into),
min_block: r.min_block.map(|b| BlockNumber::Num(b)),
}
}
}
impl Into<helpers::TransactionRequest> for TransactionRequest {
fn into(self) -> helpers::TransactionRequest {
helpers::TransactionRequest {
from: self.from.into(),
to: self.to.map(Into::into),
gas_price: self.gas_price.map(Into::into),
gas: self.gas.map(Into::into),
value: self.value.map(Into::into),
data: self.data.map(Into::into),
nonce: self.nonce.map(Into::into),
min_block: self.min_block.and_then(|b| b.to_min_block_num()),
}
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use rustc_serialize::hex::FromHex;
use serde_json;
use v1::types::{U256, H160, BlockNumber};
use super::*;
#[test]
fn transaction_request_deserialize() {
let s = r#"{
"from":"0x0000000000000000000000000000000000000001",
"to":"0x0000000000000000000000000000000000000002",
"gasPrice":"0x1",
"gas":"0x2",
"value":"0x3",
"data":"0x123456",
"nonce":"0x4",
"minBlock":"0x13"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from(1),
to: Some(H160::from(2)),
gas_price: Some(U256::from(1)),
gas: Some(U256::from(2)),
value: Some(U256::from(3)),
data: Some(vec![0x12, 0x34, 0x56].into()),
nonce: Some(U256::from(4)),
min_block: Some(BlockNumber::Num(0x13)),
});
}
#[test]
fn transaction_request_deserialize2() {
let s = r#"{
"from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155",
"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
"gas": "0x76c0",
"gasPrice": "0x9184e72a000",
"value": "0x9184e72a",
"data": "0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap(),
to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
gas_price: Some(U256::from_str("9184e72a000").unwrap()),
gas: Some(U256::from_str("76c0").unwrap()),
value: Some(U256::from_str("9184e72a").unwrap()),
data: Some("d46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675".from_hex().unwrap().into()),
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_empty() {
let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from(1).into(),
to: None,
gas_price: None,
gas: None,
value: None,
data: None,
nonce: None,
min_block: None,
});
}
#[test]
fn transaction_request_deserialize_test() {
let s = r#"{
"from":"0xb5f7502a2807cb23615c7456055e1d65b2508625",
"to":"0x895d32f2db7d01ebb50053f9e48aacf26584fe40",
"data":"0x8595bab1",
"gas":"0x2fd618",
"gasPrice":"0x0ba43b7400"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: H160::from_str("b5f7502a2807cb23615c7456055e1d65b2508625").unwrap(),
to: Some(H160::from_str("895d32f2db7d01ebb50053f9e48aacf26584fe40").unwrap()),
gas_price: Some(U256::from_str("0ba43b7400").unwrap()),
gas: Some(U256::from_str("2fd618").unwrap()),
value: None,
data: Some(vec![0x85, 0x95, 0xba, 0xb1].into()),
nonce: None,
min_block: None,
});
}
#[test]
fn
|
() {
let s = r#"{
"from":"0xb5f7502a2807cb23615c7456055e1d65b2508625",
"to":"",
"data":"0x8595bab1",
"gas":"0x2fd618",
"gasPrice":"0x0ba43b7400"
}"#;
let deserialized = serde_json::from_str::<TransactionRequest>(s);
assert!(deserialized.is_err(), "Should be error because to is empty");
}
#[test]
fn test_format_ether() {
assert_eq!(&format_ether(U256::from(1000000000000000000u64)), "1");
assert_eq!(&format_ether(U256::from(500000000000000000u64)), "0.5");
assert_eq!(&format_ether(U256::from(50000000000000000u64)), "0.05");
assert_eq!(&format_ether(U256::from(5000000000000000u64)), "0.005");
assert_eq!(&format_ether(U256::from(2000000000000000000u64)), "2");
assert_eq!(&format_ether(U256::from(2500000000000000000u64)), "2.5");
assert_eq!(&format_ether(U256::from(10000000000000000000u64)), "10");
}
}
|
transaction_request_deserialize_error
|
identifier_name
|
xrsystem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::XRSessionInit;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::{XRSessionMode, XRSystemMethods};
use crate::dom::bindings::conversions::{ConversionResult, FromJSValConvertible};
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::eventtarget::EventTarget;
use crate::dom::gamepad::Gamepad;
use crate::dom::promise::Promise;
use crate::dom::window::Window;
use crate::dom::xrsession::XRSession;
use crate::dom::xrtest::XRTest;
use crate::realms::InRealm;
use crate::script_thread::ScriptThread;
use crate::task_source::TaskSource;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self as ipc_crate, IpcReceiver};
use ipc_channel::router::ROUTER;
use msg::constellation_msg::PipelineId;
use profile_traits::ipc;
use servo_config::pref;
use std::cell::Cell;
use std::rc::Rc;
use webxr_api::{Error as XRError, Frame, Session, SessionInit, SessionMode};
#[dom_struct]
pub struct XRSystem {
eventtarget: EventTarget,
gamepads: DomRefCell<Vec<Dom<Gamepad>>>,
pending_immersive_session: Cell<bool>,
active_immersive_session: MutNullableDom<XRSession>,
active_inline_sessions: DomRefCell<Vec<Dom<XRSession>>>,
test: MutNullableDom<XRTest>,
pipeline: PipelineId,
}
impl XRSystem {
fn new_inherited(pipeline: PipelineId) -> XRSystem {
XRSystem {
eventtarget: EventTarget::new_inherited(),
gamepads: DomRefCell::new(Vec::new()),
pending_immersive_session: Cell::new(false),
active_immersive_session: Default::default(),
active_inline_sessions: DomRefCell::new(Vec::new()),
test: Default::default(),
pipeline,
}
}
pub fn new(window: &Window) -> DomRoot<XRSystem> {
reflect_dom_object(
Box::new(XRSystem::new_inherited(window.pipeline_id())),
window,
)
}
pub fn pending_or_active_session(&self) -> bool {
self.pending_immersive_session.get() || self.active_immersive_session.get().is_some()
}
pub fn set_pending(&self) {
self.pending_immersive_session.set(true)
}
pub fn set_active_immersive_session(&self, session: &XRSession) {
// XXXManishearth when we support non-immersive (inline) sessions we should
// ensure they never reach these codepaths
self.pending_immersive_session.set(false);
self.active_immersive_session.set(Some(session))
}
/// https://immersive-web.github.io/webxr/#ref-for-eventdef-xrsession-end
pub fn end_session(&self, session: &XRSession) {
// Step 3
if let Some(active) = self.active_immersive_session.get() {
if Dom::from_ref(&*active) == Dom::from_ref(session) {
self.active_immersive_session.set(None);
// Dirty the canvas, since it has been skipping this step whilst in immersive
// mode
session.dirty_layers();
}
}
self.active_inline_sessions
.borrow_mut()
.retain(|sess| Dom::from_ref(&**sess)!= Dom::from_ref(session));
}
}
impl Into<SessionMode> for XRSessionMode {
fn into(self) -> SessionMode {
match self {
XRSessionMode::Immersive_vr => SessionMode::ImmersiveVR,
XRSessionMode::Immersive_ar => SessionMode::ImmersiveAR,
XRSessionMode::Inline => SessionMode::Inline,
}
}
}
impl XRSystemMethods for XRSystem {
/// https://immersive-web.github.io/webxr/#dom-xr-issessionsupported
fn IsSessionSupported(&self, mode: XRSessionMode) -> Rc<Promise> {
// XXXManishearth this should select an XR device first
let promise = Promise::new(&self.global());
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let global = self.global();
let window = global.as_window();
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = if let Some(trusted) = trusted.take() {
trusted
} else {
error!("supportsSession callback called twice!");
return;
};
let message: Result<(), webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("supportsSession callback given incorrect payload");
return;
};
if let Ok(()) = message {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(true), &canceller);
} else {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(false), &canceller);
};
}),
);
window
.webxr_registry()
.supports_session(mode.into(), sender);
promise
}
/// https://immersive-web.github.io/webxr/#dom-xr-requestsession
#[allow(unsafe_code)]
fn RequestSession(
&self,
mode: XRSessionMode,
init: RootedTraceableBox<XRSessionInit>,
comp: InRealm,
) -> Rc<Promise> {
let global = self.global();
let window = global.as_window();
let promise = Promise::new_in_current_realm(&global, comp);
if mode!= XRSessionMode::Inline {
if!ScriptThread::is_user_interacting() {
if pref!(dom.webxr.unsafe_assume_user_intent)
|
else {
promise.reject_error(Error::Security);
return promise;
}
}
if self.pending_or_active_session() {
promise.reject_error(Error::InvalidState);
return promise;
}
self.set_pending();
}
let mut required_features = vec![];
let mut optional_features = vec![];
let cx = global.get_cx();
// We are supposed to include "viewer" and on immersive devices "local"
// by default here, but this is handled directly in requestReferenceSpace()
if let Some(ref r) = init.requiredFeatures {
for feature in r {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
required_features.push(s)
} else {
warn!("Unable to convert required feature to string");
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return promise;
}
}
}
}
if let Some(ref o) = init.optionalFeatures {
for feature in o {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
optional_features.push(s)
} else {
warn!("Unable to convert optional feature to string");
}
}
}
}
let init = SessionInit {
required_features,
optional_features,
first_person_observer_view: pref!(dom.webxr.first_person_observer_view),
};
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let this = Trusted::new(self);
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
let (frame_sender, frame_receiver) = ipc_crate::channel().unwrap();
let mut frame_receiver = Some(frame_receiver);
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = trusted.take().unwrap();
let this = this.clone();
let frame_receiver = frame_receiver.take().unwrap();
let message: Result<Session, webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("requestSession callback given incorrect payload");
return;
};
let _ = task_source.queue_with_canceller(
task!(request_session: move || {
this.root().session_obtained(message, trusted.root(), mode, frame_receiver);
}),
&canceller,
);
}),
);
window
.webxr_registry()
.request_session(mode.into(), init, sender, frame_sender);
promise
}
// https://github.com/immersive-web/webxr-test-api/blob/master/explainer.md
fn Test(&self) -> DomRoot<XRTest> {
self.test.or_init(|| XRTest::new(&self.global()))
}
}
impl XRSystem {
fn session_obtained(
&self,
response: Result<Session, XRError>,
promise: Rc<Promise>,
mode: XRSessionMode,
frame_receiver: IpcReceiver<Frame>,
) {
let session = match response {
Ok(session) => session,
Err(e) => {
warn!("Error requesting XR session: {:?}", e);
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return;
},
};
let session = XRSession::new(&self.global(), session, mode, frame_receiver);
if mode == XRSessionMode::Inline {
self.active_inline_sessions
.borrow_mut()
.push(Dom::from_ref(&*session));
} else {
self.set_active_immersive_session(&session);
}
promise.resolve_native(&session);
// https://github.com/immersive-web/webxr/issues/961
// This must be called _after_ the promise is resolved
session.setup_initial_inputs();
}
// https://github.com/immersive-web/navigation/issues/10
pub fn dispatch_sessionavailable(&self) {
let xr = Trusted::new(self);
let global = self.global();
let window = global.as_window();
window
.task_manager()
.dom_manipulation_task_source()
.queue(
task!(fire_sessionavailable_event: move || {
// The sessionavailable event indicates user intent to enter an XR session
let xr = xr.root();
let interacting = ScriptThread::is_user_interacting();
ScriptThread::set_user_interacting(true);
xr.upcast::<EventTarget>().fire_bubbling_event(atom!("sessionavailable"));
ScriptThread::set_user_interacting(interacting);
}),
window.upcast(),
)
.unwrap();
}
}
|
{
warn!("The dom.webxr.unsafe-assume-user-intent preference assumes user intent to enter WebXR.");
}
|
conditional_block
|
xrsystem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::XRSessionInit;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::{XRSessionMode, XRSystemMethods};
use crate::dom::bindings::conversions::{ConversionResult, FromJSValConvertible};
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::eventtarget::EventTarget;
use crate::dom::gamepad::Gamepad;
use crate::dom::promise::Promise;
use crate::dom::window::Window;
use crate::dom::xrsession::XRSession;
use crate::dom::xrtest::XRTest;
use crate::realms::InRealm;
use crate::script_thread::ScriptThread;
use crate::task_source::TaskSource;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self as ipc_crate, IpcReceiver};
use ipc_channel::router::ROUTER;
use msg::constellation_msg::PipelineId;
use profile_traits::ipc;
use servo_config::pref;
use std::cell::Cell;
use std::rc::Rc;
use webxr_api::{Error as XRError, Frame, Session, SessionInit, SessionMode};
#[dom_struct]
pub struct XRSystem {
eventtarget: EventTarget,
gamepads: DomRefCell<Vec<Dom<Gamepad>>>,
pending_immersive_session: Cell<bool>,
active_immersive_session: MutNullableDom<XRSession>,
active_inline_sessions: DomRefCell<Vec<Dom<XRSession>>>,
test: MutNullableDom<XRTest>,
pipeline: PipelineId,
}
impl XRSystem {
fn new_inherited(pipeline: PipelineId) -> XRSystem {
XRSystem {
eventtarget: EventTarget::new_inherited(),
gamepads: DomRefCell::new(Vec::new()),
pending_immersive_session: Cell::new(false),
active_immersive_session: Default::default(),
active_inline_sessions: DomRefCell::new(Vec::new()),
test: Default::default(),
pipeline,
}
}
pub fn
|
(window: &Window) -> DomRoot<XRSystem> {
reflect_dom_object(
Box::new(XRSystem::new_inherited(window.pipeline_id())),
window,
)
}
pub fn pending_or_active_session(&self) -> bool {
self.pending_immersive_session.get() || self.active_immersive_session.get().is_some()
}
pub fn set_pending(&self) {
self.pending_immersive_session.set(true)
}
pub fn set_active_immersive_session(&self, session: &XRSession) {
// XXXManishearth when we support non-immersive (inline) sessions we should
// ensure they never reach these codepaths
self.pending_immersive_session.set(false);
self.active_immersive_session.set(Some(session))
}
/// https://immersive-web.github.io/webxr/#ref-for-eventdef-xrsession-end
pub fn end_session(&self, session: &XRSession) {
// Step 3
if let Some(active) = self.active_immersive_session.get() {
if Dom::from_ref(&*active) == Dom::from_ref(session) {
self.active_immersive_session.set(None);
// Dirty the canvas, since it has been skipping this step whilst in immersive
// mode
session.dirty_layers();
}
}
self.active_inline_sessions
.borrow_mut()
.retain(|sess| Dom::from_ref(&**sess)!= Dom::from_ref(session));
}
}
impl Into<SessionMode> for XRSessionMode {
fn into(self) -> SessionMode {
match self {
XRSessionMode::Immersive_vr => SessionMode::ImmersiveVR,
XRSessionMode::Immersive_ar => SessionMode::ImmersiveAR,
XRSessionMode::Inline => SessionMode::Inline,
}
}
}
impl XRSystemMethods for XRSystem {
/// https://immersive-web.github.io/webxr/#dom-xr-issessionsupported
fn IsSessionSupported(&self, mode: XRSessionMode) -> Rc<Promise> {
// XXXManishearth this should select an XR device first
let promise = Promise::new(&self.global());
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let global = self.global();
let window = global.as_window();
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = if let Some(trusted) = trusted.take() {
trusted
} else {
error!("supportsSession callback called twice!");
return;
};
let message: Result<(), webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("supportsSession callback given incorrect payload");
return;
};
if let Ok(()) = message {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(true), &canceller);
} else {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(false), &canceller);
};
}),
);
window
.webxr_registry()
.supports_session(mode.into(), sender);
promise
}
/// https://immersive-web.github.io/webxr/#dom-xr-requestsession
#[allow(unsafe_code)]
fn RequestSession(
&self,
mode: XRSessionMode,
init: RootedTraceableBox<XRSessionInit>,
comp: InRealm,
) -> Rc<Promise> {
let global = self.global();
let window = global.as_window();
let promise = Promise::new_in_current_realm(&global, comp);
if mode!= XRSessionMode::Inline {
if!ScriptThread::is_user_interacting() {
if pref!(dom.webxr.unsafe_assume_user_intent) {
warn!("The dom.webxr.unsafe-assume-user-intent preference assumes user intent to enter WebXR.");
} else {
promise.reject_error(Error::Security);
return promise;
}
}
if self.pending_or_active_session() {
promise.reject_error(Error::InvalidState);
return promise;
}
self.set_pending();
}
let mut required_features = vec![];
let mut optional_features = vec![];
let cx = global.get_cx();
// We are supposed to include "viewer" and on immersive devices "local"
// by default here, but this is handled directly in requestReferenceSpace()
if let Some(ref r) = init.requiredFeatures {
for feature in r {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
required_features.push(s)
} else {
warn!("Unable to convert required feature to string");
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return promise;
}
}
}
}
if let Some(ref o) = init.optionalFeatures {
for feature in o {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
optional_features.push(s)
} else {
warn!("Unable to convert optional feature to string");
}
}
}
}
let init = SessionInit {
required_features,
optional_features,
first_person_observer_view: pref!(dom.webxr.first_person_observer_view),
};
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let this = Trusted::new(self);
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
let (frame_sender, frame_receiver) = ipc_crate::channel().unwrap();
let mut frame_receiver = Some(frame_receiver);
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = trusted.take().unwrap();
let this = this.clone();
let frame_receiver = frame_receiver.take().unwrap();
let message: Result<Session, webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("requestSession callback given incorrect payload");
return;
};
let _ = task_source.queue_with_canceller(
task!(request_session: move || {
this.root().session_obtained(message, trusted.root(), mode, frame_receiver);
}),
&canceller,
);
}),
);
window
.webxr_registry()
.request_session(mode.into(), init, sender, frame_sender);
promise
}
// https://github.com/immersive-web/webxr-test-api/blob/master/explainer.md
fn Test(&self) -> DomRoot<XRTest> {
self.test.or_init(|| XRTest::new(&self.global()))
}
}
impl XRSystem {
fn session_obtained(
&self,
response: Result<Session, XRError>,
promise: Rc<Promise>,
mode: XRSessionMode,
frame_receiver: IpcReceiver<Frame>,
) {
let session = match response {
Ok(session) => session,
Err(e) => {
warn!("Error requesting XR session: {:?}", e);
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return;
},
};
let session = XRSession::new(&self.global(), session, mode, frame_receiver);
if mode == XRSessionMode::Inline {
self.active_inline_sessions
.borrow_mut()
.push(Dom::from_ref(&*session));
} else {
self.set_active_immersive_session(&session);
}
promise.resolve_native(&session);
// https://github.com/immersive-web/webxr/issues/961
// This must be called _after_ the promise is resolved
session.setup_initial_inputs();
}
// https://github.com/immersive-web/navigation/issues/10
pub fn dispatch_sessionavailable(&self) {
let xr = Trusted::new(self);
let global = self.global();
let window = global.as_window();
window
.task_manager()
.dom_manipulation_task_source()
.queue(
task!(fire_sessionavailable_event: move || {
// The sessionavailable event indicates user intent to enter an XR session
let xr = xr.root();
let interacting = ScriptThread::is_user_interacting();
ScriptThread::set_user_interacting(true);
xr.upcast::<EventTarget>().fire_bubbling_event(atom!("sessionavailable"));
ScriptThread::set_user_interacting(interacting);
}),
window.upcast(),
)
.unwrap();
}
}
|
new
|
identifier_name
|
xrsystem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::XRSessionInit;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::{XRSessionMode, XRSystemMethods};
use crate::dom::bindings::conversions::{ConversionResult, FromJSValConvertible};
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::eventtarget::EventTarget;
use crate::dom::gamepad::Gamepad;
use crate::dom::promise::Promise;
use crate::dom::window::Window;
use crate::dom::xrsession::XRSession;
use crate::dom::xrtest::XRTest;
use crate::realms::InRealm;
use crate::script_thread::ScriptThread;
use crate::task_source::TaskSource;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self as ipc_crate, IpcReceiver};
use ipc_channel::router::ROUTER;
use msg::constellation_msg::PipelineId;
use profile_traits::ipc;
use servo_config::pref;
use std::cell::Cell;
use std::rc::Rc;
use webxr_api::{Error as XRError, Frame, Session, SessionInit, SessionMode};
#[dom_struct]
pub struct XRSystem {
eventtarget: EventTarget,
gamepads: DomRefCell<Vec<Dom<Gamepad>>>,
pending_immersive_session: Cell<bool>,
active_immersive_session: MutNullableDom<XRSession>,
active_inline_sessions: DomRefCell<Vec<Dom<XRSession>>>,
test: MutNullableDom<XRTest>,
pipeline: PipelineId,
}
impl XRSystem {
fn new_inherited(pipeline: PipelineId) -> XRSystem {
XRSystem {
eventtarget: EventTarget::new_inherited(),
gamepads: DomRefCell::new(Vec::new()),
pending_immersive_session: Cell::new(false),
active_immersive_session: Default::default(),
active_inline_sessions: DomRefCell::new(Vec::new()),
test: Default::default(),
pipeline,
}
}
pub fn new(window: &Window) -> DomRoot<XRSystem> {
reflect_dom_object(
Box::new(XRSystem::new_inherited(window.pipeline_id())),
window,
)
}
pub fn pending_or_active_session(&self) -> bool {
self.pending_immersive_session.get() || self.active_immersive_session.get().is_some()
}
pub fn set_pending(&self) {
self.pending_immersive_session.set(true)
}
pub fn set_active_immersive_session(&self, session: &XRSession) {
// XXXManishearth when we support non-immersive (inline) sessions we should
// ensure they never reach these codepaths
self.pending_immersive_session.set(false);
self.active_immersive_session.set(Some(session))
}
/// https://immersive-web.github.io/webxr/#ref-for-eventdef-xrsession-end
pub fn end_session(&self, session: &XRSession) {
// Step 3
if let Some(active) = self.active_immersive_session.get() {
if Dom::from_ref(&*active) == Dom::from_ref(session) {
self.active_immersive_session.set(None);
// Dirty the canvas, since it has been skipping this step whilst in immersive
// mode
session.dirty_layers();
}
}
self.active_inline_sessions
.borrow_mut()
.retain(|sess| Dom::from_ref(&**sess)!= Dom::from_ref(session));
}
}
impl Into<SessionMode> for XRSessionMode {
fn into(self) -> SessionMode {
match self {
XRSessionMode::Immersive_vr => SessionMode::ImmersiveVR,
XRSessionMode::Immersive_ar => SessionMode::ImmersiveAR,
XRSessionMode::Inline => SessionMode::Inline,
|
}
}
impl XRSystemMethods for XRSystem {
/// https://immersive-web.github.io/webxr/#dom-xr-issessionsupported
fn IsSessionSupported(&self, mode: XRSessionMode) -> Rc<Promise> {
// XXXManishearth this should select an XR device first
let promise = Promise::new(&self.global());
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let global = self.global();
let window = global.as_window();
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = if let Some(trusted) = trusted.take() {
trusted
} else {
error!("supportsSession callback called twice!");
return;
};
let message: Result<(), webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("supportsSession callback given incorrect payload");
return;
};
if let Ok(()) = message {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(true), &canceller);
} else {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(false), &canceller);
};
}),
);
window
.webxr_registry()
.supports_session(mode.into(), sender);
promise
}
/// https://immersive-web.github.io/webxr/#dom-xr-requestsession
#[allow(unsafe_code)]
fn RequestSession(
&self,
mode: XRSessionMode,
init: RootedTraceableBox<XRSessionInit>,
comp: InRealm,
) -> Rc<Promise> {
let global = self.global();
let window = global.as_window();
let promise = Promise::new_in_current_realm(&global, comp);
if mode!= XRSessionMode::Inline {
if!ScriptThread::is_user_interacting() {
if pref!(dom.webxr.unsafe_assume_user_intent) {
warn!("The dom.webxr.unsafe-assume-user-intent preference assumes user intent to enter WebXR.");
} else {
promise.reject_error(Error::Security);
return promise;
}
}
if self.pending_or_active_session() {
promise.reject_error(Error::InvalidState);
return promise;
}
self.set_pending();
}
let mut required_features = vec![];
let mut optional_features = vec![];
let cx = global.get_cx();
// We are supposed to include "viewer" and on immersive devices "local"
// by default here, but this is handled directly in requestReferenceSpace()
if let Some(ref r) = init.requiredFeatures {
for feature in r {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
required_features.push(s)
} else {
warn!("Unable to convert required feature to string");
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return promise;
}
}
}
}
if let Some(ref o) = init.optionalFeatures {
for feature in o {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
optional_features.push(s)
} else {
warn!("Unable to convert optional feature to string");
}
}
}
}
let init = SessionInit {
required_features,
optional_features,
first_person_observer_view: pref!(dom.webxr.first_person_observer_view),
};
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let this = Trusted::new(self);
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
let (frame_sender, frame_receiver) = ipc_crate::channel().unwrap();
let mut frame_receiver = Some(frame_receiver);
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = trusted.take().unwrap();
let this = this.clone();
let frame_receiver = frame_receiver.take().unwrap();
let message: Result<Session, webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("requestSession callback given incorrect payload");
return;
};
let _ = task_source.queue_with_canceller(
task!(request_session: move || {
this.root().session_obtained(message, trusted.root(), mode, frame_receiver);
}),
&canceller,
);
}),
);
window
.webxr_registry()
.request_session(mode.into(), init, sender, frame_sender);
promise
}
// https://github.com/immersive-web/webxr-test-api/blob/master/explainer.md
fn Test(&self) -> DomRoot<XRTest> {
self.test.or_init(|| XRTest::new(&self.global()))
}
}
impl XRSystem {
fn session_obtained(
&self,
response: Result<Session, XRError>,
promise: Rc<Promise>,
mode: XRSessionMode,
frame_receiver: IpcReceiver<Frame>,
) {
let session = match response {
Ok(session) => session,
Err(e) => {
warn!("Error requesting XR session: {:?}", e);
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return;
},
};
let session = XRSession::new(&self.global(), session, mode, frame_receiver);
if mode == XRSessionMode::Inline {
self.active_inline_sessions
.borrow_mut()
.push(Dom::from_ref(&*session));
} else {
self.set_active_immersive_session(&session);
}
promise.resolve_native(&session);
// https://github.com/immersive-web/webxr/issues/961
// This must be called _after_ the promise is resolved
session.setup_initial_inputs();
}
// https://github.com/immersive-web/navigation/issues/10
pub fn dispatch_sessionavailable(&self) {
let xr = Trusted::new(self);
let global = self.global();
let window = global.as_window();
window
.task_manager()
.dom_manipulation_task_source()
.queue(
task!(fire_sessionavailable_event: move || {
// The sessionavailable event indicates user intent to enter an XR session
let xr = xr.root();
let interacting = ScriptThread::is_user_interacting();
ScriptThread::set_user_interacting(true);
xr.upcast::<EventTarget>().fire_bubbling_event(atom!("sessionavailable"));
ScriptThread::set_user_interacting(interacting);
}),
window.upcast(),
)
.unwrap();
}
}
|
}
|
random_line_split
|
xrsystem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::XRSessionInit;
use crate::dom::bindings::codegen::Bindings::XRSystemBinding::{XRSessionMode, XRSystemMethods};
use crate::dom::bindings::conversions::{ConversionResult, FromJSValConvertible};
use crate::dom::bindings::error::Error;
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::{reflect_dom_object, DomObject};
use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use crate::dom::bindings::trace::RootedTraceableBox;
use crate::dom::eventtarget::EventTarget;
use crate::dom::gamepad::Gamepad;
use crate::dom::promise::Promise;
use crate::dom::window::Window;
use crate::dom::xrsession::XRSession;
use crate::dom::xrtest::XRTest;
use crate::realms::InRealm;
use crate::script_thread::ScriptThread;
use crate::task_source::TaskSource;
use dom_struct::dom_struct;
use ipc_channel::ipc::{self as ipc_crate, IpcReceiver};
use ipc_channel::router::ROUTER;
use msg::constellation_msg::PipelineId;
use profile_traits::ipc;
use servo_config::pref;
use std::cell::Cell;
use std::rc::Rc;
use webxr_api::{Error as XRError, Frame, Session, SessionInit, SessionMode};
#[dom_struct]
pub struct XRSystem {
eventtarget: EventTarget,
gamepads: DomRefCell<Vec<Dom<Gamepad>>>,
pending_immersive_session: Cell<bool>,
active_immersive_session: MutNullableDom<XRSession>,
active_inline_sessions: DomRefCell<Vec<Dom<XRSession>>>,
test: MutNullableDom<XRTest>,
pipeline: PipelineId,
}
impl XRSystem {
fn new_inherited(pipeline: PipelineId) -> XRSystem {
XRSystem {
eventtarget: EventTarget::new_inherited(),
gamepads: DomRefCell::new(Vec::new()),
pending_immersive_session: Cell::new(false),
active_immersive_session: Default::default(),
active_inline_sessions: DomRefCell::new(Vec::new()),
test: Default::default(),
pipeline,
}
}
pub fn new(window: &Window) -> DomRoot<XRSystem> {
reflect_dom_object(
Box::new(XRSystem::new_inherited(window.pipeline_id())),
window,
)
}
pub fn pending_or_active_session(&self) -> bool {
self.pending_immersive_session.get() || self.active_immersive_session.get().is_some()
}
pub fn set_pending(&self) {
self.pending_immersive_session.set(true)
}
pub fn set_active_immersive_session(&self, session: &XRSession)
|
/// https://immersive-web.github.io/webxr/#ref-for-eventdef-xrsession-end
pub fn end_session(&self, session: &XRSession) {
// Step 3
if let Some(active) = self.active_immersive_session.get() {
if Dom::from_ref(&*active) == Dom::from_ref(session) {
self.active_immersive_session.set(None);
// Dirty the canvas, since it has been skipping this step whilst in immersive
// mode
session.dirty_layers();
}
}
self.active_inline_sessions
.borrow_mut()
.retain(|sess| Dom::from_ref(&**sess)!= Dom::from_ref(session));
}
}
impl Into<SessionMode> for XRSessionMode {
fn into(self) -> SessionMode {
match self {
XRSessionMode::Immersive_vr => SessionMode::ImmersiveVR,
XRSessionMode::Immersive_ar => SessionMode::ImmersiveAR,
XRSessionMode::Inline => SessionMode::Inline,
}
}
}
impl XRSystemMethods for XRSystem {
/// https://immersive-web.github.io/webxr/#dom-xr-issessionsupported
fn IsSessionSupported(&self, mode: XRSessionMode) -> Rc<Promise> {
// XXXManishearth this should select an XR device first
let promise = Promise::new(&self.global());
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let global = self.global();
let window = global.as_window();
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = if let Some(trusted) = trusted.take() {
trusted
} else {
error!("supportsSession callback called twice!");
return;
};
let message: Result<(), webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("supportsSession callback given incorrect payload");
return;
};
if let Ok(()) = message {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(true), &canceller);
} else {
let _ =
task_source.queue_with_canceller(trusted.resolve_task(false), &canceller);
};
}),
);
window
.webxr_registry()
.supports_session(mode.into(), sender);
promise
}
/// https://immersive-web.github.io/webxr/#dom-xr-requestsession
#[allow(unsafe_code)]
fn RequestSession(
&self,
mode: XRSessionMode,
init: RootedTraceableBox<XRSessionInit>,
comp: InRealm,
) -> Rc<Promise> {
let global = self.global();
let window = global.as_window();
let promise = Promise::new_in_current_realm(&global, comp);
if mode!= XRSessionMode::Inline {
if!ScriptThread::is_user_interacting() {
if pref!(dom.webxr.unsafe_assume_user_intent) {
warn!("The dom.webxr.unsafe-assume-user-intent preference assumes user intent to enter WebXR.");
} else {
promise.reject_error(Error::Security);
return promise;
}
}
if self.pending_or_active_session() {
promise.reject_error(Error::InvalidState);
return promise;
}
self.set_pending();
}
let mut required_features = vec![];
let mut optional_features = vec![];
let cx = global.get_cx();
// We are supposed to include "viewer" and on immersive devices "local"
// by default here, but this is handled directly in requestReferenceSpace()
if let Some(ref r) = init.requiredFeatures {
for feature in r {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
required_features.push(s)
} else {
warn!("Unable to convert required feature to string");
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return promise;
}
}
}
}
if let Some(ref o) = init.optionalFeatures {
for feature in o {
unsafe {
if let Ok(ConversionResult::Success(s)) =
String::from_jsval(*cx, feature.handle(), ())
{
optional_features.push(s)
} else {
warn!("Unable to convert optional feature to string");
}
}
}
}
let init = SessionInit {
required_features,
optional_features,
first_person_observer_view: pref!(dom.webxr.first_person_observer_view),
};
let mut trusted = Some(TrustedPromise::new(promise.clone()));
let this = Trusted::new(self);
let (task_source, canceller) = window
.task_manager()
.dom_manipulation_task_source_with_canceller();
let (sender, receiver) = ipc::channel(global.time_profiler_chan().clone()).unwrap();
let (frame_sender, frame_receiver) = ipc_crate::channel().unwrap();
let mut frame_receiver = Some(frame_receiver);
ROUTER.add_route(
receiver.to_opaque(),
Box::new(move |message| {
// router doesn't know this is only called once
let trusted = trusted.take().unwrap();
let this = this.clone();
let frame_receiver = frame_receiver.take().unwrap();
let message: Result<Session, webxr_api::Error> = if let Ok(message) = message.to() {
message
} else {
error!("requestSession callback given incorrect payload");
return;
};
let _ = task_source.queue_with_canceller(
task!(request_session: move || {
this.root().session_obtained(message, trusted.root(), mode, frame_receiver);
}),
&canceller,
);
}),
);
window
.webxr_registry()
.request_session(mode.into(), init, sender, frame_sender);
promise
}
// https://github.com/immersive-web/webxr-test-api/blob/master/explainer.md
fn Test(&self) -> DomRoot<XRTest> {
self.test.or_init(|| XRTest::new(&self.global()))
}
}
impl XRSystem {
fn session_obtained(
&self,
response: Result<Session, XRError>,
promise: Rc<Promise>,
mode: XRSessionMode,
frame_receiver: IpcReceiver<Frame>,
) {
let session = match response {
Ok(session) => session,
Err(e) => {
warn!("Error requesting XR session: {:?}", e);
if mode!= XRSessionMode::Inline {
self.pending_immersive_session.set(false);
}
promise.reject_error(Error::NotSupported);
return;
},
};
let session = XRSession::new(&self.global(), session, mode, frame_receiver);
if mode == XRSessionMode::Inline {
self.active_inline_sessions
.borrow_mut()
.push(Dom::from_ref(&*session));
} else {
self.set_active_immersive_session(&session);
}
promise.resolve_native(&session);
// https://github.com/immersive-web/webxr/issues/961
// This must be called _after_ the promise is resolved
session.setup_initial_inputs();
}
// https://github.com/immersive-web/navigation/issues/10
pub fn dispatch_sessionavailable(&self) {
let xr = Trusted::new(self);
let global = self.global();
let window = global.as_window();
window
.task_manager()
.dom_manipulation_task_source()
.queue(
task!(fire_sessionavailable_event: move || {
// The sessionavailable event indicates user intent to enter an XR session
let xr = xr.root();
let interacting = ScriptThread::is_user_interacting();
ScriptThread::set_user_interacting(true);
xr.upcast::<EventTarget>().fire_bubbling_event(atom!("sessionavailable"));
ScriptThread::set_user_interacting(interacting);
}),
window.upcast(),
)
.unwrap();
}
}
|
{
// XXXManishearth when we support non-immersive (inline) sessions we should
// ensure they never reach these codepaths
self.pending_immersive_session.set(false);
self.active_immersive_session.set(Some(session))
}
|
identifier_body
|
mod.rs
|
// bound region from A that we find it to be associated
// with.
for (a_br, a_r) in a_map {
if tainted.iter().any(|x| x == a_r) {
debug!("generalize_region(r0={:?}): \
replacing with {:?}, tainted={:?}",
r0, *a_br, tainted);
return ty::ReLateBound(debruijn, *a_br);
}
}
infcx.tcx.sess.span_bug(
span,
&format!("region {:?} is not associated with \
any bound region from A!",
r0))
}
}
fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> cres<'tcx, Binder<T>>
where T : Combineable<'tcx>
{
debug!("{}.higher_ranked_glb({}, {})",
self.tag(), a.repr(self.tcx()), b.repr(self.tcx()));
// Make a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
return self.infcx().try(|snapshot| {
// Instantiate each bound region with a fresh region variable.
let (a_with_fresh, a_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
self.trace().origin.span(), HigherRankedType, a);
let (b_with_fresh, b_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
self.trace().origin.span(), HigherRankedType, b);
let a_vars = var_ids(self, &a_map);
let b_vars = var_ids(self, &b_map);
// Collect constraints.
let result0 =
try!(Combineable::combine(self, &a_with_fresh, &b_with_fresh));
let result0 =
self.infcx().resolve_type_vars_if_possible(&result0);
debug!("glb result0 = {}", result0.repr(self.tcx()));
// Generalize the regions appearing in result0 if possible
let new_vars = self.infcx().region_vars_confined_to_snapshot(snapshot);
let span = self.trace().origin.span();
let result1 =
fold_regions_in(
self.tcx(),
&result0,
|r, debruijn| generalize_region(self.infcx(), span, snapshot, debruijn,
&new_vars,
&a_map, &a_vars, &b_vars,
r));
debug!("glb({},{}) = {}",
a.repr(self.tcx()),
b.repr(self.tcx()),
result1.repr(self.tcx()));
Ok(ty::Binder(result1))
});
fn generalize_region(infcx: &InferCtxt,
span: Span,
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
a_vars: &[ty::RegionVid],
b_vars: &[ty::RegionVid],
r0: ty::Region) -> ty::Region {
if!is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
return r0;
}
let tainted = infcx.tainted_regions(snapshot, r0);
let mut a_r = None;
let mut b_r = None;
let mut only_new_vars = true;
for r in &tainted {
if is_var_in_set(a_vars, *r) {
if a_r.is_some() {
return fresh_bound_variable(infcx, debruijn);
} else {
a_r = Some(*r);
}
} else if is_var_in_set(b_vars, *r) {
if b_r.is_some() {
return fresh_bound_variable(infcx, debruijn);
} else {
b_r = Some(*r);
}
} else if!is_var_in_set(new_vars, *r) {
only_new_vars = false;
}
}
// NB---I do not believe this algorithm computes
// (necessarily) the GLB. As written it can
// spuriously fail. In particular, if there is a case
// like: |fn(&a)| and fn(fn(&b)), where a and b are
// free, it will return fn(&c) where c = GLB(a,b). If
// however this GLB is not defined, then the result is
// an error, even though something like
// "fn<X>(fn(&X))" where X is bound would be a
// subtype of both of those.
//
// The problem is that if we were to return a bound
// variable, we'd be computing a lower-bound, but not
// necessarily the *greatest* lower-bound.
//
// Unfortunately, this problem is non-trivial to solve,
// because we do not know at the time of computing the GLB
// whether a GLB(a,b) exists or not, because we haven't
// run region inference (or indeed, even fully computed
// the region hierarchy!). The current algorithm seems to
// works ok in practice.
if a_r.is_some() && b_r.is_some() && only_new_vars {
// Related to exactly one bound variable from each fn:
return rev_lookup(infcx, span, a_map, a_r.unwrap());
} else if a_r.is_none() && b_r.is_none() {
// Not related to bound variables from either fn:
assert!(!r0.is_bound());
return r0;
} else {
// Other:
return fresh_bound_variable(infcx, debruijn);
}
}
fn rev_lookup(infcx: &InferCtxt,
span: Span,
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
r: ty::Region) -> ty::Region
{
for (a_br, a_r) in a_map {
if *a_r == r {
return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br);
}
}
infcx.tcx.sess.span_bug(
span,
&format!("could not find original bound region for {:?}", r));
}
fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region {
infcx.region_vars.new_bound(debruijn)
}
}
}
fn var_ids<'tcx, T: Combine<'tcx>>(combiner: &T,
map: &FnvHashMap<ty::BoundRegion, ty::Region>)
-> Vec<ty::RegionVid> {
map.iter().map(|(_, r)| match *r {
ty::ReInfer(ty::ReVar(r)) => { r }
r => {
combiner.infcx().tcx.sess.span_bug(
combiner.trace().origin.span(),
&format!("found non-region-vid: {:?}", r));
}
}).collect()
}
fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool {
match r {
ty::ReInfer(ty::ReVar(ref v)) => new_vars.iter().any(|x| x == v),
_ => false
}
}
fn fold_regions_in<'tcx, T, F>(tcx: &ty::ctxt<'tcx>,
unbound_value: &T,
mut fldr: F)
-> T
where T : Combineable<'tcx>,
F : FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
{
unbound_value.fold_with(&mut ty_fold::RegionFolder::new(tcx, &mut |region, current_depth| {
// we should only be encountering "escaping" late-bound regions here,
// because the ones at the current level should have been replaced
// with fresh variables
assert!(match region {
ty::ReLateBound(..) => false,
_ => true
});
fldr(region, ty::DebruijnIndex::new(current_depth))
}))
}
impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
}
fn region_vars_confined_to_snapshot(&self,
snapshot: &CombinedSnapshot)
-> Vec<ty::RegionVid>
{
/*!
* Returns the set of region variables that do not affect any
* types/regions which existed before `snapshot` was
* started. This is used in the sub/lub/glb computations. The
* idea here is that when we are computing lub/glb of two
* regions, we sometimes create intermediate region variables.
* Those region variables may touch some of the skolemized or
* other "forbidden" regions we created to replace bound
* regions, but they don't really represent an "external"
* constraint.
*
* However, sometimes fresh variables are created for other
* purposes too, and those *may* represent an external
* constraint. In particular, when a type variable is
* instantiated, we create region variables for all the
* regions that appear within, and if that type variable
* pre-existed the snapshot, then those region variables
* represent external constraints.
*
* An example appears in the unit test
* `sub_free_bound_false_infer`. In this test, we want to
* know whether
*
* ```rust
* fn(_#0t) <: for<'a> fn(&'a int)
* ```
*
* Note that the subtype has a type variable. Because the type
* variable can't be instantiated with a region that is bound
* in the fn signature, this comparison ought to fail. But if
* we're not careful, it will succeed.
*
* The reason is that when we walk through the subtyping
* algorith, we begin by replacing `'a` with a skolemized
* variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This
* can be made true by unifying `_#0t` with `&'1 int`. In the
* process, we create a fresh variable for the skolemized
* region, `'$2`, and hence we have that `_#0t == &'$2
* int`. However, because `'$2` was created during the sub
* computation, if we're not careful we will erroneously
* assume it is one of the transient region variables
* representing a lub/glb internally. Not good.
*
* To prevent this, we check for type variables which were
* unified during the snapshot, and say that any region
* variable created during the snapshot but which finds its
* way into a type variable is considered to "escape" the
* snapshot.
*/
let mut region_vars =
self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot);
let escaping_types =
self.type_variables.borrow().types_escaping_snapshot(&snapshot.type_snapshot);
let escaping_region_vars: FnvHashSet<_> =
escaping_types
.iter()
.flat_map(|&t| ty_fold::collect_regions(self.tcx, &t).into_iter())
.collect();
region_vars.retain(|®ion_vid| {
let r = ty::ReInfer(ty::ReVar(region_vid));
!escaping_region_vars.contains(&r)
});
debug!("region_vars_confined_to_snapshot: region_vars={} escaping_types={}",
region_vars.repr(self.tcx),
escaping_types.repr(self.tcx));
region_vars
}
}
pub fn skolemize_late_bound_regions<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
binder: &ty::Binder<T>,
snapshot: &CombinedSnapshot)
-> (T, SkolemizationMap)
where T : TypeFoldable<'tcx> + Repr<'tcx>
{
/*!
* Replace all regions bound by `binder` with skolemized regions and
* return a map indicating which bound-region was replaced with what
* skolemized region. This is the first step of checking subtyping
* when higher-ranked things are involved. See `doc.rs` for more details.
*/
let (result, map) = ty::replace_late_bound_regions(infcx.tcx, binder, |br| {
infcx.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot)
});
debug!("skolemize_bound_regions(binder={}, result={}, map={})",
binder.repr(infcx.tcx),
result.repr(infcx.tcx),
map.repr(infcx.tcx));
(result, map)
}
pub fn leak_check<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>,
skol_map: &SkolemizationMap,
snapshot: &CombinedSnapshot)
-> Result<(),(ty::BoundRegion,ty::Region)>
{
/*!
* Searches the region constriants created since `snapshot` was started
* and checks to determine whether any of the skolemized regions created
* in `skol_map` would "escape" -- meaning that they are related to
* other regions in some way. If so, the higher-ranked subtyping doesn't
* hold. See `doc.rs` for more details.
*/
debug!("leak_check: skol_map={}",
skol_map.repr(infcx.tcx));
let new_vars = infcx.region_vars_confined_to_snapshot(snapshot);
for (&skol_br, &skol) in skol_map {
let tainted = infcx.tainted_regions(snapshot, skol);
for &tainted_region in &tainted {
// Each skolemized should only be relatable to itself
// or new variables:
match tainted_region {
ty::ReInfer(ty::ReVar(vid)) => {
if new_vars.iter().any(|&x| x == vid) { continue; }
}
_ => {
if tainted_region == skol { continue; }
}
};
debug!("{} (which replaced {}) is tainted by {}",
skol.repr(infcx.tcx),
skol_br.repr(infcx.tcx),
tainted_region.repr(infcx.tcx));
// A is not as polymorphic as B:
return Err((skol_br, tainted_region));
}
}
Ok(())
}
/// This code converts from skolemized regions back to late-bound
/// regions. It works by replacing each region in the taint set of a
/// skolemized region with a bound-region. The bound region will be bound
/// by the outer-most binder in `value`; the caller must ensure that there is
/// such a binder and it is the right place.
///
/// This routine is only intended to be used when the leak-check has
/// passed; currently, it's used in the trait matching code to create
/// a set of nested obligations frmo an impl that matches against
/// something higher-ranked. More details can be found in
/// `middle::traits::doc.rs`.
///
/// As a brief example, consider the obligation `for<'a> Fn(&'a int)
/// -> &'a int`, and the impl:
///
/// impl<A,R> Fn<A,R> for SomethingOrOther
/// where A : Clone
/// {... }
///
/// Here we will have replaced `'a` with a skolemized region
/// `'0`. This means that our substitution will be `{A=>&'0
/// int, R=>&'0 int}`.
///
/// When we apply the substitution to the bounds, we will wind up with
/// `&'0 int : Clone` as a predicate. As a last step, we then go and
/// replace `'0` with a late-bound region `'a`. The depth is matched
/// to the depth of the predicate, in this case 1, so that the final
/// predicate is `for<'a> &'a int : Clone`.
pub fn plug_leaks<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>,
skol_map: SkolemizationMap,
snapshot: &CombinedSnapshot,
value: &T)
-> T
where T : TypeFoldable<'tcx> + Repr<'tcx>
|
{
debug_assert!(leak_check(infcx, &skol_map, snapshot).is_ok());
debug!("plug_leaks(skol_map={}, value={})",
skol_map.repr(infcx.tcx),
value.repr(infcx.tcx));
// Compute a mapping from the "taint set" of each skolemized
// region back to the `ty::BoundRegion` that it originally
// represented. Because `leak_check` passed, we know that that
// these taint sets are mutually disjoint.
let inv_skol_map: FnvHashMap<ty::Region, ty::BoundRegion> =
skol_map
.into_iter()
.flat_map(|(skol_br, skol)| {
infcx.tainted_regions(snapshot, skol)
.into_iter()
.map(move |tainted_region| (tainted_region, skol_br))
})
.collect();
|
identifier_body
|
|
mod.rs
|
()));
// Compare types now that bound regions have been replaced.
let result = try!(Combineable::combine(self, &a_prime, &b_prime));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
match leak_check(self.infcx(), &skol_map, snapshot) {
Ok(()) => { }
Err((skol_br, tainted_region)) => {
if self.a_is_expected() {
debug!("Not as polymorphic!");
return Err(ty::terr_regions_insufficiently_polymorphic(skol_br,
tainted_region));
} else {
debug!("Overly polymorphic!");
return Err(ty::terr_regions_overly_polymorphic(skol_br,
tainted_region));
}
}
}
debug!("higher_ranked_sub: OK result={}",
result.repr(self.tcx()));
Ok(ty::Binder(result))
});
}
fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> cres<'tcx, Binder<T>>
where T : Combineable<'tcx>
{
// Start a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
return self.infcx().try(|snapshot| {
// Instantiate each bound region with a fresh region variable.
let span = self.trace().origin.span();
let (a_with_fresh, a_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
span, HigherRankedType, a);
let (b_with_fresh, _) =
self.infcx().replace_late_bound_regions_with_fresh_var(
span, HigherRankedType, b);
// Collect constraints.
let result0 =
try!(Combineable::combine(self, &a_with_fresh, &b_with_fresh));
let result0 =
self.infcx().resolve_type_vars_if_possible(&result0);
debug!("lub result0 = {}", result0.repr(self.tcx()));
// Generalize the regions appearing in result0 if possible
let new_vars = self.infcx().region_vars_confined_to_snapshot(snapshot);
let span = self.trace().origin.span();
let result1 =
fold_regions_in(
self.tcx(),
&result0,
|r, debruijn| generalize_region(self.infcx(), span, snapshot, debruijn,
&new_vars, &a_map, r));
debug!("lub({},{}) = {}",
a.repr(self.tcx()),
b.repr(self.tcx()),
result1.repr(self.tcx()));
Ok(ty::Binder(result1))
});
fn generalize_region(infcx: &InferCtxt,
span: Span,
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
r0: ty::Region)
-> ty::Region {
// Regions that pre-dated the LUB computation stay as they are.
if!is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
debug!("generalize_region(r0={:?}): not new variable", r0);
return r0;
}
let tainted = infcx.tainted_regions(snapshot, r0);
// Variables created during LUB computation which are
// *related* to regions that pre-date the LUB computation
// stay as they are.
if!tainted.iter().all(|r| is_var_in_set(new_vars, *r)) {
debug!("generalize_region(r0={:?}): \
non-new-variables found in {:?}",
r0, tainted);
assert!(!r0.is_bound());
return r0;
}
// Otherwise, the variable must be associated with at
// least one of the variables representing bound regions
// in both A and B. Replace the variable with the "first"
// bound region from A that we find it to be associated
// with.
for (a_br, a_r) in a_map {
if tainted.iter().any(|x| x == a_r) {
debug!("generalize_region(r0={:?}): \
replacing with {:?}, tainted={:?}",
r0, *a_br, tainted);
return ty::ReLateBound(debruijn, *a_br);
}
}
infcx.tcx.sess.span_bug(
span,
&format!("region {:?} is not associated with \
any bound region from A!",
r0))
}
}
fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> cres<'tcx, Binder<T>>
where T : Combineable<'tcx>
{
debug!("{}.higher_ranked_glb({}, {})",
self.tag(), a.repr(self.tcx()), b.repr(self.tcx()));
// Make a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
return self.infcx().try(|snapshot| {
// Instantiate each bound region with a fresh region variable.
let (a_with_fresh, a_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
self.trace().origin.span(), HigherRankedType, a);
let (b_with_fresh, b_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
self.trace().origin.span(), HigherRankedType, b);
let a_vars = var_ids(self, &a_map);
let b_vars = var_ids(self, &b_map);
// Collect constraints.
let result0 =
try!(Combineable::combine(self, &a_with_fresh, &b_with_fresh));
let result0 =
self.infcx().resolve_type_vars_if_possible(&result0);
debug!("glb result0 = {}", result0.repr(self.tcx()));
// Generalize the regions appearing in result0 if possible
let new_vars = self.infcx().region_vars_confined_to_snapshot(snapshot);
let span = self.trace().origin.span();
let result1 =
fold_regions_in(
self.tcx(),
&result0,
|r, debruijn| generalize_region(self.infcx(), span, snapshot, debruijn,
&new_vars,
&a_map, &a_vars, &b_vars,
r));
debug!("glb({},{}) = {}",
a.repr(self.tcx()),
b.repr(self.tcx()),
result1.repr(self.tcx()));
Ok(ty::Binder(result1))
});
fn generalize_region(infcx: &InferCtxt,
span: Span,
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
a_vars: &[ty::RegionVid],
b_vars: &[ty::RegionVid],
r0: ty::Region) -> ty::Region {
if!is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
return r0;
}
let tainted = infcx.tainted_regions(snapshot, r0);
let mut a_r = None;
let mut b_r = None;
let mut only_new_vars = true;
for r in &tainted {
if is_var_in_set(a_vars, *r) {
if a_r.is_some() {
return fresh_bound_variable(infcx, debruijn);
} else {
a_r = Some(*r);
}
} else if is_var_in_set(b_vars, *r) {
if b_r.is_some() {
return fresh_bound_variable(infcx, debruijn);
} else {
b_r = Some(*r);
}
} else if!is_var_in_set(new_vars, *r) {
only_new_vars = false;
}
}
// NB---I do not believe this algorithm computes
// (necessarily) the GLB. As written it can
// spuriously fail. In particular, if there is a case
// like: |fn(&a)| and fn(fn(&b)), where a and b are
// free, it will return fn(&c) where c = GLB(a,b). If
// however this GLB is not defined, then the result is
// an error, even though something like
// "fn<X>(fn(&X))" where X is bound would be a
// subtype of both of those.
//
// The problem is that if we were to return a bound
// variable, we'd be computing a lower-bound, but not
// necessarily the *greatest* lower-bound.
//
// Unfortunately, this problem is non-trivial to solve,
// because we do not know at the time of computing the GLB
// whether a GLB(a,b) exists or not, because we haven't
// run region inference (or indeed, even fully computed
// the region hierarchy!). The current algorithm seems to
// works ok in practice.
if a_r.is_some() && b_r.is_some() && only_new_vars {
// Related to exactly one bound variable from each fn:
return rev_lookup(infcx, span, a_map, a_r.unwrap());
} else if a_r.is_none() && b_r.is_none() {
// Not related to bound variables from either fn:
assert!(!r0.is_bound());
return r0;
} else {
// Other:
return fresh_bound_variable(infcx, debruijn);
}
}
fn rev_lookup(infcx: &InferCtxt,
span: Span,
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
r: ty::Region) -> ty::Region
{
for (a_br, a_r) in a_map {
if *a_r == r {
return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br);
}
}
infcx.tcx.sess.span_bug(
span,
&format!("could not find original bound region for {:?}", r));
}
fn
|
(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region {
infcx.region_vars.new_bound(debruijn)
}
}
}
fn var_ids<'tcx, T: Combine<'tcx>>(combiner: &T,
map: &FnvHashMap<ty::BoundRegion, ty::Region>)
-> Vec<ty::RegionVid> {
map.iter().map(|(_, r)| match *r {
ty::ReInfer(ty::ReVar(r)) => { r }
r => {
combiner.infcx().tcx.sess.span_bug(
combiner.trace().origin.span(),
&format!("found non-region-vid: {:?}", r));
}
}).collect()
}
fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool {
match r {
ty::ReInfer(ty::ReVar(ref v)) => new_vars.iter().any(|x| x == v),
_ => false
}
}
fn fold_regions_in<'tcx, T, F>(tcx: &ty::ctxt<'tcx>,
unbound_value: &T,
mut fldr: F)
-> T
where T : Combineable<'tcx>,
F : FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
{
unbound_value.fold_with(&mut ty_fold::RegionFolder::new(tcx, &mut |region, current_depth| {
// we should only be encountering "escaping" late-bound regions here,
// because the ones at the current level should have been replaced
// with fresh variables
assert!(match region {
ty::ReLateBound(..) => false,
_ => true
});
fldr(region, ty::DebruijnIndex::new(current_depth))
}))
}
impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
}
fn region_vars_confined_to_snapshot(&self,
snapshot: &CombinedSnapshot)
-> Vec<ty::RegionVid>
{
/*!
* Returns the set of region variables that do not affect any
* types/regions which existed before `snapshot` was
* started. This is used in the sub/lub/glb computations. The
* idea here is that when we are computing lub/glb of two
* regions, we sometimes create intermediate region variables.
* Those region variables may touch some of the skolemized or
* other "forbidden" regions we created to replace bound
* regions, but they don't really represent an "external"
* constraint.
*
* However, sometimes fresh variables are created for other
* purposes too, and those *may* represent an external
* constraint. In particular, when a type variable is
* instantiated, we create region variables for all the
* regions that appear within, and if that type variable
* pre-existed the snapshot, then those region variables
* represent external constraints.
*
* An example appears in the unit test
* `sub_free_bound_false_infer`. In this test, we want to
* know whether
*
* ```rust
* fn(_#0t) <: for<'a> fn(&'a int)
* ```
*
* Note that the subtype has a type variable. Because the type
* variable can't be instantiated with a region that is bound
* in the fn signature, this comparison ought to fail. But if
* we're not careful, it will succeed.
*
* The reason is that when we walk through the subtyping
* algorith, we begin by replacing `'a` with a skolemized
* variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This
* can be made true by unifying `_#0t` with `&'1 int`. In the
* process, we create a fresh variable for the skolemized
* region, `'$2`, and hence we have that `_#0t == &'$2
* int`. However, because `'$2` was created during the sub
* computation, if we're not careful we will erroneously
* assume it is one of the transient region variables
* representing a lub/glb internally. Not good.
*
* To prevent this, we check for type variables which were
* unified during the snapshot, and say that any region
* variable created during the snapshot but which finds its
* way into a type variable is considered to "escape" the
* snapshot.
*/
let mut region_vars =
self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot);
let escaping_types =
self.type_variables.borrow().types_escaping_snapshot(&snapshot.type_snapshot);
let escaping_region_vars: FnvHashSet<_> =
escaping_types
.iter()
.flat_map(|&t| ty_fold::collect_regions(self.tcx, &t).
|
fresh_bound_variable
|
identifier_name
|
mod.rs
|
.tcx()));
// Compare types now that bound regions have been replaced.
let result = try!(Combineable::combine(self, &a_prime, &b_prime));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
match leak_check(self.infcx(), &skol_map, snapshot) {
Ok(()) => { }
Err((skol_br, tainted_region)) => {
if self.a_is_expected() {
debug!("Not as polymorphic!");
return Err(ty::terr_regions_insufficiently_polymorphic(skol_br,
tainted_region));
} else {
debug!("Overly polymorphic!");
return Err(ty::terr_regions_overly_polymorphic(skol_br,
tainted_region));
}
}
}
debug!("higher_ranked_sub: OK result={}",
result.repr(self.tcx()));
Ok(ty::Binder(result))
});
}
fn higher_ranked_lub<T>(&self, a: &Binder<T>, b: &Binder<T>) -> cres<'tcx, Binder<T>>
where T : Combineable<'tcx>
{
// Start a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
return self.infcx().try(|snapshot| {
// Instantiate each bound region with a fresh region variable.
let span = self.trace().origin.span();
let (a_with_fresh, a_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
span, HigherRankedType, a);
let (b_with_fresh, _) =
self.infcx().replace_late_bound_regions_with_fresh_var(
span, HigherRankedType, b);
// Collect constraints.
let result0 =
try!(Combineable::combine(self, &a_with_fresh, &b_with_fresh));
let result0 =
self.infcx().resolve_type_vars_if_possible(&result0);
debug!("lub result0 = {}", result0.repr(self.tcx()));
// Generalize the regions appearing in result0 if possible
let new_vars = self.infcx().region_vars_confined_to_snapshot(snapshot);
let span = self.trace().origin.span();
let result1 =
fold_regions_in(
self.tcx(),
&result0,
|r, debruijn| generalize_region(self.infcx(), span, snapshot, debruijn,
&new_vars, &a_map, r));
debug!("lub({},{}) = {}",
a.repr(self.tcx()),
b.repr(self.tcx()),
result1.repr(self.tcx()));
Ok(ty::Binder(result1))
});
fn generalize_region(infcx: &InferCtxt,
span: Span,
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
r0: ty::Region)
-> ty::Region {
// Regions that pre-dated the LUB computation stay as they are.
if!is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
debug!("generalize_region(r0={:?}): not new variable", r0);
return r0;
}
let tainted = infcx.tainted_regions(snapshot, r0);
// Variables created during LUB computation which are
// *related* to regions that pre-date the LUB computation
// stay as they are.
if!tainted.iter().all(|r| is_var_in_set(new_vars, *r)) {
debug!("generalize_region(r0={:?}): \
non-new-variables found in {:?}",
r0, tainted);
assert!(!r0.is_bound());
return r0;
}
// Otherwise, the variable must be associated with at
// least one of the variables representing bound regions
// in both A and B. Replace the variable with the "first"
// bound region from A that we find it to be associated
// with.
for (a_br, a_r) in a_map {
if tainted.iter().any(|x| x == a_r) {
debug!("generalize_region(r0={:?}): \
replacing with {:?}, tainted={:?}",
r0, *a_br, tainted);
return ty::ReLateBound(debruijn, *a_br);
}
}
infcx.tcx.sess.span_bug(
span,
&format!("region {:?} is not associated with \
any bound region from A!",
r0))
}
}
fn higher_ranked_glb<T>(&self, a: &Binder<T>, b: &Binder<T>) -> cres<'tcx, Binder<T>>
where T : Combineable<'tcx>
{
debug!("{}.higher_ranked_glb({}, {})",
self.tag(), a.repr(self.tcx()), b.repr(self.tcx()));
// Make a snapshot so we can examine "all bindings that were
// created as part of this type comparison".
return self.infcx().try(|snapshot| {
// Instantiate each bound region with a fresh region variable.
let (a_with_fresh, a_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
self.trace().origin.span(), HigherRankedType, a);
let (b_with_fresh, b_map) =
self.infcx().replace_late_bound_regions_with_fresh_var(
self.trace().origin.span(), HigherRankedType, b);
let a_vars = var_ids(self, &a_map);
let b_vars = var_ids(self, &b_map);
// Collect constraints.
let result0 =
try!(Combineable::combine(self, &a_with_fresh, &b_with_fresh));
let result0 =
self.infcx().resolve_type_vars_if_possible(&result0);
debug!("glb result0 = {}", result0.repr(self.tcx()));
// Generalize the regions appearing in result0 if possible
let new_vars = self.infcx().region_vars_confined_to_snapshot(snapshot);
let span = self.trace().origin.span();
let result1 =
fold_regions_in(
self.tcx(),
&result0,
|r, debruijn| generalize_region(self.infcx(), span, snapshot, debruijn,
&new_vars,
&a_map, &a_vars, &b_vars,
r));
debug!("glb({},{}) = {}",
a.repr(self.tcx()),
b.repr(self.tcx()),
result1.repr(self.tcx()));
Ok(ty::Binder(result1))
});
fn generalize_region(infcx: &InferCtxt,
span: Span,
snapshot: &CombinedSnapshot,
debruijn: ty::DebruijnIndex,
new_vars: &[ty::RegionVid],
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
a_vars: &[ty::RegionVid],
b_vars: &[ty::RegionVid],
r0: ty::Region) -> ty::Region {
if!is_var_in_set(new_vars, r0) {
assert!(!r0.is_bound());
return r0;
}
|
let mut a_r = None;
let mut b_r = None;
let mut only_new_vars = true;
for r in &tainted {
if is_var_in_set(a_vars, *r) {
if a_r.is_some() {
return fresh_bound_variable(infcx, debruijn);
} else {
a_r = Some(*r);
}
} else if is_var_in_set(b_vars, *r) {
if b_r.is_some() {
return fresh_bound_variable(infcx, debruijn);
} else {
b_r = Some(*r);
}
} else if!is_var_in_set(new_vars, *r) {
only_new_vars = false;
}
}
// NB---I do not believe this algorithm computes
// (necessarily) the GLB. As written it can
// spuriously fail. In particular, if there is a case
// like: |fn(&a)| and fn(fn(&b)), where a and b are
// free, it will return fn(&c) where c = GLB(a,b). If
// however this GLB is not defined, then the result is
// an error, even though something like
// "fn<X>(fn(&X))" where X is bound would be a
// subtype of both of those.
//
// The problem is that if we were to return a bound
// variable, we'd be computing a lower-bound, but not
// necessarily the *greatest* lower-bound.
//
// Unfortunately, this problem is non-trivial to solve,
// because we do not know at the time of computing the GLB
// whether a GLB(a,b) exists or not, because we haven't
// run region inference (or indeed, even fully computed
// the region hierarchy!). The current algorithm seems to
// works ok in practice.
if a_r.is_some() && b_r.is_some() && only_new_vars {
// Related to exactly one bound variable from each fn:
return rev_lookup(infcx, span, a_map, a_r.unwrap());
} else if a_r.is_none() && b_r.is_none() {
// Not related to bound variables from either fn:
assert!(!r0.is_bound());
return r0;
} else {
// Other:
return fresh_bound_variable(infcx, debruijn);
}
}
fn rev_lookup(infcx: &InferCtxt,
span: Span,
a_map: &FnvHashMap<ty::BoundRegion, ty::Region>,
r: ty::Region) -> ty::Region
{
for (a_br, a_r) in a_map {
if *a_r == r {
return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br);
}
}
infcx.tcx.sess.span_bug(
span,
&format!("could not find original bound region for {:?}", r));
}
fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region {
infcx.region_vars.new_bound(debruijn)
}
}
}
fn var_ids<'tcx, T: Combine<'tcx>>(combiner: &T,
map: &FnvHashMap<ty::BoundRegion, ty::Region>)
-> Vec<ty::RegionVid> {
map.iter().map(|(_, r)| match *r {
ty::ReInfer(ty::ReVar(r)) => { r }
r => {
combiner.infcx().tcx.sess.span_bug(
combiner.trace().origin.span(),
&format!("found non-region-vid: {:?}", r));
}
}).collect()
}
fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool {
match r {
ty::ReInfer(ty::ReVar(ref v)) => new_vars.iter().any(|x| x == v),
_ => false
}
}
fn fold_regions_in<'tcx, T, F>(tcx: &ty::ctxt<'tcx>,
unbound_value: &T,
mut fldr: F)
-> T
where T : Combineable<'tcx>,
F : FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region,
{
unbound_value.fold_with(&mut ty_fold::RegionFolder::new(tcx, &mut |region, current_depth| {
// we should only be encountering "escaping" late-bound regions here,
// because the ones at the current level should have been replaced
// with fresh variables
assert!(match region {
ty::ReLateBound(..) => false,
_ => true
});
fldr(region, ty::DebruijnIndex::new(current_depth))
}))
}
impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> {
fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec<ty::Region> {
self.region_vars.tainted(&snapshot.region_vars_snapshot, r)
}
fn region_vars_confined_to_snapshot(&self,
snapshot: &CombinedSnapshot)
-> Vec<ty::RegionVid>
{
/*!
* Returns the set of region variables that do not affect any
* types/regions which existed before `snapshot` was
* started. This is used in the sub/lub/glb computations. The
* idea here is that when we are computing lub/glb of two
* regions, we sometimes create intermediate region variables.
* Those region variables may touch some of the skolemized or
* other "forbidden" regions we created to replace bound
* regions, but they don't really represent an "external"
* constraint.
*
* However, sometimes fresh variables are created for other
* purposes too, and those *may* represent an external
* constraint. In particular, when a type variable is
* instantiated, we create region variables for all the
* regions that appear within, and if that type variable
* pre-existed the snapshot, then those region variables
* represent external constraints.
*
* An example appears in the unit test
* `sub_free_bound_false_infer`. In this test, we want to
* know whether
*
* ```rust
* fn(_#0t) <: for<'a> fn(&'a int)
* ```
*
* Note that the subtype has a type variable. Because the type
* variable can't be instantiated with a region that is bound
* in the fn signature, this comparison ought to fail. But if
* we're not careful, it will succeed.
*
* The reason is that when we walk through the subtyping
* algorith, we begin by replacing `'a` with a skolemized
* variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This
* can be made true by unifying `_#0t` with `&'1 int`. In the
* process, we create a fresh variable for the skolemized
* region, `'$2`, and hence we have that `_#0t == &'$2
* int`. However, because `'$2` was created during the sub
* computation, if we're not careful we will erroneously
* assume it is one of the transient region variables
* representing a lub/glb internally. Not good.
*
* To prevent this, we check for type variables which were
* unified during the snapshot, and say that any region
* variable created during the snapshot but which finds its
* way into a type variable is considered to "escape" the
* snapshot.
*/
let mut region_vars =
self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot);
let escaping_types =
self.type_variables.borrow().types_escaping_snapshot(&snapshot.type_snapshot);
let escaping_region_vars: FnvHashSet<_> =
escaping_types
.iter()
.flat_map(|&t| ty_fold::collect_regions(self.tcx, &t).into
|
let tainted = infcx.tainted_regions(snapshot, r0);
|
random_line_split
|
intf.rs
|
//! formatter for unsigned and signed int subs
//! unsigned ints: %X %x (hex u64) %o (octal u64) %u (base ten u64)
//! signed ints: %i %d (both base ten i64)
use std::u64;
use std::i64;
use super::super::format_field::FormatField;
use super::super::formatter::{InPrefix, FormatPrimitive, Base, Formatter, warn_incomplete_conv,
get_it_at};
pub struct Intf {
a: u32,
}
// see the Intf::analyze() function below
struct IntAnalysis {
check_past_max: bool,
past_max: bool,
is_zero: bool,
len_digits: u8,
}
impl Intf {
pub fn new() -> Intf {
Intf { a: 0 }
}
// take a ref to argument string, and basic information
// about prefix (offset, radix, sign), and analyze string
// to gain the IntAnalysis information above
// check_past_max: true if the number *may* be above max,
// but we don't know either way. One of several reasons
// we may have to parse as int.
// past_max: true if the object is past max, false if not
// in the future we should probably combine these into an
// Option<bool>
// is_zero: true if number is zero, false otherwise
// len_digits: length of digits used to create the int
// important, for example, if we run into a non-valid character
fn analyze(str_in: &str, signed_out: bool, inprefix: &InPrefix) -> IntAnalysis {
// the maximum number of digits we could conceivably
// have before the decimal point without exceeding the
// max
let mut str_it = get_it_at(inprefix.offset, str_in);
let max_sd_in = if signed_out {
match inprefix.radix_in {
Base::Ten => 19,
Base::Octal => 21,
Base::Hex => 16,
}
} else {
match inprefix.radix_in {
Base::Ten => 20,
Base::Octal => 22,
Base::Hex => 16,
}
};
let mut ret = IntAnalysis {
check_past_max: false,
past_max: false,
is_zero: false,
len_digits: 0,
};
// todo turn this to a while let now that we know
// no special behavior on EOI break
loop {
let c_opt = str_it.next();
if let Some(c) = c_opt {
match c {
'0'...'9' | 'a'...'f' | 'A'...'F' => {
if ret.len_digits == 0 && c == '0' {
ret.is_zero = true;
} else if ret.is_zero {
ret.is_zero = false;
}
ret.len_digits += 1;
if ret.len_digits == max_sd_in {
if let Some(next_ch) = str_it.next() {
match next_ch {
'0'...'9' => {
ret.past_max = true;
}
_ => {
// force conversion
// to check if its above max.
// todo: spin out convert
// into fn, call it here to try
// read val, on Ok()
// save val for reuse later
// that way on same-base in and out
// we don't needlessly convert int
// to str, we can just copy it over.
ret.check_past_max = true;
str_it.put_back(next_ch);
}
}
if ret.past_max {
break;
}
} else {
ret.check_past_max = true;
}
}
}
_ => {
warn_incomplete_conv(str_in);
break;
}
}
} else {
// breaks on EOL
break;
}
}
ret
}
// get a FormatPrimitive of the maximum value for the field char
// and given sign
fn get_max(fchar: char, sign: i8) -> FormatPrimitive {
let mut fmt_prim: FormatPrimitive = Default::default();
fmt_prim.pre_decimal = Some(String::from(match fchar {
'd' | 'i' => {
match sign {
1 => "9223372036854775807",
_ => {
fmt_prim.prefix = Some(String::from("-"));
"9223372036854775808"
}
}
}
'x' | 'X' => "ffffffffffffffff",
'o' => "1777777777777777777777",
'u' | _ => "18446744073709551615",
}));
fmt_prim
}
// conv_from_segment contract:
// 1. takes
// - a string that begins with a non-zero digit, and proceeds
// with zero or more following digits until the end of the string
// - a radix to interpret those digits as
// - a char that communicates:
// whether to interpret+output the string as an i64 or u64
// what radix to write the parsed number as.
// 2. parses it as a rust integral type
// 3. outputs FormatPrimitive with:
// - if the string falls within bounds:
// number parsed and written in the correct radix
// - if the string falls outside bounds:
// for i64 output, the int minimum or int max (depending on sign)
// for u64 output, the u64 max in the output radix
fn conv_from_segment(segment: &str, radix_in: Base, fchar: char, sign: i8) -> FormatPrimitive {
match fchar {
'i' | 'd' => {
match i64::from_str_radix(segment, radix_in as u32) {
Ok(i) => {
let mut fmt_prim: FormatPrimitive = Default::default();
if sign == -1 {
fmt_prim.prefix = Some(String::from("-"));
}
fmt_prim.pre_decimal = Some(format!("{}", i));
fmt_prim
}
Err(_) => Intf::get_max(fchar, sign),
}
}
_ => {
match u64::from_str_radix(segment, radix_in as u32) {
Ok(u) => {
let mut fmt_prim: FormatPrimitive = Default::default();
let u_f = if sign == -1 {
u64::MAX - (u - 1)
} else {
u
};
fmt_prim.pre_decimal = Some(match fchar {
'X' => format!("{:X}", u_f),
'x' => format!("{:x}", u_f),
'o' => format!("{:o}", u_f),
_ => format!("{}", u_f),
});
fmt_prim
}
Err(_) => Intf::get_max(fchar, sign),
}
}
}
}
}
impl Formatter for Intf {
fn get_primitive(&self,
field: &FormatField,
inprefix: &InPrefix,
str_in: &str)
-> Option<FormatPrimitive> {
let begin = inprefix.offset;
// get information about the string. see Intf::Analyze
// def above.
let convert_hints = Intf::analyze(str_in,
*field.field_char == 'i' || *field.field_char == 'd',
inprefix);
// We always will have a formatprimitive to return
Some(if convert_hints.len_digits == 0 || convert_hints.is_zero {
// if non-digit or end is reached before a non-zero digit
let mut fmt_prim: FormatPrimitive = Default::default();
fmt_prim.pre_decimal = Some(String::from("0"));
fmt_prim
} else if!convert_hints.past_max {
// if the number is or may be below the bounds limit
let radix_out = match *field.field_char {
'd' | 'i' | 'u' => Base::Ten,
'x' | 'X' => Base::Hex,
'o' | _ => Base::Octal,
};
let radix_mismatch =!radix_out.eq(&inprefix.radix_in);
let decr_from_max: bool = inprefix.sign == -1 && *field.field_char!= 'i';
let end = begin + convert_hints.len_digits as usize;
// convert to int if any one of these is true:
// - number of digits in int indicates it may be past max
// - we're subtracting from the max
// - we're converting the base
if convert_hints.check_past_max || decr_from_max || radix_mismatch {
// radix of in and out is the same.
let segment = String::from(&str_in[begin..end]);
let m = Intf::conv_from_segment(&segment,
inprefix.radix_in.clone(),
*field.field_char,
inprefix.sign);
m
} else {
// otherwise just do a straight string copy.
let mut fmt_prim: FormatPrimitive = Default::default();
// this is here and not earlier because
// zero doesn't get a sign, and conv_from_segment
// creates its format primitive separately
if inprefix.sign == -1 && *field.field_char == 'i' {
fmt_prim.prefix = Some(String::from("-"));
}
fmt_prim.pre_decimal = Some(String::from(&str_in[begin..end]));
fmt_prim
}
} else {
Intf::get_max(*field.field_char, inprefix.sign)
})
}
fn
|
(&self, prim: &FormatPrimitive, field: FormatField) -> String {
let mut finalstr: String = String::new();
match prim.prefix {
Some(ref prefix) => {
finalstr.push_str(&prefix);
}
None => {}
}
// integral second fields is zero-padded minimum-width
// which gets handled before general minimum-width
match prim.pre_decimal {
Some(ref pre_decimal) => {
match field.second_field {
Some(min) => {
let mut i = min;
let len = pre_decimal.len() as u32;
while i > len {
finalstr.push('0');
i -= 1;
}
}
None => {}
}
finalstr.push_str(&pre_decimal);
}
None => {
panic!("error, format primitives provided to int, will, incidentally under \
correct behavior, always have a pre_dec value.");
}
}
finalstr
}
}
|
primitive_to_str
|
identifier_name
|
intf.rs
|
//! formatter for unsigned and signed int subs
//! unsigned ints: %X %x (hex u64) %o (octal u64) %u (base ten u64)
//! signed ints: %i %d (both base ten i64)
use std::u64;
use std::i64;
use super::super::format_field::FormatField;
use super::super::formatter::{InPrefix, FormatPrimitive, Base, Formatter, warn_incomplete_conv,
get_it_at};
pub struct Intf {
a: u32,
}
// see the Intf::analyze() function below
struct IntAnalysis {
check_past_max: bool,
past_max: bool,
is_zero: bool,
len_digits: u8,
}
impl Intf {
pub fn new() -> Intf {
Intf { a: 0 }
}
// take a ref to argument string, and basic information
// about prefix (offset, radix, sign), and analyze string
// to gain the IntAnalysis information above
// check_past_max: true if the number *may* be above max,
// but we don't know either way. One of several reasons
// we may have to parse as int.
// past_max: true if the object is past max, false if not
// in the future we should probably combine these into an
// Option<bool>
// is_zero: true if number is zero, false otherwise
// len_digits: length of digits used to create the int
// important, for example, if we run into a non-valid character
fn analyze(str_in: &str, signed_out: bool, inprefix: &InPrefix) -> IntAnalysis {
// the maximum number of digits we could conceivably
// have before the decimal point without exceeding the
// max
let mut str_it = get_it_at(inprefix.offset, str_in);
let max_sd_in = if signed_out {
match inprefix.radix_in {
Base::Ten => 19,
Base::Octal => 21,
Base::Hex => 16,
}
} else {
match inprefix.radix_in {
Base::Ten => 20,
Base::Octal => 22,
Base::Hex => 16,
}
};
let mut ret = IntAnalysis {
check_past_max: false,
past_max: false,
is_zero: false,
len_digits: 0,
};
// todo turn this to a while let now that we know
// no special behavior on EOI break
loop {
let c_opt = str_it.next();
if let Some(c) = c_opt {
match c {
'0'...'9' | 'a'...'f' | 'A'...'F' => {
if ret.len_digits == 0 && c == '0' {
ret.is_zero = true;
} else if ret.is_zero {
ret.is_zero = false;
}
ret.len_digits += 1;
if ret.len_digits == max_sd_in {
if let Some(next_ch) = str_it.next() {
match next_ch {
'0'...'9' => {
ret.past_max = true;
}
_ => {
// force conversion
// to check if its above max.
// todo: spin out convert
// into fn, call it here to try
// read val, on Ok()
// save val for reuse later
// that way on same-base in and out
// we don't needlessly convert int
// to str, we can just copy it over.
ret.check_past_max = true;
str_it.put_back(next_ch);
}
}
if ret.past_max {
break;
}
} else {
ret.check_past_max = true;
}
}
}
_ => {
warn_incomplete_conv(str_in);
break;
}
}
} else {
// breaks on EOL
break;
}
}
ret
}
// get a FormatPrimitive of the maximum value for the field char
// and given sign
fn get_max(fchar: char, sign: i8) -> FormatPrimitive {
let mut fmt_prim: FormatPrimitive = Default::default();
fmt_prim.pre_decimal = Some(String::from(match fchar {
'd' | 'i' => {
match sign {
1 => "9223372036854775807",
_ => {
fmt_prim.prefix = Some(String::from("-"));
"9223372036854775808"
}
}
}
'x' | 'X' => "ffffffffffffffff",
'o' => "1777777777777777777777",
'u' | _ => "18446744073709551615",
}));
fmt_prim
}
// conv_from_segment contract:
// 1. takes
// - a string that begins with a non-zero digit, and proceeds
// with zero or more following digits until the end of the string
// - a radix to interpret those digits as
// - a char that communicates:
// whether to interpret+output the string as an i64 or u64
// what radix to write the parsed number as.
// 2. parses it as a rust integral type
// 3. outputs FormatPrimitive with:
// - if the string falls within bounds:
// number parsed and written in the correct radix
// - if the string falls outside bounds:
// for i64 output, the int minimum or int max (depending on sign)
// for u64 output, the u64 max in the output radix
fn conv_from_segment(segment: &str, radix_in: Base, fchar: char, sign: i8) -> FormatPrimitive {
match fchar {
'i' | 'd' => {
match i64::from_str_radix(segment, radix_in as u32) {
Ok(i) => {
let mut fmt_prim: FormatPrimitive = Default::default();
if sign == -1 {
fmt_prim.prefix = Some(String::from("-"));
}
fmt_prim.pre_decimal = Some(format!("{}", i));
fmt_prim
}
Err(_) => Intf::get_max(fchar, sign),
}
}
_ => {
match u64::from_str_radix(segment, radix_in as u32) {
Ok(u) => {
let mut fmt_prim: FormatPrimitive = Default::default();
let u_f = if sign == -1 {
u64::MAX - (u - 1)
} else {
u
};
fmt_prim.pre_decimal = Some(match fchar {
'X' => format!("{:X}", u_f),
'x' => format!("{:x}", u_f),
'o' => format!("{:o}", u_f),
_ => format!("{}", u_f),
});
fmt_prim
}
Err(_) => Intf::get_max(fchar, sign),
}
}
}
}
}
impl Formatter for Intf {
fn get_primitive(&self,
field: &FormatField,
inprefix: &InPrefix,
str_in: &str)
-> Option<FormatPrimitive> {
let begin = inprefix.offset;
// get information about the string. see Intf::Analyze
// def above.
let convert_hints = Intf::analyze(str_in,
*field.field_char == 'i' || *field.field_char == 'd',
inprefix);
// We always will have a formatprimitive to return
Some(if convert_hints.len_digits == 0 || convert_hints.is_zero {
// if non-digit or end is reached before a non-zero digit
let mut fmt_prim: FormatPrimitive = Default::default();
fmt_prim.pre_decimal = Some(String::from("0"));
fmt_prim
} else if!convert_hints.past_max {
// if the number is or may be below the bounds limit
let radix_out = match *field.field_char {
'd' | 'i' | 'u' => Base::Ten,
'x' | 'X' => Base::Hex,
'o' | _ => Base::Octal,
};
let radix_mismatch =!radix_out.eq(&inprefix.radix_in);
let decr_from_max: bool = inprefix.sign == -1 && *field.field_char!= 'i';
let end = begin + convert_hints.len_digits as usize;
// convert to int if any one of these is true:
// - number of digits in int indicates it may be past max
// - we're subtracting from the max
// - we're converting the base
if convert_hints.check_past_max || decr_from_max || radix_mismatch {
// radix of in and out is the same.
let segment = String::from(&str_in[begin..end]);
let m = Intf::conv_from_segment(&segment,
inprefix.radix_in.clone(),
*field.field_char,
inprefix.sign);
m
} else {
// otherwise just do a straight string copy.
let mut fmt_prim: FormatPrimitive = Default::default();
// this is here and not earlier because
// zero doesn't get a sign, and conv_from_segment
// creates its format primitive separately
if inprefix.sign == -1 && *field.field_char == 'i' {
fmt_prim.prefix = Some(String::from("-"));
}
fmt_prim.pre_decimal = Some(String::from(&str_in[begin..end]));
fmt_prim
}
} else {
Intf::get_max(*field.field_char, inprefix.sign)
})
}
fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String
|
}
None => {}
}
finalstr.push_str(&pre_decimal);
}
None => {
panic!("error, format primitives provided to int, will, incidentally under \
correct behavior, always have a pre_dec value.");
}
}
finalstr
}
}
|
{
let mut finalstr: String = String::new();
match prim.prefix {
Some(ref prefix) => {
finalstr.push_str(&prefix);
}
None => {}
}
// integral second fields is zero-padded minimum-width
// which gets handled before general minimum-width
match prim.pre_decimal {
Some(ref pre_decimal) => {
match field.second_field {
Some(min) => {
let mut i = min;
let len = pre_decimal.len() as u32;
while i > len {
finalstr.push('0');
i -= 1;
}
|
identifier_body
|
intf.rs
|
//! formatter for unsigned and signed int subs
//! unsigned ints: %X %x (hex u64) %o (octal u64) %u (base ten u64)
//! signed ints: %i %d (both base ten i64)
use std::u64;
use std::i64;
use super::super::format_field::FormatField;
use super::super::formatter::{InPrefix, FormatPrimitive, Base, Formatter, warn_incomplete_conv,
get_it_at};
pub struct Intf {
a: u32,
}
// see the Intf::analyze() function below
struct IntAnalysis {
check_past_max: bool,
past_max: bool,
is_zero: bool,
len_digits: u8,
}
impl Intf {
pub fn new() -> Intf {
Intf { a: 0 }
}
// take a ref to argument string, and basic information
// about prefix (offset, radix, sign), and analyze string
// to gain the IntAnalysis information above
// check_past_max: true if the number *may* be above max,
// but we don't know either way. One of several reasons
// we may have to parse as int.
// past_max: true if the object is past max, false if not
// in the future we should probably combine these into an
// Option<bool>
// is_zero: true if number is zero, false otherwise
// len_digits: length of digits used to create the int
// important, for example, if we run into a non-valid character
fn analyze(str_in: &str, signed_out: bool, inprefix: &InPrefix) -> IntAnalysis {
// the maximum number of digits we could conceivably
// have before the decimal point without exceeding the
// max
let mut str_it = get_it_at(inprefix.offset, str_in);
let max_sd_in = if signed_out {
match inprefix.radix_in {
Base::Ten => 19,
Base::Octal => 21,
Base::Hex => 16,
}
} else {
match inprefix.radix_in {
Base::Ten => 20,
Base::Octal => 22,
Base::Hex => 16,
}
};
let mut ret = IntAnalysis {
check_past_max: false,
past_max: false,
is_zero: false,
len_digits: 0,
};
// todo turn this to a while let now that we know
// no special behavior on EOI break
loop {
let c_opt = str_it.next();
if let Some(c) = c_opt {
match c {
'0'...'9' | 'a'...'f' | 'A'...'F' => {
if ret.len_digits == 0 && c == '0' {
ret.is_zero = true;
} else if ret.is_zero {
ret.is_zero = false;
}
ret.len_digits += 1;
if ret.len_digits == max_sd_in {
if let Some(next_ch) = str_it.next() {
match next_ch {
'0'...'9' => {
ret.past_max = true;
}
_ =>
|
}
if ret.past_max {
break;
}
} else {
ret.check_past_max = true;
}
}
}
_ => {
warn_incomplete_conv(str_in);
break;
}
}
} else {
// breaks on EOL
break;
}
}
ret
}
// get a FormatPrimitive of the maximum value for the field char
// and given sign
fn get_max(fchar: char, sign: i8) -> FormatPrimitive {
let mut fmt_prim: FormatPrimitive = Default::default();
fmt_prim.pre_decimal = Some(String::from(match fchar {
'd' | 'i' => {
match sign {
1 => "9223372036854775807",
_ => {
fmt_prim.prefix = Some(String::from("-"));
"9223372036854775808"
}
}
}
'x' | 'X' => "ffffffffffffffff",
'o' => "1777777777777777777777",
'u' | _ => "18446744073709551615",
}));
fmt_prim
}
// conv_from_segment contract:
// 1. takes
// - a string that begins with a non-zero digit, and proceeds
// with zero or more following digits until the end of the string
// - a radix to interpret those digits as
// - a char that communicates:
// whether to interpret+output the string as an i64 or u64
// what radix to write the parsed number as.
// 2. parses it as a rust integral type
// 3. outputs FormatPrimitive with:
// - if the string falls within bounds:
// number parsed and written in the correct radix
// - if the string falls outside bounds:
// for i64 output, the int minimum or int max (depending on sign)
// for u64 output, the u64 max in the output radix
fn conv_from_segment(segment: &str, radix_in: Base, fchar: char, sign: i8) -> FormatPrimitive {
match fchar {
'i' | 'd' => {
match i64::from_str_radix(segment, radix_in as u32) {
Ok(i) => {
let mut fmt_prim: FormatPrimitive = Default::default();
if sign == -1 {
fmt_prim.prefix = Some(String::from("-"));
}
fmt_prim.pre_decimal = Some(format!("{}", i));
fmt_prim
}
Err(_) => Intf::get_max(fchar, sign),
}
}
_ => {
match u64::from_str_radix(segment, radix_in as u32) {
Ok(u) => {
let mut fmt_prim: FormatPrimitive = Default::default();
let u_f = if sign == -1 {
u64::MAX - (u - 1)
} else {
u
};
fmt_prim.pre_decimal = Some(match fchar {
'X' => format!("{:X}", u_f),
'x' => format!("{:x}", u_f),
'o' => format!("{:o}", u_f),
_ => format!("{}", u_f),
});
fmt_prim
}
Err(_) => Intf::get_max(fchar, sign),
}
}
}
}
}
impl Formatter for Intf {
fn get_primitive(&self,
field: &FormatField,
inprefix: &InPrefix,
str_in: &str)
-> Option<FormatPrimitive> {
let begin = inprefix.offset;
// get information about the string. see Intf::Analyze
// def above.
let convert_hints = Intf::analyze(str_in,
*field.field_char == 'i' || *field.field_char == 'd',
inprefix);
// We always will have a formatprimitive to return
Some(if convert_hints.len_digits == 0 || convert_hints.is_zero {
// if non-digit or end is reached before a non-zero digit
let mut fmt_prim: FormatPrimitive = Default::default();
fmt_prim.pre_decimal = Some(String::from("0"));
fmt_prim
} else if!convert_hints.past_max {
// if the number is or may be below the bounds limit
let radix_out = match *field.field_char {
'd' | 'i' | 'u' => Base::Ten,
'x' | 'X' => Base::Hex,
'o' | _ => Base::Octal,
};
let radix_mismatch =!radix_out.eq(&inprefix.radix_in);
let decr_from_max: bool = inprefix.sign == -1 && *field.field_char!= 'i';
let end = begin + convert_hints.len_digits as usize;
// convert to int if any one of these is true:
// - number of digits in int indicates it may be past max
// - we're subtracting from the max
// - we're converting the base
if convert_hints.check_past_max || decr_from_max || radix_mismatch {
// radix of in and out is the same.
let segment = String::from(&str_in[begin..end]);
let m = Intf::conv_from_segment(&segment,
inprefix.radix_in.clone(),
*field.field_char,
inprefix.sign);
m
} else {
// otherwise just do a straight string copy.
let mut fmt_prim: FormatPrimitive = Default::default();
// this is here and not earlier because
// zero doesn't get a sign, and conv_from_segment
// creates its format primitive separately
if inprefix.sign == -1 && *field.field_char == 'i' {
fmt_prim.prefix = Some(String::from("-"));
}
fmt_prim.pre_decimal = Some(String::from(&str_in[begin..end]));
fmt_prim
}
} else {
Intf::get_max(*field.field_char, inprefix.sign)
})
}
fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String {
let mut finalstr: String = String::new();
match prim.prefix {
Some(ref prefix) => {
finalstr.push_str(&prefix);
}
None => {}
}
// integral second fields is zero-padded minimum-width
// which gets handled before general minimum-width
match prim.pre_decimal {
Some(ref pre_decimal) => {
match field.second_field {
Some(min) => {
let mut i = min;
let len = pre_decimal.len() as u32;
while i > len {
finalstr.push('0');
i -= 1;
}
}
None => {}
}
finalstr.push_str(&pre_decimal);
}
None => {
panic!("error, format primitives provided to int, will, incidentally under \
correct behavior, always have a pre_dec value.");
}
}
finalstr
}
}
|
{
// force conversion
// to check if its above max.
// todo: spin out convert
// into fn, call it here to try
// read val, on Ok()
// save val for reuse later
// that way on same-base in and out
// we don't needlessly convert int
// to str, we can just copy it over.
ret.check_past_max = true;
str_it.put_back(next_ch);
}
|
conditional_block
|
intf.rs
|
//! formatter for unsigned and signed int subs
//! unsigned ints: %X %x (hex u64) %o (octal u64) %u (base ten u64)
//! signed ints: %i %d (both base ten i64)
use std::u64;
use std::i64;
use super::super::format_field::FormatField;
use super::super::formatter::{InPrefix, FormatPrimitive, Base, Formatter, warn_incomplete_conv,
get_it_at};
pub struct Intf {
a: u32,
}
// see the Intf::analyze() function below
struct IntAnalysis {
check_past_max: bool,
past_max: bool,
is_zero: bool,
len_digits: u8,
}
impl Intf {
    // Construct an integer-field formatter.
    // NOTE(review): the `a` field is never read anywhere in this file —
    // it appears to exist only to give the struct a body; confirm before removing.
    pub fn new() -> Intf {
        Intf { a: 0 }
    }
// take a ref to argument string, and basic information
// about prefix (offset, radix, sign), and analyze string
// to gain the IntAnalysis information above
// check_past_max: true if the number *may* be above max,
// but we don't know either way. One of several reasons
// we may have to parse as int.
// past_max: true if the object is past max, false if not
// in the future we should probably combine these into an
// Option<bool>
// is_zero: true if number is zero, false otherwise
// len_digits: length of digits used to create the int
// important, for example, if we run into a non-valid character
fn analyze(str_in: &str, signed_out: bool, inprefix: &InPrefix) -> IntAnalysis {
// the maximum number of digits we could conceivably
// have before the decimal point without exceeding the
// max
let mut str_it = get_it_at(inprefix.offset, str_in);
let max_sd_in = if signed_out {
match inprefix.radix_in {
Base::Ten => 19,
Base::Octal => 21,
Base::Hex => 16,
}
} else {
match inprefix.radix_in {
Base::Ten => 20,
Base::Octal => 22,
Base::Hex => 16,
}
};
let mut ret = IntAnalysis {
check_past_max: false,
past_max: false,
is_zero: false,
len_digits: 0,
};
// todo turn this to a while let now that we know
// no special behavior on EOI break
loop {
let c_opt = str_it.next();
if let Some(c) = c_opt {
match c {
'0'...'9' | 'a'...'f' | 'A'...'F' => {
if ret.len_digits == 0 && c == '0' {
ret.is_zero = true;
} else if ret.is_zero {
ret.is_zero = false;
}
ret.len_digits += 1;
if ret.len_digits == max_sd_in {
if let Some(next_ch) = str_it.next() {
match next_ch {
'0'...'9' => {
ret.past_max = true;
}
_ => {
// force conversion
// to check if its above max.
// todo: spin out convert
// into fn, call it here to try
// read val, on Ok()
// save val for reuse later
// that way on same-base in and out
// we don't needlessly convert int
// to str, we can just copy it over.
ret.check_past_max = true;
str_it.put_back(next_ch);
}
}
if ret.past_max {
|
} else {
ret.check_past_max = true;
}
}
}
_ => {
warn_incomplete_conv(str_in);
break;
}
}
} else {
// breaks on EOL
break;
}
}
ret
}
    // get a FormatPrimitive of the maximum value for the field char
    // and given sign
    // For signed output ('d'/'i'), sign == 1 yields i64::MAX's digits and any
    // other sign yields i64::MIN: a "-" prefix plus the magnitude 2^63
    // (stored as digits only — the prefix carries the sign). Unsigned outputs
    // always clamp to u64::MAX, written in the radix implied by the field char.
    fn get_max(fchar: char, sign: i8) -> FormatPrimitive {
        let mut fmt_prim: FormatPrimitive = Default::default();
        fmt_prim.pre_decimal = Some(String::from(match fchar {
            'd' | 'i' => {
                match sign {
                    1 => "9223372036854775807",
                    _ => {
                        // negative overflow: emit "-" and the magnitude of i64::MIN
                        fmt_prim.prefix = Some(String::from("-"));
                        "9223372036854775808"
                    }
                }
            }
            'x' | 'X' => "ffffffffffffffff",
            'o' => "1777777777777777777777",
            'u' | _ => "18446744073709551615",
        }));
        fmt_prim
    }
    // conv_from_segment contract:
    // 1. takes
    // - a string that begins with a non-zero digit, and proceeds
    // with zero or more following digits until the end of the string
    // - a radix to interpret those digits as
    // - a char that communicates:
    // whether to interpret+output the string as an i64 or u64
    // what radix to write the parsed number as.
    // 2. parses it as a rust integral type
    // 3. outputs FormatPrimitive with:
    // - if the string falls within bounds:
    // number parsed and written in the correct radix
    // - if the string falls outside bounds:
    // for i64 output, the int minimum or int max (depending on sign)
    // for u64 output, the u64 max in the output radix
    fn conv_from_segment(segment: &str, radix_in: Base, fchar: char, sign: i8) -> FormatPrimitive {
        match fchar {
            'i' | 'd' => {
                // signed path: parse as i64; overflow clamps via get_max
                match i64::from_str_radix(segment, radix_in as u32) {
                    Ok(i) => {
                        let mut fmt_prim: FormatPrimitive = Default::default();
                        if sign == -1 {
                            fmt_prim.prefix = Some(String::from("-"));
                        }
                        fmt_prim.pre_decimal = Some(format!("{}", i));
                        fmt_prim
                    }
                    Err(_) => Intf::get_max(fchar, sign),
                }
            }
            _ => {
                // unsigned path: parse as u64; overflow clamps via get_max
                match u64::from_str_radix(segment, radix_in as u32) {
                    Ok(u) => {
                        let mut fmt_prim: FormatPrimitive = Default::default();
                        // a negative input is mapped to its two's-complement
                        // u64 bit pattern: -u == u64::MAX - (u - 1)
                        let u_f = if sign == -1 {
                            u64::MAX - (u - 1)
                        } else {
                            u
                        };
                        fmt_prim.pre_decimal = Some(match fchar {
                            'X' => format!("{:X}", u_f),
                            'x' => format!("{:x}", u_f),
                            'o' => format!("{:o}", u_f),
                            _ => format!("{}", u_f),
                        });
                        fmt_prim
                    }
                    Err(_) => Intf::get_max(fchar, sign),
                }
            }
        }
    }
}
impl Formatter for Intf {
    // Produce the FormatPrimitive for an integer field.
    // Fast path: when the digit count is safely in bounds and neither a base
    // conversion nor a subtract-from-max is required, the digits are copied
    // verbatim from the input; otherwise the segment is parsed through
    // conv_from_segment (which clamps to the type's max/min on overflow).
    fn get_primitive(&self,
                     field: &FormatField,
                     inprefix: &InPrefix,
                     str_in: &str)
                     -> Option<FormatPrimitive> {
        let begin = inprefix.offset;
        // get information about the string. see Intf::Analyze
        // def above.
        let convert_hints = Intf::analyze(str_in,
                                          *field.field_char == 'i' || *field.field_char == 'd',
                                          inprefix);
        // We always will have a formatprimitive to return
        Some(if convert_hints.len_digits == 0 || convert_hints.is_zero {
            // if non-digit or end is reached before a non-zero digit
            let mut fmt_prim: FormatPrimitive = Default::default();
            fmt_prim.pre_decimal = Some(String::from("0"));
            fmt_prim
        } else if!convert_hints.past_max {
            // if the number is or may be below the bounds limit
            let radix_out = match *field.field_char {
                'd' | 'i' | 'u' => Base::Ten,
                'x' | 'X' => Base::Hex,
                'o' | _ => Base::Octal,
            };
            let radix_mismatch =!radix_out.eq(&inprefix.radix_in);
            let decr_from_max: bool = inprefix.sign == -1 && *field.field_char!= 'i';
            let end = begin + convert_hints.len_digits as usize;
            // convert to int if any one of these is true:
            // - number of digits in int indicates it may be past max
            // - we're subtracting from the max
            // - we're converting the base
            if convert_hints.check_past_max || decr_from_max || radix_mismatch {
                // radix of in and out is the same.
                let segment = String::from(&str_in[begin..end]);
                let m = Intf::conv_from_segment(&segment,
                                                inprefix.radix_in.clone(),
                                                *field.field_char,
                                                inprefix.sign);
                m
            } else {
                // otherwise just do a straight string copy.
                let mut fmt_prim: FormatPrimitive = Default::default();
                // this is here and not earlier because
                // zero doesn't get a sign, and conv_from_segment
                // creates its format primitive separately
                if inprefix.sign == -1 && *field.field_char == 'i' {
                    fmt_prim.prefix = Some(String::from("-"));
                }
                fmt_prim.pre_decimal = Some(String::from(&str_in[begin..end]));
                fmt_prim
            }
        } else {
            // definitely past max: clamp to the type's boundary value
            Intf::get_max(*field.field_char, inprefix.sign)
        })
    }
    // Render a FormatPrimitive: optional sign prefix, then the digits
    // zero-padded on the left to the field's minimum width (second_field).
    // Panics if pre_decimal is None — integer primitives always set it.
    fn primitive_to_str(&self, prim: &FormatPrimitive, field: FormatField) -> String {
        let mut finalstr: String = String::new();
        match prim.prefix {
            Some(ref prefix) => {
                finalstr.push_str(&prefix);
            }
            None => {}
        }
        // integral second fields is zero-padded minimum-width
        // which gets handled before general minimum-width
        match prim.pre_decimal {
            Some(ref pre_decimal) => {
                match field.second_field {
                    Some(min) => {
                        // pad with '0' until the digit string reaches `min` chars
                        let mut i = min;
                        let len = pre_decimal.len() as u32;
                        while i > len {
                            finalstr.push('0');
                            i -= 1;
                        }
                    }
                    None => {}
                }
                finalstr.push_str(&pre_decimal);
            }
            None => {
                panic!("error, format primitives provided to int, will, incidentally under \
                        correct behavior, always have a pre_dec value.");
            }
        }
        finalstr
    }
}
|
break;
}
|
random_line_split
|
issue-3601.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
struct HTMLImageData {
image: Option<String>
}
struct
|
{
kind: Box<ElementKind>
}
enum ElementKind {
HTMLImageElement(HTMLImageData)
}
enum NodeKind {
Element(ElementData)
}
struct NodeData {
kind: Box<NodeKind>,
}
fn main() {
let mut id = HTMLImageData { image: None };
let ed = ElementData { kind: box ElementKind::HTMLImageElement(id) };
let n = NodeData {kind : box NodeKind::Element(ed)};
// n.b. span could be better
match n.kind {
box NodeKind::Element(ed) => match ed.kind { //~ ERROR non-exhaustive patterns
box ElementKind::HTMLImageElement(ref d) if d.image.is_some() => { true }
},
_ => panic!("WAT") //~ ERROR unreachable pattern
};
}
|
ElementData
|
identifier_name
|
issue-3601.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
// Test fixture: element payload whose `image` may or may not be present —
// the `is_some()` guard on this field is what makes the inner match non-exhaustive.
struct HTMLImageData {
    image: Option<String>
}
// Test fixture: wraps an ElementKind behind a Box, so the match arms use box patterns.
struct ElementData {
    kind: Box<ElementKind>
}
// Single-variant enum: any match on it without a guard would be exhaustive.
enum ElementKind {
    HTMLImageElement(HTMLImageData)
}
// Single-variant enum: matching `Element(..)` covers every value,
// making any following `_` arm unreachable.
enum NodeKind {
    Element(ElementData)
}
// Test fixture: outermost wrapper matched on in main().
struct NodeData {
    kind: Box<NodeKind>,
}
fn main() {
    // Regression test for issue #3601. The `//~ ERROR` lines are compiletest
    // directives and must stay attached to their statements:
    // - the inner match's only arm has an `if` guard, so the compiler cannot
    //   prove it exhaustive (non-exhaustive patterns);
    // - NodeKind has a single variant, so the outer `box NodeKind::Element(ed)`
    //   arm covers everything and the trailing `_` arm is unreachable.
    let mut id = HTMLImageData { image: None };
    let ed = ElementData { kind: box ElementKind::HTMLImageElement(id) };
    let n = NodeData {kind : box NodeKind::Element(ed)};
    // n.b. span could be better
    match n.kind {
        box NodeKind::Element(ed) => match ed.kind { //~ ERROR non-exhaustive patterns
            box ElementKind::HTMLImageElement(ref d) if d.image.is_some() => { true }
        },
        _ => panic!("WAT") //~ ERROR unreachable pattern
    };
}
|
random_line_split
|
|
lib.rs
|
/**
* Copyright (c) 2015 Alex Maslakov, <http://www.gildedhonour.com>, <http://www.alexmaslakov.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For questions and comments about this product, please see the project page at:
*
* https://github.com/GildedHonour/frank_jwt
*
*/
extern crate rustc_serialize;
extern crate time;
extern crate crypto;
use time::Duration;
use rustc_serialize::base64;
use rustc_serialize::base64::{ToBase64, FromBase64};
use rustc_serialize::json;
use rustc_serialize::json::{ToJson, Json};
use std::collections::BTreeMap;
use crypto::sha2::{Sha256, Sha384, Sha512};
use crypto::hmac::Hmac;
use crypto::digest::Digest;
use crypto::mac::Mac;
use std::str;
use std::fmt;
use std::fmt::Formatter;
use std::fmt::Debug;
pub type Payload = BTreeMap<String, String>; //todo replace with &str
pub struct
|
{
algorithm: Algorithm,
ttype: String
}
impl Header {
pub fn new(alg: Algorithm) -> Header {
Header { algorithm: alg, ttype: Header::std_type() }
}
pub fn std_type() -> String {
"JWT".to_string()
}
}
#[derive(Clone, Copy)]
pub enum Algorithm {
HS256,
HS384,
HS512,
RS256
}
impl ToString for Algorithm {
fn to_string(&self) -> String {
match *self {
Algorithm::HS256 => "HS256".to_string(),
Algorithm::HS384 => "HS384".to_string(),
Algorithm::HS512 => "HS512".to_string(),
Algorithm::RS256 => "RS256".to_string()
}
}
}
pub enum Error {
SignatureExpired,
SignatureInvalid,
JWTInvalid,
IssuerInvalid,
ExpirationInvalid,
AudienceInvalid
}
impl ToJson for Header {
fn to_json(&self) -> json::Json {
let mut map = BTreeMap::new();
map.insert("typ".to_string(), self.ttype.to_json());
map.insert("alg".to_string(), self.algorithm.to_string().to_json());
Json::Object(map)
}
}
pub fn encode(header: Header, secret: String, payload: Payload) -> String {
let signing_input = get_signing_input(payload, &header.algorithm);
let signature = sign_hmac(&signing_input, secret, header.algorithm);
format!("{}.{}", signing_input, signature)
}
pub fn decode(encoded_token: String, secret: String, algorithm: Algorithm) -> Result<(Header, Payload), Error> {
match decode_segments(encoded_token) {
Some((header, payload, signature, signing_input)) => {
if!verify_signature(algorithm, signing_input, &signature, secret.to_string()) {
return Err(Error::SignatureInvalid)
}
//todo
// verify_issuer(payload_json);
// verify_expiration(payload_json);
// verify_audience();
// verify_subject();
// verify_notbefore();
// verify_issuedat();
// verify_jwtid();
//todo
Ok((header, payload))
},
None => Err(Error::JWTInvalid)
}
}
fn segments_count() -> usize {
3
}
fn get_signing_input(payload: Payload, algorithm: &Algorithm) -> String {
let header = Header::new(*algorithm);
let header_json_str = header.to_json();
let encoded_header = base64_url_encode(header_json_str.to_string().as_bytes()).to_string();
let p = payload.into_iter().map(|(k, v)| (k, v.to_json())).collect();
let payload_json = Json::Object(p);
let encoded_payload = base64_url_encode(payload_json.to_string().as_bytes()).to_string();
format!("{}.{}", encoded_header, encoded_payload)
}
fn sign_hmac(signing_input: &str, secret: String, algorithm: Algorithm) -> String {
let mut hmac = match algorithm {
Algorithm::HS256 => create_hmac(Sha256::new(), secret),
Algorithm::HS384 => create_hmac(Sha384::new(), secret),
Algorithm::HS512 => create_hmac(Sha512::new(), secret),
Algorithm::RS256 => unimplemented!()
};
hmac.input(signing_input.as_bytes());
base64_url_encode(hmac.result().code())
}
fn base64_url_encode(bytes: &[u8]) -> String {
bytes.to_base64(base64::URL_SAFE)
}
fn json_to_tree(input: Json) -> BTreeMap<String, String> {
match input {
Json::Object(json_tree) => json_tree.into_iter().map(|(k, v)| (k, match v {
Json::String(s) => s,
_ => unreachable!()
})).collect(),
_ => unreachable!()
}
}
fn decode_segments(encoded_token: String) -> Option<(Header, Payload, Vec<u8>, String)> {
let raw_segments: Vec<&str> = encoded_token.split(".").collect();
if raw_segments.len()!= segments_count() {
return None
}
let header_segment = raw_segments[0];
let payload_segment = raw_segments[1];
let crypto_segment = raw_segments[2];
let (header, payload) = decode_header_and_payload(header_segment, payload_segment);
let signature = &crypto_segment.as_bytes().from_base64().unwrap();
let signing_input = format!("{}.{}", header_segment, payload_segment);
Some((header, payload, signature.clone(), signing_input))
}
fn decode_header_and_payload<'a>(header_segment: &str, payload_segment: &str) -> (Header, Payload) {
fn base64_to_json(input: &str) -> Json {
let bytes = input.as_bytes().from_base64().unwrap();
let s = str::from_utf8(&bytes).unwrap();
Json::from_str(s).unwrap()
};
let header_json = base64_to_json(header_segment);
let header_tree = json_to_tree(header_json);
let alg = header_tree.get("alg").unwrap();
let header = Header::new(parse_algorithm(alg));
let payload_json = base64_to_json(payload_segment);
let payload = json_to_tree(payload_json);
(header, payload)
}
/// Parse an algorithm name (the value of a JWT header's "alg" field)
/// into the corresponding `Algorithm` variant.
///
/// Panics on an unrecognized algorithm name.
fn parse_algorithm(alg: &str) -> Algorithm {
    match alg {
        "HS256" => Algorithm::HS256,
        "HS384" => Algorithm::HS384,
        "HS512" => Algorithm::HS512,
        // BUG FIX: this arm previously returned Algorithm::HS512, so an
        // "RS256" header was silently treated as HMAC-SHA512.
        "RS256" => Algorithm::RS256,
        _ => panic!("Unknown algorithm")
    }
}
fn verify_signature(algorithm: Algorithm, signing_input: String, signature: &[u8], secret: String) -> bool {
let mut hmac = match algorithm {
Algorithm::HS256 => create_hmac(Sha256::new(), secret),
Algorithm::HS384 => create_hmac(Sha384::new(), secret),
Algorithm::HS512 => create_hmac(Sha512::new(), secret),
Algorithm::RS256 => unimplemented!()
};
hmac.input(signing_input.to_string().as_bytes());
secure_compare(signature, hmac.result().code())
}
/// Constant-time equality check for two byte slices.
///
/// ORs together the XOR of every byte pair, so the time taken does not
/// depend on where the first mismatch occurs — this is what protects
/// signature verification from timing attacks. Slices of different
/// lengths compare unequal immediately.
fn secure_compare(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false
    }
    let diff = a.iter()
        .zip(b.iter())
        .fold(0u8, |acc, (&x, &y)| acc | (x ^ y));
    diff == 0
}
// Build a boxed HMAC instance over the given digest, keyed with the
// secret string's bytes. Boxed so sign/verify can pick the digest at runtime.
fn create_hmac<'a, D: Digest + 'a>(digest: D, some_str: String) -> Box<Mac + 'a> {
    Box::new(Hmac::new(digest, some_str.as_bytes()))
}
#[cfg(test)]
mod tests {
extern crate time;
use time::Duration;
use super::Header;
use super::Payload;
use super::encode;
use super::decode;
use super::Algorithm;
use super::secure_compare;
#[test]
fn test_encode_and_decode_jwt_hs256() {
let mut p1 = Payload::new();
p1.insert("key1".to_string(), "val1".to_string());
p1.insert("key2".to_string(), "val2".to_string());
p1.insert("key3".to_string(), "val3".to_string());
let secret = "secret123";
let header = Header::new(Algorithm::HS256);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS256);
assert!(maybe_res.is_ok());
}
#[test]
fn test_decode_valid_jwt_hs256() {
let mut p1 = Payload::new();
p1.insert("key11".to_string(), "val1".to_string());
p1.insert("key22".to_string(), "val2".to_string());
let secret = "secret123";
let jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkxMSI6InZhbDEiLCJrZXkyMiI6InZhbDIifQ.jrcoVcRsmQqDEzSW9qOhG1HIrzV_n3nMhykNPnGvp9c";
let maybe_res = decode(jwt.to_string(), secret.to_string(), Algorithm::HS256);
assert!(maybe_res.is_ok());
}
#[test]
fn test_secure_compare_same_strings() {
let str1 = "same same".as_bytes();
let str2 = "same same".as_bytes();
let res = secure_compare(str1, str2);
assert!(res);
}
#[test]
fn test_fails_when_secure_compare_different_strings() {
let str1 = "same same".as_bytes();
let str2 = "same same but different".as_bytes();
let res = secure_compare(str1, str2);
assert!(!res);
}
#[test]
fn test_encode_and_decode_jwt_hs384() {
let mut p1 = Payload::new();
p1.insert("key1".to_string(), "val1".to_string());
p1.insert("key2".to_string(), "val2".to_string());
p1.insert("key3".to_string(), "val3".to_string());
let secret = "secret123";
let header = Header::new(Algorithm::HS384);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS384);
assert!(maybe_res.is_ok());
}
#[test]
fn test_encode_and_decode_jwt_hs512() {
let mut p1 = Payload::new();
p1.insert("key12".to_string(), "val1".to_string());
p1.insert("key22".to_string(), "val2".to_string());
p1.insert("key33".to_string(), "val3".to_string());
let secret = "secret123456";
let header = Header::new(Algorithm::HS512);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS512);
assert!(maybe_res.is_ok());
}
// #[test]
// fn test_fails_when_expired() {
// let now = time::get_time();
// let past = now + Duration::minutes(-5);
// let mut p1 = BTreeMap::new();
// p1.insert("exp".to_string(), past.sec.to_string());
// p1.insert("key1".to_string(), "val1".to_string());
// let secret = "secret123";
// let jwt = sign(secret, Some(p1.clone()), None);
// let res = verify(jwt.as_slice(), secret, None);
// assert!(res.is_ok());
// }
// #[test]
// fn test_ok_when_expired_not_verified() {
// let now = time::get_time();
// let past = now + Duration::minutes(-5);
// let mut p1 = BTreeMap::new();
// p1.insert("exp".to_string(), past.sec.to_string());
// p1.insert("key1".to_string(), "val1".to_string());
// let secret = "secret123";
// let jwt = sign(secret, Some(p1.clone()), None);
// let res = verify(jwt.as_slice(), secret, None);
// assert!(res.is_ok());
// }
}
|
Header
|
identifier_name
|
lib.rs
|
/**
* Copyright (c) 2015 Alex Maslakov, <http://www.gildedhonour.com>, <http://www.alexmaslakov.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For questions and comments about this product, please see the project page at:
*
* https://github.com/GildedHonour/frank_jwt
*
*/
extern crate rustc_serialize;
extern crate time;
extern crate crypto;
use time::Duration;
use rustc_serialize::base64;
use rustc_serialize::base64::{ToBase64, FromBase64};
use rustc_serialize::json;
use rustc_serialize::json::{ToJson, Json};
use std::collections::BTreeMap;
use crypto::sha2::{Sha256, Sha384, Sha512};
use crypto::hmac::Hmac;
use crypto::digest::Digest;
use crypto::mac::Mac;
use std::str;
use std::fmt;
use std::fmt::Formatter;
use std::fmt::Debug;
pub type Payload = BTreeMap<String, String>; //todo replace with &str
pub struct Header {
algorithm: Algorithm,
ttype: String
}
impl Header {
pub fn new(alg: Algorithm) -> Header {
Header { algorithm: alg, ttype: Header::std_type() }
}
pub fn std_type() -> String {
"JWT".to_string()
}
}
#[derive(Clone, Copy)]
pub enum Algorithm {
HS256,
HS384,
HS512,
RS256
}
impl ToString for Algorithm {
fn to_string(&self) -> String {
match *self {
Algorithm::HS256 => "HS256".to_string(),
Algorithm::HS384 => "HS384".to_string(),
Algorithm::HS512 => "HS512".to_string(),
Algorithm::RS256 => "RS256".to_string()
}
}
}
pub enum Error {
SignatureExpired,
SignatureInvalid,
JWTInvalid,
IssuerInvalid,
ExpirationInvalid,
AudienceInvalid
}
impl ToJson for Header {
fn to_json(&self) -> json::Json {
let mut map = BTreeMap::new();
map.insert("typ".to_string(), self.ttype.to_json());
map.insert("alg".to_string(), self.algorithm.to_string().to_json());
Json::Object(map)
}
}
pub fn encode(header: Header, secret: String, payload: Payload) -> String {
let signing_input = get_signing_input(payload, &header.algorithm);
let signature = sign_hmac(&signing_input, secret, header.algorithm);
format!("{}.{}", signing_input, signature)
}
pub fn decode(encoded_token: String, secret: String, algorithm: Algorithm) -> Result<(Header, Payload), Error> {
match decode_segments(encoded_token) {
Some((header, payload, signature, signing_input)) => {
if!verify_signature(algorithm, signing_input, &signature, secret.to_string()) {
return Err(Error::SignatureInvalid)
}
//todo
// verify_issuer(payload_json);
// verify_expiration(payload_json);
// verify_audience();
// verify_subject();
// verify_notbefore();
// verify_issuedat();
// verify_jwtid();
//todo
Ok((header, payload))
},
None => Err(Error::JWTInvalid)
}
}
fn segments_count() -> usize {
3
}
fn get_signing_input(payload: Payload, algorithm: &Algorithm) -> String {
let header = Header::new(*algorithm);
let header_json_str = header.to_json();
let encoded_header = base64_url_encode(header_json_str.to_string().as_bytes()).to_string();
let p = payload.into_iter().map(|(k, v)| (k, v.to_json())).collect();
let payload_json = Json::Object(p);
let encoded_payload = base64_url_encode(payload_json.to_string().as_bytes()).to_string();
format!("{}.{}", encoded_header, encoded_payload)
}
// Compute the base64url-encoded HMAC of the signing input with `secret`.
// Only the HS* (HMAC-SHA2) algorithms are supported; RS256 panics via
// unimplemented!().
fn sign_hmac(signing_input: &str, secret: String, algorithm: Algorithm) -> String {
    let mut hmac = match algorithm {
        Algorithm::HS256 => create_hmac(Sha256::new(), secret),
        Algorithm::HS384 => create_hmac(Sha384::new(), secret),
        Algorithm::HS512 => create_hmac(Sha512::new(), secret),
        Algorithm::RS256 => unimplemented!()
    };
    hmac.input(signing_input.as_bytes());
    base64_url_encode(hmac.result().code())
}
// Base64-encode using the URL-safe alphabet (rustc_serialize's URL_SAFE
// config), as required for JWT segments.
fn base64_url_encode(bytes: &[u8]) -> String {
    bytes.to_base64(base64::URL_SAFE)
}
// Flatten a JSON object whose values are all strings into a string map.
// Hits unreachable!() (panics) if the input is not an object or any
// value is not a string — callers assume well-formed header/payload JSON.
fn json_to_tree(input: Json) -> BTreeMap<String, String> {
    match input {
        Json::Object(json_tree) => json_tree.into_iter().map(|(k, v)| (k, match v {
            Json::String(s) => s,
            _ => unreachable!()
        })).collect(),
        _ => unreachable!()
    }
}
// Split a compact JWT into (header, payload, raw signature bytes,
// signing input). Returns None unless the token has exactly three
// dot-separated segments. NOTE(review): from_base64().unwrap() panics on
// malformed base64 rather than returning None — confirm intended.
fn decode_segments(encoded_token: String) -> Option<(Header, Payload, Vec<u8>, String)> {
    let raw_segments: Vec<&str> = encoded_token.split(".").collect();
    if raw_segments.len()!= segments_count() {
        return None
    }
    let header_segment = raw_segments[0];
    let payload_segment = raw_segments[1];
    let crypto_segment = raw_segments[2];
    let (header, payload) = decode_header_and_payload(header_segment, payload_segment);
    let signature = &crypto_segment.as_bytes().from_base64().unwrap();
    // signing input is the exact text that was MACed: "header.payload"
    let signing_input = format!("{}.{}", header_segment, payload_segment);
    Some((header, payload, signature.clone(), signing_input))
}
// Base64url-decode and JSON-parse the header and payload segments,
// rebuilding the Header from its "alg" field. Panics on invalid
// base64/UTF-8/JSON (unwraps throughout).
fn decode_header_and_payload<'a>(header_segment: &str, payload_segment: &str) -> (Header, Payload) {
    // helper: one segment -> parsed JSON value
    fn base64_to_json(input: &str) -> Json {
        let bytes = input.as_bytes().from_base64().unwrap();
        let s = str::from_utf8(&bytes).unwrap();
        Json::from_str(s).unwrap()
    };
    let header_json = base64_to_json(header_segment);
    let header_tree = json_to_tree(header_json);
    let alg = header_tree.get("alg").unwrap();
    let header = Header::new(parse_algorithm(alg));
    let payload_json = base64_to_json(payload_segment);
    let payload = json_to_tree(payload_json);
    (header, payload)
}
/// Parse an algorithm name (the value of a JWT header's "alg" field)
/// into the corresponding `Algorithm` variant.
///
/// Panics on an unrecognized algorithm name.
fn parse_algorithm(alg: &str) -> Algorithm {
    match alg {
        "HS256" => Algorithm::HS256,
        "HS384" => Algorithm::HS384,
        "HS512" => Algorithm::HS512,
        // BUG FIX: this arm previously returned Algorithm::HS512, so an
        // "RS256" header was silently treated as HMAC-SHA512.
        "RS256" => Algorithm::RS256,
        _ => panic!("Unknown algorithm")
    }
}
// Recompute the HMAC over the signing input with `secret` and compare it
// to the token's signature using the constant-time comparator below.
// RS256 panics via unimplemented!().
fn verify_signature(algorithm: Algorithm, signing_input: String, signature: &[u8], secret: String) -> bool {
    let mut hmac = match algorithm {
        Algorithm::HS256 => create_hmac(Sha256::new(), secret),
        Algorithm::HS384 => create_hmac(Sha384::new(), secret),
        Algorithm::HS512 => create_hmac(Sha512::new(), secret),
        Algorithm::RS256 => unimplemented!()
    };
    hmac.input(signing_input.to_string().as_bytes());
    secure_compare(signature, hmac.result().code())
}
fn secure_compare(a: &[u8], b: &[u8]) -> bool
|
fn create_hmac<'a, D: Digest + 'a>(digest: D, some_str: String) -> Box<Mac + 'a> {
Box::new(Hmac::new(digest, some_str.as_bytes()))
}
#[cfg(test)]
mod tests {
extern crate time;
use time::Duration;
use super::Header;
use super::Payload;
use super::encode;
use super::decode;
use super::Algorithm;
use super::secure_compare;
#[test]
fn test_encode_and_decode_jwt_hs256() {
let mut p1 = Payload::new();
p1.insert("key1".to_string(), "val1".to_string());
p1.insert("key2".to_string(), "val2".to_string());
p1.insert("key3".to_string(), "val3".to_string());
let secret = "secret123";
let header = Header::new(Algorithm::HS256);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS256);
assert!(maybe_res.is_ok());
}
#[test]
fn test_decode_valid_jwt_hs256() {
let mut p1 = Payload::new();
p1.insert("key11".to_string(), "val1".to_string());
p1.insert("key22".to_string(), "val2".to_string());
let secret = "secret123";
let jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkxMSI6InZhbDEiLCJrZXkyMiI6InZhbDIifQ.jrcoVcRsmQqDEzSW9qOhG1HIrzV_n3nMhykNPnGvp9c";
let maybe_res = decode(jwt.to_string(), secret.to_string(), Algorithm::HS256);
assert!(maybe_res.is_ok());
}
#[test]
fn test_secure_compare_same_strings() {
let str1 = "same same".as_bytes();
let str2 = "same same".as_bytes();
let res = secure_compare(str1, str2);
assert!(res);
}
#[test]
fn test_fails_when_secure_compare_different_strings() {
let str1 = "same same".as_bytes();
let str2 = "same same but different".as_bytes();
let res = secure_compare(str1, str2);
assert!(!res);
}
#[test]
fn test_encode_and_decode_jwt_hs384() {
let mut p1 = Payload::new();
p1.insert("key1".to_string(), "val1".to_string());
p1.insert("key2".to_string(), "val2".to_string());
p1.insert("key3".to_string(), "val3".to_string());
let secret = "secret123";
let header = Header::new(Algorithm::HS384);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS384);
assert!(maybe_res.is_ok());
}
#[test]
fn test_encode_and_decode_jwt_hs512() {
let mut p1 = Payload::new();
p1.insert("key12".to_string(), "val1".to_string());
p1.insert("key22".to_string(), "val2".to_string());
p1.insert("key33".to_string(), "val3".to_string());
let secret = "secret123456";
let header = Header::new(Algorithm::HS512);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS512);
assert!(maybe_res.is_ok());
}
// #[test]
// fn test_fails_when_expired() {
// let now = time::get_time();
// let past = now + Duration::minutes(-5);
// let mut p1 = BTreeMap::new();
// p1.insert("exp".to_string(), past.sec.to_string());
// p1.insert("key1".to_string(), "val1".to_string());
// let secret = "secret123";
// let jwt = sign(secret, Some(p1.clone()), None);
// let res = verify(jwt.as_slice(), secret, None);
// assert!(res.is_ok());
// }
// #[test]
// fn test_ok_when_expired_not_verified() {
// let now = time::get_time();
// let past = now + Duration::minutes(-5);
// let mut p1 = BTreeMap::new();
// p1.insert("exp".to_string(), past.sec.to_string());
// p1.insert("key1".to_string(), "val1".to_string());
// let secret = "secret123";
// let jwt = sign(secret, Some(p1.clone()), None);
// let res = verify(jwt.as_slice(), secret, None);
// assert!(res.is_ok());
// }
}
|
{
if a.len() != b.len() {
return false
}
let mut res = 0_u8;
for (&x, &y) in a.iter().zip(b.iter()) {
res |= x ^ y;
}
res == 0
}
|
identifier_body
|
lib.rs
|
/**
* Copyright (c) 2015 Alex Maslakov, <http://www.gildedhonour.com>, <http://www.alexmaslakov.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For questions and comments about this product, please see the project page at:
*
* https://github.com/GildedHonour/frank_jwt
*
*/
extern crate rustc_serialize;
extern crate time;
extern crate crypto;
use time::Duration;
use rustc_serialize::base64;
use rustc_serialize::base64::{ToBase64, FromBase64};
use rustc_serialize::json;
use rustc_serialize::json::{ToJson, Json};
use std::collections::BTreeMap;
use crypto::sha2::{Sha256, Sha384, Sha512};
use crypto::hmac::Hmac;
use crypto::digest::Digest;
use crypto::mac::Mac;
use std::str;
use std::fmt;
use std::fmt::Formatter;
use std::fmt::Debug;
pub type Payload = BTreeMap<String, String>; //todo replace with &str
/// JOSE header of a token: which algorithm signs it and the token type.
pub struct Header {
    // signing/verification algorithm, serialized to the "alg" field
    algorithm: Algorithm,
    // token type, serialized to the "typ" field (always "JWT" here)
    ttype: String
}
impl Header {
    /// Build a header for the given signing algorithm, tagged with the
    /// standard "JWT" type.
    pub fn new(alg: Algorithm) -> Header {
        Header {
            algorithm: alg,
            ttype: Header::std_type(),
        }
    }

    /// The standard value of the JWT "typ" field.
    pub fn std_type() -> String {
        String::from("JWT")
    }
}
/// Supported JWT signing algorithms. The HS* variants are HMAC-SHA2;
/// RS256 is declared but its signing/verification paths call
/// unimplemented!() elsewhere in this file.
#[derive(Clone, Copy)]
pub enum Algorithm {
    HS256,
    HS384,
    HS512,
    RS256
}
impl ToString for Algorithm {
fn to_string(&self) -> String {
match *self {
Algorithm::HS256 => "HS256".to_string(),
Algorithm::HS384 => "HS384".to_string(),
Algorithm::HS512 => "HS512".to_string(),
Algorithm::RS256 => "RS256".to_string()
}
}
}
pub enum Error {
SignatureExpired,
SignatureInvalid,
JWTInvalid,
IssuerInvalid,
ExpirationInvalid,
AudienceInvalid
}
impl ToJson for Header {
fn to_json(&self) -> json::Json {
let mut map = BTreeMap::new();
map.insert("typ".to_string(), self.ttype.to_json());
map.insert("alg".to_string(), self.algorithm.to_string().to_json());
Json::Object(map)
}
}
pub fn encode(header: Header, secret: String, payload: Payload) -> String {
let signing_input = get_signing_input(payload, &header.algorithm);
let signature = sign_hmac(&signing_input, secret, header.algorithm);
format!("{}.{}", signing_input, signature)
}
pub fn decode(encoded_token: String, secret: String, algorithm: Algorithm) -> Result<(Header, Payload), Error> {
match decode_segments(encoded_token) {
Some((header, payload, signature, signing_input)) => {
if!verify_signature(algorithm, signing_input, &signature, secret.to_string()) {
return Err(Error::SignatureInvalid)
}
//todo
// verify_issuer(payload_json);
// verify_expiration(payload_json);
// verify_audience();
// verify_subject();
// verify_notbefore();
// verify_issuedat();
// verify_jwtid();
//todo
Ok((header, payload))
},
None => Err(Error::JWTInvalid)
}
}
fn segments_count() -> usize {
3
}
fn get_signing_input(payload: Payload, algorithm: &Algorithm) -> String {
let header = Header::new(*algorithm);
let header_json_str = header.to_json();
let encoded_header = base64_url_encode(header_json_str.to_string().as_bytes()).to_string();
let p = payload.into_iter().map(|(k, v)| (k, v.to_json())).collect();
let payload_json = Json::Object(p);
let encoded_payload = base64_url_encode(payload_json.to_string().as_bytes()).to_string();
format!("{}.{}", encoded_header, encoded_payload)
}
fn sign_hmac(signing_input: &str, secret: String, algorithm: Algorithm) -> String {
let mut hmac = match algorithm {
Algorithm::HS256 => create_hmac(Sha256::new(), secret),
Algorithm::HS384 => create_hmac(Sha384::new(), secret),
Algorithm::HS512 => create_hmac(Sha512::new(), secret),
Algorithm::RS256 => unimplemented!()
};
hmac.input(signing_input.as_bytes());
base64_url_encode(hmac.result().code())
}
fn base64_url_encode(bytes: &[u8]) -> String {
bytes.to_base64(base64::URL_SAFE)
}
fn json_to_tree(input: Json) -> BTreeMap<String, String> {
match input {
Json::Object(json_tree) => json_tree.into_iter().map(|(k, v)| (k, match v {
Json::String(s) => s,
_ => unreachable!()
})).collect(),
_ => unreachable!()
}
}
fn decode_segments(encoded_token: String) -> Option<(Header, Payload, Vec<u8>, String)> {
let raw_segments: Vec<&str> = encoded_token.split(".").collect();
if raw_segments.len()!= segments_count() {
return None
}
let header_segment = raw_segments[0];
let payload_segment = raw_segments[1];
let crypto_segment = raw_segments[2];
let (header, payload) = decode_header_and_payload(header_segment, payload_segment);
let signature = &crypto_segment.as_bytes().from_base64().unwrap();
let signing_input = format!("{}.{}", header_segment, payload_segment);
Some((header, payload, signature.clone(), signing_input))
}
fn decode_header_and_payload<'a>(header_segment: &str, payload_segment: &str) -> (Header, Payload) {
fn base64_to_json(input: &str) -> Json {
let bytes = input.as_bytes().from_base64().unwrap();
let s = str::from_utf8(&bytes).unwrap();
Json::from_str(s).unwrap()
};
|
let header_tree = json_to_tree(header_json);
let alg = header_tree.get("alg").unwrap();
let header = Header::new(parse_algorithm(alg));
let payload_json = base64_to_json(payload_segment);
let payload = json_to_tree(payload_json);
(header, payload)
}
fn parse_algorithm(alg: &str) -> Algorithm {
match alg {
"HS256" => Algorithm::HS256,
"HS384" => Algorithm::HS384,
"HS512" => Algorithm::HS512,
"RS256" => Algorithm::HS512,
_ => panic!("Unknown algorithm")
}
}
fn verify_signature(algorithm: Algorithm, signing_input: String, signature: &[u8], secret: String) -> bool {
let mut hmac = match algorithm {
Algorithm::HS256 => create_hmac(Sha256::new(), secret),
Algorithm::HS384 => create_hmac(Sha384::new(), secret),
Algorithm::HS512 => create_hmac(Sha512::new(), secret),
Algorithm::RS256 => unimplemented!()
};
hmac.input(signing_input.to_string().as_bytes());
secure_compare(signature, hmac.result().code())
}
fn secure_compare(a: &[u8], b: &[u8]) -> bool {
if a.len()!= b.len() {
return false
}
let mut res = 0_u8;
for (&x, &y) in a.iter().zip(b.iter()) {
res |= x ^ y;
}
res == 0
}
fn create_hmac<'a, D: Digest + 'a>(digest: D, some_str: String) -> Box<Mac + 'a> {
Box::new(Hmac::new(digest, some_str.as_bytes()))
}
#[cfg(test)]
mod tests {
extern crate time;
use time::Duration;
use super::Header;
use super::Payload;
use super::encode;
use super::decode;
use super::Algorithm;
use super::secure_compare;
#[test]
fn test_encode_and_decode_jwt_hs256() {
let mut p1 = Payload::new();
p1.insert("key1".to_string(), "val1".to_string());
p1.insert("key2".to_string(), "val2".to_string());
p1.insert("key3".to_string(), "val3".to_string());
let secret = "secret123";
let header = Header::new(Algorithm::HS256);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS256);
assert!(maybe_res.is_ok());
}
#[test]
fn test_decode_valid_jwt_hs256() {
let mut p1 = Payload::new();
p1.insert("key11".to_string(), "val1".to_string());
p1.insert("key22".to_string(), "val2".to_string());
let secret = "secret123";
let jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkxMSI6InZhbDEiLCJrZXkyMiI6InZhbDIifQ.jrcoVcRsmQqDEzSW9qOhG1HIrzV_n3nMhykNPnGvp9c";
let maybe_res = decode(jwt.to_string(), secret.to_string(), Algorithm::HS256);
assert!(maybe_res.is_ok());
}
#[test]
fn test_secure_compare_same_strings() {
let str1 = "same same".as_bytes();
let str2 = "same same".as_bytes();
let res = secure_compare(str1, str2);
assert!(res);
}
#[test]
fn test_fails_when_secure_compare_different_strings() {
let str1 = "same same".as_bytes();
let str2 = "same same but different".as_bytes();
let res = secure_compare(str1, str2);
assert!(!res);
}
#[test]
fn test_encode_and_decode_jwt_hs384() {
let mut p1 = Payload::new();
p1.insert("key1".to_string(), "val1".to_string());
p1.insert("key2".to_string(), "val2".to_string());
p1.insert("key3".to_string(), "val3".to_string());
let secret = "secret123";
let header = Header::new(Algorithm::HS384);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS384);
assert!(maybe_res.is_ok());
}
#[test]
fn test_encode_and_decode_jwt_hs512() {
let mut p1 = Payload::new();
p1.insert("key12".to_string(), "val1".to_string());
p1.insert("key22".to_string(), "val2".to_string());
p1.insert("key33".to_string(), "val3".to_string());
let secret = "secret123456";
let header = Header::new(Algorithm::HS512);
let jwt1 = encode(header, secret.to_string(), p1.clone());
let maybe_res = decode(jwt1, secret.to_string(), Algorithm::HS512);
assert!(maybe_res.is_ok());
}
// #[test]
// fn test_fails_when_expired() {
// let now = time::get_time();
// let past = now + Duration::minutes(-5);
// let mut p1 = BTreeMap::new();
// p1.insert("exp".to_string(), past.sec.to_string());
// p1.insert("key1".to_string(), "val1".to_string());
// let secret = "secret123";
// let jwt = sign(secret, Some(p1.clone()), None);
// let res = verify(jwt.as_slice(), secret, None);
// assert!(res.is_ok());
// }
// #[test]
// fn test_ok_when_expired_not_verified() {
// let now = time::get_time();
// let past = now + Duration::minutes(-5);
// let mut p1 = BTreeMap::new();
// p1.insert("exp".to_string(), past.sec.to_string());
// p1.insert("key1".to_string(), "val1".to_string());
// let secret = "secret123";
// let jwt = sign(secret, Some(p1.clone()), None);
// let res = verify(jwt.as_slice(), secret, None);
// assert!(res.is_ok());
// }
}
|
let header_json = base64_to_json(header_segment);
|
random_line_split
|
any_error.rs
|
//! Defines an alias for the type returned by `std::panic::catch_unwind`.
// All of this module is `pub(crate)` and should not appear in the C header file
// or documentation.
use crate::error::error_msg::ErrorMsg;
use std::any::Any;
/// The error type returned by `std::panic::catch_unwind`.
pub(crate) type AnyError = Box<dyn Any + Send +'static>;
/// An extension trait for extracting an error message out of an `AnyError`.
|
impl ToErrorMsg for AnyError {
/// This works with an `AnyError` taken from `std::panic::catch_unwind`,
/// attempts to extract an error message out of it by constructing the
/// `ErrorMsg` type, and then converts that to a string, which is passed
/// to `update_last_error`.
///
/// Note that if an error message can't be extracted from the `AnyError`,
/// there will still be an update to the `LAST_ERROR`, reporting that an
/// unknown error occurred.
fn into_error_msg(self) -> String {
ErrorMsg::from(self).to_string()
}
}
|
pub(crate) trait ToErrorMsg {
fn into_error_msg(self) -> String;
}
|
random_line_split
|
any_error.rs
|
//! Defines an alias for the type returned by `std::panic::catch_unwind`.
// All of this module is `pub(crate)` and should not appear in the C header file
// or documentation.
use crate::error::error_msg::ErrorMsg;
use std::any::Any;
/// The error type returned by `std::panic::catch_unwind`.
pub(crate) type AnyError = Box<dyn Any + Send +'static>;
/// An extension trait for extracting an error message out of an `AnyError`.
pub(crate) trait ToErrorMsg {
fn into_error_msg(self) -> String;
}
impl ToErrorMsg for AnyError {
/// This works with an `AnyError` taken from `std::panic::catch_unwind`,
/// attempts to extract an error message out of it by constructing the
/// `ErrorMsg` type, and then converts that to a string, which is passed
/// to `update_last_error`.
///
/// Note that if an error message can't be extracted from the `AnyError`,
/// there will still be an update to the `LAST_ERROR`, reporting that an
/// unknown error occurred.
fn
|
(self) -> String {
ErrorMsg::from(self).to_string()
}
}
|
into_error_msg
|
identifier_name
|
any_error.rs
|
//! Defines an alias for the type returned by `std::panic::catch_unwind`.
// All of this module is `pub(crate)` and should not appear in the C header file
// or documentation.
use crate::error::error_msg::ErrorMsg;
use std::any::Any;
/// The error type returned by `std::panic::catch_unwind`.
pub(crate) type AnyError = Box<dyn Any + Send +'static>;
/// An extension trait for extracting an error message out of an `AnyError`.
pub(crate) trait ToErrorMsg {
fn into_error_msg(self) -> String;
}
impl ToErrorMsg for AnyError {
/// This works with an `AnyError` taken from `std::panic::catch_unwind`,
/// attempts to extract an error message out of it by constructing the
/// `ErrorMsg` type, and then converts that to a string, which is passed
/// to `update_last_error`.
///
/// Note that if an error message can't be extracted from the `AnyError`,
/// there will still be an update to the `LAST_ERROR`, reporting that an
/// unknown error occurred.
fn into_error_msg(self) -> String
|
}
|
{
ErrorMsg::from(self).to_string()
}
|
identifier_body
|
tree_gravity.rs
|
//! Simple integration tests oriented towards gravity computations
extern crate acacia;
extern crate nalgebra;
extern crate quickcheck;
use nalgebra::{ApproxEq, Point2, Point3, FloatPoint, Vector2, Vector3, zero, Norm, Origin};
use quickcheck::{TestResult, quickcheck};
use acacia::{Tree, Node, AssociatedData, DataQuery, Positioned};
use acacia::partition::Ncube;
#[test]
fn tree_center_of_mass() {
fn tree_center_of_mass(data: Vec<(f64, (f64, f64))>) -> TestResult {
// Only test non-empty lists with positive masses
if data.is_empty() || data.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// No two points should be in the same place
for i in 0..data.len() {
for j in 0..i {
let (_, pi) = data[i];
let (_, pj) = data[j];
if pi == pj {
return TestResult::discard();
}
}
}
// Compute center of mass in the traditional way
let (mps, ms) = data.iter()
.map(|&(m, (x, y))| (Vector2::new(x, y) * m, m))
.fold((zero::<Vector2<f64>>(), 0.0f64), |(mps, ms), (mp, m)| (mps + mp, ms + m));
let com = mps / ms;
// Now use the tree
let tree = Tree::new(
data.iter().map(|&(m, (x, y))|
Positioned { object: m, position: Point2::new(x, y) }
),
Ncube::new(Origin::origin(), 200.0f64),
(zero(), 0.0),
&|obj| (obj.position.to_vector() * obj.object, obj.object),
&|&(mps, ms), &(mp, m)| (mps + mp, ms + m)
).expect("Couldn't construct tree");
let (tree_mps, tree_ms) = *tree.data();
// …and compare
TestResult::from_bool(ApproxEq::approx_eq(&(tree_mps / tree_ms), &com))
}
quickcheck(tree_center_of_mass as fn(Vec<(f64, (f64, f64))>) -> TestResult);
}
#[test]
fn tree_gravity_approx() {
fn tree_gravity_approx(
starfield: Vec<(f64, (f64, f64, f64))>,
test_point: (f64, f64, f64)
) -> TestResult
{
// We want to have at least one star
if starfield.is_empty() {
return TestResult::discard();
}
// Only test positive masses
if starfield.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// The test point should not be in the same place as any star
if starfield.iter().any(|&(_, p)| p == test_point) {
return TestResult::discard();
}
// No two stars should be in the same place
for i in 0..starfield.len() {
for j in 0..i {
let (_, pi) = starfield[i];
let (_, pj) = starfield[j];
if pi == pj {
return TestResult::discard();
}
}
}
// (T, T, T) -> Point3<T>
fn pn
|
>(p: (T, T, T)) -> Point3<T> {
let (x, y, z) = p;
Point3::new(x, y, z)
}
let test_point = pnt(test_point);
// Newton's law of gravity for two point masses (with G = 1)
let newton = |(m, p1): (f64, Point3<f64>), p2| {
let diff: Vector3<f64> = p1 - p2;
let r = diff.norm();
diff * (m / r.powi(3))
};
// Calculate gravity exactly
let simple_gravity = starfield.iter()
.map(|&(m, p)| newton((m, pnt(p)), test_point))
.fold(zero(), |a: Vector3<_>, b| a + b);
// Calculate gravity using a tree
let orig: Point3<f64> = Origin::origin();
let data_width = test_point.as_vector().norm() * 2.0;
let width = if data_width < 200.0 { 200.0 } else { data_width };
let tree = Tree::new(
starfield.iter().map(|&(m, (x, y, z))|
Positioned { object: m, position: Point3::new(x, y, z) }
),
Ncube::new(orig, width),
(orig, zero()),
&|obj| (obj.position, obj.object),
&|&(com1, m1), &(com2, m2)|
if m1 + m2 > zero() {(
orig + (com1.to_vector() * m1 + com2.to_vector() * m2) / (m1 + m2),
m1 + m2,
)}
else {
(orig, zero())
}
).expect("Couldn't construct tree");
let theta = 0.5; // A bit arbitrary but this appears to work
let tree_gravity =
tree.query_data(|node| {
let &(ref center_of_mass, _) = node.data();
let d = FloatPoint::distance(&test_point, center_of_mass);
let delta = FloatPoint::distance(&node.partition().center(), center_of_mass);
d < node.partition().width() / theta + delta
})
.map(|&(com, m)| newton((m, com), test_point))
.fold(zero::<Vector3<f64>>(), |a, b| a + b);
// Now the tree gravity should approximate the exact one, within 10 %
TestResult::from_bool(simple_gravity.approx_eq_eps(&tree_gravity, &(0.1 * simple_gravity.norm())))
}
quickcheck(tree_gravity_approx as fn(Vec<(f64, (f64, f64, f64))>, (f64, f64, f64)) -> TestResult)
}
|
t<T
|
identifier_name
|
tree_gravity.rs
|
//! Simple integration tests oriented towards gravity computations
extern crate acacia;
extern crate nalgebra;
extern crate quickcheck;
use nalgebra::{ApproxEq, Point2, Point3, FloatPoint, Vector2, Vector3, zero, Norm, Origin};
use quickcheck::{TestResult, quickcheck};
use acacia::{Tree, Node, AssociatedData, DataQuery, Positioned};
use acacia::partition::Ncube;
#[test]
fn tree_center_of_mass() {
fn tree_center_of_mass(data: Vec<(f64, (f64, f64))>) -> TestResult {
// Only test non-empty lists with positive masses
if data.is_empty() || data.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// No two points should be in the same place
for i in 0..data.len() {
for j in 0..i {
let (_, pi) = data[i];
let (_, pj) = data[j];
if pi == pj {
return TestResult::discard();
}
}
}
// Compute center of mass in the traditional way
let (mps, ms) = data.iter()
.map(|&(m, (x, y))| (Vector2::new(x, y) * m, m))
.fold((zero::<Vector2<f64>>(), 0.0f64), |(mps, ms), (mp, m)| (mps + mp, ms + m));
let com = mps / ms;
// Now use the tree
let tree = Tree::new(
data.iter().map(|&(m, (x, y))|
Positioned { object: m, position: Point2::new(x, y) }
),
Ncube::new(Origin::origin(), 200.0f64),
(zero(), 0.0),
&|obj| (obj.position.to_vector() * obj.object, obj.object),
&|&(mps, ms), &(mp, m)| (mps + mp, ms + m)
).expect("Couldn't construct tree");
let (tree_mps, tree_ms) = *tree.data();
// …and compare
TestResult::from_bool(ApproxEq::approx_eq(&(tree_mps / tree_ms), &com))
}
quickcheck(tree_center_of_mass as fn(Vec<(f64, (f64, f64))>) -> TestResult);
}
#[test]
fn tree_gravity_approx() {
fn tree_gravity_approx(
starfield: Vec<(f64, (f64, f64, f64))>,
test_point: (f64, f64, f64)
) -> TestResult
{
// We want to have at least one star
if starfield.is_empty() {
return TestResult::discard();
}
// Only test positive masses
if starfield.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// The test point should not be in the same place as any star
if starfield.iter().any(|&(_, p)| p == test_point) {
return TestResult::discard();
}
// No two stars should be in the same place
for i in 0..starfield.len() {
for j in 0..i {
let (_, pi) = starfield[i];
let (_, pj) = starfield[j];
if pi == pj {
return TestResult::discard();
}
}
}
// (T, T, T) -> Point3<T>
fn pnt<T>(p: (T, T, T)) -> Point3<T> {
let (x, y, z) = p;
Point3::new(x, y, z)
}
let test_point = pnt(test_point);
// Newton's law of gravity for two point masses (with G = 1)
let newton = |(m, p1): (f64, Point3<f64>), p2| {
let diff: Vector3<f64> = p1 - p2;
let r = diff.norm();
diff * (m / r.powi(3))
};
// Calculate gravity exactly
let simple_gravity = starfield.iter()
.map(|&(m, p)| newton((m, pnt(p)), test_point))
.fold(zero(), |a: Vector3<_>, b| a + b);
// Calculate gravity using a tree
let orig: Point3<f64> = Origin::origin();
let data_width = test_point.as_vector().norm() * 2.0;
let width = if data_width < 200.0 { 200.0 } else { data_width };
let tree = Tree::new(
starfield.iter().map(|&(m, (x, y, z))|
Positioned { object: m, position: Point3::new(x, y, z) }
),
Ncube::new(orig, width),
(orig, zero()),
&|obj| (obj.position, obj.object),
&|&(com1, m1), &(com2, m2)|
if m1 + m2 > zero() {(
orig + (com1.to_vector() * m1 + com2.to_vector() * m2) / (m1 + m2),
m1 + m2,
)}
else {
(orig, zero())
}
|
).expect("Couldn't construct tree");
let theta = 0.5; // A bit arbitrary but this appears to work
let tree_gravity =
tree.query_data(|node| {
let &(ref center_of_mass, _) = node.data();
let d = FloatPoint::distance(&test_point, center_of_mass);
let delta = FloatPoint::distance(&node.partition().center(), center_of_mass);
d < node.partition().width() / theta + delta
})
.map(|&(com, m)| newton((m, com), test_point))
.fold(zero::<Vector3<f64>>(), |a, b| a + b);
// Now the tree gravity should approximate the exact one, within 10 %
TestResult::from_bool(simple_gravity.approx_eq_eps(&tree_gravity, &(0.1 * simple_gravity.norm())))
}
quickcheck(tree_gravity_approx as fn(Vec<(f64, (f64, f64, f64))>, (f64, f64, f64)) -> TestResult)
}
|
random_line_split
|
|
tree_gravity.rs
|
//! Simple integration tests oriented towards gravity computations
extern crate acacia;
extern crate nalgebra;
extern crate quickcheck;
use nalgebra::{ApproxEq, Point2, Point3, FloatPoint, Vector2, Vector3, zero, Norm, Origin};
use quickcheck::{TestResult, quickcheck};
use acacia::{Tree, Node, AssociatedData, DataQuery, Positioned};
use acacia::partition::Ncube;
#[test]
fn tree_center_of_mass() {
fn tree_center_of_mass(data: Vec<(f64, (f64, f64))>) -> TestResult {
// Only test non-empty lists with positive masses
if data.is_empty() || data.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// No two points should be in the same place
for i in 0..data.len() {
for j in 0..i {
let (_, pi) = data[i];
let (_, pj) = data[j];
if pi == pj {
return TestResult::discard();
}
}
}
// Compute center of mass in the traditional way
let (mps, ms) = data.iter()
.map(|&(m, (x, y))| (Vector2::new(x, y) * m, m))
.fold((zero::<Vector2<f64>>(), 0.0f64), |(mps, ms), (mp, m)| (mps + mp, ms + m));
let com = mps / ms;
// Now use the tree
let tree = Tree::new(
data.iter().map(|&(m, (x, y))|
Positioned { object: m, position: Point2::new(x, y) }
),
Ncube::new(Origin::origin(), 200.0f64),
(zero(), 0.0),
&|obj| (obj.position.to_vector() * obj.object, obj.object),
&|&(mps, ms), &(mp, m)| (mps + mp, ms + m)
).expect("Couldn't construct tree");
let (tree_mps, tree_ms) = *tree.data();
// …and compare
TestResult::from_bool(ApproxEq::approx_eq(&(tree_mps / tree_ms), &com))
}
quickcheck(tree_center_of_mass as fn(Vec<(f64, (f64, f64))>) -> TestResult);
}
#[test]
fn tree_gravity_approx() {
fn tree_gravity_approx(
starfield: Vec<(f64, (f64, f64, f64))>,
test_point: (f64, f64, f64)
) -> TestResult
{
// We want to have at least one star
if starfield.is_empty() {
return TestResult::discard();
}
// Only test positive masses
if starfield.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// The test point should not be in the same place as any star
if starfield.iter().any(|&(_, p)| p == test_point) {
|
// No two stars should be in the same place
for i in 0..starfield.len() {
for j in 0..i {
let (_, pi) = starfield[i];
let (_, pj) = starfield[j];
if pi == pj {
return TestResult::discard();
}
}
}
// (T, T, T) -> Point3<T>
fn pnt<T>(p: (T, T, T)) -> Point3<T> {
let (x, y, z) = p;
Point3::new(x, y, z)
}
let test_point = pnt(test_point);
// Newton's law of gravity for two point masses (with G = 1)
let newton = |(m, p1): (f64, Point3<f64>), p2| {
let diff: Vector3<f64> = p1 - p2;
let r = diff.norm();
diff * (m / r.powi(3))
};
// Calculate gravity exactly
let simple_gravity = starfield.iter()
.map(|&(m, p)| newton((m, pnt(p)), test_point))
.fold(zero(), |a: Vector3<_>, b| a + b);
// Calculate gravity using a tree
let orig: Point3<f64> = Origin::origin();
let data_width = test_point.as_vector().norm() * 2.0;
let width = if data_width < 200.0 { 200.0 } else { data_width };
let tree = Tree::new(
starfield.iter().map(|&(m, (x, y, z))|
Positioned { object: m, position: Point3::new(x, y, z) }
),
Ncube::new(orig, width),
(orig, zero()),
&|obj| (obj.position, obj.object),
&|&(com1, m1), &(com2, m2)|
if m1 + m2 > zero() {(
orig + (com1.to_vector() * m1 + com2.to_vector() * m2) / (m1 + m2),
m1 + m2,
)}
else {
(orig, zero())
}
).expect("Couldn't construct tree");
let theta = 0.5; // A bit arbitrary but this appears to work
let tree_gravity =
tree.query_data(|node| {
let &(ref center_of_mass, _) = node.data();
let d = FloatPoint::distance(&test_point, center_of_mass);
let delta = FloatPoint::distance(&node.partition().center(), center_of_mass);
d < node.partition().width() / theta + delta
})
.map(|&(com, m)| newton((m, com), test_point))
.fold(zero::<Vector3<f64>>(), |a, b| a + b);
// Now the tree gravity should approximate the exact one, within 10 %
TestResult::from_bool(simple_gravity.approx_eq_eps(&tree_gravity, &(0.1 * simple_gravity.norm())))
}
quickcheck(tree_gravity_approx as fn(Vec<(f64, (f64, f64, f64))>, (f64, f64, f64)) -> TestResult)
}
|
return TestResult::discard();
}
|
conditional_block
|
tree_gravity.rs
|
//! Simple integration tests oriented towards gravity computations
extern crate acacia;
extern crate nalgebra;
extern crate quickcheck;
use nalgebra::{ApproxEq, Point2, Point3, FloatPoint, Vector2, Vector3, zero, Norm, Origin};
use quickcheck::{TestResult, quickcheck};
use acacia::{Tree, Node, AssociatedData, DataQuery, Positioned};
use acacia::partition::Ncube;
#[test]
fn tree_center_of_mass() {
fn tree_center_of_mass(data: Vec<(f64, (f64, f64))>) -> TestResult {
// Only test non-empty lists with positive masses
if data.is_empty() || data.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// No two points should be in the same place
for i in 0..data.len() {
for j in 0..i {
let (_, pi) = data[i];
let (_, pj) = data[j];
if pi == pj {
return TestResult::discard();
}
}
}
// Compute center of mass in the traditional way
let (mps, ms) = data.iter()
.map(|&(m, (x, y))| (Vector2::new(x, y) * m, m))
.fold((zero::<Vector2<f64>>(), 0.0f64), |(mps, ms), (mp, m)| (mps + mp, ms + m));
let com = mps / ms;
// Now use the tree
let tree = Tree::new(
data.iter().map(|&(m, (x, y))|
Positioned { object: m, position: Point2::new(x, y) }
),
Ncube::new(Origin::origin(), 200.0f64),
(zero(), 0.0),
&|obj| (obj.position.to_vector() * obj.object, obj.object),
&|&(mps, ms), &(mp, m)| (mps + mp, ms + m)
).expect("Couldn't construct tree");
let (tree_mps, tree_ms) = *tree.data();
// …and compare
TestResult::from_bool(ApproxEq::approx_eq(&(tree_mps / tree_ms), &com))
}
quickcheck(tree_center_of_mass as fn(Vec<(f64, (f64, f64))>) -> TestResult);
}
#[test]
fn tree_gravity_approx() {
fn tree_gravity_approx(
starfield: Vec<(f64, (f64, f64, f64))>,
test_point: (f64, f64, f64)
) -> TestResult
{
// We want to have at least one star
if starfield.is_empty() {
return TestResult::discard();
}
// Only test positive masses
if starfield.iter().any(|&(m, _)| m <= 0.0) {
return TestResult::discard();
}
// The test point should not be in the same place as any star
if starfield.iter().any(|&(_, p)| p == test_point) {
return TestResult::discard();
}
// No two stars should be in the same place
for i in 0..starfield.len() {
for j in 0..i {
let (_, pi) = starfield[i];
let (_, pj) = starfield[j];
if pi == pj {
return TestResult::discard();
}
}
}
// (T, T, T) -> Point3<T>
fn pnt<T>(p: (T, T, T)) -> Point3<T> {
|
let test_point = pnt(test_point);
// Newton's law of gravity for two point masses (with G = 1)
let newton = |(m, p1): (f64, Point3<f64>), p2| {
let diff: Vector3<f64> = p1 - p2;
let r = diff.norm();
diff * (m / r.powi(3))
};
// Calculate gravity exactly
let simple_gravity = starfield.iter()
.map(|&(m, p)| newton((m, pnt(p)), test_point))
.fold(zero(), |a: Vector3<_>, b| a + b);
// Calculate gravity using a tree
let orig: Point3<f64> = Origin::origin();
let data_width = test_point.as_vector().norm() * 2.0;
let width = if data_width < 200.0 { 200.0 } else { data_width };
let tree = Tree::new(
starfield.iter().map(|&(m, (x, y, z))|
Positioned { object: m, position: Point3::new(x, y, z) }
),
Ncube::new(orig, width),
(orig, zero()),
&|obj| (obj.position, obj.object),
&|&(com1, m1), &(com2, m2)|
if m1 + m2 > zero() {(
orig + (com1.to_vector() * m1 + com2.to_vector() * m2) / (m1 + m2),
m1 + m2,
)}
else {
(orig, zero())
}
).expect("Couldn't construct tree");
let theta = 0.5; // A bit arbitrary but this appears to work
let tree_gravity =
tree.query_data(|node| {
let &(ref center_of_mass, _) = node.data();
let d = FloatPoint::distance(&test_point, center_of_mass);
let delta = FloatPoint::distance(&node.partition().center(), center_of_mass);
d < node.partition().width() / theta + delta
})
.map(|&(com, m)| newton((m, com), test_point))
.fold(zero::<Vector3<f64>>(), |a, b| a + b);
// Now the tree gravity should approximate the exact one, within 10 %
TestResult::from_bool(simple_gravity.approx_eq_eps(&tree_gravity, &(0.1 * simple_gravity.norm())))
}
quickcheck(tree_gravity_approx as fn(Vec<(f64, (f64, f64, f64))>, (f64, f64, f64)) -> TestResult)
}
|
let (x, y, z) = p;
Point3::new(x, y, z)
}
|
identifier_body
|
example.rs
|
use std::iter::FromIterator;
pub struct SimpleLinkedList<T> {
head: Option<Box<Node<T>>>,
len: usize,
}
struct Node<T> {
data: T,
next: Option<Box<Node<T>>>,
}
impl<T> SimpleLinkedList<T> {
pub fn new() -> Self {
SimpleLinkedList { head: None, len: 0 }
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
pub fn len(&self) -> usize {
self.len
}
pub fn push(&mut self, element: T) {
let node = Box::new(Node::new(element, self.head.take()));
self.head = Some(node);
self.len += 1;
}
pub fn pop(&mut self) -> Option<T> {
match self.len {
0 => None,
_ => {
self.len -= 1;
self.head.take().map(|node| {
let node = *node;
self.head = node.next;
node.data
})
}
}
}
pub fn
|
(&self) -> Option<&T> {
self.head.as_ref().map(|node| &node.data)
}
pub fn rev(self) -> SimpleLinkedList<T> {
let mut rev_list = SimpleLinkedList::new();
let mut vec: Vec<_> = self.into();
for t in vec.drain(..).rev() {
rev_list.push(t);
}
rev_list
}
}
impl<T> FromIterator<T> for SimpleLinkedList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut sll = SimpleLinkedList::new();
for t in iter {
sll.push(t);
}
sll
}
}
impl<T> From<SimpleLinkedList<T>> for Vec<T> {
fn from(mut linked_list: SimpleLinkedList<T>) -> Vec<T> {
let mut vec: Vec<T> = Vec::with_capacity(linked_list.len());
while let Some(data) = linked_list.pop() {
vec.push(data);
}
vec.reverse();
vec
}
}
impl<T> Node<T> {
pub fn new(element: T, next: Option<Box<Node<T>>>) -> Self {
Node {
data: element,
next,
}
}
}
|
peek
|
identifier_name
|
example.rs
|
use std::iter::FromIterator;
pub struct SimpleLinkedList<T> {
head: Option<Box<Node<T>>>,
len: usize,
}
struct Node<T> {
data: T,
next: Option<Box<Node<T>>>,
}
impl<T> SimpleLinkedList<T> {
pub fn new() -> Self {
SimpleLinkedList { head: None, len: 0 }
|
pub fn len(&self) -> usize {
self.len
}
pub fn push(&mut self, element: T) {
let node = Box::new(Node::new(element, self.head.take()));
self.head = Some(node);
self.len += 1;
}
pub fn pop(&mut self) -> Option<T> {
match self.len {
0 => None,
_ => {
self.len -= 1;
self.head.take().map(|node| {
let node = *node;
self.head = node.next;
node.data
})
}
}
}
pub fn peek(&self) -> Option<&T> {
self.head.as_ref().map(|node| &node.data)
}
pub fn rev(self) -> SimpleLinkedList<T> {
let mut rev_list = SimpleLinkedList::new();
let mut vec: Vec<_> = self.into();
for t in vec.drain(..).rev() {
rev_list.push(t);
}
rev_list
}
}
impl<T> FromIterator<T> for SimpleLinkedList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
let mut sll = SimpleLinkedList::new();
for t in iter {
sll.push(t);
}
sll
}
}
impl<T> From<SimpleLinkedList<T>> for Vec<T> {
fn from(mut linked_list: SimpleLinkedList<T>) -> Vec<T> {
let mut vec: Vec<T> = Vec::with_capacity(linked_list.len());
while let Some(data) = linked_list.pop() {
vec.push(data);
}
vec.reverse();
vec
}
}
impl<T> Node<T> {
pub fn new(element: T, next: Option<Box<Node<T>>>) -> Self {
Node {
data: element,
next,
}
}
}
|
}
pub fn is_empty(&self) -> bool {
self.len == 0
}
|
random_line_split
|
bits.rs
|
//! View bits in memory with turtles
//!
//! This example uses [@myrrlyn]'s [`bitvec`] crate to turn data into strings of
//! bits, and then draws them on the screen.
//!
//! You are encouraged to change both the data used to seed the turtle, and the
//! `bitvec` calls that control how the turtle acts, to see what changes.
//!
//! [@myrrlyn]: //github.com/myrrlyn
//! [`bitvec`]: //crates.io/crates/bitvec
// This imports the things we need from `bitvec`, including the `Bits` trait for
// the `.view_bits::<_>()` method we use to view memory.
use bitvec::prelude::*;
use turtle::Turtle;
// Modify these constants to change the behavior of the example.
/// This text will be inspected as individual bytes, and drawn on the screen.
/// You can change it to see what different text looks like when viewed as bits.
///
/// The example program will print more information about the parts of the text
/// to the console while the turtle draws, so that you can see how each glyph
/// corresponds to parts of the rendered memory.
static TEXT: &str = "¡Hola, mundo! 🌍🌏🌎";
/// This number will have its bit pattern printed on screen. Rust provides some
/// interesting numbers in its standard library; you can replace this with other
/// numbers to see what they look like. Pi is provided as the default solely
/// because it is well-known, and has an interesting pattern.
const NUMBER: f32 = std::f32::consts::PI;
/// This controls the width of the drawn line for each bit.
const BIT_WIDTH: f64 = 20.0;
/// This controls the vertical spacing between rows of bit lines.
const BIT_HEIGHT: f64 = 10.0;
/// Set the horizontal spacing between successive bits in a row
const BIT_MARGIN: f64 = BIT_WIDTH / 2.0;
/// Compute the total width of a bit plus its spacing
const BIT_BOX: f64 = BIT_WIDTH + BIT_MARGIN;
fn main() {
// This block sets up the turtle to draw bits more or less centered in the
// screen. The turtle works by walking horizontally for each bit in a byte,
// then backtracking and walking vertically to the next byte.
let mut turtle = Turtle::new();
// The turtle starts in the center of the screen, but we want to move it
// around before drawing.
turtle.pen_up();
// Compute the boundaries of the part of the screen where the turtle will
// draw. We expect to be drawing eight bits, with half to the right of
// center and half to the left.
let right_edge = BIT_BOX * 8.0 / 2.0;
// We also expect to be drawing a row for each byte in the text, with an
// additional separator row for each *character*, half above and half below
// the center of the screen. This computes how many rows of text we will
// draw, then moves the turtle appropriately.
let byte_rows = TEXT.len();
let char_gaps = TEXT.chars().count();
let top_edge = BIT_HEIGHT * ((byte_rows + char_gaps) as f64 / 2.0);
// The turtle starts from the top right of the region,
turtle.forward(top_edge);
turtle.right(90.0);
turtle.forward(right_edge);
// and walks left
turtle.left(180.0);
draw_text(&mut turtle, TEXT);
// The `draw_number` function reads bits from left to right, so the turtle
// should also walk from left to right. The `draw_number` function expects
// that it will be drawing rows sixteen bits long, so it needs to move
// forward another four bits' worth of space in order to be in the correct
// spot.
turtle.forward(8.0 * BIT_BOX / 2.0);
turtle.forward(16.0 * BIT_BOX / 2.0);
// Then, it needs to turn around, to walk in the other direction.
turtle.right(180.0);
draw_number(&mut turtle, NUMBER);
}
/// Draws the bits of a text span on the screen.
fn draw_text(turtle: &mut Turtle, text: &str) {
// Rust strings can iterate over their individual characters. This block
// loops over characters, collecting their start point in the text so that
// we can grab the encoded bytes of each one.
let mut row_num = 0;
for (char_num, (start, codepoint)) in text.char_indices().enumerate() {
println!("Character {}: {}", char_num, codepoint);
// Each character has a variable width, so we need to find that.
let byte_count = codepoint.len_utf8();
// And then collect the bytes of the string that make up the character.
// `start` gives us the starting position in the text sequence, and
// `byte_count` gives us the length in bytes of the character, so we
// need to select the range beginning at `start`, running for
// `byte_count`. Another style of writing this that you might see in
// Rust libraries is `[start..][.. length]`.
let row: &[u8] = &text.as_bytes()[start.. start + byte_count];
// For each byte (`u8`), we use `bitvec` to make a view into its bits.
// `bitvec` provides the `.view_bits::<_>()` method on Rust integers for
// easy access to its view types.
//
// The `Lsb0` means that the view moves from least significant bit to
// most significant. Since we want to display on screen the most
// significant bit on the left, and the least on the right, the turtle
// will have to move from right to left to match.
//
// The `Lsb0` and `Msb0` types describe different ways to view the same
// data. You can read more about them in the `bitvec` docs, and at
// Wikipedia:
// https://docs.rs/bitvec/0.16.1/bitvec/cursor/index.html
// https://en.wikipedia.org/wiki/Endianness#Bit_endianness
for byte in row {
println!(" Byte {:02}:\n Value: 0x{:02X}\n Bits: {:08b}", row_num, byte, byte);
let bits: &BitSlice<_, _> = byte.view_bits::<Lsb0>();
// Then we draw the byte's bits as a row
draw_row(turtle, bits);
// And go to the next row
next_row(turtle, 90.0);
row_num += 1;
}
// This puts a dividing line between each *character* in the text.
// Some characters may have more than one byte, and those bytes will be
// grouped together.
delimit(turtle, 8.0 * BIT_BOX - BIT_MARGIN);
}
}
/// Draws the bits of a number on screen.
fn draw_number(turtle: &mut Turtle, number: f32) {
// `bitvec` can look at more than just `u8`. Let's try looking at the bits
// that represent a number!
//
// Some numbers, like `f32`, have special rules for their representation in
// bits. `bitvec` only knows about raw bits, so it does not provide direct
// support for `f32`. Rust lets us get the bit representation from an `f32`
// with the method `to_bits(f32) -> u32`, which forgets about the `f32`
// rules and uses the number's storage as ordinary bits.
//
// You can read more about the rules for `f32`'s storage in memory, and
// behavior in programs, here:
// https://en.wikipedia.org/wiki/Double-precision_floating-point_format
let raw_number: u32 = number.to_bits();
// `bitvec` can also view bits from left to right, with `Msb0`.
let bits: &BitSlice<_, _> = raw_number.view_bits::<Msb0>();
// The `&BitSlice` type acts just like `&[bool]`, so it comes with a
// `.chunks` method which divides it into smaller pieces. `bitvec` can take
// any number, not just multiples of 8, but 16 is a convenient number to
// look at. Try changing it to a different number, like 10, to see what
// happens!
for (num, row) in bits.chunks(16).enumerate() {
println!("Row {} bits: {:b}", num, row);
// Each chunk produced is a smaller `&BitSlice`, just like
// `&[bool].chunks` produces smaller `&[bool]`s, so we can draw it.
draw_row(turtle, row);
next_row(turtle, -90.0);
}
// Reader exercise:
//
// The IEEE-754 format for `f32` numbers separates them into three parts:
//
// 1. The sign marks whether the number is positive or negative: 1 bit
// 2. The exponent marks how far from zero the number is: 8 bits
// 3. The fraction describes the number: 23 bits.
//
// Using these widths (1 bit, 8 bits, 23 bits), the knowledge that
// `&BitSlice` is a normal Rust slice, and the API documentation for
// `std::iter::Iterator`, see if you can display each portion of an `f32`
// as its own row.
//
// Hints:
//
// - The variable `bits` is set up to view the entire number, from most
// significant bit to least.
// - You can get access to a structure that performs iteration by calling
// `bits.iter()`.
// - You can use the `Iterator::by_ref` method to prevent `Iterator` adapter
// functions from destroying the source iterator.
// - `&BitSlice` is an ordinary Rust slice, so you can use `[start.. end]`
// range indexing to get smaller pieces of it.
}
/// Draw a row of bits on the screen.
///
/// This takes a reference to a turtle, which draws, and a reference to a slice
/// of bits, which provides the data to draw.
///
/// Note that this works whether we're going through the bits left to right
/// (`Msb0`) or right to left (`Lsb0`), because we assume that the turtle is
/// going to start on the correct side and be facing the correct way for this
/// drawing to work.
fn draw_row<O, T>(turtle: &mut Turtle, row: &BitSlice<O, T>)
where O: BitOrder, T: BitStore {
// `&BitSlice` can iterate over bits. It is just like `&[bool]`, and so it
// produces `&bool` for each loop.
for bit in row.iter().by_val() {
// This checks if the bit produced by the row is `1` or `0`, and sets
// the pen color to black (`1`) or light grey (`0`)
if bit {
turtle.set_pen_color("black");
}
else {
|
// For each bit, the loop puts down the pen to draw a line of the bit's
// color, then picks up the pen to add some horizontal spacing between
// them.
turtle.pen_down();
turtle.forward(BIT_WIDTH);
turtle.pen_up();
turtle.forward(BIT_MARGIN);
}
// Rewind the turtle
for _ in 0.. row.len() {
turtle.backward(BIT_BOX);
}
}
/// Produces a separator line to demark different sections of memory.
fn delimit(turtle: &mut Turtle, width: f64) {
turtle.set_pen_color("grey");
turtle.pen_down();
turtle.forward(width);
turtle.backward(width);
next_row(turtle, 90.0);
}
/// Moves the turtle down a row
fn next_row(turtle: &mut Turtle, angle: f64) {
turtle.pen_up();
turtle.left(angle);
turtle.forward(BIT_HEIGHT);
turtle.right(angle);
}
|
turtle.set_pen_color("light grey");
}
|
conditional_block
|
bits.rs
|
//! View bits in memory with turtles
//!
//! This example uses [@myrrlyn]'s [`bitvec`] crate to turn data into strings of
//! bits, and then draws them on the screen.
//!
//! You are encouraged to change both the data used to seed the turtle, and the
//! `bitvec` calls that control how the turtle acts, to see what changes.
//!
//! [@myrrlyn]: //github.com/myrrlyn
//! [`bitvec`]: //crates.io/crates/bitvec
// This imports the things we need from `bitvec`, including the `Bits` trait for
// the `.view_bits::<_>()` method we use to view memory.
use bitvec::prelude::*;
use turtle::Turtle;
// Modify these constants to change the behavior of the example.
/// This text will be inspected as individual bytes, and drawn on the screen.
/// You can change it to see what different text looks like when viewed as bits.
///
/// The example program will print more information about the parts of the text
/// to the console while the turtle draws, so that you can see how each glyph
/// corresponds to parts of the rendered memory.
static TEXT: &str = "¡Hola, mundo! 🌍🌏🌎";
/// This number will have its bit pattern printed on screen. Rust provides some
/// interesting numbers in its standard library; you can replace this with other
/// numbers to see what they look like. Pi is provided as the default solely
/// because it is well-known, and has an interesting pattern.
const NUMBER: f32 = std::f32::consts::PI;
/// This controls the width of the drawn line for each bit.
const BIT_WIDTH: f64 = 20.0;
/// This controls the vertical spacing between rows of bit lines.
const BIT_HEIGHT: f64 = 10.0;
/// Set the horizontal spacing between successive bits in a row
const BIT_MARGIN: f64 = BIT_WIDTH / 2.0;
/// Compute the total width of a bit plus its spacing
const BIT_BOX: f64 = BIT_WIDTH + BIT_MARGIN;
fn main() {
// This block sets up the turtle to draw bits more or less centered in the
// screen. The turtle works by walking horizontally for each bit in a byte,
// then backtracking and walking vertically to the next byte.
let mut turtle = Turtle::new();
// The turtle starts in the center of the screen, but we want to move it
// around before drawing.
turtle.pen_up();
// Compute the boundaries of the part of the screen where the turtle will
// draw. We expect to be drawing eight bits, with half to the right of
// center and half to the left.
let right_edge = BIT_BOX * 8.0 / 2.0;
// We also expect to be drawing a row for each byte in the text, with an
// additional separator row for each *character*, half above and half below
// the center of the screen. This computes how many rows of text we will
// draw, then moves the turtle appropriately.
let byte_rows = TEXT.len();
let char_gaps = TEXT.chars().count();
let top_edge = BIT_HEIGHT * ((byte_rows + char_gaps) as f64 / 2.0);
// The turtle starts from the top right of the region,
turtle.forward(top_edge);
turtle.right(90.0);
turtle.forward(right_edge);
// and walks left
turtle.left(180.0);
draw_text(&mut turtle, TEXT);
// The `draw_number` function reads bits from left to right, so the turtle
// should also walk from left to right. The `draw_number` function expects
// that it will be drawing rows sixteen bits long, so it needs to move
// forward another four bits' worth of space in order to be in the correct
// spot.
turtle.forward(8.0 * BIT_BOX / 2.0);
turtle.forward(16.0 * BIT_BOX / 2.0);
// Then, it needs to turn around, to walk in the other direction.
turtle.right(180.0);
draw_number(&mut turtle, NUMBER);
}
/// Draws the bits of a text span on the screen.
fn draw_text(turtle: &mut Turtle, text: &str) {
// Rust strings can iterate over their individual characters. This block
// loops over characters, collecting their start point in the text so that
// we can grab the encoded bytes of each one.
let mut row_num = 0;
for (char_num, (start, codepoint)) in text.char_indices().enumerate() {
println!("Character {}: {}", char_num, codepoint);
// Each character has a variable width, so we need to find that.
let byte_count = codepoint.len_utf8();
// And then collect the bytes of the string that make up the character.
// `start` gives us the starting position in the text sequence, and
// `byte_count` gives us the length in bytes of the character, so we
// need to select the range beginning at `start`, running for
// `byte_count`. Another style of writing this that you might see in
// Rust libraries is `[start..][.. length]`.
let row: &[u8] = &text.as_bytes()[start.. start + byte_count];
// For each byte (`u8`), we use `bitvec` to make a view into its bits.
// `bitvec` provides the `.view_bits::<_>()` method on Rust integers for
// easy access to its view types.
//
// The `Lsb0` means that the view moves from least significant bit to
// most significant. Since we want to display on screen the most
// significant bit on the left, and the least on the right, the turtle
// will have to move from right to left to match.
//
// The `Lsb0` and `Msb0` types describe different ways to view the same
// data. You can read more about them in the `bitvec` docs, and at
// Wikipedia:
// https://docs.rs/bitvec/0.16.1/bitvec/cursor/index.html
// https://en.wikipedia.org/wiki/Endianness#Bit_endianness
for byte in row {
println!(" Byte {:02}:\n Value: 0x{:02X}\n Bits: {:08b}", row_num, byte, byte);
let bits: &BitSlice<_, _> = byte.view_bits::<Lsb0>();
// Then we draw the byte's bits as a row
draw_row(turtle, bits);
// And go to the next row
next_row(turtle, 90.0);
row_num += 1;
}
// This puts a dividing line between each *character* in the text.
// Some characters may have more than one byte, and those bytes will be
// grouped together.
delimit(turtle, 8.0 * BIT_BOX - BIT_MARGIN);
}
}
/// Draws the bits of a number on screen.
fn draw_number(turtle: &mut Turtle, number: f32) {
// `bitvec` can look at more than just `u8`. Let's try looking at the bits
// that represent a number!
//
// Some numbers, like `f32`, have special rules for their representation in
// bits. `bitvec` only knows about raw bits, so it does not provide direct
// support for `f32`. Rust lets us get the bit representation from an `f32`
// with the method `to_bits(f32) -> u32`, which forgets about the `f32`
// rules and uses the number's storage as ordinary bits.
//
// You can read more about the rules for `f32`'s storage in memory, and
// behavior in programs, here:
// https://en.wikipedia.org/wiki/Double-precision_floating-point_format
let raw_number: u32 = number.to_bits();
// `bitvec` can also view bits from left to right, with `Msb0`.
let bits: &BitSlice<_, _> = raw_number.view_bits::<Msb0>();
// The `&BitSlice` type acts just like `&[bool]`, so it comes with a
// `.chunks` method which divides it into smaller pieces. `bitvec` can take
// any number, not just multiples of 8, but 16 is a convenient number to
// look at. Try changing it to a different number, like 10, to see what
// happens!
for (num, row) in bits.chunks(16).enumerate() {
println!("Row {} bits: {:b}", num, row);
// Each chunk produced is a smaller `&BitSlice`, just like
// `&[bool].chunks` produces smaller `&[bool]`s, so we can draw it.
draw_row(turtle, row);
next_row(turtle, -90.0);
}
// Reader exercise:
//
// The IEEE-754 format for `f32` numbers separates them into three parts:
//
// 1. The sign marks whether the number is positive or negative: 1 bit
// 2. The exponent marks how far from zero the number is: 8 bits
// 3. The fraction describes the number: 23 bits.
//
// Using these widths (1 bit, 8 bits, 23 bits), the knowledge that
// `&BitSlice` is a normal Rust slice, and the API documentation for
// `std::iter::Iterator`, see if you can display each portion of an `f32`
// as its own row.
//
// Hints:
//
// - The variable `bits` is set up to view the entire number, from most
// significant bit to least.
// - You can get access to a structure that performs iteration by calling
// `bits.iter()`.
// - You can use the `Iterator::by_ref` method to prevent `Iterator` adapter
// functions from destroying the source iterator.
// - `&BitSlice` is an ordinary Rust slice, so you can use `[start.. end]`
// range indexing to get smaller pieces of it.
}
/// Draw a row of bits on the screen.
///
/// This takes a reference to a turtle, which draws, and a reference to a slice
/// of bits, which provides the data to draw.
///
/// Note that this works whether we're going through the bits left to right
/// (`Msb0`) or right to left (`Lsb0`), because we assume that the turtle is
/// going to start on the correct side and be facing the correct way for this
/// drawing to work.
fn draw_row<O, T>(turtle: &mut Turtle, row: &BitSlice<O, T>)
where O: BitOrder, T: BitStore {
// `&BitSlice` can iterate over bits. It is just like `&[bool]`, and so it
// produces `&bool` for each loop.
for bit in row.iter().by_val() {
// This checks if the bit produced by the row is `1` or `0`, and sets
// the pen color to black (`1`) or light grey (`0`)
if bit {
turtle.set_pen_color("black");
}
else {
turtle.set_pen_color("light grey");
}
// For each bit, the loop puts down the pen to draw a line of the bit's
// color, then picks up the pen to add some horizontal spacing between
// them.
turtle.pen_down();
turtle.forward(BIT_WIDTH);
turtle.pen_up();
turtle.forward(BIT_MARGIN);
}
// Rewind the turtle
for _ in 0.. row.len() {
turtle.backward(BIT_BOX);
}
}
/// Produces a separator line to demark different sections of memory.
fn delimit(turtle: &mut Turtle, width: f64) {
turtle.set_pen_color("grey");
turtle.pen_down();
turtle.forward(width);
turtle.backward(width);
next_row(turtle, 90.0);
}
/// Moves the turtle down a row
fn next_row(turtle: &mut Turtle, angle: f64) {
turt
|
le.pen_up();
turtle.left(angle);
turtle.forward(BIT_HEIGHT);
turtle.right(angle);
}
|
identifier_body
|
|
bits.rs
|
//! View bits in memory with turtles
//!
//! This example uses [@myrrlyn]'s [`bitvec`] crate to turn data into strings of
//! bits, and then draws them on the screen.
//!
//! You are encouraged to change both the data used to seed the turtle, and the
//! `bitvec` calls that control how the turtle acts, to see what changes.
//!
//! [@myrrlyn]: //github.com/myrrlyn
//! [`bitvec`]: //crates.io/crates/bitvec
// This imports the things we need from `bitvec`, including the `Bits` trait for
// the `.view_bits::<_>()` method we use to view memory.
use bitvec::prelude::*;
use turtle::Turtle;
// Modify these constants to change the behavior of the example.
/// This text will be inspected as individual bytes, and drawn on the screen.
/// You can change it to see what different text looks like when viewed as bits.
///
/// The example program will print more information about the parts of the text
/// to the console while the turtle draws, so that you can see how each glyph
/// corresponds to parts of the rendered memory.
static TEXT: &str = "¡Hola, mundo! 🌍🌏🌎";
/// This number will have its bit pattern printed on screen. Rust provides some
/// interesting numbers in its standard library; you can replace this with other
/// numbers to see what they look like. Pi is provided as the default solely
/// because it is well-known, and has an interesting pattern.
const NUMBER: f32 = std::f32::consts::PI;
/// This controls the width of the drawn line for each bit.
const BIT_WIDTH: f64 = 20.0;
/// This controls the vertical spacing between rows of bit lines.
const BIT_HEIGHT: f64 = 10.0;
/// Set the horizontal spacing between successive bits in a row
const BIT_MARGIN: f64 = BIT_WIDTH / 2.0;
/// Compute the total width of a bit plus its spacing
const BIT_BOX: f64 = BIT_WIDTH + BIT_MARGIN;
fn main() {
// This block sets up the turtle to draw bits more or less centered in the
// screen. The turtle works by walking horizontally for each bit in a byte,
// then backtracking and walking vertically to the next byte.
let mut turtle = Turtle::new();
// The turtle starts in the center of the screen, but we want to move it
// around before drawing.
turtle.pen_up();
// Compute the boundaries of the part of the screen where the turtle will
// draw. We expect to be drawing eight bits, with half to the right of
// center and half to the left.
let right_edge = BIT_BOX * 8.0 / 2.0;
// We also expect to be drawing a row for each byte in the text, with an
// additional separator row for each *character*, half above and half below
// the center of the screen. This computes how many rows of text we will
// draw, then moves the turtle appropriately.
let byte_rows = TEXT.len();
let char_gaps = TEXT.chars().count();
let top_edge = BIT_HEIGHT * ((byte_rows + char_gaps) as f64 / 2.0);
// The turtle starts from the top right of the region,
turtle.forward(top_edge);
turtle.right(90.0);
turtle.forward(right_edge);
// and walks left
turtle.left(180.0);
draw_text(&mut turtle, TEXT);
// The `draw_number` function reads bits from left to right, so the turtle
// should also walk from left to right. The `draw_number` function expects
// that it will be drawing rows sixteen bits long, so it needs to move
// forward another four bits' worth of space in order to be in the correct
// spot.
turtle.forward(8.0 * BIT_BOX / 2.0);
turtle.forward(16.0 * BIT_BOX / 2.0);
// Then, it needs to turn around, to walk in the other direction.
turtle.right(180.0);
draw_number(&mut turtle, NUMBER);
}
/// Draws the bits of a text span on the screen.
fn draw_text(turtle: &mut Turtle, text: &str) {
// Rust strings can iterate over their individual characters. This block
|
// loops over characters, collecting their start point in the text so that
// we can grab the encoded bytes of each one.
let mut row_num = 0;
for (char_num, (start, codepoint)) in text.char_indices().enumerate() {
println!("Character {}: {}", char_num, codepoint);
// Each character has a variable width, so we need to find that.
let byte_count = codepoint.len_utf8();
// And then collect the bytes of the string that make up the character.
// `start` gives us the starting position in the text sequence, and
// `byte_count` gives us the length in bytes of the character, so we
// need to select the range beginning at `start`, running for
// `byte_count`. Another style of writing this that you might see in
// Rust libraries is `[start..][.. length]`.
let row: &[u8] = &text.as_bytes()[start.. start + byte_count];
// For each byte (`u8`), we use `bitvec` to make a view into its bits.
// `bitvec` provides the `.view_bits::<_>()` method on Rust integers for
// easy access to its view types.
//
// The `Lsb0` means that the view moves from least significant bit to
// most significant. Since we want to display on screen the most
// significant bit on the left, and the least on the right, the turtle
// will have to move from right to left to match.
//
// The `Lsb0` and `Msb0` types describe different ways to view the same
// data. You can read more about them in the `bitvec` docs, and at
// Wikipedia:
// https://docs.rs/bitvec/0.16.1/bitvec/cursor/index.html
// https://en.wikipedia.org/wiki/Endianness#Bit_endianness
for byte in row {
println!(" Byte {:02}:\n Value: 0x{:02X}\n Bits: {:08b}", row_num, byte, byte);
let bits: &BitSlice<_, _> = byte.view_bits::<Lsb0>();
// Then we draw the byte's bits as a row
draw_row(turtle, bits);
// And go to the next row
next_row(turtle, 90.0);
row_num += 1;
}
// This puts a dividing line between each *character* in the text.
// Some characters may have more than one byte, and those bytes will be
// grouped together.
delimit(turtle, 8.0 * BIT_BOX - BIT_MARGIN);
}
}
/// Draws the bits of a number on screen.
fn draw_number(turtle: &mut Turtle, number: f32) {
// `bitvec` can look at more than just `u8`. Let's try looking at the bits
// that represent a number!
//
// Some numbers, like `f32`, have special rules for their representation in
// bits. `bitvec` only knows about raw bits, so it does not provide direct
// support for `f32`. Rust lets us get the bit representation from an `f32`
// with the method `to_bits(f32) -> u32`, which forgets about the `f32`
// rules and uses the number's storage as ordinary bits.
//
// You can read more about the rules for `f32`'s storage in memory, and
// behavior in programs, here:
// https://en.wikipedia.org/wiki/Double-precision_floating-point_format
let raw_number: u32 = number.to_bits();
// `bitvec` can also view bits from left to right, with `Msb0`.
let bits: &BitSlice<_, _> = raw_number.view_bits::<Msb0>();
// The `&BitSlice` type acts just like `&[bool]`, so it comes with a
// `.chunks` method which divides it into smaller pieces. `bitvec` can take
// any number, not just multiples of 8, but 16 is a convenient number to
// look at. Try changing it to a different number, like 10, to see what
// happens!
for (num, row) in bits.chunks(16).enumerate() {
println!("Row {} bits: {:b}", num, row);
// Each chunk produced is a smaller `&BitSlice`, just like
// `&[bool].chunks` produces smaller `&[bool]`s, so we can draw it.
draw_row(turtle, row);
next_row(turtle, -90.0);
}
// Reader exercise:
//
// The IEEE-754 format for `f32` numbers separates them into three parts:
//
// 1. The sign marks whether the number is positive or negative: 1 bit
// 2. The exponent marks how far from zero the number is: 8 bits
// 3. The fraction describes the number: 23 bits.
//
// Using these widths (1 bit, 8 bits, 23 bits), the knowledge that
// `&BitSlice` is a normal Rust slice, and the API documentation for
// `std::iter::Iterator`, see if you can display each portion of an `f32`
// as its own row.
//
// Hints:
//
// - The variable `bits` is set up to view the entire number, from most
// significant bit to least.
// - You can get access to a structure that performs iteration by calling
// `bits.iter()`.
// - You can use the `Iterator::by_ref` method to prevent `Iterator` adapter
// functions from destroying the source iterator.
// - `&BitSlice` is an ordinary Rust slice, so you can use `[start.. end]`
// range indexing to get smaller pieces of it.
}
/// Draw a row of bits on the screen.
///
/// This takes a reference to a turtle, which draws, and a reference to a slice
/// of bits, which provides the data to draw.
///
/// Note that this works whether we're going through the bits left to right
/// (`Msb0`) or right to left (`Lsb0`), because we assume that the turtle is
/// going to start on the correct side and be facing the correct way for this
/// drawing to work.
fn draw_row<O, T>(turtle: &mut Turtle, row: &BitSlice<O, T>)
where O: BitOrder, T: BitStore {
// `&BitSlice` can iterate over bits. It is just like `&[bool]`, and so it
// produces `&bool` for each loop.
for bit in row.iter().by_val() {
// This checks if the bit produced by the row is `1` or `0`, and sets
// the pen color to black (`1`) or light grey (`0`)
if bit {
turtle.set_pen_color("black");
}
else {
turtle.set_pen_color("light grey");
}
// For each bit, the loop puts down the pen to draw a line of the bit's
// color, then picks up the pen to add some horizontal spacing between
// them.
turtle.pen_down();
turtle.forward(BIT_WIDTH);
turtle.pen_up();
turtle.forward(BIT_MARGIN);
}
// Rewind the turtle
for _ in 0.. row.len() {
turtle.backward(BIT_BOX);
}
}
/// Produces a separator line to demark different sections of memory.
fn delimit(turtle: &mut Turtle, width: f64) {
turtle.set_pen_color("grey");
turtle.pen_down();
turtle.forward(width);
turtle.backward(width);
next_row(turtle, 90.0);
}
/// Moves the turtle down a row
fn next_row(turtle: &mut Turtle, angle: f64) {
turtle.pen_up();
turtle.left(angle);
turtle.forward(BIT_HEIGHT);
turtle.right(angle);
}
|
random_line_split
|
|
bits.rs
|
//! View bits in memory with turtles
//!
//! This example uses [@myrrlyn]'s [`bitvec`] crate to turn data into strings of
//! bits, and then draws them on the screen.
//!
//! You are encouraged to change both the data used to seed the turtle, and the
//! `bitvec` calls that control how the turtle acts, to see what changes.
//!
//! [@myrrlyn]: //github.com/myrrlyn
//! [`bitvec`]: //crates.io/crates/bitvec
// This imports the things we need from `bitvec`, including the `Bits` trait for
// the `.view_bits::<_>()` method we use to view memory.
use bitvec::prelude::*;
use turtle::Turtle;
// Modify these constants to change the behavior of the example.
/// This text will be inspected as individual bytes, and drawn on the screen.
/// You can change it to see what different text looks like when viewed as bits.
///
/// The example program will print more information about the parts of the text
/// to the console while the turtle draws, so that you can see how each glyph
/// corresponds to parts of the rendered memory.
static TEXT: &str = "¡Hola, mundo! 🌍🌏🌎";
/// This number will have its bit pattern printed on screen. Rust provides some
/// interesting numbers in its standard library; you can replace this with other
/// numbers to see what they look like. Pi is provided as the default solely
/// because it is well-known, and has an interesting pattern.
const NUMBER: f32 = std::f32::consts::PI;
/// This controls the width of the drawn line for each bit.
const BIT_WIDTH: f64 = 20.0;
/// This controls the vertical spacing between rows of bit lines.
const BIT_HEIGHT: f64 = 10.0;
/// Set the horizontal spacing between successive bits in a row
const BIT_MARGIN: f64 = BIT_WIDTH / 2.0;
/// Compute the total width of a bit plus its spacing
const BIT_BOX: f64 = BIT_WIDTH + BIT_MARGIN;
fn main() {
// This block sets up the turtle to draw bits more or less centered in the
// screen. The turtle works by walking horizontally for each bit in a byte,
// then backtracking and walking vertically to the next byte.
let mut turtle = Turtle::new();
// The turtle starts in the center of the screen, but we want to move it
// around before drawing.
turtle.pen_up();
// Compute the boundaries of the part of the screen where the turtle will
// draw. We expect to be drawing eight bits, with half to the right of
// center and half to the left.
let right_edge = BIT_BOX * 8.0 / 2.0;
// We also expect to be drawing a row for each byte in the text, with an
// additional separator row for each *character*, half above and half below
// the center of the screen. This computes how many rows of text we will
// draw, then moves the turtle appropriately.
let byte_rows = TEXT.len();
let char_gaps = TEXT.chars().count();
let top_edge = BIT_HEIGHT * ((byte_rows + char_gaps) as f64 / 2.0);
// The turtle starts from the top right of the region,
turtle.forward(top_edge);
turtle.right(90.0);
turtle.forward(right_edge);
// and walks left
turtle.left(180.0);
draw_text(&mut turtle, TEXT);
// The `draw_number` function reads bits from left to right, so the turtle
// should also walk from left to right. The `draw_number` function expects
// that it will be drawing rows sixteen bits long, so it needs to move
// forward another four bits' worth of space in order to be in the correct
// spot.
turtle.forward(8.0 * BIT_BOX / 2.0);
turtle.forward(16.0 * BIT_BOX / 2.0);
// Then, it needs to turn around, to walk in the other direction.
turtle.right(180.0);
draw_number(&mut turtle, NUMBER);
}
/// Draws the bits of a text span on the screen.
fn draw_text(turtle: &mut Turtle, text: &str) {
// Rust strings can iterate over their individual characters. This block
// loops over characters, collecting their start point in the text so that
// we can grab the encoded bytes of each one.
let mut row_num = 0;
for (char_num, (start, codepoint)) in text.char_indices().enumerate() {
println!("Character {}: {}", char_num, codepoint);
// Each character has a variable width, so we need to find that.
let byte_count = codepoint.len_utf8();
// And then collect the bytes of the string that make up the character.
// `start` gives us the starting position in the text sequence, and
// `byte_count` gives us the length in bytes of the character, so we
// need to select the range beginning at `start`, running for
// `byte_count`. Another style of writing this that you might see in
// Rust libraries is `[start..][.. length]`.
let row: &[u8] = &text.as_bytes()[start.. start + byte_count];
// For each byte (`u8`), we use `bitvec` to make a view into its bits.
// `bitvec` provides the `.view_bits::<_>()` method on Rust integers for
// easy access to its view types.
//
// The `Lsb0` means that the view moves from least significant bit to
// most significant. Since we want to display on screen the most
// significant bit on the left, and the least on the right, the turtle
// will have to move from right to left to match.
//
// The `Lsb0` and `Msb0` types describe different ways to view the same
// data. You can read more about them in the `bitvec` docs, and at
// Wikipedia:
// https://docs.rs/bitvec/0.16.1/bitvec/cursor/index.html
// https://en.wikipedia.org/wiki/Endianness#Bit_endianness
for byte in row {
println!(" Byte {:02}:\n Value: 0x{:02X}\n Bits: {:08b}", row_num, byte, byte);
let bits: &BitSlice<_, _> = byte.view_bits::<Lsb0>();
// Then we draw the byte's bits as a row
draw_row(turtle, bits);
// And go to the next row
next_row(turtle, 90.0);
row_num += 1;
}
// This puts a dividing line between each *character* in the text.
// Some characters may have more than one byte, and those bytes will be
// grouped together.
delimit(turtle, 8.0 * BIT_BOX - BIT_MARGIN);
}
}
/// Draws the bits of a number on screen.
fn draw_numbe
|
mut Turtle, number: f32) {
// `bitvec` can look at more than just `u8`. Let's try looking at the bits
// that represent a number!
//
// Some numbers, like `f32`, have special rules for their representation in
// bits. `bitvec` only knows about raw bits, so it does not provide direct
// support for `f32`. Rust lets us get the bit representation from an `f32`
// with the method `to_bits(f32) -> u32`, which forgets about the `f32`
// rules and uses the number's storage as ordinary bits.
//
// You can read more about the rules for `f32`'s storage in memory, and
// behavior in programs, here:
// https://en.wikipedia.org/wiki/Double-precision_floating-point_format
let raw_number: u32 = number.to_bits();
// `bitvec` can also view bits from left to right, with `Msb0`.
let bits: &BitSlice<_, _> = raw_number.view_bits::<Msb0>();
// The `&BitSlice` type acts just like `&[bool]`, so it comes with a
// `.chunks` method which divides it into smaller pieces. `bitvec` can take
// any number, not just multiples of 8, but 16 is a convenient number to
// look at. Try changing it to a different number, like 10, to see what
// happens!
for (num, row) in bits.chunks(16).enumerate() {
println!("Row {} bits: {:b}", num, row);
// Each chunk produced is a smaller `&BitSlice`, just like
// `&[bool].chunks` produces smaller `&[bool]`s, so we can draw it.
draw_row(turtle, row);
next_row(turtle, -90.0);
}
// Reader exercise:
//
// The IEEE-754 format for `f32` numbers separates them into three parts:
//
// 1. The sign marks whether the number is positive or negative: 1 bit
// 2. The exponent marks how far from zero the number is: 8 bits
// 3. The fraction describes the number: 23 bits.
//
// Using these widths (1 bit, 8 bits, 23 bits), the knowledge that
// `&BitSlice` is a normal Rust slice, and the API documentation for
// `std::iter::Iterator`, see if you can display each portion of an `f32`
// as its own row.
//
// Hints:
//
// - The variable `bits` is set up to view the entire number, from most
// significant bit to least.
// - You can get access to a structure that performs iteration by calling
// `bits.iter()`.
// - You can use the `Iterator::by_ref` method to prevent `Iterator` adapter
// functions from destroying the source iterator.
// - `&BitSlice` is an ordinary Rust slice, so you can use `[start.. end]`
// range indexing to get smaller pieces of it.
}
/// Draw a row of bits on the screen.
///
/// This takes a reference to a turtle, which draws, and a reference to a slice
/// of bits, which provides the data to draw.
///
/// Note that this works whether we're going through the bits left to right
/// (`Msb0`) or right to left (`Lsb0`), because we assume that the turtle is
/// going to start on the correct side and be facing the correct way for this
/// drawing to work.
fn draw_row<O, T>(turtle: &mut Turtle, row: &BitSlice<O, T>)
where O: BitOrder, T: BitStore {
// `&BitSlice` can iterate over bits. It is just like `&[bool]`, and so it
// produces `&bool` for each loop.
for bit in row.iter().by_val() {
// This checks if the bit produced by the row is `1` or `0`, and sets
// the pen color to black (`1`) or light grey (`0`)
if bit {
turtle.set_pen_color("black");
}
else {
turtle.set_pen_color("light grey");
}
// For each bit, the loop puts down the pen to draw a line of the bit's
// color, then picks up the pen to add some horizontal spacing between
// them.
turtle.pen_down();
turtle.forward(BIT_WIDTH);
turtle.pen_up();
turtle.forward(BIT_MARGIN);
}
// Rewind the turtle
for _ in 0.. row.len() {
turtle.backward(BIT_BOX);
}
}
/// Produces a separator line to demark different sections of memory.
fn delimit(turtle: &mut Turtle, width: f64) {
turtle.set_pen_color("grey");
turtle.pen_down();
turtle.forward(width);
turtle.backward(width);
next_row(turtle, 90.0);
}
/// Moves the turtle down a row
fn next_row(turtle: &mut Turtle, angle: f64) {
turtle.pen_up();
turtle.left(angle);
turtle.forward(BIT_HEIGHT);
turtle.right(angle);
}
|
r(turtle: &
|
identifier_name
|
tvout.rs
|
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::guiddef::GUID;
use shared::minwindef::{UCHAR, ULONG};
STRUCT!{struct VIDEOPARAMETERS {
Guid: GUID,
dwOffset: ULONG,
dwCommand: ULONG,
dwFlags: ULONG,
dwMode: ULONG,
dwTVStandard: ULONG,
dwAvailableModes: ULONG,
dwAvailableTVStandard: ULONG,
dwFlickerFilter: ULONG,
dwOverScanX: ULONG,
dwOverScanY: ULONG,
dwMaxUnscaledX: ULONG,
dwMaxUnscaledY: ULONG,
dwPositionX: ULONG,
dwPositionY: ULONG,
dwBrightness: ULONG,
dwContrast: ULONG,
dwCPType: ULONG,
dwCPCommand: ULONG,
dwCPStandard: ULONG,
dwCPKey: ULONG,
bCP_APSTriggerBits: ULONG,
bOEMCopyProtection: [UCHAR; 256],
}}
pub type PVIDEOPARAMETERS = *mut VIDEOPARAMETERS;
pub type LPVIDEOPARAMETERS = *mut VIDEOPARAMETERS;
pub const VP_COMMAND_GET: ULONG = 0x0001;
pub const VP_COMMAND_SET: ULONG = 0x0002;
pub const VP_FLAGS_TV_MODE: ULONG = 0x0001;
pub const VP_FLAGS_TV_STANDARD: ULONG = 0x0002;
pub const VP_FLAGS_FLICKER: ULONG = 0x0004;
pub const VP_FLAGS_OVERSCAN: ULONG = 0x0008;
pub const VP_FLAGS_MAX_UNSCALED: ULONG = 0x0010;
pub const VP_FLAGS_POSITION: ULONG = 0x0020;
pub const VP_FLAGS_BRIGHTNESS: ULONG = 0x0040;
pub const VP_FLAGS_CONTRAST: ULONG = 0x0080;
pub const VP_FLAGS_COPYPROTECT: ULONG = 0x0100;
pub const VP_MODE_WIN_GRAPHICS: ULONG = 0x0001;
pub const VP_MODE_TV_PLAYBACK: ULONG = 0x0002;
pub const VP_TV_STANDARD_NTSC_M: ULONG = 0x0001;
pub const VP_TV_STANDARD_NTSC_M_J: ULONG = 0x0002;
pub const VP_TV_STANDARD_PAL_B: ULONG = 0x0004;
pub const VP_TV_STANDARD_PAL_D: ULONG = 0x0008;
pub const VP_TV_STANDARD_PAL_H: ULONG = 0x0010;
pub const VP_TV_STANDARD_PAL_I: ULONG = 0x0020;
pub const VP_TV_STANDARD_PAL_M: ULONG = 0x0040;
pub const VP_TV_STANDARD_PAL_N: ULONG = 0x0080;
pub const VP_TV_STANDARD_SECAM_B: ULONG = 0x0100;
pub const VP_TV_STANDARD_SECAM_D: ULONG = 0x0200;
pub const VP_TV_STANDARD_SECAM_G: ULONG = 0x0400;
pub const VP_TV_STANDARD_SECAM_H: ULONG = 0x0800;
|
pub const VP_TV_STANDARD_PAL_G: ULONG = 0x00020000;
pub const VP_TV_STANDARD_PAL_60: ULONG = 0x00040000;
pub const VP_TV_STANDARD_SECAM_L1: ULONG = 0x00080000;
pub const VP_CP_TYPE_APS_TRIGGER: ULONG = 0x0001;
pub const VP_CP_TYPE_MACROVISION: ULONG = 0x0002;
pub const VP_CP_CMD_ACTIVATE: ULONG = 0x0001;
pub const VP_CP_CMD_DEACTIVATE: ULONG = 0x0002;
pub const VP_CP_CMD_CHANGE: ULONG = 0x0004;
|
pub const VP_TV_STANDARD_SECAM_K: ULONG = 0x1000;
pub const VP_TV_STANDARD_SECAM_K1: ULONG = 0x2000;
pub const VP_TV_STANDARD_SECAM_L: ULONG = 0x4000;
pub const VP_TV_STANDARD_WIN_VGA: ULONG = 0x8000;
pub const VP_TV_STANDARD_NTSC_433: ULONG = 0x00010000;
|
random_line_split
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Animation implementation for various font-related types.
use super::{Animate, Procedure, ToAnimatedZero};
use values::computed::font::{FontVariationSettings, FontWeight};
use values::computed::Number;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
use values::generics::font::{FontSettings as GenericFontSettings, FontTag, VariationValue};
impl ToAnimatedZero for FontWeight {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Ok(FontWeight::normal())
}
}
/// <https://drafts.csswg.org/css-fonts-4/#font-variation-settings-def>
impl Animate for FontVariationSettings {
#[inline]
fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()>
|
}
impl ComputeSquaredDistance for FontVariationSettings {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
FontSettingTagIter::new(self, other)?
.map(|r| r.and_then(|(st, ot)| st.compute_squared_distance(&ot)))
.sum()
}
}
impl ToAnimatedZero for FontVariationSettings {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Err(())
}
}
type ComputedVariationValue = VariationValue<Number>;
// FIXME: Could do a rename, this is only used for font variations.
struct FontSettingTagIterState<'a> {
tags: Vec<(&'a ComputedVariationValue)>,
index: usize,
prev_tag: FontTag,
}
impl<'a> FontSettingTagIterState<'a> {
fn new(tags: Vec<&'a ComputedVariationValue>) -> FontSettingTagIterState<'a> {
FontSettingTagIterState {
index: tags.len(),
tags,
prev_tag: FontTag(0),
}
}
}
/// Iterator for font-variation-settings tag lists
///
/// [CSS fonts level 4](https://drafts.csswg.org/css-fonts-4/#descdef-font-face-font-variation-settings)
/// defines the animation of font-variation-settings as follows:
///
/// Two declarations of font-feature-settings[sic] can be animated between if
/// they are "like". "Like" declarations are ones where the same set of
/// properties appear (in any order). Because succesive[sic] duplicate
/// properties are applied instead of prior duplicate properties, two
/// declarations can be "like" even if they have differing number of
/// properties. If two declarations are "like" then animation occurs pairwise
/// between corresponding values in the declarations.
///
/// In other words if we have the following lists:
///
/// "wght" 1.4, "wdth" 5, "wght" 2
/// "wdth" 8, "wght" 4, "wdth" 10
///
/// We should animate between:
///
/// "wdth" 5, "wght" 2
/// "wght" 4, "wdth" 10
///
/// This iterator supports this by sorting the two lists, then iterating them in
/// reverse, and skipping entries with repeated tag names. It will return
/// Some(Err()) if it reaches the end of one list before the other, or if the
/// tag names do not match.
///
/// For the above example, this iterator would return:
///
/// Some(Ok("wght" 2, "wght" 4))
/// Some(Ok("wdth" 5, "wdth" 10))
/// None
///
struct FontSettingTagIter<'a> {
a_state: FontSettingTagIterState<'a>,
b_state: FontSettingTagIterState<'a>,
}
impl<'a> FontSettingTagIter<'a> {
fn new(
a_settings: &'a FontVariationSettings,
b_settings: &'a FontVariationSettings,
) -> Result<FontSettingTagIter<'a>, ()> {
if a_settings.0.is_empty() || b_settings.0.is_empty() {
return Err(());
}
fn as_new_sorted_tags(tags: &[ComputedVariationValue]) -> Vec<&ComputedVariationValue> {
use std::iter::FromIterator;
let mut sorted_tags = Vec::from_iter(tags.iter());
sorted_tags.sort_by_key(|k| k.tag.0);
sorted_tags
};
Ok(FontSettingTagIter {
a_state: FontSettingTagIterState::new(as_new_sorted_tags(&a_settings.0)),
b_state: FontSettingTagIterState::new(as_new_sorted_tags(&b_settings.0)),
})
}
fn next_tag(state: &mut FontSettingTagIterState<'a>) -> Option<&'a ComputedVariationValue> {
if state.index == 0 {
return None;
}
state.index -= 1;
let tag = state.tags[state.index];
if tag.tag == state.prev_tag {
FontSettingTagIter::next_tag(state)
} else {
state.prev_tag = tag.tag;
Some(tag)
}
}
}
impl<'a> Iterator for FontSettingTagIter<'a> {
type Item = Result<(&'a ComputedVariationValue, &'a ComputedVariationValue), ()>;
fn next(
&mut self,
) -> Option<Result<(&'a ComputedVariationValue, &'a ComputedVariationValue), ()>> {
match (
FontSettingTagIter::next_tag(&mut self.a_state),
FontSettingTagIter::next_tag(&mut self.b_state),
) {
(Some(at), Some(bt)) if at.tag == bt.tag => Some(Ok((at, bt))),
(None, None) => None,
_ => Some(Err(())), // Mismatch number of unique tags or tag names.
}
}
}
|
{
FontSettingTagIter::new(self, other)?
.map(|r| r.and_then(|(st, ot)| st.animate(&ot, procedure)))
.collect::<Result<Vec<ComputedVariationValue>, ()>>()
.map(|v| GenericFontSettings(v.into_boxed_slice()))
}
|
identifier_body
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Animation implementation for various font-related types.
use super::{Animate, Procedure, ToAnimatedZero};
use values::computed::font::{FontVariationSettings, FontWeight};
use values::computed::Number;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
use values::generics::font::{FontSettings as GenericFontSettings, FontTag, VariationValue};
impl ToAnimatedZero for FontWeight {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Ok(FontWeight::normal())
}
}
/// <https://drafts.csswg.org/css-fonts-4/#font-variation-settings-def>
impl Animate for FontVariationSettings {
#[inline]
fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> {
FontSettingTagIter::new(self, other)?
.map(|r| r.and_then(|(st, ot)| st.animate(&ot, procedure)))
.collect::<Result<Vec<ComputedVariationValue>, ()>>()
.map(|v| GenericFontSettings(v.into_boxed_slice()))
}
}
impl ComputeSquaredDistance for FontVariationSettings {
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
FontSettingTagIter::new(self, other)?
.map(|r| r.and_then(|(st, ot)| st.compute_squared_distance(&ot)))
.sum()
}
}
impl ToAnimatedZero for FontVariationSettings {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Err(())
}
}
type ComputedVariationValue = VariationValue<Number>;
// FIXME: Could do a rename, this is only used for font variations.
struct FontSettingTagIterState<'a> {
tags: Vec<(&'a ComputedVariationValue)>,
index: usize,
prev_tag: FontTag,
}
impl<'a> FontSettingTagIterState<'a> {
fn
|
(tags: Vec<&'a ComputedVariationValue>) -> FontSettingTagIterState<'a> {
FontSettingTagIterState {
index: tags.len(),
tags,
prev_tag: FontTag(0),
}
}
}
/// Iterator for font-variation-settings tag lists
///
/// [CSS fonts level 4](https://drafts.csswg.org/css-fonts-4/#descdef-font-face-font-variation-settings)
/// defines the animation of font-variation-settings as follows:
///
/// Two declarations of font-feature-settings[sic] can be animated between if
/// they are "like". "Like" declarations are ones where the same set of
/// properties appear (in any order). Because succesive[sic] duplicate
/// properties are applied instead of prior duplicate properties, two
/// declarations can be "like" even if they have differing number of
/// properties. If two declarations are "like" then animation occurs pairwise
/// between corresponding values in the declarations.
///
/// In other words if we have the following lists:
///
/// "wght" 1.4, "wdth" 5, "wght" 2
/// "wdth" 8, "wght" 4, "wdth" 10
///
/// We should animate between:
///
/// "wdth" 5, "wght" 2
/// "wght" 4, "wdth" 10
///
/// This iterator supports this by sorting the two lists, then iterating them in
/// reverse, and skipping entries with repeated tag names. It will return
/// Some(Err()) if it reaches the end of one list before the other, or if the
/// tag names do not match.
///
/// For the above example, this iterator would return:
///
/// Some(Ok("wght" 2, "wght" 4))
/// Some(Ok("wdth" 5, "wdth" 10))
/// None
///
struct FontSettingTagIter<'a> {
a_state: FontSettingTagIterState<'a>,
b_state: FontSettingTagIterState<'a>,
}
impl<'a> FontSettingTagIter<'a> {
fn new(
a_settings: &'a FontVariationSettings,
b_settings: &'a FontVariationSettings,
) -> Result<FontSettingTagIter<'a>, ()> {
if a_settings.0.is_empty() || b_settings.0.is_empty() {
return Err(());
}
fn as_new_sorted_tags(tags: &[ComputedVariationValue]) -> Vec<&ComputedVariationValue> {
use std::iter::FromIterator;
let mut sorted_tags = Vec::from_iter(tags.iter());
sorted_tags.sort_by_key(|k| k.tag.0);
sorted_tags
};
Ok(FontSettingTagIter {
a_state: FontSettingTagIterState::new(as_new_sorted_tags(&a_settings.0)),
b_state: FontSettingTagIterState::new(as_new_sorted_tags(&b_settings.0)),
})
}
fn next_tag(state: &mut FontSettingTagIterState<'a>) -> Option<&'a ComputedVariationValue> {
if state.index == 0 {
return None;
}
state.index -= 1;
let tag = state.tags[state.index];
if tag.tag == state.prev_tag {
FontSettingTagIter::next_tag(state)
} else {
state.prev_tag = tag.tag;
Some(tag)
}
}
}
impl<'a> Iterator for FontSettingTagIter<'a> {
type Item = Result<(&'a ComputedVariationValue, &'a ComputedVariationValue), ()>;
fn next(
&mut self,
) -> Option<Result<(&'a ComputedVariationValue, &'a ComputedVariationValue), ()>> {
match (
FontSettingTagIter::next_tag(&mut self.a_state),
FontSettingTagIter::next_tag(&mut self.b_state),
) {
(Some(at), Some(bt)) if at.tag == bt.tag => Some(Ok((at, bt))),
(None, None) => None,
_ => Some(Err(())), // Mismatch number of unique tags or tag names.
}
}
}
|
new
|
identifier_name
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Animation implementation for various font-related types.
use super::{Animate, Procedure, ToAnimatedZero};
use values::computed::font::{FontVariationSettings, FontWeight};
use values::computed::Number;
use values::distance::{ComputeSquaredDistance, SquaredDistance};
use values::generics::font::{FontSettings as GenericFontSettings, FontTag, VariationValue};
impl ToAnimatedZero for FontWeight {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Ok(FontWeight::normal())
}
}
/// <https://drafts.csswg.org/css-fonts-4/#font-variation-settings-def>
impl Animate for FontVariationSettings {
#[inline]
fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> {
FontSettingTagIter::new(self, other)?
.map(|r| r.and_then(|(st, ot)| st.animate(&ot, procedure)))
.collect::<Result<Vec<ComputedVariationValue>, ()>>()
.map(|v| GenericFontSettings(v.into_boxed_slice()))
}
}
|
fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> {
FontSettingTagIter::new(self, other)?
.map(|r| r.and_then(|(st, ot)| st.compute_squared_distance(&ot)))
.sum()
}
}
impl ToAnimatedZero for FontVariationSettings {
#[inline]
fn to_animated_zero(&self) -> Result<Self, ()> {
Err(())
}
}
type ComputedVariationValue = VariationValue<Number>;
// FIXME: Could do a rename, this is only used for font variations.
struct FontSettingTagIterState<'a> {
tags: Vec<(&'a ComputedVariationValue)>,
index: usize,
prev_tag: FontTag,
}
impl<'a> FontSettingTagIterState<'a> {
fn new(tags: Vec<&'a ComputedVariationValue>) -> FontSettingTagIterState<'a> {
FontSettingTagIterState {
index: tags.len(),
tags,
prev_tag: FontTag(0),
}
}
}
/// Iterator for font-variation-settings tag lists
///
/// [CSS fonts level 4](https://drafts.csswg.org/css-fonts-4/#descdef-font-face-font-variation-settings)
/// defines the animation of font-variation-settings as follows:
///
/// Two declarations of font-feature-settings[sic] can be animated between if
/// they are "like". "Like" declarations are ones where the same set of
/// properties appear (in any order). Because succesive[sic] duplicate
/// properties are applied instead of prior duplicate properties, two
/// declarations can be "like" even if they have differing number of
/// properties. If two declarations are "like" then animation occurs pairwise
/// between corresponding values in the declarations.
///
/// In other words if we have the following lists:
///
/// "wght" 1.4, "wdth" 5, "wght" 2
/// "wdth" 8, "wght" 4, "wdth" 10
///
/// We should animate between:
///
/// "wdth" 5, "wght" 2
/// "wght" 4, "wdth" 10
///
/// This iterator supports this by sorting the two lists, then iterating them in
/// reverse, and skipping entries with repeated tag names. It will return
/// Some(Err()) if it reaches the end of one list before the other, or if the
/// tag names do not match.
///
/// For the above example, this iterator would return:
///
/// Some(Ok("wght" 2, "wght" 4))
/// Some(Ok("wdth" 5, "wdth" 10))
/// None
///
struct FontSettingTagIter<'a> {
a_state: FontSettingTagIterState<'a>,
b_state: FontSettingTagIterState<'a>,
}
impl<'a> FontSettingTagIter<'a> {
fn new(
a_settings: &'a FontVariationSettings,
b_settings: &'a FontVariationSettings,
) -> Result<FontSettingTagIter<'a>, ()> {
if a_settings.0.is_empty() || b_settings.0.is_empty() {
return Err(());
}
fn as_new_sorted_tags(tags: &[ComputedVariationValue]) -> Vec<&ComputedVariationValue> {
use std::iter::FromIterator;
let mut sorted_tags = Vec::from_iter(tags.iter());
sorted_tags.sort_by_key(|k| k.tag.0);
sorted_tags
};
Ok(FontSettingTagIter {
a_state: FontSettingTagIterState::new(as_new_sorted_tags(&a_settings.0)),
b_state: FontSettingTagIterState::new(as_new_sorted_tags(&b_settings.0)),
})
}
fn next_tag(state: &mut FontSettingTagIterState<'a>) -> Option<&'a ComputedVariationValue> {
if state.index == 0 {
return None;
}
state.index -= 1;
let tag = state.tags[state.index];
if tag.tag == state.prev_tag {
FontSettingTagIter::next_tag(state)
} else {
state.prev_tag = tag.tag;
Some(tag)
}
}
}
impl<'a> Iterator for FontSettingTagIter<'a> {
type Item = Result<(&'a ComputedVariationValue, &'a ComputedVariationValue), ()>;
fn next(
&mut self,
) -> Option<Result<(&'a ComputedVariationValue, &'a ComputedVariationValue), ()>> {
match (
FontSettingTagIter::next_tag(&mut self.a_state),
FontSettingTagIter::next_tag(&mut self.b_state),
) {
(Some(at), Some(bt)) if at.tag == bt.tag => Some(Ok((at, bt))),
(None, None) => None,
_ => Some(Err(())), // Mismatch number of unique tags or tag names.
}
}
}
|
impl ComputeSquaredDistance for FontVariationSettings {
#[inline]
|
random_line_split
|
lib.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(custom_attribute)]
#![feature(extern_types)]
#![feature(in_band_lifetimes)]
#![allow(unused_attributes)]
#![feature(libc)]
#![feature(nll)]
#![feature(quote)]
#![feature(range_contains)]
#![feature(rustc_diagnostic_macros)]
#![feature(slice_sort_by_cached_key)]
#![feature(optin_builtin_traits)]
#![feature(concat_idents)]
#![feature(link_args)]
#![feature(static_nobundle)]
use back::write::create_target_machine;
use syntax_pos::symbol::Symbol;
extern crate flate2;
#[macro_use] extern crate bitflags;
extern crate libc;
#[macro_use] extern crate rustc;
extern crate jobserver;
extern crate num_cpus;
extern crate rustc_mir;
extern crate rustc_allocator;
extern crate rustc_apfloat;
extern crate rustc_target;
#[macro_use] extern crate rustc_data_structures;
extern crate rustc_demangle;
extern crate rustc_incremental;
extern crate rustc_llvm;
extern crate rustc_platform_intrinsics as intrinsics;
extern crate rustc_codegen_utils;
extern crate rustc_codegen_ssa;
extern crate rustc_fs_util;
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
extern crate rustc_errors as errors;
extern crate serialize;
extern crate cc; // Used to locate MSVC
extern crate tempfile;
extern crate memmap;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig};
use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule};
use rustc_codegen_ssa::CompiledModule;
use errors::{FatalError, Handler};
use rustc::dep_graph::WorkProduct;
use rustc::util::time_graph::Timeline;
use syntax_pos::symbol::InternedString;
use rustc::mir::mono::Stats;
pub use llvm_util::target_features;
use std::any::Any;
use std::sync::{mpsc, Arc};
use rustc::dep_graph::DepGraph;
use rustc::middle::allocator::AllocatorKind;
use rustc::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc::session::{Session, CompileIncomplete};
use rustc::session::config::{OutputFilenames, OutputType, PrintRequest};
use rustc::ty::{self, TyCtxt};
use rustc::util::time_graph;
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
mod diagnostics;
mod back {
mod archive;
pub mod bytecode;
pub mod link;
pub mod lto;
pub mod write;
mod rpath;
pub mod wasm;
}
mod abi;
mod allocator;
mod asm;
mod attributes;
mod base;
mod builder;
mod callee;
mod common;
mod consts;
mod context;
mod debuginfo;
mod declare;
mod intrinsic;
// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; }
mod llvm_util;
mod metadata;
mod mono_item;
mod type_;
mod type_of;
mod value;
mod va_arg;
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
impl ExtraBackendMethods for LlvmCodegenBackend {
fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm {
ModuleLlvm::new(sess, mod_name)
}
fn write_metadata<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'gcx>,
metadata: &ModuleLlvm
) -> EncodedMetadata {
base::write_metadata(tcx, metadata)
}
fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
unsafe { allocator::codegen(tcx, mods, kind) }
}
fn compile_codegen_unit<'a, 'tcx: 'a>(
&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu_name: InternedString,
) -> Stats {
base::compile_codegen_unit(tcx, cgu_name)
}
fn target_machine_factory(
&self,
sess: &Session,
find_features: bool
) -> Arc<dyn Fn() ->
Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
back::write::target_machine_factory(sess, find_features)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
}
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe { llvm::LLVMRustPrintPassTimings(); }
}
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<ModuleCodegen<Self::Module>>,
timeline: &mut Timeline
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, timeline)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
timeline: &mut Timeline
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules, timeline)
}
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<(), FatalError> {
back::write::optimize(cgcx, diag_handler, module, config, timeline)
}
unsafe fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: &mut ThinModule<Self>,
timeline: &mut Timeline
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::lto::optimize_thin_module(thin, cgcx, timeline)
}
unsafe fn codegen(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, diag_handler, module, config, timeline)
}
fn prepare_thin(
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>
) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(cgcx, module)
}
fn run_lto_pass_manager(
cgcx: &CodegenContext<Self>,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool
) {
back::lto::run_pass_manager(cgcx, module, config, thin)
}
}
unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
unsafe impl Sync for LlvmCodegenBackend {}
impl LlvmCodegenBackend {
pub fn new() -> Box<dyn CodegenBackend> {
box LlvmCodegenBackend(())
}
}
impl CodegenBackend for LlvmCodegenBackend {
fn
|
(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
fn print(&self, req: PrintRequest, sess: &Session) {
match req {
PrintRequest::RelocationModels => {
println!("Available relocation models:");
for &(name, _) in back::write::RELOC_MODEL_ARGS.iter() {
println!(" {}", name);
}
println!("");
}
PrintRequest::CodeModels => {
println!("Available code models:");
for &(name, _) in back::write::CODE_GEN_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
PrintRequest::TlsModels => {
println!("Available TLS models:");
for &(name, _) in back::write::TLS_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
req => llvm_util::print(req, sess),
}
}
fn print_passes(&self) {
llvm_util::print_passes();
}
fn print_version(&self) {
llvm_util::print_version();
}
fn diagnostics(&self) -> &[(&'static str, &'static str)] {
&DIAGNOSTICS
}
fn target_features(&self, sess: &Session) -> Vec<Symbol> {
target_features(sess)
}
fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
box metadata::LlvmMetadataLoader
}
fn provide(&self, providers: &mut ty::query::Providers) {
rustc_codegen_utils::symbol_names::provide(providers);
rustc_codegen_ssa::back::symbol_export::provide(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
rustc_codegen_ssa::back::symbol_export::provide_extern(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide_extern(providers);
}
fn codegen_crate<'b, 'tcx>(
&self,
tcx: TyCtxt<'b, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> Box<dyn Any> {
box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx)
}
fn join_codegen_and_link(
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
dep_graph: &DepGraph,
outputs: &OutputFilenames,
) -> Result<(), CompileIncomplete>{
use rustc::util::common::time;
let (codegen_results, work_products) =
ongoing_codegen.downcast::
<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
if sess.opts.debugging_opts.incremental_info {
rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
}
time(sess,
"serialize work products",
move || rustc_incremental::save_work_product_index(sess, &dep_graph, work_products));
sess.compile_status()?;
if!sess.opts.output_types.keys().any(|&i| i == OutputType::Exe ||
i == OutputType::Metadata) {
return Ok(());
}
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
sess.profiler(|p| p.start_activity(ProfileCategory::Linking));
time(sess, "linking", || {
back::link::link_binary(sess, &codegen_results,
outputs, &codegen_results.crate_name.as_str());
});
sess.profiler(|p| p.end_activity(ProfileCategory::Linking));
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
Ok(())
}
}
/// This is the entrypoint for a hot plugged rustc_codegen_llvm
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
LlvmCodegenBackend::new()
}
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
tm: &'static mut llvm::TargetMachine,
}
unsafe impl Send for ModuleLlvm { }
unsafe impl Sync for ModuleLlvm { }
impl ModuleLlvm {
fn new(sess: &Session, mod_name: &str) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(sess.fewer_names());
let llmod_raw = context::create_module(sess, llcx, mod_name) as *const _;
ModuleLlvm {
llmod_raw,
llcx,
tm: create_target_machine(sess, false),
}
}
}
fn llmod(&self) -> &llvm::Module {
unsafe {
&*self.llmod_raw
}
}
}
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
}
}
}
__build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }
|
init
|
identifier_name
|
lib.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(custom_attribute)]
#![feature(extern_types)]
#![feature(in_band_lifetimes)]
#![allow(unused_attributes)]
#![feature(libc)]
#![feature(nll)]
#![feature(quote)]
#![feature(range_contains)]
#![feature(rustc_diagnostic_macros)]
#![feature(slice_sort_by_cached_key)]
#![feature(optin_builtin_traits)]
#![feature(concat_idents)]
#![feature(link_args)]
#![feature(static_nobundle)]
use back::write::create_target_machine;
use syntax_pos::symbol::Symbol;
extern crate flate2;
#[macro_use] extern crate bitflags;
extern crate libc;
#[macro_use] extern crate rustc;
extern crate jobserver;
extern crate num_cpus;
extern crate rustc_mir;
extern crate rustc_allocator;
extern crate rustc_apfloat;
extern crate rustc_target;
#[macro_use] extern crate rustc_data_structures;
extern crate rustc_demangle;
extern crate rustc_incremental;
extern crate rustc_llvm;
extern crate rustc_platform_intrinsics as intrinsics;
extern crate rustc_codegen_utils;
extern crate rustc_codegen_ssa;
extern crate rustc_fs_util;
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
extern crate rustc_errors as errors;
extern crate serialize;
extern crate cc; // Used to locate MSVC
extern crate tempfile;
extern crate memmap;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig};
use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule};
use rustc_codegen_ssa::CompiledModule;
use errors::{FatalError, Handler};
use rustc::dep_graph::WorkProduct;
use rustc::util::time_graph::Timeline;
use syntax_pos::symbol::InternedString;
use rustc::mir::mono::Stats;
pub use llvm_util::target_features;
use std::any::Any;
use std::sync::{mpsc, Arc};
use rustc::dep_graph::DepGraph;
use rustc::middle::allocator::AllocatorKind;
use rustc::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc::session::{Session, CompileIncomplete};
use rustc::session::config::{OutputFilenames, OutputType, PrintRequest};
use rustc::ty::{self, TyCtxt};
use rustc::util::time_graph;
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
mod diagnostics;
mod back {
mod archive;
pub mod bytecode;
pub mod link;
pub mod lto;
pub mod write;
mod rpath;
pub mod wasm;
}
mod abi;
mod allocator;
mod asm;
mod attributes;
mod base;
mod builder;
mod callee;
mod common;
mod consts;
mod context;
mod debuginfo;
mod declare;
mod intrinsic;
// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; }
mod llvm_util;
mod metadata;
mod mono_item;
mod type_;
mod type_of;
mod value;
mod va_arg;
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
impl ExtraBackendMethods for LlvmCodegenBackend {
fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm {
ModuleLlvm::new(sess, mod_name)
}
fn write_metadata<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'gcx>,
metadata: &ModuleLlvm
) -> EncodedMetadata {
base::write_metadata(tcx, metadata)
}
fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
unsafe { allocator::codegen(tcx, mods, kind) }
}
fn compile_codegen_unit<'a, 'tcx: 'a>(
&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu_name: InternedString,
) -> Stats {
base::compile_codegen_unit(tcx, cgu_name)
}
fn target_machine_factory(
&self,
sess: &Session,
find_features: bool
) -> Arc<dyn Fn() ->
Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
back::write::target_machine_factory(sess, find_features)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
}
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe { llvm::LLVMRustPrintPassTimings(); }
}
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<ModuleCodegen<Self::Module>>,
timeline: &mut Timeline
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, timeline)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
timeline: &mut Timeline
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>
|
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<(), FatalError> {
back::write::optimize(cgcx, diag_handler, module, config, timeline)
}
unsafe fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: &mut ThinModule<Self>,
timeline: &mut Timeline
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::lto::optimize_thin_module(thin, cgcx, timeline)
}
unsafe fn codegen(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, diag_handler, module, config, timeline)
}
fn prepare_thin(
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>
) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(cgcx, module)
}
fn run_lto_pass_manager(
cgcx: &CodegenContext<Self>,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool
) {
back::lto::run_pass_manager(cgcx, module, config, thin)
}
}
unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
unsafe impl Sync for LlvmCodegenBackend {}
impl LlvmCodegenBackend {
pub fn new() -> Box<dyn CodegenBackend> {
box LlvmCodegenBackend(())
}
}
impl CodegenBackend for LlvmCodegenBackend {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
fn print(&self, req: PrintRequest, sess: &Session) {
match req {
PrintRequest::RelocationModels => {
println!("Available relocation models:");
for &(name, _) in back::write::RELOC_MODEL_ARGS.iter() {
println!(" {}", name);
}
println!("");
}
PrintRequest::CodeModels => {
println!("Available code models:");
for &(name, _) in back::write::CODE_GEN_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
PrintRequest::TlsModels => {
println!("Available TLS models:");
for &(name, _) in back::write::TLS_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
req => llvm_util::print(req, sess),
}
}
fn print_passes(&self) {
llvm_util::print_passes();
}
fn print_version(&self) {
llvm_util::print_version();
}
fn diagnostics(&self) -> &[(&'static str, &'static str)] {
&DIAGNOSTICS
}
fn target_features(&self, sess: &Session) -> Vec<Symbol> {
target_features(sess)
}
fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
box metadata::LlvmMetadataLoader
}
fn provide(&self, providers: &mut ty::query::Providers) {
rustc_codegen_utils::symbol_names::provide(providers);
rustc_codegen_ssa::back::symbol_export::provide(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
rustc_codegen_ssa::back::symbol_export::provide_extern(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide_extern(providers);
}
fn codegen_crate<'b, 'tcx>(
&self,
tcx: TyCtxt<'b, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> Box<dyn Any> {
box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx)
}
fn join_codegen_and_link(
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
dep_graph: &DepGraph,
outputs: &OutputFilenames,
) -> Result<(), CompileIncomplete>{
use rustc::util::common::time;
let (codegen_results, work_products) =
ongoing_codegen.downcast::
<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
if sess.opts.debugging_opts.incremental_info {
rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
}
time(sess,
"serialize work products",
move || rustc_incremental::save_work_product_index(sess, &dep_graph, work_products));
sess.compile_status()?;
if!sess.opts.output_types.keys().any(|&i| i == OutputType::Exe ||
i == OutputType::Metadata) {
return Ok(());
}
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
sess.profiler(|p| p.start_activity(ProfileCategory::Linking));
time(sess, "linking", || {
back::link::link_binary(sess, &codegen_results,
outputs, &codegen_results.crate_name.as_str());
});
sess.profiler(|p| p.end_activity(ProfileCategory::Linking));
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
Ok(())
}
}
/// This is the entrypoint for a hot plugged rustc_codegen_llvm
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
LlvmCodegenBackend::new()
}
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
tm: &'static mut llvm::TargetMachine,
}
unsafe impl Send for ModuleLlvm { }
unsafe impl Sync for ModuleLlvm { }
impl ModuleLlvm {
fn new(sess: &Session, mod_name: &str) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(sess.fewer_names());
let llmod_raw = context::create_module(sess, llcx, mod_name) as *const _;
ModuleLlvm {
llmod_raw,
llcx,
tm: create_target_machine(sess, false),
}
}
}
fn llmod(&self) -> &llvm::Module {
unsafe {
&*self.llmod_raw
}
}
}
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
}
}
}
__build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }
|
{
back::lto::run_thin(cgcx, modules, cached_modules, timeline)
}
|
identifier_body
|
lib.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(custom_attribute)]
#![feature(extern_types)]
#![feature(in_band_lifetimes)]
#![allow(unused_attributes)]
#![feature(libc)]
#![feature(nll)]
#![feature(quote)]
#![feature(range_contains)]
#![feature(rustc_diagnostic_macros)]
#![feature(slice_sort_by_cached_key)]
#![feature(optin_builtin_traits)]
#![feature(concat_idents)]
#![feature(link_args)]
#![feature(static_nobundle)]
use back::write::create_target_machine;
use syntax_pos::symbol::Symbol;
extern crate flate2;
#[macro_use] extern crate bitflags;
extern crate libc;
#[macro_use] extern crate rustc;
extern crate jobserver;
extern crate num_cpus;
extern crate rustc_mir;
extern crate rustc_allocator;
extern crate rustc_apfloat;
extern crate rustc_target;
#[macro_use] extern crate rustc_data_structures;
extern crate rustc_demangle;
extern crate rustc_incremental;
extern crate rustc_llvm;
extern crate rustc_platform_intrinsics as intrinsics;
extern crate rustc_codegen_utils;
extern crate rustc_codegen_ssa;
extern crate rustc_fs_util;
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
extern crate rustc_errors as errors;
extern crate serialize;
extern crate cc; // Used to locate MSVC
extern crate tempfile;
extern crate memmap;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig};
use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule};
use rustc_codegen_ssa::CompiledModule;
use errors::{FatalError, Handler};
use rustc::dep_graph::WorkProduct;
use rustc::util::time_graph::Timeline;
use syntax_pos::symbol::InternedString;
use rustc::mir::mono::Stats;
pub use llvm_util::target_features;
use std::any::Any;
use std::sync::{mpsc, Arc};
use rustc::dep_graph::DepGraph;
use rustc::middle::allocator::AllocatorKind;
use rustc::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc::session::{Session, CompileIncomplete};
use rustc::session::config::{OutputFilenames, OutputType, PrintRequest};
use rustc::ty::{self, TyCtxt};
use rustc::util::time_graph;
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
mod diagnostics;
mod back {
mod archive;
pub mod bytecode;
pub mod link;
pub mod lto;
pub mod write;
mod rpath;
pub mod wasm;
}
mod abi;
mod allocator;
mod asm;
mod attributes;
mod base;
mod builder;
mod callee;
mod common;
mod consts;
mod context;
mod debuginfo;
mod declare;
mod intrinsic;
// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; }
mod llvm_util;
mod metadata;
mod mono_item;
mod type_;
mod type_of;
mod value;
mod va_arg;
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
impl ExtraBackendMethods for LlvmCodegenBackend {
fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm {
ModuleLlvm::new(sess, mod_name)
}
fn write_metadata<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'gcx>,
metadata: &ModuleLlvm
) -> EncodedMetadata {
base::write_metadata(tcx, metadata)
}
fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
unsafe { allocator::codegen(tcx, mods, kind) }
}
fn compile_codegen_unit<'a, 'tcx: 'a>(
&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu_name: InternedString,
) -> Stats {
base::compile_codegen_unit(tcx, cgu_name)
}
fn target_machine_factory(
&self,
sess: &Session,
find_features: bool
) -> Arc<dyn Fn() ->
Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
back::write::target_machine_factory(sess, find_features)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
}
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe { llvm::LLVMRustPrintPassTimings(); }
}
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<ModuleCodegen<Self::Module>>,
timeline: &mut Timeline
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, timeline)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
timeline: &mut Timeline
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules, timeline)
}
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<(), FatalError> {
back::write::optimize(cgcx, diag_handler, module, config, timeline)
}
unsafe fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: &mut ThinModule<Self>,
timeline: &mut Timeline
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::lto::optimize_thin_module(thin, cgcx, timeline)
}
unsafe fn codegen(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, diag_handler, module, config, timeline)
}
fn prepare_thin(
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>
) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(cgcx, module)
}
fn run_lto_pass_manager(
cgcx: &CodegenContext<Self>,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool
) {
back::lto::run_pass_manager(cgcx, module, config, thin)
}
}
unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
unsafe impl Sync for LlvmCodegenBackend {}
impl LlvmCodegenBackend {
pub fn new() -> Box<dyn CodegenBackend> {
box LlvmCodegenBackend(())
}
}
impl CodegenBackend for LlvmCodegenBackend {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
fn print(&self, req: PrintRequest, sess: &Session) {
match req {
PrintRequest::RelocationModels => {
println!("Available relocation models:");
for &(name, _) in back::write::RELOC_MODEL_ARGS.iter() {
println!(" {}", name);
}
println!("");
}
PrintRequest::CodeModels => {
println!("Available code models:");
for &(name, _) in back::write::CODE_GEN_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
PrintRequest::TlsModels => {
println!("Available TLS models:");
for &(name, _) in back::write::TLS_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
req => llvm_util::print(req, sess),
}
}
fn print_passes(&self) {
llvm_util::print_passes();
}
fn print_version(&self) {
llvm_util::print_version();
}
fn diagnostics(&self) -> &[(&'static str, &'static str)] {
&DIAGNOSTICS
}
fn target_features(&self, sess: &Session) -> Vec<Symbol> {
target_features(sess)
}
fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
box metadata::LlvmMetadataLoader
}
fn provide(&self, providers: &mut ty::query::Providers) {
rustc_codegen_utils::symbol_names::provide(providers);
rustc_codegen_ssa::back::symbol_export::provide(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
rustc_codegen_ssa::back::symbol_export::provide_extern(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide_extern(providers);
}
fn codegen_crate<'b, 'tcx>(
&self,
tcx: TyCtxt<'b, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> Box<dyn Any> {
box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx)
}
fn join_codegen_and_link(
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
dep_graph: &DepGraph,
outputs: &OutputFilenames,
) -> Result<(), CompileIncomplete>{
use rustc::util::common::time;
let (codegen_results, work_products) =
ongoing_codegen.downcast::
<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
if sess.opts.debugging_opts.incremental_info {
rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
}
time(sess,
"serialize work products",
|
if!sess.opts.output_types.keys().any(|&i| i == OutputType::Exe ||
i == OutputType::Metadata) {
return Ok(());
}
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
sess.profiler(|p| p.start_activity(ProfileCategory::Linking));
time(sess, "linking", || {
back::link::link_binary(sess, &codegen_results,
outputs, &codegen_results.crate_name.as_str());
});
sess.profiler(|p| p.end_activity(ProfileCategory::Linking));
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
Ok(())
}
}
/// This is the entrypoint for a hot plugged rustc_codegen_llvm
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
LlvmCodegenBackend::new()
}
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
tm: &'static mut llvm::TargetMachine,
}
unsafe impl Send for ModuleLlvm { }
unsafe impl Sync for ModuleLlvm { }
impl ModuleLlvm {
fn new(sess: &Session, mod_name: &str) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(sess.fewer_names());
let llmod_raw = context::create_module(sess, llcx, mod_name) as *const _;
ModuleLlvm {
llmod_raw,
llcx,
tm: create_target_machine(sess, false),
}
}
}
fn llmod(&self) -> &llvm::Module {
unsafe {
&*self.llmod_raw
}
}
}
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
}
}
}
__build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }
|
move || rustc_incremental::save_work_product_index(sess, &dep_graph, work_products));
sess.compile_status()?;
|
random_line_split
|
lib.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(box_patterns)]
#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(custom_attribute)]
#![feature(extern_types)]
#![feature(in_band_lifetimes)]
#![allow(unused_attributes)]
#![feature(libc)]
#![feature(nll)]
#![feature(quote)]
#![feature(range_contains)]
#![feature(rustc_diagnostic_macros)]
#![feature(slice_sort_by_cached_key)]
#![feature(optin_builtin_traits)]
#![feature(concat_idents)]
#![feature(link_args)]
#![feature(static_nobundle)]
use back::write::create_target_machine;
use syntax_pos::symbol::Symbol;
extern crate flate2;
#[macro_use] extern crate bitflags;
extern crate libc;
#[macro_use] extern crate rustc;
extern crate jobserver;
extern crate num_cpus;
extern crate rustc_mir;
extern crate rustc_allocator;
extern crate rustc_apfloat;
extern crate rustc_target;
#[macro_use] extern crate rustc_data_structures;
extern crate rustc_demangle;
extern crate rustc_incremental;
extern crate rustc_llvm;
extern crate rustc_platform_intrinsics as intrinsics;
extern crate rustc_codegen_utils;
extern crate rustc_codegen_ssa;
extern crate rustc_fs_util;
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
extern crate rustc_errors as errors;
extern crate serialize;
extern crate cc; // Used to locate MSVC
extern crate tempfile;
extern crate memmap;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig};
use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule};
use rustc_codegen_ssa::CompiledModule;
use errors::{FatalError, Handler};
use rustc::dep_graph::WorkProduct;
use rustc::util::time_graph::Timeline;
use syntax_pos::symbol::InternedString;
use rustc::mir::mono::Stats;
pub use llvm_util::target_features;
use std::any::Any;
use std::sync::{mpsc, Arc};
use rustc::dep_graph::DepGraph;
use rustc::middle::allocator::AllocatorKind;
use rustc::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc::session::{Session, CompileIncomplete};
use rustc::session::config::{OutputFilenames, OutputType, PrintRequest};
use rustc::ty::{self, TyCtxt};
use rustc::util::time_graph;
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_utils::codegen_backend::CodegenBackend;
mod diagnostics;
mod back {
mod archive;
pub mod bytecode;
pub mod link;
pub mod lto;
pub mod write;
mod rpath;
pub mod wasm;
}
mod abi;
mod allocator;
mod asm;
mod attributes;
mod base;
mod builder;
mod callee;
mod common;
mod consts;
mod context;
mod debuginfo;
mod declare;
mod intrinsic;
// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; }
mod llvm_util;
mod metadata;
mod mono_item;
mod type_;
mod type_of;
mod value;
mod va_arg;
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
impl ExtraBackendMethods for LlvmCodegenBackend {
fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm {
ModuleLlvm::new(sess, mod_name)
}
fn write_metadata<'b, 'gcx>(
&self,
tcx: TyCtxt<'b, 'gcx, 'gcx>,
metadata: &ModuleLlvm
) -> EncodedMetadata {
base::write_metadata(tcx, metadata)
}
fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) {
unsafe { allocator::codegen(tcx, mods, kind) }
}
fn compile_codegen_unit<'a, 'tcx: 'a>(
&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu_name: InternedString,
) -> Stats {
base::compile_codegen_unit(tcx, cgu_name)
}
fn target_machine_factory(
&self,
sess: &Session,
find_features: bool
) -> Arc<dyn Fn() ->
Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
back::write::target_machine_factory(sess, find_features)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
}
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe { llvm::LLVMRustPrintPassTimings(); }
}
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<ModuleCodegen<Self::Module>>,
timeline: &mut Timeline
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, timeline)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
timeline: &mut Timeline
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules, timeline)
}
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<(), FatalError> {
back::write::optimize(cgcx, diag_handler, module, config, timeline)
}
unsafe fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: &mut ThinModule<Self>,
timeline: &mut Timeline
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::lto::optimize_thin_module(thin, cgcx, timeline)
}
unsafe fn codegen(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
timeline: &mut Timeline
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, diag_handler, module, config, timeline)
}
fn prepare_thin(
cgcx: &CodegenContext<Self>,
module: ModuleCodegen<Self::Module>
) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(cgcx, module)
}
fn run_lto_pass_manager(
cgcx: &CodegenContext<Self>,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool
) {
back::lto::run_pass_manager(cgcx, module, config, thin)
}
}
unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
unsafe impl Sync for LlvmCodegenBackend {}
impl LlvmCodegenBackend {
pub fn new() -> Box<dyn CodegenBackend> {
box LlvmCodegenBackend(())
}
}
impl CodegenBackend for LlvmCodegenBackend {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
fn print(&self, req: PrintRequest, sess: &Session) {
match req {
PrintRequest::RelocationModels =>
|
PrintRequest::CodeModels => {
println!("Available code models:");
for &(name, _) in back::write::CODE_GEN_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
PrintRequest::TlsModels => {
println!("Available TLS models:");
for &(name, _) in back::write::TLS_MODEL_ARGS.iter(){
println!(" {}", name);
}
println!("");
}
req => llvm_util::print(req, sess),
}
}
fn print_passes(&self) {
llvm_util::print_passes();
}
fn print_version(&self) {
llvm_util::print_version();
}
fn diagnostics(&self) -> &[(&'static str, &'static str)] {
&DIAGNOSTICS
}
fn target_features(&self, sess: &Session) -> Vec<Symbol> {
target_features(sess)
}
fn metadata_loader(&self) -> Box<dyn MetadataLoader + Sync> {
box metadata::LlvmMetadataLoader
}
fn provide(&self, providers: &mut ty::query::Providers) {
rustc_codegen_utils::symbol_names::provide(providers);
rustc_codegen_ssa::back::symbol_export::provide(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
rustc_codegen_ssa::back::symbol_export::provide_extern(providers);
rustc_codegen_ssa::base::provide_both(providers);
attributes::provide_extern(providers);
}
fn codegen_crate<'b, 'tcx>(
&self,
tcx: TyCtxt<'b, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> Box<dyn Any> {
box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx)
}
fn join_codegen_and_link(
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
dep_graph: &DepGraph,
outputs: &OutputFilenames,
) -> Result<(), CompileIncomplete>{
use rustc::util::common::time;
let (codegen_results, work_products) =
ongoing_codegen.downcast::
<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
if sess.opts.debugging_opts.incremental_info {
rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
}
time(sess,
"serialize work products",
move || rustc_incremental::save_work_product_index(sess, &dep_graph, work_products));
sess.compile_status()?;
if!sess.opts.output_types.keys().any(|&i| i == OutputType::Exe ||
i == OutputType::Metadata) {
return Ok(());
}
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
sess.profiler(|p| p.start_activity(ProfileCategory::Linking));
time(sess, "linking", || {
back::link::link_binary(sess, &codegen_results,
outputs, &codegen_results.crate_name.as_str());
});
sess.profiler(|p| p.end_activity(ProfileCategory::Linking));
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
Ok(())
}
}
/// This is the entrypoint for a hot plugged rustc_codegen_llvm
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
LlvmCodegenBackend::new()
}
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
tm: &'static mut llvm::TargetMachine,
}
unsafe impl Send for ModuleLlvm { }
unsafe impl Sync for ModuleLlvm { }
impl ModuleLlvm {
fn new(sess: &Session, mod_name: &str) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(sess.fewer_names());
let llmod_raw = context::create_module(sess, llcx, mod_name) as *const _;
ModuleLlvm {
llmod_raw,
llcx,
tm: create_target_machine(sess, false),
}
}
}
fn llmod(&self) -> &llvm::Module {
unsafe {
&*self.llmod_raw
}
}
}
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
}
}
}
__build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }
|
{
println!("Available relocation models:");
for &(name, _) in back::write::RELOC_MODEL_ARGS.iter() {
println!(" {}", name);
}
println!("");
}
|
conditional_block
|
net.rs
|
use std::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use libc::{size_t, malloc, sockaddr, sockaddr_in, sockaddr_in6, in_addr, in6_addr, c_int, c_char, socklen_t, AF_INET, AF_INET6};
use std::mem::{size_of, transmute};
use std::string;
/*
const char *
inet_ntop(int af, const void * restrict src, char * restrict dst,
socklen_t size);
*/
extern {
fn getnameinfo(sa: *const sockaddr, salen: socklen_t,
host: *mut c_char, hostlen: socklen_t,
serv: *mut c_char, servlen: socklen_t,
flags: c_int) -> c_int;
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_family: AF_INET as u16,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in
|
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_family: AF_INET6 as u16,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_len: size_of::<sockaddr_in6>() as u8,
sin6_family: AF_INET6 as u8,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
//static NI_NUMERICHOST: c_int = 0x00000002;
//static NI_NAMEREQD: c_int = 0x00000004;
/// Returns the hostname for an ip address
/// TODO: make this safe, see manpage
pub fn get_nameinfo(peer_socket: SocketAddr) -> String {
let SocketAddr { ip, port } = peer_socket;
let buf: *mut i8;
let _ = unsafe {
let hostlen = 80;
buf = transmute(malloc(hostlen as size_t + 1));
match ip {
Ipv4Addr(a, b, c, d) => {
let addr = in_addr {
s_addr: a as u32 << 24
| b as u32 << 16
| c as u32 << 8
| d as u32
};
let sockaddr = new_sockaddr_in(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
Ipv6Addr(a, b, c, d, e, f, g, h) => {
let addr = in6_addr {
s6_addr: [a, b, c, d, e, f, g, h]
};
let sockaddr = new_sockaddr_in6(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in6>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
}
};
unsafe {string::raw::from_buf(transmute(buf))}
}
|
{
sockaddr_in {
sin_len: size_of::<sockaddr_in>() as u8,
sin_family: AF_INET as u8,
sin_port: port,
sin_addr: addr,
sin_zero: [0, ..8]
}
}
|
identifier_body
|
net.rs
|
use std::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use libc::{size_t, malloc, sockaddr, sockaddr_in, sockaddr_in6, in_addr, in6_addr, c_int, c_char, socklen_t, AF_INET, AF_INET6};
use std::mem::{size_of, transmute};
use std::string;
/*
const char *
inet_ntop(int af, const void * restrict src, char * restrict dst,
socklen_t size);
*/
extern {
fn getnameinfo(sa: *const sockaddr, salen: socklen_t,
host: *mut c_char, hostlen: socklen_t,
serv: *mut c_char, servlen: socklen_t,
flags: c_int) -> c_int;
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_family: AF_INET as u16,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_len: size_of::<sockaddr_in>() as u8,
sin_family: AF_INET as u8,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn
|
(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_family: AF_INET6 as u16,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_len: size_of::<sockaddr_in6>() as u8,
sin6_family: AF_INET6 as u8,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
//static NI_NUMERICHOST: c_int = 0x00000002;
//static NI_NAMEREQD: c_int = 0x00000004;
/// Returns the hostname for an ip address
/// TODO: make this safe, see manpage
pub fn get_nameinfo(peer_socket: SocketAddr) -> String {
let SocketAddr { ip, port } = peer_socket;
let buf: *mut i8;
let _ = unsafe {
let hostlen = 80;
buf = transmute(malloc(hostlen as size_t + 1));
match ip {
Ipv4Addr(a, b, c, d) => {
let addr = in_addr {
s_addr: a as u32 << 24
| b as u32 << 16
| c as u32 << 8
| d as u32
};
let sockaddr = new_sockaddr_in(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
Ipv6Addr(a, b, c, d, e, f, g, h) => {
let addr = in6_addr {
s6_addr: [a, b, c, d, e, f, g, h]
};
let sockaddr = new_sockaddr_in6(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in6>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
}
};
unsafe {string::raw::from_buf(transmute(buf))}
}
|
new_sockaddr_in6
|
identifier_name
|
net.rs
|
use std::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use libc::{size_t, malloc, sockaddr, sockaddr_in, sockaddr_in6, in_addr, in6_addr, c_int, c_char, socklen_t, AF_INET, AF_INET6};
use std::mem::{size_of, transmute};
use std::string;
/*
const char *
inet_ntop(int af, const void * restrict src, char * restrict dst,
socklen_t size);
*/
extern {
fn getnameinfo(sa: *const sockaddr, salen: socklen_t,
host: *mut c_char, hostlen: socklen_t,
serv: *mut c_char, servlen: socklen_t,
flags: c_int) -> c_int;
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_family: AF_INET as u16,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_len: size_of::<sockaddr_in>() as u8,
sin_family: AF_INET as u8,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_family: AF_INET6 as u16,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_len: size_of::<sockaddr_in6>() as u8,
sin6_family: AF_INET6 as u8,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
//static NI_NUMERICHOST: c_int = 0x00000002;
//static NI_NAMEREQD: c_int = 0x00000004;
/// Returns the hostname for an ip address
/// TODO: make this safe, see manpage
pub fn get_nameinfo(peer_socket: SocketAddr) -> String {
let SocketAddr { ip, port } = peer_socket;
let buf: *mut i8;
let _ = unsafe {
let hostlen = 80;
buf = transmute(malloc(hostlen as size_t + 1));
match ip {
Ipv4Addr(a, b, c, d) => {
let addr = in_addr {
s_addr: a as u32 << 24
| b as u32 << 16
| c as u32 << 8
| d as u32
};
let sockaddr = new_sockaddr_in(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
Ipv6Addr(a, b, c, d, e, f, g, h) =>
|
,
}
};
unsafe {string::raw::from_buf(transmute(buf))}
}
|
{
let addr = in6_addr {
s6_addr: [a, b, c, d, e, f, g, h]
};
let sockaddr = new_sockaddr_in6(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in6>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
}
|
conditional_block
|
net.rs
|
use std::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use libc::{size_t, malloc, sockaddr, sockaddr_in, sockaddr_in6, in_addr, in6_addr, c_int, c_char, socklen_t, AF_INET, AF_INET6};
use std::mem::{size_of, transmute};
use std::string;
/*
const char *
inet_ntop(int af, const void * restrict src, char * restrict dst,
socklen_t size);
*/
extern {
fn getnameinfo(sa: *const sockaddr, salen: socklen_t,
host: *mut c_char, hostlen: socklen_t,
serv: *mut c_char, servlen: socklen_t,
flags: c_int) -> c_int;
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_family: AF_INET as u16,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in(port: u16, addr: in_addr) -> sockaddr_in {
sockaddr_in {
sin_len: size_of::<sockaddr_in>() as u8,
sin_family: AF_INET as u8,
sin_port: port,
sin_addr: addr,
sin_zero: [0,..8]
}
}
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_family: AF_INET6 as u16,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
#[cfg(target_os = "macos")]
fn new_sockaddr_in6(port: u16, addr: in6_addr) -> sockaddr_in6 {
sockaddr_in6 {
sin6_len: size_of::<sockaddr_in6>() as u8,
sin6_family: AF_INET6 as u8,
sin6_port: port,
sin6_flowinfo: 0,
sin6_addr: addr,
sin6_scope_id: 0,
}
}
//static NI_NUMERICHOST: c_int = 0x00000002;
//static NI_NAMEREQD: c_int = 0x00000004;
/// Returns the hostname for an ip address
/// TODO: make this safe, see manpage
|
let hostlen = 80;
buf = transmute(malloc(hostlen as size_t + 1));
match ip {
Ipv4Addr(a, b, c, d) => {
let addr = in_addr {
s_addr: a as u32 << 24
| b as u32 << 16
| c as u32 << 8
| d as u32
};
let sockaddr = new_sockaddr_in(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
Ipv6Addr(a, b, c, d, e, f, g, h) => {
let addr = in6_addr {
s6_addr: [a, b, c, d, e, f, g, h]
};
let sockaddr = new_sockaddr_in6(port, addr);
getnameinfo(transmute(&sockaddr), size_of::<sockaddr_in6>() as socklen_t,
buf, hostlen, transmute(0u), 0, 0)
},
}
};
unsafe {string::raw::from_buf(transmute(buf))}
}
|
pub fn get_nameinfo(peer_socket: SocketAddr) -> String {
let SocketAddr { ip, port } = peer_socket;
let buf: *mut i8;
let _ = unsafe {
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.